Files
storkit/src-tauri/src/commands/chat.rs
Dave 64d1b788be Story 18: Token-by-token streaming responses
- Backend: Added OllamaProvider::chat_stream() with newline-delimited JSON parsing
- Backend: Emit chat:token events for each token received from Ollama
- Backend: Added futures dependency and stream feature for reqwest
- Frontend: Added streamingContent state and chat:token event listener
- Frontend: Real-time token display with auto-scroll
- Frontend: Markdown and syntax highlighting support for streaming content
- Fixed all TypeScript errors (tsc --noEmit)
- Fixed all Biome warnings and errors
- Fixed all Clippy warnings
- Added comprehensive code quality documentation
- Added tsc --noEmit to verification checklist

Tested and verified:
- Tokens stream in real-time
- Auto-scroll works during streaming
- Tool calls interrupt streaming correctly
- Multi-turn conversations work
- Smooth performance with no lag
2025-12-27 16:50:18 +00:00

292 lines
11 KiB
Rust

use crate::commands::{fs, search, shell};
use crate::llm::ollama::OllamaProvider;
use crate::llm::prompts::SYSTEM_PROMPT;
use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition};
use crate::state::SessionState;
use serde::Deserialize;
use serde_json::json;
use tauri::{AppHandle, Emitter, State};
/// Provider selection and options sent from the frontend with each chat request.
#[derive(Deserialize)]
pub struct ProviderConfig {
// Provider identifier; only "ollama" is accepted by chat().
pub provider: String, // "ollama"
// Model name forwarded verbatim to the provider's streaming chat call.
pub model: String,
// Provider endpoint; chat() falls back to http://localhost:11434 when absent.
pub base_url: Option<String>,
// Whether tool definitions are offered to the model; chat() defaults this to true.
pub enable_tools: Option<bool>,
}
/// Upper bound on LLM round-trips per chat() invocation — guards against
/// runaway tool-call loops.
const MAX_TURNS: usize = 30;
/// Tauri command: list the model names available from an Ollama server.
///
/// When `base_url` is `None`, the default local Ollama endpoint is used.
#[tauri::command]
pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>, String> {
    let endpoint = match base_url {
        Some(url) => url,
        None => "http://localhost:11434".to_string(),
    };
    OllamaProvider::get_models(&endpoint).await
}
/// Tauri command: run one user → assistant exchange as an agent loop, executing
/// any tool calls the model requests until it produces a plain text reply or
/// `MAX_TURNS` LLM calls have been made.
///
/// After every assistant/tool message, a `chat:update` event is emitted with the
/// conversation history minus the two injected system prompts so the UI can
/// render progress. Returns only the messages generated during this call.
///
/// # Errors
/// Returns `Err` for an unsupported provider, an LLM/stream failure, an event
/// emission failure, or when the turn budget is exhausted.
#[tauri::command]
pub async fn chat(
    app: AppHandle,
    messages: Vec<Message>,
    config: ProviderConfig,
    state: State<'_, SessionState>,
) -> Result<Vec<Message>, String> {
    // 1. Validate the provider up front, before any other setup work.
    if config.provider != "ollama" {
        return Err(format!("Unsupported provider: {}", config.provider));
    }
    // `base_url` is only needed here, so move it out of `config` (no clone);
    // the remaining fields are still accessible by borrow afterwards.
    let base_url = config
        .base_url
        .unwrap_or_else(|| "http://localhost:11434".to_string());
    let provider = OllamaProvider::new(base_url);

    // 2. Tool definitions; an empty slice disables tool use entirely.
    let tool_defs = get_tool_definitions();
    let tools = if config.enable_tools.unwrap_or(true) {
        tool_defs.as_slice()
    } else {
        &[]
    };

    // 3. Agent loop. `messages` is owned by this function, so extend it in
    // place instead of cloning the whole Vec.
    let mut current_history = messages;
    // Inject the system prompt plus a behavioral reminder at the front; both
    // are stripped back out (indices 0 and 1) before emitting to the UI.
    current_history.insert(0, system_message(SYSTEM_PROMPT));
    current_history.insert(
        1,
        system_message("REMINDER: Distinguish between showing examples (use code blocks in chat) vs implementing changes (use write_file tool). Keywords like 'show me', 'example', 'how does' = chat response. Keywords like 'create', 'add', 'implement', 'fix' = use tools."),
    );

    let mut new_messages: Vec<Message> = Vec::new();
    // At most MAX_TURNS LLM calls, matching the original turn-counter guard.
    for _ in 0..MAX_TURNS {
        // Call the LLM with streaming; individual tokens are emitted as events
        // inside chat_stream, and the aggregated response is returned here.
        let response = provider
            .chat_stream(&app, &config.model, &current_history, tools)
            .await
            .map_err(|e| format!("LLM Error: {}", e))?;

        if let Some(tool_calls) = response.tool_calls {
            // The assistant wants to run tools.
            let assistant_msg = Message {
                role: Role::Assistant,
                content: response.content.unwrap_or_default(),
                tool_calls: Some(tool_calls.clone()),
                tool_call_id: None,
            };
            current_history.push(assistant_msg.clone());
            new_messages.push(assistant_msg);
            emit_visible_history(&app, &current_history)?;

            // Execute each requested tool and feed its output back into the
            // history so the next LLM call can see the results.
            for call in tool_calls {
                let output = execute_tool(&call, &state).await;
                let tool_msg = Message {
                    role: Role::Tool,
                    content: output,
                    tool_calls: None,
                    // For Ollama/Simple flow, we just append.
                    // For OpenAI strict, this needs to match call.id.
                    tool_call_id: call.id,
                };
                current_history.push(tool_msg.clone());
                new_messages.push(tool_msg);
                emit_visible_history(&app, &current_history)?;
            }
        } else {
            // Final text response: record it, notify the UI, and finish.
            let assistant_msg = Message {
                role: Role::Assistant,
                content: response.content.unwrap_or_default(),
                tool_calls: None,
                tool_call_id: None,
            };
            new_messages.push(assistant_msg.clone());
            current_history.push(assistant_msg);
            emit_visible_history(&app, &current_history)?;
            return Ok(new_messages);
        }
    }
    Err("Max conversation turns reached.".to_string())
}

/// Build a system-role message with the given content.
fn system_message(content: &str) -> Message {
    Message {
        role: Role::System,
        content: content.to_string(),
        tool_calls: None,
        tool_call_id: None,
    }
}

/// Emit the conversation to the frontend, excluding the two injected system
/// prompts at indices 0 and 1.
fn emit_visible_history(app: &AppHandle, history: &[Message]) -> Result<(), String> {
    app.emit("chat:update", &history[2..])
        .map_err(|e| e.to_string())
}
/// Dispatch a single tool call to the matching backend command and return its
/// output — or an error description — as a string for the LLM to consume.
async fn execute_tool(call: &ToolCall, state: &State<'_, SessionState>) -> String {
    // Arguments arrive as a JSON string from the LLM abstraction layer.
    let args: serde_json::Value = match serde_json::from_str(&call.function.arguments) {
        Ok(parsed) => parsed,
        Err(e) => return format!("Error parsing arguments: {}", e),
    };
    // Helper: extract a string field from the parsed arguments, defaulting
    // to empty when missing or non-string.
    let str_arg = |key: &str| args[key].as_str().unwrap_or("").to_string();

    match call.function.name.as_str() {
        "read_file" => match fs::read_file(str_arg("path"), state.clone()).await {
            Ok(content) => content,
            Err(e) => format!("Error: {}", e),
        },
        "write_file" => {
            match fs::write_file(str_arg("path"), str_arg("content"), state.clone()).await {
                Ok(_) => "File written successfully.".to_string(),
                Err(e) => format!("Error: {}", e),
            }
        }
        "list_directory" => match fs::list_directory(str_arg("path"), state.clone()).await {
            Ok(entries) => serde_json::to_string(&entries).unwrap_or_default(),
            Err(e) => format!("Error: {}", e),
        },
        "search_files" => match search::search_files(str_arg("query"), state.clone()).await {
            Ok(results) => serde_json::to_string(&results).unwrap_or_default(),
            Err(e) => format!("Error: {}", e),
        },
        "exec_shell" => {
            // "args" is an optional array of strings; absent → empty arg list.
            let arg_list: Vec<String> = args["args"]
                .as_array()
                .map(|items| {
                    items
                        .iter()
                        .map(|v| v.as_str().unwrap_or("").to_string())
                        .collect()
                })
                .unwrap_or_default();
            match shell::exec_shell(str_arg("command"), arg_list, state.clone()).await {
                Ok(output) => serde_json::to_string(&output).unwrap_or_default(),
                Err(e) => format!("Error: {}", e),
            }
        }
        other => format!("Unknown tool: {}", other),
    }
}
fn get_tool_definitions() -> Vec<ToolDefinition> {
vec![
ToolDefinition {
kind: "function".to_string(),
function: ToolFunctionDefinition {
name: "read_file".to_string(),
description: "Reads the complete content of a file from the project. Use this to understand existing code before making changes.".to_string(),
parameters: json!({
"type": "object",
"properties": {
"path": { "type": "string", "description": "Relative path to the file from project root" }
},
"required": ["path"]
}),
},
},
ToolDefinition {
kind: "function".to_string(),
function: ToolFunctionDefinition {
name: "write_file".to_string(),
description: "Creates or completely overwrites a file with new content. YOU MUST USE THIS to implement code changes - do not suggest code to the user. The content parameter must contain the COMPLETE file including all imports, functions, and unchanged code.".to_string(),
parameters: json!({
"type": "object",
"properties": {
"path": { "type": "string", "description": "Relative path to the file from project root" },
"content": { "type": "string", "description": "The complete file content to write (not a diff or partial code)" }
},
"required": ["path", "content"]
}),
},
},
ToolDefinition {
kind: "function".to_string(),
function: ToolFunctionDefinition {
name: "list_directory".to_string(),
description: "Lists all files and directories at a given path. Use this to explore the project structure.".to_string(),
parameters: json!({
"type": "object",
"properties": {
"path": { "type": "string", "description": "Relative path to list (use '.' for project root)" }
},
"required": ["path"]
}),
},
},
ToolDefinition {
kind: "function".to_string(),
function: ToolFunctionDefinition {
name: "search_files".to_string(),
description: "Searches for text patterns across all files in the project. Use this to find functions, variables, or code patterns when you don't know which file they're in."
.to_string(),
parameters: json!({
"type": "object",
"properties": {
"query": { "type": "string", "description": "The text pattern to search for across all files" }
},
"required": ["query"]
}),
},
},
ToolDefinition {
kind: "function".to_string(),
function: ToolFunctionDefinition {
name: "exec_shell".to_string(),
description: "Executes a shell command in the project root directory. Use this to run tests, build commands, git operations, or any command-line tool. Examples: cargo check, npm test, git status.".to_string(),
parameters: json!({
"type": "object",
"properties": {
"command": {
"type": "string",
"description": "The command binary to execute (e.g., 'git', 'cargo', 'npm', 'ls')"
},
"args": {
"type": "array",
"items": { "type": "string" },
"description": "Array of arguments to pass to the command (e.g., ['status'] for git status)"
}
},
"required": ["command", "args"]
}),
},
},
]
}