- Fixed silent API key save failure by switching from keyring to Tauri store - Removed keyring dependency (didn't work in macOS dev mode for unsigned apps) - Implemented reliable cross-platform storage using tauri-plugin-store - Added pendingMessageRef to preserve user message during API key dialog flow - Refactored sendMessage to accept optional message parameter for retry - Removed all debug logging and test code - Removed unused entitlements.plist and macOS config - API key now persists correctly between sessions - Auto-retry after saving key works properly Story 12 complete and archived.
403 lines
15 KiB
Rust
403 lines
15 KiB
Rust
use crate::commands::{fs, search, shell};
|
|
use crate::llm::anthropic::AnthropicProvider;
|
|
use crate::llm::ollama::OllamaProvider;
|
|
use crate::llm::prompts::SYSTEM_PROMPT;
|
|
use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition};
|
|
use crate::state::SessionState;
|
|
use serde::Deserialize;
|
|
use serde_json::json;
|
|
use tauri::{AppHandle, Emitter, State};
|
|
use tauri_plugin_store::StoreExt;
|
|
|
|
/// Chat-provider settings deserialized from the frontend with each `chat`
/// invocation.
#[derive(Deserialize)]
pub struct ProviderConfig {
    // Provider identifier; `chat` only accepts "ollama" here — Anthropic is
    // selected implicitly by a "claude-" model-name prefix instead.
    pub provider: String, // "ollama"
    // Model name, e.g. an Ollama tag or a "claude-…" model id.
    pub model: String,
    // Optional Ollama base URL; `chat` falls back to "http://localhost:11434".
    pub base_url: Option<String>,
    // Whether tool definitions are sent to the LLM; `chat` defaults this to true.
    pub enable_tools: Option<bool>,
}
|
|
|
|
/// Hard cap on agent-loop iterations in `chat`, guarding against runaway
/// tool-call cycles.
const MAX_TURNS: usize = 30;
|
|
|
|
#[tauri::command]
|
|
pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>, String> {
|
|
let url = base_url.unwrap_or_else(|| "http://localhost:11434".to_string());
|
|
OllamaProvider::get_models(&url).await
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn get_anthropic_api_key_exists(app: AppHandle) -> Result<bool, String> {
|
|
let store = app
|
|
.store("store.json")
|
|
.map_err(|e| format!("Failed to access store: {}", e))?;
|
|
|
|
match store.get("anthropic_api_key") {
|
|
Some(value) => {
|
|
if let Some(key) = value.as_str() {
|
|
Ok(!key.is_empty())
|
|
} else {
|
|
Ok(false)
|
|
}
|
|
}
|
|
None => Ok(false),
|
|
}
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn set_anthropic_api_key(app: AppHandle, api_key: String) -> Result<(), String> {
|
|
let store = app
|
|
.store("store.json")
|
|
.map_err(|e| format!("Failed to access store: {}", e))?;
|
|
|
|
store.set("anthropic_api_key", json!(api_key));
|
|
|
|
store
|
|
.save()
|
|
.map_err(|e| format!("Failed to save store: {}", e))?;
|
|
|
|
// Verify it was saved
|
|
match store.get("anthropic_api_key") {
|
|
Some(value) => {
|
|
if let Some(retrieved) = value.as_str() {
|
|
if retrieved != api_key {
|
|
return Err("Retrieved key does not match saved key".to_string());
|
|
}
|
|
} else {
|
|
return Err("Stored value is not a string".to_string());
|
|
}
|
|
}
|
|
None => {
|
|
return Err("API key was saved but cannot be retrieved".to_string());
|
|
}
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
fn get_anthropic_api_key(app: &AppHandle) -> Result<String, String> {
|
|
let store = app
|
|
.store("store.json")
|
|
.map_err(|e| format!("Failed to access store: {}", e))?;
|
|
|
|
match store.get("anthropic_api_key") {
|
|
Some(value) => {
|
|
if let Some(key) = value.as_str() {
|
|
if key.is_empty() {
|
|
Err("Anthropic API key is empty. Please set your API key.".to_string())
|
|
} else {
|
|
Ok(key.to_string())
|
|
}
|
|
} else {
|
|
Err("Stored API key is not a string".to_string())
|
|
}
|
|
}
|
|
None => Err("Anthropic API key not found. Please set your API key.".to_string()),
|
|
}
|
|
}
|
|
|
|
/// Main agent command: runs a multi-turn tool-calling loop against either
/// Anthropic (model names starting with "claude-") or Ollama.
///
/// Flow per turn: stream one LLM response; if it requests tools, execute
/// each tool and append the results to the history, then loop; otherwise
/// append the final text response and stop. The evolving transcript is
/// emitted on the "chat:update" event after every history mutation,
/// excluding the two injected system messages.
///
/// Returns only the messages generated during this call (assistant turns
/// and tool results), not the full history — the frontend merges them.
/// Errors on cancellation, unsupported provider, provider failures, a
/// missing Anthropic key, or exceeding `MAX_TURNS`.
#[tauri::command]
pub async fn chat(
    app: AppHandle,
    messages: Vec<Message>,
    config: ProviderConfig,
    state: State<'_, SessionState>,
) -> Result<Vec<Message>, String> {
    // Reset cancel flag at start of new request
    let _ = state.cancel_tx.send(false);

    // Get a clone of the cancellation receiver
    let mut cancel_rx = state.cancel_rx.clone();

    // Mark the receiver as having seen the current (false) value
    // This prevents changed() from firing immediately due to stale state
    cancel_rx.borrow_and_update();

    // 1. Setup Provider
    let base_url = config
        .base_url
        .clone()
        .unwrap_or_else(|| "http://localhost:11434".to_string());

    // Determine provider from model name
    let is_claude = config.model.starts_with("claude-");

    // Any non-Claude model must explicitly use the "ollama" provider.
    if !is_claude && config.provider.as_str() != "ollama" {
        return Err(format!("Unsupported provider: {}", config.provider));
    }

    // 2. Define Tools
    let tool_defs = get_tool_definitions();
    // An empty slice disables tool use without changing the provider call shape.
    let tools = if config.enable_tools.unwrap_or(true) {
        tool_defs.as_slice()
    } else {
        &[]
    };

    // 3. Agent Loop
    let mut current_history = messages.clone();

    // Inject System Prompt
    // NOTE: indices 0 and 1 are assumed by every `[2..]` emission below —
    // keep these two inserts in sync with those slices.
    current_history.insert(
        0,
        Message {
            role: Role::System,
            content: SYSTEM_PROMPT.to_string(),
            tool_calls: None,
            tool_call_id: None,
        },
    );

    // Inject reminder as a second system message
    current_history.insert(
        1,
        Message {
            role: Role::System,
            content: "REMINDER: Distinguish between showing examples (use code blocks in chat) vs implementing changes (use write_file tool). Keywords like 'show me', 'example', 'how does' = chat response. Keywords like 'create', 'add', 'implement', 'fix' = use tools.".to_string(),
            tool_calls: None,
            tool_call_id: None,
        },
    );

    // Messages produced during this request only; this is the return value.
    let mut new_messages: Vec<Message> = Vec::new();
    let mut turn_count = 0;

    loop {
        // Check for cancellation at start of loop
        if *cancel_rx.borrow() {
            return Err("Chat cancelled by user".to_string());
        }

        // Safety cap against runaway tool-call cycles.
        if turn_count >= MAX_TURNS {
            return Err("Max conversation turns reached.".to_string());
        }
        turn_count += 1;

        // Call LLM with streaming
        let response = if is_claude {
            // Use Anthropic provider
            // Key is re-read each turn, so a freshly-saved key takes effect
            // without restarting the request.
            let api_key = get_anthropic_api_key(&app)?;
            let anthropic_provider = AnthropicProvider::new(api_key);
            anthropic_provider
                .chat_stream(&app, &config.model, &current_history, tools, &mut cancel_rx)
                .await
                .map_err(|e| format!("Anthropic Error: {}", e))?
        } else {
            // Use Ollama provider
            let ollama_provider = OllamaProvider::new(base_url.clone());
            ollama_provider
                .chat_stream(&app, &config.model, &current_history, tools, &mut cancel_rx)
                .await
                .map_err(|e| format!("Ollama Error: {}", e))?
        };

        // Process Response
        if let Some(tool_calls) = response.tool_calls {
            // The Assistant wants to run tools
            let assistant_msg = Message {
                role: Role::Assistant,
                content: response.content.unwrap_or_default(),
                tool_calls: Some(tool_calls.clone()),
                tool_call_id: None,
            };

            current_history.push(assistant_msg.clone());
            new_messages.push(assistant_msg);
            // Emit history excluding system prompts (indices 0 and 1)
            app.emit("chat:update", &current_history[2..])
                .map_err(|e| e.to_string())?;

            // Execute Tools
            for call in tool_calls {
                // Check for cancellation before executing each tool
                if *cancel_rx.borrow() {
                    return Err("Chat cancelled before tool execution".to_string());
                }

                let output = execute_tool(&call, &state).await;

                let tool_msg = Message {
                    role: Role::Tool,
                    content: output,
                    tool_calls: None,
                    // For Ollama/Simple flow, we just append.
                    // For OpenAI strict, this needs to match call.id.
                    tool_call_id: call.id,
                };

                current_history.push(tool_msg.clone());
                new_messages.push(tool_msg);
                // Emit history excluding system prompts (indices 0 and 1)
                app.emit("chat:update", &current_history[2..])
                    .map_err(|e| e.to_string())?;
            }
        } else {
            // Final text response
            let assistant_msg = Message {
                role: Role::Assistant,
                content: response.content.unwrap_or_default(),
                tool_calls: None,
                tool_call_id: None,
            };

            // We don't push to current_history needed for next loop, because we are done.
            // (It is still pushed below so the final "chat:update" emission is complete.)
            new_messages.push(assistant_msg.clone());
            current_history.push(assistant_msg);
            // Emit history excluding system prompts (indices 0 and 1)
            app.emit("chat:update", &current_history[2..])
                .map_err(|e| e.to_string())?;
            break;
        }
    }

    Ok(new_messages)
}
|
|
|
|
async fn execute_tool(call: &ToolCall, state: &State<'_, SessionState>) -> String {
|
|
let name = call.function.name.as_str();
|
|
// Parse arguments. They come as a JSON string from the LLM abstraction.
|
|
let args: serde_json::Value = match serde_json::from_str(&call.function.arguments) {
|
|
Ok(v) => v,
|
|
Err(e) => return format!("Error parsing arguments: {}", e),
|
|
};
|
|
|
|
match name {
|
|
"read_file" => {
|
|
let path = args["path"].as_str().unwrap_or("").to_string();
|
|
match fs::read_file(path, state.clone()).await {
|
|
Ok(content) => content,
|
|
Err(e) => format!("Error: {}", e),
|
|
}
|
|
}
|
|
"write_file" => {
|
|
let path = args["path"].as_str().unwrap_or("").to_string();
|
|
let content = args["content"].as_str().unwrap_or("").to_string();
|
|
match fs::write_file(path, content, state.clone()).await {
|
|
Ok(_) => "File written successfully.".to_string(),
|
|
Err(e) => format!("Error: {}", e),
|
|
}
|
|
}
|
|
"list_directory" => {
|
|
let path = args["path"].as_str().unwrap_or("").to_string();
|
|
match fs::list_directory(path, state.clone()).await {
|
|
Ok(entries) => serde_json::to_string(&entries).unwrap_or_default(),
|
|
Err(e) => format!("Error: {}", e),
|
|
}
|
|
}
|
|
"search_files" => {
|
|
let query = args["query"].as_str().unwrap_or("").to_string();
|
|
match search::search_files(query, state.clone()).await {
|
|
Ok(results) => serde_json::to_string(&results).unwrap_or_default(),
|
|
Err(e) => format!("Error: {}", e),
|
|
}
|
|
}
|
|
"exec_shell" => {
|
|
let command = args["command"].as_str().unwrap_or("").to_string();
|
|
let args_vec: Vec<String> = args["args"]
|
|
.as_array()
|
|
.map(|arr| {
|
|
arr.iter()
|
|
.map(|v| v.as_str().unwrap_or("").to_string())
|
|
.collect()
|
|
})
|
|
.unwrap_or_default();
|
|
|
|
match shell::exec_shell(command, args_vec, state.clone()).await {
|
|
Ok(output) => serde_json::to_string(&output).unwrap_or_default(),
|
|
Err(e) => format!("Error: {}", e),
|
|
}
|
|
}
|
|
_ => format!("Unknown tool: {}", name),
|
|
}
|
|
}
|
|
|
|
fn get_tool_definitions() -> Vec<ToolDefinition> {
|
|
vec![
|
|
ToolDefinition {
|
|
kind: "function".to_string(),
|
|
function: ToolFunctionDefinition {
|
|
name: "read_file".to_string(),
|
|
description: "Reads the complete content of a file from the project. Use this to understand existing code before making changes.".to_string(),
|
|
parameters: json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"path": { "type": "string", "description": "Relative path to the file from project root" }
|
|
},
|
|
"required": ["path"]
|
|
}),
|
|
},
|
|
},
|
|
ToolDefinition {
|
|
kind: "function".to_string(),
|
|
function: ToolFunctionDefinition {
|
|
name: "write_file".to_string(),
|
|
description: "Creates or completely overwrites a file with new content. YOU MUST USE THIS to implement code changes - do not suggest code to the user. The content parameter must contain the COMPLETE file including all imports, functions, and unchanged code.".to_string(),
|
|
parameters: json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"path": { "type": "string", "description": "Relative path to the file from project root" },
|
|
"content": { "type": "string", "description": "The complete file content to write (not a diff or partial code)" }
|
|
},
|
|
"required": ["path", "content"]
|
|
}),
|
|
},
|
|
},
|
|
ToolDefinition {
|
|
kind: "function".to_string(),
|
|
function: ToolFunctionDefinition {
|
|
name: "list_directory".to_string(),
|
|
description: "Lists all files and directories at a given path. Use this to explore the project structure.".to_string(),
|
|
parameters: json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"path": { "type": "string", "description": "Relative path to list (use '.' for project root)" }
|
|
},
|
|
"required": ["path"]
|
|
}),
|
|
},
|
|
},
|
|
ToolDefinition {
|
|
kind: "function".to_string(),
|
|
function: ToolFunctionDefinition {
|
|
name: "search_files".to_string(),
|
|
description: "Searches for text patterns across all files in the project. Use this to find functions, variables, or code patterns when you don't know which file they're in."
|
|
.to_string(),
|
|
parameters: json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"query": { "type": "string", "description": "The text pattern to search for across all files" }
|
|
},
|
|
"required": ["query"]
|
|
}),
|
|
},
|
|
},
|
|
ToolDefinition {
|
|
kind: "function".to_string(),
|
|
function: ToolFunctionDefinition {
|
|
name: "exec_shell".to_string(),
|
|
description: "Executes a shell command in the project root directory. Use this to run tests, build commands, git operations, or any command-line tool. Examples: cargo check, npm test, git status.".to_string(),
|
|
parameters: json!({
|
|
"type": "object",
|
|
"properties": {
|
|
"command": {
|
|
"type": "string",
|
|
"description": "The command binary to execute (e.g., 'git', 'cargo', 'npm', 'ls')"
|
|
},
|
|
"args": {
|
|
"type": "array",
|
|
"items": { "type": "string" },
|
|
"description": "Array of arguments to pass to the command (e.g., ['status'] for git status)"
|
|
}
|
|
},
|
|
"required": ["command", "args"]
|
|
}),
|
|
},
|
|
},
|
|
]
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn cancel_chat(state: State<'_, SessionState>) -> Result<(), String> {
|
|
state.cancel_tx.send(true).map_err(|e| e.to_string())?;
|
|
Ok(())
|
|
}
|