diff --git a/.living_spec/stories/05_persist_project_selection.md b/.living_spec/stories/archive/05_persist_project_selection.md
similarity index 100%
rename from .living_spec/stories/05_persist_project_selection.md
rename to .living_spec/stories/archive/05_persist_project_selection.md
diff --git a/src-tauri/src/commands/chat.rs b/src-tauri/src/commands/chat.rs
index 1152120..40b8206 100644
--- a/src-tauri/src/commands/chat.rs
+++ b/src-tauri/src/commands/chat.rs
@@ -18,6 +18,12 @@ pub struct ProviderConfig {
 
 const MAX_TURNS: usize = 10;
 
+#[tauri::command]
+pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>, String> {
+    let url = base_url.unwrap_or_else(|| "http://localhost:11434".to_string());
+    OllamaProvider::get_models(&url).await
+}
+
 #[tauri::command]
 pub async fn chat(
     messages: Vec<Message>,
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index ffd5666..daa7632 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -20,7 +20,8 @@ pub fn run() {
             commands::fs::list_directory,
             commands::search::search_files,
             commands::shell::exec_shell,
-            commands::chat::chat
+            commands::chat::chat,
+            commands::chat::get_ollama_models
         ])
         .run(tauri::generate_context!())
         .expect("error while running tauri application");
diff --git a/src-tauri/src/llm/ollama.rs b/src-tauri/src/llm/ollama.rs
index 975ef9e..d56d510 100644
--- a/src-tauri/src/llm/ollama.rs
+++ b/src-tauri/src/llm/ollama.rs
@@ -13,6 +13,40 @@ impl OllamaProvider {
     pub fn new(base_url: String) -> Self {
         Self { base_url }
     }
+
+    pub async fn get_models(base_url: &str) -> Result<Vec<String>, String> {
+        let client = reqwest::Client::new();
+        let url = format!("{}/api/tags", base_url.trim_end_matches('/'));
+
+        let res = client
+            .get(&url)
+            .send()
+            .await
+            .map_err(|e| format!("Request failed: {}", e))?;
+
+        if !res.status().is_success() {
+            let status = res.status();
+            let text = res.text().await.unwrap_or_default();
+            return Err(format!("Ollama API error {}: {}", status, text));
+        }
+
+        let body: OllamaTagsResponse = res
+            .json()
+            .await
+            .map_err(|e| format!("Failed to parse response: {}", e))?;
+
+        Ok(body.models.into_iter().map(|m| m.name).collect())
+    }
+}
+
+#[derive(Deserialize)]
+struct OllamaTagsResponse {
+    models: Vec<OllamaModelTag>,
+}
+
+#[derive(Deserialize)]
+struct OllamaModelTag {
+    name: String,
 }
 
 // --- Request Types ---
diff --git a/src/components/Chat.tsx b/src/components/Chat.tsx
index 4179d06..59002cb 100644
--- a/src/components/Chat.tsx
+++ b/src/components/Chat.tsx
@@ -9,8 +9,24 @@ export function Chat() {
   const [loading, setLoading] = useState(false);
   const [model, setModel] = useState("llama3.1"); // Default local model
   const [enableTools, setEnableTools] = useState(true);
+  const [availableModels, setAvailableModels] = useState<string[]>([]);
   const messagesEndRef = useRef<HTMLDivElement>(null);
 
+  useEffect(() => {
+    invoke<string[]>("get_ollama_models")
+      .then((models) => {
+        if (models.length > 0) {
+          setAvailableModels(models);
+          // If we have models and current one isn't valid, switch to first
+          if (!models.includes(model)) {
+            setModel(models[0]);
+          }
+        }
+      })
+      .catch((err) => console.error(err));
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, []);
+
   const scrollToBottom = () => {
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
   };
@@ -76,12 +92,26 @@
           }}
         >
-          <input
-            value={model}
-            onChange={(e) => setModel(e.target.value)}
-            placeholder="e.g. llama3, mistral"
-            style={{ padding: "5px" }}
-          />
+          {availableModels.length > 0 ? (
+            <select
+              value={model}
+              onChange={(e) => setModel(e.target.value)}
+              style={{ padding: "5px" }}
+            >
+              {availableModels.map((m) => (
+                <option key={m} value={m}>
+                  {m}
+                </option>
+              ))}
+            </select>
+          ) : (
+            <input
+              value={model}
+              onChange={(e) => setModel(e.target.value)}
+              placeholder="e.g. llama3, mistral"
+              style={{ padding: "5px" }}
+            />
+          )}