feat: auto-detect ollama models
@@ -18,6 +18,12 @@ pub struct ProviderConfig {
 const MAX_TURNS: usize = 10;
 
+#[tauri::command]
+pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>, String> {
+    let url = base_url.unwrap_or_else(|| "http://localhost:11434".to_string());
+    OllamaProvider::get_models(&url).await
+}
+
 #[tauri::command]
 pub async fn chat(
     messages: Vec<Message>,

@@ -20,7 +20,8 @@ pub fn run() {
             commands::fs::list_directory,
             commands::search::search_files,
             commands::shell::exec_shell,
-            commands::chat::chat
+            commands::chat::chat,
+            commands::chat::get_ollama_models
         ])
         .run(tauri::generate_context!())
         .expect("error while running tauri application");
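
With the command registered in the invoke handler, the frontend can call it through Tauri's `invoke`. A minimal sketch, not part of this commit (the `fetchOllamaModels` wrapper is hypothetical, and the import path assumes Tauri v2):

```ts
import { invoke } from "@tauri-apps/api/core";

// Hypothetical wrapper around the new command. Tauri converts the camelCase
// `baseUrl` key to the Rust parameter `base_url`; omitting it lets the Rust
// side fall back to http://localhost:11434.
async function fetchOllamaModels(baseUrl?: string): Promise<string[]> {
  return invoke<string[]>("get_ollama_models", { baseUrl });
}
```
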
@@ -13,6 +13,40 @@ impl OllamaProvider {
     pub fn new(base_url: String) -> Self {
         Self { base_url }
     }
+
+    pub async fn get_models(base_url: &str) -> Result<Vec<String>, String> {
+        let client = reqwest::Client::new();
+        let url = format!("{}/api/tags", base_url.trim_end_matches('/'));
+
+        let res = client
+            .get(&url)
+            .send()
+            .await
+            .map_err(|e| format!("Request failed: {}", e))?;
+
+        if !res.status().is_success() {
+            let status = res.status();
+            let text = res.text().await.unwrap_or_default();
+            return Err(format!("Ollama API error {}: {}", status, text));
+        }
+
+        let body: OllamaTagsResponse = res
+            .json()
+            .await
+            .map_err(|e| format!("Failed to parse response: {}", e))?;
+
+        Ok(body.models.into_iter().map(|m| m.name).collect())
+    }
 }
+
+#[derive(Deserialize)]
+struct OllamaTagsResponse {
+    models: Vec<OllamaModelTag>,
+}
+
+#[derive(Deserialize)]
+struct OllamaModelTag {
+    name: String,
+}
 
 // --- Request Types ---
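
For reference, `OllamaTagsResponse` mirrors the shape of Ollama's `/api/tags` payload. Real responses typically carry extra per-model fields (e.g. `size`, `modified_at`), which serde ignores by default since the structs only declare `name`. A sketch of the shape in TypeScript terms:

```ts
// Sketch of the /api/tags response the Rust structs above deserialize.
// Only `name` is declared, so any additional fields in the real payload
// are silently dropped during deserialization.
interface OllamaModelTag {
  name: string;
}

interface OllamaTagsResponse {
  models: OllamaModelTag[];
}

const sample: OllamaTagsResponse = {
  models: [{ name: "llama3.1:latest" }, { name: "mistral:latest" }],
};

// get_models returns just the names: ["llama3.1:latest", "mistral:latest"]
const names = sample.models.map((m) => m.name);
```
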
@@ -9,8 +9,24 @@ export function Chat() {
   const [loading, setLoading] = useState(false);
   const [model, setModel] = useState("llama3.1"); // Default local model
   const [enableTools, setEnableTools] = useState(true);
+  const [availableModels, setAvailableModels] = useState<string[]>([]);
   const messagesEndRef = useRef<HTMLDivElement>(null);
 
+  useEffect(() => {
+    invoke<string[]>("get_ollama_models")
+      .then((models) => {
+        if (models.length > 0) {
+          setAvailableModels(models);
+          // If we have models and current one isn't valid, switch to first
+          if (!models.includes(model)) {
+            setModel(models[0]);
+          }
+        }
+      })
+      .catch((err) => console.error(err));
+    // eslint-disable-next-line react-hooks/exhaustive-deps
+  }, []);
+
   const scrollToBottom = () => {
     messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
  };
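
The eslint-disable above is needed because the effect reads `model` but intentionally runs only once on mount. One alternative sketch, not part of this commit, that satisfies exhaustive-deps without the comment is to use the functional form of `setModel`:

```ts
import { useEffect, useState } from "react";
import { invoke } from "@tauri-apps/api/core";

// Hypothetical standalone hook with the same detection logic: the functional
// update never reads `model` inside the effect, so the empty dependency array
// is honest and no eslint-disable is required.
function useOllamaModel(fallback = "llama3.1") {
  const [model, setModel] = useState(fallback);
  const [availableModels, setAvailableModels] = useState<string[]>([]);

  useEffect(() => {
    invoke<string[]>("get_ollama_models")
      .then((models) => {
        if (models.length > 0) {
          setAvailableModels(models);
          setModel((current) => (models.includes(current) ? current : models[0]));
        }
      })
      .catch((err) => console.error(err));
  }, []);

  return { model, setModel, availableModels };
}
```
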
@@ -76,12 +92,26 @@ export function Chat() {
         }}
       >
         <label>Ollama Model:</label>
-        <input
-          value={model}
-          onChange={(e) => setModel(e.target.value)}
-          placeholder="e.g. llama3, mistral"
-          style={{ padding: "5px" }}
-        />
+        {availableModels.length > 0 ? (
+          <select
+            value={model}
+            onChange={(e) => setModel(e.target.value)}
+            style={{ padding: "5px" }}
+          >
+            {availableModels.map((m) => (
+              <option key={m} value={m}>
+                {m}
+              </option>
+            ))}
+          </select>
+        ) : (
+          <input
+            value={model}
+            onChange={(e) => setModel(e.target.value)}
+            placeholder="e.g. llama3, mistral"
+            style={{ padding: "5px" }}
+          />
+        )}
         <label
           style={{
             display: "flex",