fix: make llm provider async and add tool toggle

Dave
2025-12-24 17:32:46 +00:00
parent d9cd16601b
commit b241c47fd9
8 changed files with 149 additions and 28 deletions

src-tauri/Cargo.lock (generated)

@@ -2056,6 +2056,7 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
name = "living-spec-standalone"
version = "0.1.0"
dependencies = [
"async-trait",
"chrono",
"ignore",
"reqwest",

src-tauri/Cargo.toml

@@ -28,4 +28,5 @@ walkdir = "2.5.0"
reqwest = { version = "0.12.28", features = ["json", "blocking"] }
uuid = { version = "1.19.0", features = ["v4", "serde"] }
chrono = { version = "0.4.42", features = ["serde"] }
async-trait = "0.1.89"


@@ -4,7 +4,7 @@ use crate::llm::types::{
Message, ModelProvider, Role, ToolCall, ToolDefinition, ToolFunctionDefinition,
};
use crate::state::SessionState;
-use serde::{Deserialize, Serialize};
+use serde::Deserialize;
use serde_json::json;
use tauri::State;
@@ -13,6 +13,7 @@ pub struct ProviderConfig {
pub provider: String, // "ollama"
pub model: String,
pub base_url: Option<String>,
+pub enable_tools: Option<bool>,
}
const MAX_TURNS: usize = 10;
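Since `enable_tools` is an `Option<bool>`, payloads from older frontends that omit the field still deserialize, and the command falls back to the previous always-on behavior via `unwrap_or(true)` below. A standalone sketch of that round-trip (the struct mirrors the one above; `serde_json` and the model names are used only for the demo):

```rust
use serde::Deserialize;

#[derive(Deserialize)]
struct ProviderConfig {
    provider: String,
    model: String,
    base_url: Option<String>,
    enable_tools: Option<bool>,
}

fn main() {
    // A payload without the new field still parses; the missing value
    // becomes None, and unwrap_or(true) keeps tools enabled.
    let legacy: ProviderConfig =
        serde_json::from_str(r#"{"provider":"ollama","model":"llama3.1"}"#).unwrap();
    assert!(legacy.enable_tools.unwrap_or(true));

    // An explicit false turns the toggle off.
    let no_tools: ProviderConfig = serde_json::from_str(
        r#"{"provider":"ollama","model":"llama3.1","enable_tools":false}"#,
    )
    .unwrap();
    assert_eq!(no_tools.enable_tools, Some(false));
    assert_eq!(legacy.model, no_tools.model);
    assert!(legacy.base_url.is_none() && no_tools.provider == "ollama");
}
```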
@@ -34,7 +35,12 @@ pub async fn chat(
};
// 2. Define Tools
-let tools = get_tool_definitions();
+let tool_defs = get_tool_definitions();
+let tools = if config.enable_tools.unwrap_or(true) {
+    tool_defs.as_slice()
+} else {
+    &[]
+};
// 3. Agent Loop
let mut current_history = messages.clone();
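Note the shape of the toggle: both branches yield a `&[ToolDefinition]`, borrowing either the freshly built definitions or a `'static` empty slice, so disabling tools costs no clone and the call below keeps taking a plain slice. The same pattern in isolation, with illustrative types:

```rust
// Both arms are &[T]: the full borrowed list, or the 'static empty slice.
fn active_tools<T>(all: &[T], enabled: bool) -> &[T] {
    if enabled { all } else { &[] }
}

fn main() {
    let defs = vec!["read_file", "list_dir"];
    assert_eq!(active_tools(&defs, true).len(), 2);
    assert!(active_tools(&defs, false).is_empty());
}
```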
@@ -49,7 +55,8 @@ pub async fn chat(
// Call LLM
let response = provider
-.chat(&config.model, &current_history, &tools)
+.chat(&config.model, &current_history, tools)
+.await
.map_err(|e| format!("LLM Error: {}", e))?;
// Process Response
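For readers skimming the hunk: the agent loop wrapping this call runs at most `MAX_TURNS` times, so a model that keeps requesting tools cannot spin forever. A self-contained sketch of that control flow, with stand-in types in place of the crate's `Message`/`CompletionResponse` (the fake model is purely illustrative):

```rust
const MAX_TURNS: usize = 10;

// Stand-in for the crate's response type.
struct Reply {
    content: String,
    tool_calls: Vec<String>, // names of tools the model asked to run
}

// Fake model: asks for one tool call, then answers.
fn fake_llm(history: &[String]) -> Reply {
    if history.len() < 2 {
        Reply { content: String::new(), tool_calls: vec!["read_file".into()] }
    } else {
        Reply { content: "done".into(), tool_calls: Vec::new() }
    }
}

fn main() {
    let mut history = vec!["user: hi".to_string()];
    for _ in 0..MAX_TURNS {
        let reply = fake_llm(&history);
        if reply.tool_calls.is_empty() {
            println!("{}", reply.content); // normal exit: plain answer
            return;
        }
        for tool in reply.tool_calls {
            // Real code executes the tool and appends its result
            // to the history as a tool message.
            history.push(format!("tool result for {}", tool));
        }
    }
    eprintln!("aborted after {} turns", MAX_TURNS); // bound against runaway loops
}
```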


@@ -1,6 +1,7 @@
use crate::llm::types::{
CompletionResponse, FunctionCall, Message, ModelProvider, Role, ToolCall, ToolDefinition,
};
+use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -76,14 +77,15 @@ struct OllamaResponseFunctionCall {
arguments: Value, // Ollama returns Object, we convert to String for internal storage
}
+#[async_trait]
impl ModelProvider for OllamaProvider {
-fn chat(
+async fn chat(
&self,
model: &str,
messages: &[Message],
tools: &[ToolDefinition],
) -> Result<CompletionResponse, String> {
-let client = reqwest::blocking::Client::new();
+let client = reqwest::Client::new();
let url = format!("{}/api/chat", self.base_url.trim_end_matches('/'));
// Convert domain Messages to Ollama Messages (handling String -> Object args mismatch)
@@ -129,16 +131,18 @@ impl ModelProvider for OllamaProvider {
.post(&url)
.json(&request_body)
.send()
+.await
.map_err(|e| format!("Request failed: {}", e))?;
if !res.status().is_success() {
let status = res.status();
-let text = res.text().unwrap_or_default();
+let text = res.text().await.unwrap_or_default();
return Err(format!("Ollama API error {}: {}", status, text));
}
let response_body: OllamaResponse = res
.json()
+.await
.map_err(|e| format!("Failed to parse response: {}", e))?;
// Convert Response back to Domain types
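The switch from `reqwest::blocking::Client` to `reqwest::Client` makes every step of the exchange awaitable: `send()`, `text()`, and `json()` all return futures now, which is why this hunk gains three `.await`s. A trimmed, self-contained version of the flow (assumes a tokio dependency with the macros feature and a local Ollama on its default port; error handling mirrors the code above):

```rust
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), String> {
    let client = reqwest::Client::new();
    let res = client
        .post("http://localhost:11434/api/chat")
        .json(&json!({
            "model": "llama3.1",
            "messages": [{ "role": "user", "content": "hi" }],
            "stream": false
        }))
        .send()
        .await
        .map_err(|e| format!("Request failed: {}", e))?;

    if !res.status().is_success() {
        let status = res.status();
        // text() consumes the response and is itself async now.
        let text = res.text().await.unwrap_or_default();
        return Err(format!("Ollama API error {}: {}", status, text));
    }

    let body: serde_json::Value = res
        .json()
        .await
        .map_err(|e| format!("Failed to parse response: {}", e))?;
    println!("{}", body["message"]["content"]);
    Ok(())
}
```

One possible refinement, not part of this commit: `reqwest::Client` holds a connection pool and is cheap to clone, so constructing it once (for instance as a field on `OllamaProvider`) would be cheaper than calling `Client::new()` inside every `chat` call.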

src-tauri/src/llm/types.rs

@@ -1,3 +1,4 @@
+use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
@@ -62,8 +63,9 @@ pub struct CompletionResponse {
}
/// The abstraction for different LLM providers (Ollama, Anthropic, etc.)
+#[async_trait]
pub trait ModelProvider: Send + Sync {
-fn chat(
+async fn chat(
&self,
model: &str,
messages: &[Message],
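The doc comment above anticipates more backends, and with `#[async_trait]` each one can await its own I/O inside `chat`. A hypothetical stub for a second provider, not part of this commit, assuming the crate's `Message`, `ToolDefinition`, and `CompletionResponse` are in scope (the signature matches the trait as implemented by `OllamaProvider`):

```rust
use async_trait::async_trait;

// Hypothetical second backend; a real one would hold an API key and
// base URL and POST to its vendor's chat endpoint.
struct AnthropicProvider;

#[async_trait]
impl ModelProvider for AnthropicProvider {
    async fn chat(
        &self,
        model: &str,
        messages: &[Message],
        tools: &[ToolDefinition],
    ) -> Result<CompletionResponse, String> {
        // Sketch only: report what would be sent instead of calling out.
        Err(format!(
            "AnthropicProvider not implemented: model={}, {} messages, {} tools",
            model,
            messages.len(),
            tools.len()
        ))
    }
}
```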