feat: Backend cancellation support for interrupting model responses
Merged from feature/interrupt-on-type branch.

Backend cancellation infrastructure:
- Added tokio watch channel to SessionState for cancellation signaling
- Implemented cancel_chat command
- Modified chat command to use tokio::select! for racing requests vs cancellation
- When cancelled, the HTTP request to Ollama is dropped and the command returns early
- Added tokio dependency with sync feature

Story updates:
- Story 13: Updated to use Stop button pattern (industry standard)
- Story 18: Created placeholder for streaming responses
- Stories 15-17: Placeholders for future features

Frontend changes:
- Removed auto-interrupt-on-typing behavior (too confusing)
- Backend infrastructure ready for Stop button implementation

Note: Story 13 UI (Stop button) not yet implemented - backend ready
This commit is contained in:
@@ -8,6 +8,7 @@ use crate::state::SessionState;
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use tauri::{AppHandle, Emitter, State};
|
||||
use tokio::select;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ProviderConfig {
|
||||
@@ -25,6 +26,12 @@ pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>,
|
||||
OllamaProvider::get_models(&url).await
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn cancel_chat(state: State<'_, SessionState>) -> Result<(), String> {
|
||||
state.cancel_tx.send(true).map_err(|e| e.to_string())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tauri::command]
|
||||
pub async fn chat(
|
||||
app: AppHandle,
|
||||
@@ -32,6 +39,9 @@ pub async fn chat(
|
||||
config: ProviderConfig,
|
||||
state: State<'_, SessionState>,
|
||||
) -> Result<Vec<Message>, String> {
|
||||
// Reset cancellation flag at start
|
||||
let _ = state.cancel_tx.send(false);
|
||||
let mut cancel_rx = state.cancel_rx.clone();
|
||||
// 1. Setup Provider
|
||||
let provider: Box<dyn ModelProvider> = match config.provider.as_str() {
|
||||
"ollama" => Box::new(OllamaProvider::new(
|
||||
@@ -84,11 +94,23 @@ pub async fn chat(
|
||||
}
|
||||
turn_count += 1;
|
||||
|
||||
// Call LLM
|
||||
let response = provider
|
||||
.chat(&config.model, &current_history, tools)
|
||||
.await
|
||||
.map_err(|e| format!("LLM Error: {}", e))?;
|
||||
// Call LLM with cancellation support
|
||||
let chat_future = provider.chat(&config.model, &current_history, tools);
|
||||
|
||||
let response = select! {
|
||||
result = chat_future => {
|
||||
result.map_err(|e| format!("LLM Error: {}", e))?
|
||||
}
|
||||
_ = cancel_rx.changed() => {
|
||||
if *cancel_rx.borrow() {
|
||||
return Err("Chat cancelled by user".to_string());
|
||||
}
|
||||
// False alarm, continue
|
||||
provider.chat(&config.model, &current_history, tools)
|
||||
.await
|
||||
.map_err(|e| format!("LLM Error: {}", e))?
|
||||
}
|
||||
};
|
||||
|
||||
// Process Response
|
||||
if let Some(tool_calls) = response.tool_calls {
|
||||
|
||||
Reference in New Issue
Block a user