huskies: merge 858
@@ -1,6 +1,11 @@
//! LLM subsystem — chat orchestration, prompts, OAuth, and provider integrations.
/// Chat session orchestration — manages multi-turn LLM conversations with streaming.
pub mod chat;
/// OAuth credential flow for LLM API access (e.g. Anthropic OAuth PKCE).
pub mod oauth;
/// System prompt templates for agent and onboarding sessions.
pub mod prompts;
/// LLM provider implementations (Anthropic, Claude Code, Ollama).
pub mod providers;
/// Core LLM data types: `Message`, `Role`, `ToolCall`, and `ModelProvider`.
pub mod types;
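The `oauth` module's doc comment points at an Anthropic-style OAuth PKCE flow. As a rough orientation, here is a minimal sketch of the verifier/challenge half of PKCE (RFC 7636); the actual API of the `oauth` module is not part of this diff, so the `PkcePair` struct, the `generate_pkce` function, and the sha2/base64/rand dependencies are assumptions for illustration only.

```rust
// Illustrative PKCE sketch; not the `oauth` module's real API.
// Assumes the sha2, base64, and rand crates are available.
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
use rand::{distributions::Alphanumeric, Rng};
use sha2::{Digest, Sha256};

/// A code verifier plus its S256 challenge, as exchanged in an OAuth PKCE flow.
struct PkcePair {
    verifier: String,
    challenge: String,
}

fn generate_pkce() -> PkcePair {
    // RFC 7636 allows 43-128 characters; 64 random alphanumerics is a common choice.
    let verifier: String = rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(64)
        .map(char::from)
        .collect();
    // challenge = BASE64URL-ENCODE(SHA256(verifier)), without padding.
    let challenge = URL_SAFE_NO_PAD.encode(Sha256::digest(verifier.as_bytes()));
    PkcePair { verifier, challenge }
}
```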
@@ -1,4 +1,6 @@
//! System prompts — static prompt templates for the LLM chat and onboarding flows.

/// The default system prompt injected at the start of every agent chat session.
pub const SYSTEM_PROMPT: &str = r#"You are an AI Agent with direct access to the user's filesystem and development environment.

CRITICAL INSTRUCTIONS:
@@ -91,6 +93,7 @@ REMEMBER:
Remember: You are an autonomous agent that can both explain concepts and take action. Choose appropriately based on the user's request.
"#;

/// System prompt override used when a project is newly scaffolded and needs onboarding.
pub const ONBOARDING_PROMPT: &str = r#"ONBOARDING MODE ACTIVE — This is a newly scaffolded project. The spec files still contain placeholder content and must be replaced with real project information before any stories can be written.

Guide the user through each step below. Ask ONE category of questions at a time — do not overwhelm the user with everything at once.
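These two constants are the templates the chat flow draws from: `SYSTEM_PROMPT` for ordinary agent sessions and `ONBOARDING_PROMPT` when a freshly scaffolded project still has placeholder specs. How the chat module actually chooses between them is not shown in this hunk, so the helper below is only a sketch, assuming a boolean onboarding flag and assuming the onboarding prompt replaces (rather than augments) the default one.

```rust
// Illustrative only: the real selection logic lives in the chat module and is
// not shown in this diff; `initial_system_prompt` and its flag are assumptions.
use crate::llm::prompts::{ONBOARDING_PROMPT, SYSTEM_PROMPT};

/// Picks the system prompt text for a new session.
fn initial_system_prompt(is_onboarding: bool) -> &'static str {
    if is_onboarding {
        ONBOARDING_PROMPT
    } else {
        SYSTEM_PROMPT
    }
}
```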
@@ -36,9 +36,11 @@ mod parse;

use events::{handle_stream_event, process_json_event};

/// Orchestrates Claude Code CLI sessions via a PTY for streaming agent chat.
pub struct ClaudeCodeProvider;

impl ClaudeCodeProvider {
    /// Creates a new `ClaudeCodeProvider`.
    pub fn new() -> Self {
        Self
    }
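`ClaudeCodeProvider` drives the `claude` CLI through a PTY and consumes its NDJSON output; the imported `handle_stream_event` and `process_json_event` do the real work, but their bodies are outside this hunk. The sketch below shows one plausible way to split an accumulating PTY buffer into JSON events; the `drain_events` helper and its buffering strategy are assumptions, not the provider's actual implementation.

```rust
// Illustrative only: the real event dispatch lives in the `events` module.
use serde_json::Value;

/// Extracts complete JSON lines from an accumulating PTY output buffer,
/// leaving any trailing partial line in place for the next read.
fn drain_events(buffer: &mut String) -> Vec<Value> {
    let mut events = Vec::new();
    while let Some(pos) = buffer.find('\n') {
        let line: String = buffer.drain(..=pos).collect();
        let line = line.trim();
        if line.is_empty() {
            continue;
        }
        // Claude Code emits one JSON object per line; skip lines that are not
        // valid JSON (e.g. stray terminal output mixed into the PTY stream).
        if let Ok(value) = serde_json::from_str::<Value>(line) {
            events.push(value);
        }
    }
    events
}
```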
@@ -1,4 +1,7 @@
//! LLM providers — module declarations for Anthropic, Claude Code, and Ollama backends.
/// Anthropic API provider — drives chat completions via the Anthropic Messages API.
pub mod anthropic;
/// Claude Code CLI provider — runs `claude` in a PTY and parses structured NDJSON output.
pub mod claude_code;
/// Ollama provider — streaming completion client for locally-hosted Ollama models.
pub mod ollama;
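With three backends declared side by side, a caller presumably picks one at runtime and talks to it through the shared `ModelProvider` trait from `types`. A possible shape, using only the constructors visible in this commit; the `Backend` enum and `make_provider` are hypothetical, and it is assumed (not shown here) that each provider implements `ModelProvider` and that the trait is object-safe.

```rust
// Hypothetical wiring sketch; not part of this commit.
use crate::llm::providers::{claude_code::ClaudeCodeProvider, ollama::OllamaProvider};
use crate::llm::types::ModelProvider;

/// Illustrative backend selector. The anthropic backend is omitted because
/// its constructor is not shown in this diff.
enum Backend {
    ClaudeCode,
    Ollama { base_url: String },
}

fn make_provider(backend: Backend) -> Box<dyn ModelProvider> {
    match backend {
        Backend::ClaudeCode => Box::new(ClaudeCodeProvider::new()),
        Backend::Ollama { base_url } => Box::new(OllamaProvider::new(base_url)),
    }
}
```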
@@ -7,11 +7,13 @@ use futures::StreamExt;
use serde::{Deserialize, Serialize};
use serde_json::Value;

/// Ollama HTTP/streaming client that connects to a local Ollama server.
pub struct OllamaProvider {
    base_url: String,
}

impl OllamaProvider {
    /// Creates a new `OllamaProvider` pointing at the given Ollama server base URL.
    pub fn new(base_url: String) -> Self {
        Self { base_url }
    }
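`OllamaProvider` only stores the server's base URL; the hunk cuts off before any request code. For orientation, here is a rough sketch of what a streaming chat call against a local Ollama server typically looks like (POST `/api/chat` with `stream: true`, newline-delimited JSON responses). The `stream_chat` function is a hypothetical standalone helper, not the provider's real method, and it assumes `reqwest` with the `json` and `stream` features alongside the `futures` import already shown above.

```rust
// Illustrative sketch only; not the provider's actual implementation.
use futures::StreamExt;
use serde_json::{json, Value};

async fn stream_chat(
    base_url: &str,
    model: &str,
    prompt: &str,
) -> Result<String, Box<dyn std::error::Error>> {
    let body = json!({
        "model": model,
        "messages": [{ "role": "user", "content": prompt }],
        "stream": true
    });

    let resp = reqwest::Client::new()
        .post(format!("{base_url}/api/chat"))
        .json(&body)
        .send()
        .await?;

    // Ollama streams newline-delimited JSON objects; accumulate the content deltas.
    // Simplification: a real client would buffer partial lines that straddle chunks.
    let mut full = String::new();
    let mut stream = resp.bytes_stream();
    while let Some(chunk) = stream.next().await {
        for line in String::from_utf8_lossy(&chunk?).lines() {
            if line.trim().is_empty() {
                continue;
            }
            let v: Value = serde_json::from_str(line)?;
            if let Some(delta) = v["message"]["content"].as_str() {
                full.push_str(delta);
            }
            if v["done"].as_bool().unwrap_or(false) {
                return Ok(full);
            }
        }
    }
    Ok(full)
}
```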
@@ -3,6 +3,7 @@ use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;

/// The role of a message participant in an LLM conversation.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
@@ -12,6 +13,7 @@ pub enum Role {
    Tool,
}

/// A single message in an LLM conversation, including optional tool call attachments.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Message {
    pub role: Role,
@@ -24,6 +26,7 @@ pub struct Message {
    pub tool_call_id: Option<String>,
}

/// A tool invocation requested by the LLM, containing the call ID and function details.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ToolCall {
    pub id: Option<String>,
@@ -32,12 +35,14 @@ pub struct ToolCall {
    pub kind: String,
}

/// The function name and JSON-encoded arguments for a tool call.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct FunctionCall {
    pub name: String,
    pub arguments: String,
}

/// A tool definition passed to the LLM describing an available function and its schema.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ToolDefinition {
    #[serde(rename = "type")]
@@ -45,6 +50,7 @@ pub struct ToolDefinition {
    pub function: ToolFunctionDefinition,
}

/// The name, description, and JSON schema for a single tool function.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ToolFunctionDefinition {
    pub name: String,
@@ -52,6 +58,7 @@ pub struct ToolFunctionDefinition {
    pub parameters: serde_json::Value,
}

/// The response from an LLM completion request, containing text and/or tool calls.
#[derive(Debug, Serialize, Deserialize)]
pub struct CompletionResponse {
    pub content: Option<String>,
@@ -61,6 +68,7 @@ pub struct CompletionResponse {
    pub session_id: Option<String>,
}

/// Trait for LLM backends; implementations drive chat completions with optional tool use.
#[async_trait]
#[allow(dead_code)]
pub trait ModelProvider: Send + Sync {
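`ToolDefinition` and `ToolFunctionDefinition` carry the tool schema handed to the model, and the visible serde attributes (`rename = "type"`, a nested `function`, a `parameters: serde_json::Value`) point at the familiar function-calling JSON shape. As a sketch of what such a definition could serialize to; the `description` key, the tool name, and the schema contents below are invented purely for illustration.

```rust
// Illustrative JSON shape only; field names beyond those visible in the diff
// ("type", "function", "name", "parameters") are assumptions.
use serde_json::{json, Value};

fn example_tool_definition() -> Value {
    json!({
        "type": "function",
        "function": {
            "name": "read_file",
            "description": "Read a file from the user's workspace (hypothetical tool).",
            // `parameters` holds a JSON Schema object, matching the
            // `parameters: serde_json::Value` field on ToolFunctionDefinition.
            "parameters": {
                "type": "object",
                "properties": {
                    "path": { "type": "string", "description": "File path to read" }
                },
                "required": ["path"]
            }
        }
    })
}
```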