feat: system prompt and persona
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
use crate::commands::{fs, search, shell};
|
||||
use crate::llm::ollama::OllamaProvider;
|
||||
use crate::llm::prompts::SYSTEM_PROMPT;
|
||||
use crate::llm::types::{
|
||||
Message, ModelProvider, Role, ToolCall, ToolDefinition, ToolFunctionDefinition,
|
||||
};
|
||||
@@ -51,6 +52,18 @@ pub async fn chat(
|
||||
|
||||
// 3. Agent Loop
|
||||
let mut current_history = messages.clone();
|
||||
|
||||
// Inject System Prompt
|
||||
current_history.insert(
|
||||
0,
|
||||
Message {
|
||||
role: Role::System,
|
||||
content: SYSTEM_PROMPT.to_string(),
|
||||
tool_calls: None,
|
||||
tool_call_id: None,
|
||||
},
|
||||
);
|
||||
|
||||
let mut new_messages: Vec<Message> = Vec::new();
|
||||
let mut turn_count = 0;
|
||||
|
||||
@@ -78,7 +91,8 @@ pub async fn chat(
|
||||
|
||||
current_history.push(assistant_msg.clone());
|
||||
new_messages.push(assistant_msg);
|
||||
app.emit("chat:update", &current_history)
|
||||
// Emit history excluding system prompt (index 0)
|
||||
app.emit("chat:update", &current_history[1..])
|
||||
.map_err(|e| e.to_string())?;
|
||||
|
||||
// Execute Tools
|
||||
@@ -96,7 +110,8 @@ pub async fn chat(
|
||||
|
||||
current_history.push(tool_msg.clone());
|
||||
new_messages.push(tool_msg);
|
||||
app.emit("chat:update", &current_history)
|
||||
// Emit history excluding system prompt (index 0)
|
||||
app.emit("chat:update", &current_history[1..])
|
||||
.map_err(|e| e.to_string())?;
|
||||
}
|
||||
} else {
|
||||
@@ -111,7 +126,8 @@ pub async fn chat(
|
||||
// current_history isn't needed for another loop iteration (we're done), but we push so the emit below includes this final message.
|
||||
new_messages.push(assistant_msg.clone());
|
||||
current_history.push(assistant_msg);
|
||||
app.emit("chat:update", &current_history)
|
||||
// Emit history excluding system prompt (index 0)
|
||||
app.emit("chat:update", &current_history[1..])
|
||||
.map_err(|e| e.to_string())?;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1,2 +1,3 @@
|
||||
pub mod ollama;
|
||||
pub mod prompts;
|
||||
pub mod types;
|
||||
|
||||
17
src-tauri/src/llm/prompts.rs
Normal file
17
src-tauri/src/llm/prompts.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
/// System prompt injected as the first message (index 0) of every chat
/// session before the agent loop runs.
///
/// Defines the assistant's persona (senior software engineer agent), its
/// tool capabilities (filesystem, shell, search), and its operational
/// rules (read-before-write, full-file overwrites, conciseness,
/// post-edit verification).
// NOTE: a raw string (r#"..."#) is used so the Markdown backticks and
// quotes inside the prompt need no escaping.
pub const SYSTEM_PROMPT: &str = r#"You are an expert Senior Software Engineer and AI Agent running directly in the user's local development environment.

Your Capabilities:
1. **Filesystem Access:** You can read, write, and list files in the current project using the provided tools.
2. **Shell Execution:** You can run commands like `git`, `cargo`, `npm`, `ls`, etc.
3. **Search:** You can search the codebase for patterns.

Your Operational Rules:
1. **Process Awareness:** You MUST read `.living_spec/README.md` to understand the development process (Story-Driven Spec Workflow).
2. **Read Before Write:** ALWAYS read the relevant files before you propose or apply changes. Do not guess the file content.
3. **Overwrite Warning:** The `write_file` tool OVERWRITES the entire file. When you edit a file, you must output the COMPLETE full content of the file, including all imports and unchanged parts. Do not output partial diffs or placeholders like `// ... rest of code`.
4. **Conciseness:** Be direct. Do not waffle. If you need to run a tool, just run it. You don't need to say "I will now run...".
5. **Verification:** After writing code, it is good practice to run a quick check (e.g., `cargo check` or `npm test`) if applicable to verify your changes.

Your Goal:
Complete the user's request accurately and safely. If the request is ambiguous, ask for clarification.
"#;
|
||||
Reference in New Issue
Block a user