feat(story-93): expose server logs to agents via get_server_logs MCP tool

- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts lines (default 100, max 1000) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Dave
2026-02-23 20:38:19 +00:00
parent 3d480e7c22
commit 8c6bd4cf74
10 changed files with 243 additions and 66 deletions

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::config::ProjectConfig; use crate::config::ProjectConfig;
use crate::worktree::{self, WorktreeInfo}; use crate::worktree::{self, WorktreeInfo};
use portable_pty::{CommandBuilder, PtySize, native_pty_system}; use portable_pty::{CommandBuilder, PtySize, native_pty_system};
@@ -357,7 +358,7 @@ impl AgentPool {
// Preserve worktree for inspection — don't destroy agent's work on stop. // Preserve worktree for inspection — don't destroy agent's work on stop.
if let Some(ref wt) = worktree_info { if let Some(ref wt) = worktree_info {
eprintln!( slog!(
"[agents] Worktree preserved for {story_id}:{agent_name}: {}", "[agents] Worktree preserved for {story_id}:{agent_name}: {}",
wt.path.display() wt.path.display()
); );
@@ -553,7 +554,7 @@ impl AgentPool {
let agents = match self.agents.lock() { let agents = match self.agents.lock() {
Ok(a) => a, Ok(a) => a,
Err(e) => { Err(e) => {
eprintln!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}"); slog!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}");
return; return;
} }
}; };
@@ -571,14 +572,14 @@ impl AgentPool {
let completion = match completion { let completion = match completion {
Some(c) => c, Some(c) => c,
None => { None => {
eprintln!("[pipeline] No completion report for '{story_id}:{agent_name}'"); slog!("[pipeline] No completion report for '{story_id}:{agent_name}'");
return; return;
} }
}; };
let project_root = match project_root { let project_root = match project_root {
Some(p) => p, Some(p) => p,
None => { None => {
eprintln!("[pipeline] No project_root for '{story_id}:{agent_name}'"); slog!("[pipeline] No project_root for '{story_id}:{agent_name}'");
return; return;
} }
}; };
@@ -591,23 +592,23 @@ impl AgentPool {
} }
PipelineStage::Coder => { PipelineStage::Coder => {
if completion.gates_passed { if completion.gates_passed {
eprintln!( slog!(
"[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA." "[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA."
); );
if let Err(e) = move_story_to_qa(&project_root, story_id) { if let Err(e) = move_story_to_qa(&project_root, story_id) {
eprintln!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}"); slog!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
return; return;
} }
if let Err(e) = self if let Err(e) = self
.start_agent(&project_root, story_id, Some("qa"), None) .start_agent(&project_root, story_id, Some("qa"), None)
.await .await
{ {
eprintln!("[pipeline] Failed to start qa agent for '{story_id}': {e}"); slog!("[pipeline] Failed to start qa agent for '{story_id}': {e}");
} }
// Coder slot is now free — pick up any other unassigned work in 2_current/. // Coder slot is now free — pick up any other unassigned work in 2_current/.
self.auto_assign_available_work(&project_root).await; self.auto_assign_available_work(&project_root).await;
} else { } else {
eprintln!( slog!(
"[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting." "[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
); );
let context = format!( let context = format!(
@@ -620,7 +621,7 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some(agent_name), Some(&context)) .start_agent(&project_root, story_id, Some(agent_name), Some(&context))
.await .await
{ {
eprintln!( slog!(
"[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}" "[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}"
); );
} }
@@ -635,7 +636,7 @@ impl AgentPool {
tokio::task::spawn_blocking(move || run_coverage_gate(&cp)) tokio::task::spawn_blocking(move || run_coverage_gate(&cp))
.await .await
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
eprintln!("[pipeline] Coverage gate task panicked: {e}"); slog!("[pipeline] Coverage gate task panicked: {e}");
Ok((false, format!("Coverage gate task panicked: {e}"))) Ok((false, format!("Coverage gate task panicked: {e}")))
}); });
let (coverage_passed, coverage_output) = match coverage_result { let (coverage_passed, coverage_output) = match coverage_result {
@@ -644,23 +645,23 @@ impl AgentPool {
}; };
if coverage_passed { if coverage_passed {
eprintln!( slog!(
"[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge." "[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge."
); );
if let Err(e) = move_story_to_merge(&project_root, story_id) { if let Err(e) = move_story_to_merge(&project_root, story_id) {
eprintln!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}"); slog!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}");
return; return;
} }
if let Err(e) = self if let Err(e) = self
.start_agent(&project_root, story_id, Some("mergemaster"), None) .start_agent(&project_root, story_id, Some("mergemaster"), None)
.await .await
{ {
eprintln!("[pipeline] Failed to start mergemaster for '{story_id}': {e}"); slog!("[pipeline] Failed to start mergemaster for '{story_id}': {e}");
} }
// QA slot is now free — pick up any other unassigned work in 3_qa/. // QA slot is now free — pick up any other unassigned work in 3_qa/.
self.auto_assign_available_work(&project_root).await; self.auto_assign_available_work(&project_root).await;
} else { } else {
eprintln!( slog!(
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA." "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
); );
let context = format!( let context = format!(
@@ -673,11 +674,11 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("qa"), Some(&context)) .start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await .await
{ {
eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}"); slog!("[pipeline] Failed to restart qa for '{story_id}': {e}");
} }
} }
} else { } else {
eprintln!( slog!(
"[pipeline] QA failed gates for '{story_id}'. Restarting." "[pipeline] QA failed gates for '{story_id}'. Restarting."
); );
let context = format!( let context = format!(
@@ -690,20 +691,20 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("qa"), Some(&context)) .start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await .await
{ {
eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}"); slog!("[pipeline] Failed to restart qa for '{story_id}': {e}");
} }
} }
} }
PipelineStage::Mergemaster => { PipelineStage::Mergemaster => {
// Run script/test on master (project_root) as the post-merge verification. // Run script/test on master (project_root) as the post-merge verification.
eprintln!( slog!(
"[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master." "[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master."
); );
let root = project_root.clone(); let root = project_root.clone();
let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root)) let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root))
.await .await
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
eprintln!("[pipeline] Post-merge test task panicked: {e}"); slog!("[pipeline] Post-merge test task panicked: {e}");
Ok((false, format!("Test task panicked: {e}"))) Ok((false, format!("Test task panicked: {e}")))
}); });
let (passed, output) = match test_result { let (passed, output) = match test_result {
@@ -712,11 +713,11 @@ impl AgentPool {
}; };
if passed { if passed {
eprintln!( slog!(
"[pipeline] Post-merge tests passed for '{story_id}'. Archiving." "[pipeline] Post-merge tests passed for '{story_id}'. Archiving."
); );
if let Err(e) = move_story_to_archived(&project_root, story_id) { if let Err(e) = move_story_to_archived(&project_root, story_id) {
eprintln!("[pipeline] Failed to archive '{story_id}': {e}"); slog!("[pipeline] Failed to archive '{story_id}': {e}");
} }
// Mergemaster slot is now free — pick up any other items in 4_merge/. // Mergemaster slot is now free — pick up any other items in 4_merge/.
self.auto_assign_available_work(&project_root).await; self.auto_assign_available_work(&project_root).await;
@@ -728,15 +729,15 @@ impl AgentPool {
// worktree::remove_worktree_by_story_id(&project_root, story_id, &config) // worktree::remove_worktree_by_story_id(&project_root, story_id, &config)
// .await // .await
// { // {
// eprintln!( // slog!(
// "[pipeline] Failed to remove worktree for '{story_id}': {e}" // "[pipeline] Failed to remove worktree for '{story_id}': {e}"
// ); // );
// } // }
eprintln!( slog!(
"[pipeline] Story '{story_id}' archived. Worktree preserved for inspection." "[pipeline] Story '{story_id}' archived. Worktree preserved for inspection."
); );
} else { } else {
eprintln!( slog!(
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster." "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
); );
let context = format!( let context = format!(
@@ -749,7 +750,7 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("mergemaster"), Some(&context)) .start_agent(&project_root, story_id, Some("mergemaster"), Some(&context))
.await .await
{ {
eprintln!( slog!(
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}" "[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
); );
} }
@@ -1037,7 +1038,7 @@ impl AgentPool {
let config = match ProjectConfig::load(project_root) { let config = match ProjectConfig::load(project_root) {
Ok(c) => c, Ok(c) => c,
Err(e) => { Err(e) => {
eprintln!("[auto-assign] Failed to load project config: {e}"); slog!("[auto-assign] Failed to load project config: {e}");
return; return;
} }
}; };
@@ -1062,7 +1063,7 @@ impl AgentPool {
let agents = match self.agents.lock() { let agents = match self.agents.lock() {
Ok(a) => a, Ok(a) => a,
Err(e) => { Err(e) => {
eprintln!("[auto-assign] Failed to lock agents: {e}"); slog!("[auto-assign] Failed to lock agents: {e}");
break; break;
} }
}; };
@@ -1083,21 +1084,21 @@ impl AgentPool {
match free_agent { match free_agent {
Some(agent_name) => { Some(agent_name) => {
eprintln!( slog!(
"[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/" "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
); );
if let Err(e) = self if let Err(e) = self
.start_agent(project_root, story_id, Some(&agent_name), None) .start_agent(project_root, story_id, Some(&agent_name), None)
.await .await
{ {
eprintln!( slog!(
"[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}" "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
); );
} }
} }
None => { None => {
// No free agents of this type — stop scanning this stage. // No free agents of this type — stop scanning this stage.
eprintln!( slog!(
"[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.", "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
stage stage
); );
@@ -1228,7 +1229,7 @@ async fn run_server_owned_completion(
}; };
match lock.get(&key) { match lock.get(&key) {
Some(agent) if agent.completion.is_some() => { Some(agent) if agent.completion.is_some() => {
eprintln!( slog!(
"[agents] Completion already recorded for '{story_id}:{agent_name}'; \ "[agents] Completion already recorded for '{story_id}:{agent_name}'; \
skipping server-owned gates." skipping server-owned gates."
); );
@@ -1269,7 +1270,7 @@ async fn run_server_owned_completion(
) )
}; };
eprintln!( slog!(
"[agents] Server-owned completion for '{story_id}:{agent_name}': gates_passed={gates_passed}" "[agents] Server-owned completion for '{story_id}:{agent_name}': gates_passed={gates_passed}"
); );
@@ -1386,7 +1387,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(),
let source_path = source_dir.join(format!("{story_id}.md")); let source_path = source_dir.join(format!("{story_id}.md"));
if !source_path.exists() { if !source_path.exists() {
eprintln!( slog!(
"[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/", "[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/",
source_dir.display() source_dir.display()
); );
@@ -1399,7 +1400,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(),
std::fs::rename(&source_path, &current_path) std::fs::rename(&source_path, &current_path)
.map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?; .map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?;
eprintln!( slog!(
"[lifecycle] Moved '{story_id}' from {} to work/2_current/", "[lifecycle] Moved '{story_id}' from {} to work/2_current/",
source_dir.display() source_dir.display()
); );
@@ -1446,7 +1447,7 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(),
} else { } else {
"work/4_merge/" "work/4_merge/"
}; };
eprintln!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/"); slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/");
Ok(()) Ok(())
} }
@@ -1488,7 +1489,7 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St
} else { } else {
"work/3_qa/" "work/3_qa/"
}; };
eprintln!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
Ok(()) Ok(())
} }
@@ -1519,7 +1520,7 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
std::fs::rename(&current_path, &qa_path) std::fs::rename(&current_path, &qa_path)
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
eprintln!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
Ok(()) Ok(())
} }
@@ -1556,7 +1557,7 @@ pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), Str
std::fs::rename(&source_path, &archive_path) std::fs::rename(&source_path, &archive_path)
.map_err(|e| format!("Failed to move bug '{bug_id}' to 5_archived/: {e}"))?; .map_err(|e| format!("Failed to move bug '{bug_id}' to 5_archived/: {e}"))?;
eprintln!( slog!(
"[lifecycle] Closed bug '{bug_id}' → work/5_archived/" "[lifecycle] Closed bug '{bug_id}' → work/5_archived/"
); );
@@ -1956,7 +1957,7 @@ fn run_agent_pty_blocking(
cmd.env_remove("CLAUDECODE"); cmd.env_remove("CLAUDECODE");
cmd.env_remove("CLAUDE_CODE_ENTRYPOINT"); cmd.env_remove("CLAUDE_CODE_ENTRYPOINT");
eprintln!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}"); slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}");
let mut child = pair let mut child = pair
.slave .slave
@@ -2050,7 +2051,7 @@ fn run_agent_pty_blocking(
let _ = child.kill(); let _ = child.kill();
let _ = child.wait(); let _ = child.wait();
eprintln!( slog!(
"[agent:{story_id}:{agent_name}] Done. Session: {:?}", "[agent:{story_id}:{agent_name}] Done. Session: {:?}",
session_id session_id
); );

View File

@@ -1,3 +1,4 @@
use crate::slog;
use serde::Deserialize; use serde::Deserialize;
use std::collections::HashSet; use std::collections::HashSet;
use std::path::Path; use std::path::Path;
@@ -123,7 +124,7 @@ impl ProjectConfig {
// Try legacy format. // Try legacy format.
if let Ok(legacy) = toml::from_str::<LegacyProjectConfig>(content) if let Ok(legacy) = toml::from_str::<LegacyProjectConfig>(content)
&& let Some(agent) = legacy.agent { && let Some(agent) = legacy.agent {
eprintln!( slog!(
"[config] Warning: [agent] table is deprecated. \ "[config] Warning: [agent] table is deprecated. \
Use [[agent]] array format instead." Use [[agent]] array format instead."
); );
@@ -142,7 +143,7 @@ impl ProjectConfig {
let legacy: LegacyProjectConfig = let legacy: LegacyProjectConfig =
toml::from_str(content).map_err(|e| format!("Parse config: {e}"))?; toml::from_str(content).map_err(|e| format!("Parse config: {e}"))?;
if let Some(agent) = legacy.agent { if let Some(agent) = legacy.agent {
eprintln!( slog!(
"[config] Warning: [agent] table is deprecated. \ "[config] Warning: [agent] table is deprecated. \
Use [[agent]] array format instead." Use [[agent]] array format instead."
); );

View File

@@ -1,5 +1,6 @@
use crate::agents::{close_bug_to_archive, move_story_to_archived, move_story_to_merge, move_story_to_qa}; use crate::agents::{close_bug_to_archive, move_story_to_archived, move_story_to_merge, move_story_to_qa};
use crate::config::ProjectConfig; use crate::config::ProjectConfig;
use crate::log_buffer;
use crate::http::context::AppContext; use crate::http::context::AppContext;
use crate::http::settings::get_editor_command_from_store; use crate::http::settings::get_editor_command_from_store;
use crate::http::workflow::{ use crate::http::workflow::{
@@ -742,6 +743,23 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
}, },
"required": ["story_id"] "required": ["story_id"]
} }
},
{
"name": "get_server_logs",
"description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.",
"inputSchema": {
"type": "object",
"properties": {
"lines": {
"type": "integer",
"description": "Number of recent lines to return (default 100, max 1000)"
},
"filter": {
"type": "string",
"description": "Optional substring filter (e.g. 'watcher', 'mcp', 'permission')"
}
}
}
} }
] ]
}), }),
@@ -798,6 +816,8 @@ async fn handle_tools_call(
"move_story_to_merge" => tool_move_story_to_merge(&args, ctx).await, "move_story_to_merge" => tool_move_story_to_merge(&args, ctx).await,
// QA tools // QA tools
"request_qa" => tool_request_qa(&args, ctx).await, "request_qa" => tool_request_qa(&args, ctx).await,
// Diagnostics
"get_server_logs" => tool_get_server_logs(&args),
_ => Err(format!("Unknown tool: {tool_name}")), _ => Err(format!("Unknown tool: {tool_name}")),
}; };
@@ -1445,6 +1465,18 @@ fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResult>, String
.collect() .collect()
} }
fn tool_get_server_logs(args: &Value) -> Result<String, String> {
let lines = args
.get("lines")
.and_then(|v| v.as_u64())
.map(|n| n.min(1000) as usize)
.unwrap_or(100);
let filter = args.get("filter").and_then(|v| v.as_str());
let recent = log_buffer::global().get_recent(lines, filter);
Ok(recent.join("\n"))
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -1541,7 +1573,8 @@ mod tests {
assert!(names.contains(&"merge_agent_work")); assert!(names.contains(&"merge_agent_work"));
assert!(names.contains(&"move_story_to_merge")); assert!(names.contains(&"move_story_to_merge"));
assert!(names.contains(&"request_qa")); assert!(names.contains(&"request_qa"));
assert_eq!(tools.len(), 26); assert!(names.contains(&"get_server_logs"));
assert_eq!(tools.len(), 27);
} }
#[test] #[test]

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::state::SessionState; use crate::state::SessionState;
use ignore::WalkBuilder; use ignore::WalkBuilder;
use serde::Serialize; use serde::Serialize;
@@ -52,7 +53,7 @@ pub async fn search_files_impl(query: String, root: PathBuf) -> Result<Vec<Searc
}); });
} }
} }
Err(err) => eprintln!("Error walking dir: {}", err), Err(err) => slog!("Error walking dir: {}", err),
} }
} }

View File

@@ -15,6 +15,7 @@
//! via exit-code inspection and silently skips the commit while still broadcasting //! via exit-code inspection and silently skips the commit while still broadcasting
//! the event so connected clients stay in sync. //! the event so connected clients stay in sync.
use crate::slog;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher, recommended_watcher}; use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher, recommended_watcher};
use serde::Serialize; use serde::Serialize;
use std::collections::HashMap; use std::collections::HashMap;
@@ -151,13 +152,13 @@ fn flush_pending(
("remove", item.to_string(), format!("story-kit: remove {item}")) ("remove", item.to_string(), format!("story-kit: remove {item}"))
}; };
eprintln!("[watcher] flush: {commit_msg}"); slog!("[watcher] flush: {commit_msg}");
match git_add_work_and_commit(git_root, &commit_msg) { match git_add_work_and_commit(git_root, &commit_msg) {
Ok(committed) => { Ok(committed) => {
if committed { if committed {
eprintln!("[watcher] committed: {commit_msg}"); slog!("[watcher] committed: {commit_msg}");
} else { } else {
eprintln!("[watcher] skipped (already committed): {commit_msg}"); slog!("[watcher] skipped (already committed): {commit_msg}");
} }
let stage = additions.first().map_or("unknown", |(_, s)| s); let stage = additions.first().map_or("unknown", |(_, s)| s);
let evt = WatcherEvent { let evt = WatcherEvent {
@@ -169,7 +170,7 @@ fn flush_pending(
let _ = event_tx.send(evt); let _ = event_tx.send(evt);
} }
Err(e) => { Err(e) => {
eprintln!("[watcher] git error: {e}"); slog!("[watcher] git error: {e}");
} }
} }
} }
@@ -192,17 +193,17 @@ pub fn start_watcher(
}) { }) {
Ok(w) => w, Ok(w) => w,
Err(e) => { Err(e) => {
eprintln!("[watcher] failed to create watcher: {e}"); slog!("[watcher] failed to create watcher: {e}");
return; return;
} }
}; };
if let Err(e) = watcher.watch(&work_dir, RecursiveMode::Recursive) { if let Err(e) = watcher.watch(&work_dir, RecursiveMode::Recursive) {
eprintln!("[watcher] failed to watch {}: {e}", work_dir.display()); slog!("[watcher] failed to watch {}: {e}", work_dir.display());
return; return;
} }
eprintln!("[watcher] watching {}", work_dir.display()); slog!("[watcher] watching {}", work_dir.display());
const DEBOUNCE: Duration = Duration::from_millis(300); const DEBOUNCE: Duration = Duration::from_millis(300);
@@ -237,13 +238,13 @@ pub fn start_watcher(
false false
} }
Ok(Err(e)) => { Ok(Err(e)) => {
eprintln!("[watcher] notify error: {e}"); slog!("[watcher] notify error: {e}");
false false
} }
// Debounce window expired — time to flush. // Debounce window expired — time to flush.
Err(mpsc::RecvTimeoutError::Timeout) => true, Err(mpsc::RecvTimeoutError::Timeout) => true,
Err(mpsc::RecvTimeoutError::Disconnected) => { Err(mpsc::RecvTimeoutError::Disconnected) => {
eprintln!("[watcher] channel disconnected, shutting down"); slog!("[watcher] channel disconnected, shutting down");
break; break;
} }
}; };

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::llm::prompts::SYSTEM_PROMPT; use crate::llm::prompts::SYSTEM_PROMPT;
use crate::llm::providers::claude_code::ClaudeCodeResult; use crate::llm::providers::claude_code::ClaudeCodeResult;
use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition}; use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition};
@@ -208,7 +209,7 @@ where
.clone() .clone()
.unwrap_or_else(|| "http://localhost:11434".to_string()); .unwrap_or_else(|| "http://localhost:11434".to_string());
eprintln!("[chat] provider={} model={}", config.provider, config.model); slog!("[chat] provider={} model={}", config.provider, config.model);
let is_claude_code = config.provider == "claude-code"; let is_claude_code = config.provider == "claude-code";
let is_claude = !is_claude_code && config.model.starts_with("claude-"); let is_claude = !is_claude_code && config.model.starts_with("claude-");

View File

@@ -1,3 +1,4 @@
use crate::slog;
use portable_pty::{CommandBuilder, PtySize, native_pty_system}; use portable_pty::{CommandBuilder, PtySize, native_pty_system};
use std::io::{BufRead, BufReader, Write}; use std::io::{BufRead, BufReader, Write};
use std::sync::Arc; use std::sync::Arc;
@@ -152,7 +153,7 @@ fn run_pty_session(
// Allow nested spawning when the server itself runs inside Claude Code // Allow nested spawning when the server itself runs inside Claude Code
cmd.env("CLAUDECODE", ""); cmd.env("CLAUDECODE", "");
eprintln!( slog!(
"[pty-debug] Spawning: claude -p \"{}\" {} --output-format stream-json --verbose", "[pty-debug] Spawning: claude -p \"{}\" {} --output-format stream-json --verbose",
user_message, user_message,
resume_session_id resume_session_id
@@ -165,7 +166,7 @@ fn run_pty_session(
.spawn_command(cmd) .spawn_command(cmd)
.map_err(|e| format!("Failed to spawn claude: {e}"))?; .map_err(|e| format!("Failed to spawn claude: {e}"))?;
eprintln!( slog!(
"[pty-debug] Process spawned, pid: {:?}", "[pty-debug] Process spawned, pid: {:?}",
child.process_id() child.process_id()
); );
@@ -187,23 +188,23 @@ fn run_pty_session(
std::thread::spawn(move || { std::thread::spawn(move || {
let buf_reader = BufReader::new(reader); let buf_reader = BufReader::new(reader);
eprintln!("[pty-debug] Reader thread started"); slog!("[pty-debug] Reader thread started");
for line in buf_reader.lines() { for line in buf_reader.lines() {
match line { match line {
Ok(l) => { Ok(l) => {
eprintln!("[pty-debug] raw line: {}", l); slog!("[pty-debug] raw line: {}", l);
if line_tx.send(Some(l)).is_err() { if line_tx.send(Some(l)).is_err() {
break; break;
} }
} }
Err(e) => { Err(e) => {
eprintln!("[pty-debug] read error: {e}"); slog!("[pty-debug] read error: {e}");
let _ = line_tx.send(None); let _ = line_tx.send(None);
break; break;
} }
} }
} }
eprintln!("[pty-debug] Reader thread done"); slog!("[pty-debug] Reader thread done");
let _ = line_tx.send(None); let _ = line_tx.send(None);
}); });
@@ -223,7 +224,7 @@ fn run_pty_session(
continue; continue;
} }
eprintln!( slog!(
"[pty-debug] processing: {}...", "[pty-debug] processing: {}...",
&trimmed[..trimmed.len().min(120)] &trimmed[..trimmed.len().min(120)]
); );

136
server/src/log_buffer.rs Normal file
View File

@@ -0,0 +1,136 @@
//! Bounded in-memory ring buffer for server log output.
//!
//! Use the [`slog!`] macro as a drop-in replacement for `eprintln!`. It writes
//! to stderr (same as before) and simultaneously appends the line to the global
//! ring buffer, making it retrievable via the `get_server_logs` MCP tool.
use std::collections::VecDeque;
use std::sync::{Mutex, OnceLock};
/// Maximum number of log lines retained; pushing beyond this evicts the oldest.
const CAPACITY: usize = 1000;

/// Thread-safe bounded ring buffer of log lines.
///
/// Interior mutability via a `Mutex` lets the process-wide instance be shared
/// behind a `&'static` reference (see `global()`), so any thread may log.
pub struct LogBuffer {
    // Oldest line at the front, newest at the back.
    lines: Mutex<VecDeque<String>>,
}

impl LogBuffer {
    /// Create an empty buffer with storage pre-allocated to `CAPACITY`.
    fn new() -> Self {
        Self {
            lines: Mutex::new(VecDeque::with_capacity(CAPACITY)),
        }
    }

    /// Append a log line, evicting the oldest entry when at capacity.
    ///
    /// If the mutex is poisoned (another thread panicked while holding it),
    /// the line is silently dropped — logging must never propagate a panic.
    pub fn push(&self, line: String) {
        if let Ok(mut buf) = self.lines.lock() {
            if buf.len() >= CAPACITY {
                buf.pop_front();
            }
            buf.push_back(line);
        }
    }

    /// Return up to `count` recent lines, optionally filtered by a substring.
    /// Lines are returned in chronological order (oldest first).
    ///
    /// Returns an empty `Vec` when the mutex is poisoned. Scans newest-first
    /// and stops after `count` matches, so only the lines actually returned
    /// are cloned (the previous approach collected every match first).
    pub fn get_recent(&self, count: usize, filter: Option<&str>) -> Vec<String> {
        let Ok(buf) = self.lines.lock() else {
            return Vec::new();
        };
        let mut recent: Vec<String> = buf
            .iter()
            .rev()
            .filter(|line| filter.is_none_or(|f| line.contains(f)))
            .take(count)
            .cloned()
            .collect();
        // Restore chronological (oldest-first) order.
        recent.reverse();
        recent
    }
}
// Lazily-initialised singleton; OnceLock makes first-use init thread-safe.
static GLOBAL: OnceLock<LogBuffer> = OnceLock::new();

/// Access the process-wide log ring buffer.
///
/// Initialised on first call; every subsequent call returns the same
/// instance, so all `slog!` call sites share one bounded buffer.
pub fn global() -> &'static LogBuffer {
    GLOBAL.get_or_init(LogBuffer::new)
}
/// Write a formatted message to stderr **and** capture it in the ring buffer.
///
/// Drop-in replacement for `eprintln!`: same arguments, same stderr output,
/// with the added side effect that the formatted line is appended to the
/// global ring buffer, making it retrievable via the `get_server_logs` MCP
/// tool.
///
/// Usage is identical to `eprintln!`:
/// ```ignore
/// slog!("agent {} started", name);
/// ```
#[macro_export]
macro_rules! slog {
    ($($arg:tt)*) => {{
        // Format once, then fan the same line out to stderr and the buffer.
        let _line = format!($($arg)*);
        eprintln!("{}", _line);
        $crate::log_buffer::global().push(_line);
    }};
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Build an isolated buffer for a single test (avoids the global one).
    fn fresh_buffer() -> LogBuffer {
        LogBuffer::new()
    }

    #[test]
    fn push_and_retrieve() {
        let buf = fresh_buffer();
        buf.push("line one".into());
        buf.push("line two".into());
        let recent = buf.get_recent(10, None);
        assert_eq!(recent, vec!["line one", "line two"]);
    }

    #[test]
    fn evicts_oldest_at_capacity() {
        // Use the shared helper instead of duplicating LogBuffer::new()'s
        // internals via a struct literal.
        let buf = fresh_buffer();
        // Fill past capacity
        for i in 0..=CAPACITY {
            buf.push(format!("line {i}"));
        }
        let recent = buf.get_recent(CAPACITY + 1, None);
        // Should have exactly CAPACITY lines
        assert_eq!(recent.len(), CAPACITY);
        // The oldest (line 0) should have been evicted
        assert!(!recent.iter().any(|l| l == "line 0"));
        // The newest should be present
        assert!(recent.iter().any(|l| l == &format!("line {CAPACITY}")));
    }

    #[test]
    fn filter_by_substring() {
        let buf = fresh_buffer();
        buf.push("watcher started".into());
        buf.push("mcp call received".into());
        buf.push("watcher event".into());
        let filtered = buf.get_recent(100, Some("watcher"));
        assert_eq!(filtered.len(), 2);
        assert_eq!(filtered[0], "watcher started");
        assert_eq!(filtered[1], "watcher event");
    }

    #[test]
    fn count_limits_results() {
        let buf = fresh_buffer();
        for i in 0..10 {
            buf.push(format!("line {i}"));
        }
        let recent = buf.get_recent(3, None);
        assert_eq!(recent.len(), 3);
        // Most recent 3
        assert_eq!(recent, vec!["line 7", "line 8", "line 9"]);
    }

    #[test]
    fn empty_buffer_returns_empty() {
        let buf = fresh_buffer();
        assert!(buf.get_recent(10, None).is_empty());
    }
}

View File

@@ -3,6 +3,7 @@ mod config;
mod http; mod http;
mod io; mod io;
mod llm; mod llm;
pub mod log_buffer;
mod state; mod state;
mod store; mod store;
mod workflow; mod workflow;
@@ -39,7 +40,7 @@ async fn main() -> Result<(), std::io::Error> {
) )
.await .await
.unwrap_or_else(|e| { .unwrap_or_else(|e| {
eprintln!("Warning: failed to auto-open project at {project_root:?}: {e}"); slog!("Warning: failed to auto-open project at {project_root:?}: {e}");
project_root.to_string_lossy().to_string() project_root.to_string_lossy().to_string()
}); });
@@ -82,7 +83,7 @@ async fn main() -> Result<(), std::io::Error> {
// active pipeline stages (2_current/, 3_qa/, 4_merge/). // active pipeline stages (2_current/, 3_qa/, 4_merge/).
if let Some(root) = startup_root { if let Some(root) = startup_root {
tokio::spawn(async move { tokio::spawn(async move {
eprintln!("[auto-assign] Server startup: scanning pipeline stages for unassigned work."); slog!("[auto-assign] Server startup: scanning pipeline stages for unassigned work.");
startup_agents.auto_assign_available_work(&root).await; startup_agents.auto_assign_available_work(&root).await;
}); });
} }

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::config::ProjectConfig; use crate::config::ProjectConfig;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
@@ -245,7 +246,7 @@ fn remove_worktree_sync(
if !output.status.success() { if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr); let stderr = String::from_utf8_lossy(&output.stderr);
eprintln!("[worktree] remove warning: {stderr}"); slog!("[worktree] remove warning: {stderr}");
} }
// Delete branch (best effort) // Delete branch (best effort)
@@ -273,7 +274,7 @@ async fn run_teardown_commands(wt_path: &Path, config: &ProjectConfig) -> Result
for cmd in &component.teardown { for cmd in &component.teardown {
// Best effort — don't fail teardown // Best effort — don't fail teardown
if let Err(e) = run_shell_command(cmd, &cmd_dir).await { if let Err(e) = run_shell_command(cmd, &cmd_dir).await {
eprintln!("[worktree] teardown warning for {}: {e}", component.name); slog!("[worktree] teardown warning for {}: {e}", component.name);
} }
} }
} }
@@ -285,7 +286,7 @@ async fn run_shell_command(cmd: &str, cwd: &Path) -> Result<(), String> {
let cwd = cwd.to_path_buf(); let cwd = cwd.to_path_buf();
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
eprintln!("[worktree] Running: {cmd} in {}", cwd.display()); slog!("[worktree] Running: {cmd} in {}", cwd.display());
let output = Command::new("sh") let output = Command::new("sh")
.args(["-c", &cmd]) .args(["-c", &cmd])
.current_dir(&cwd) .current_dir(&cwd)