From 8c6bd4cf74ff41395c92a91decedc393a1142ce9 Mon Sep 17 00:00:00 2001 From: Dave Date: Mon, 23 Feb 2026 20:38:19 +0000 Subject: [PATCH] feat(story-93): expose server logs to agents via get_server_logs MCP tool - Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API - Add slog! macro: drop-in for eprintln! that also captures to ring buffer - Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog! - Add get_server_logs MCP tool: accepts count (1-500) and optional filter params - 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer - 262 tests passing, clippy clean Co-Authored-By: Claude Sonnet 4.6 --- server/src/agents.rs | 81 +++++++------- server/src/config.rs | 5 +- server/src/http/mcp.rs | 35 +++++- server/src/io/search.rs | 3 +- server/src/io/watcher.rs | 19 ++-- server/src/llm/chat.rs | 3 +- server/src/llm/providers/claude_code.rs | 15 +-- server/src/log_buffer.rs | 136 ++++++++++++++++++++++++ server/src/main.rs | 5 +- server/src/worktree.rs | 7 +- 10 files changed, 243 insertions(+), 66 deletions(-) create mode 100644 server/src/log_buffer.rs diff --git a/server/src/agents.rs b/server/src/agents.rs index fe0580f..1a331a2 100644 --- a/server/src/agents.rs +++ b/server/src/agents.rs @@ -1,3 +1,4 @@ +use crate::slog; use crate::config::ProjectConfig; use crate::worktree::{self, WorktreeInfo}; use portable_pty::{CommandBuilder, PtySize, native_pty_system}; @@ -357,7 +358,7 @@ impl AgentPool { // Preserve worktree for inspection — don't destroy agent's work on stop. 
if let Some(ref wt) = worktree_info { - eprintln!( + slog!( "[agents] Worktree preserved for {story_id}:{agent_name}: {}", wt.path.display() ); @@ -553,7 +554,7 @@ impl AgentPool { let agents = match self.agents.lock() { Ok(a) => a, Err(e) => { - eprintln!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}"); + slog!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}"); return; } }; @@ -571,14 +572,14 @@ impl AgentPool { let completion = match completion { Some(c) => c, None => { - eprintln!("[pipeline] No completion report for '{story_id}:{agent_name}'"); + slog!("[pipeline] No completion report for '{story_id}:{agent_name}'"); return; } }; let project_root = match project_root { Some(p) => p, None => { - eprintln!("[pipeline] No project_root for '{story_id}:{agent_name}'"); + slog!("[pipeline] No project_root for '{story_id}:{agent_name}'"); return; } }; @@ -591,23 +592,23 @@ impl AgentPool { } PipelineStage::Coder => { if completion.gates_passed { - eprintln!( + slog!( "[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA." ); if let Err(e) = move_story_to_qa(&project_root, story_id) { - eprintln!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}"); + slog!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}"); return; } if let Err(e) = self .start_agent(&project_root, story_id, Some("qa"), None) .await { - eprintln!("[pipeline] Failed to start qa agent for '{story_id}': {e}"); + slog!("[pipeline] Failed to start qa agent for '{story_id}': {e}"); } // Coder slot is now free — pick up any other unassigned work in 2_current/. self.auto_assign_available_work(&project_root).await; } else { - eprintln!( + slog!( "[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting." 
); let context = format!( @@ -620,7 +621,7 @@ impl AgentPool { .start_agent(&project_root, story_id, Some(agent_name), Some(&context)) .await { - eprintln!( + slog!( "[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}" ); } @@ -635,7 +636,7 @@ impl AgentPool { tokio::task::spawn_blocking(move || run_coverage_gate(&cp)) .await .unwrap_or_else(|e| { - eprintln!("[pipeline] Coverage gate task panicked: {e}"); + slog!("[pipeline] Coverage gate task panicked: {e}"); Ok((false, format!("Coverage gate task panicked: {e}"))) }); let (coverage_passed, coverage_output) = match coverage_result { @@ -644,23 +645,23 @@ impl AgentPool { }; if coverage_passed { - eprintln!( + slog!( "[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge." ); if let Err(e) = move_story_to_merge(&project_root, story_id) { - eprintln!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}"); + slog!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}"); return; } if let Err(e) = self .start_agent(&project_root, story_id, Some("mergemaster"), None) .await { - eprintln!("[pipeline] Failed to start mergemaster for '{story_id}': {e}"); + slog!("[pipeline] Failed to start mergemaster for '{story_id}': {e}"); } // QA slot is now free — pick up any other unassigned work in 3_qa/. self.auto_assign_available_work(&project_root).await; } else { - eprintln!( + slog!( "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA." ); let context = format!( @@ -673,11 +674,11 @@ impl AgentPool { .start_agent(&project_root, story_id, Some("qa"), Some(&context)) .await { - eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}"); + slog!("[pipeline] Failed to restart qa for '{story_id}': {e}"); } } } else { - eprintln!( + slog!( "[pipeline] QA failed gates for '{story_id}'. Restarting." 
); let context = format!( @@ -690,20 +691,20 @@ impl AgentPool { .start_agent(&project_root, story_id, Some("qa"), Some(&context)) .await { - eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}"); + slog!("[pipeline] Failed to restart qa for '{story_id}': {e}"); } } } PipelineStage::Mergemaster => { // Run script/test on master (project_root) as the post-merge verification. - eprintln!( + slog!( "[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master." ); let root = project_root.clone(); let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root)) .await .unwrap_or_else(|e| { - eprintln!("[pipeline] Post-merge test task panicked: {e}"); + slog!("[pipeline] Post-merge test task panicked: {e}"); Ok((false, format!("Test task panicked: {e}"))) }); let (passed, output) = match test_result { @@ -712,11 +713,11 @@ impl AgentPool { }; if passed { - eprintln!( + slog!( "[pipeline] Post-merge tests passed for '{story_id}'. Archiving." ); if let Err(e) = move_story_to_archived(&project_root, story_id) { - eprintln!("[pipeline] Failed to archive '{story_id}': {e}"); + slog!("[pipeline] Failed to archive '{story_id}': {e}"); } // Mergemaster slot is now free — pick up any other items in 4_merge/. self.auto_assign_available_work(&project_root).await; @@ -728,15 +729,15 @@ impl AgentPool { // worktree::remove_worktree_by_story_id(&project_root, story_id, &config) // .await // { - // eprintln!( + // slog!( // "[pipeline] Failed to remove worktree for '{story_id}': {e}" // ); // } - eprintln!( + slog!( "[pipeline] Story '{story_id}' archived. Worktree preserved for inspection." ); } else { - eprintln!( + slog!( "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster." 
); let context = format!( @@ -749,7 +750,7 @@ impl AgentPool { .start_agent(&project_root, story_id, Some("mergemaster"), Some(&context)) .await { - eprintln!( + slog!( "[pipeline] Failed to restart mergemaster for '{story_id}': {e}" ); } @@ -1037,7 +1038,7 @@ impl AgentPool { let config = match ProjectConfig::load(project_root) { Ok(c) => c, Err(e) => { - eprintln!("[auto-assign] Failed to load project config: {e}"); + slog!("[auto-assign] Failed to load project config: {e}"); return; } }; @@ -1062,7 +1063,7 @@ impl AgentPool { let agents = match self.agents.lock() { Ok(a) => a, Err(e) => { - eprintln!("[auto-assign] Failed to lock agents: {e}"); + slog!("[auto-assign] Failed to lock agents: {e}"); break; } }; @@ -1083,21 +1084,21 @@ impl AgentPool { match free_agent { Some(agent_name) => { - eprintln!( + slog!( "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/" ); if let Err(e) = self .start_agent(project_root, story_id, Some(&agent_name), None) .await { - eprintln!( + slog!( "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}" ); } } None => { // No free agents of this type — stop scanning this stage. - eprintln!( + slog!( "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.", stage ); @@ -1228,7 +1229,7 @@ async fn run_server_owned_completion( }; match lock.get(&key) { Some(agent) if agent.completion.is_some() => { - eprintln!( + slog!( "[agents] Completion already recorded for '{story_id}:{agent_name}'; \ skipping server-owned gates." 
); @@ -1269,7 +1270,7 @@ async fn run_server_owned_completion( ) }; - eprintln!( + slog!( "[agents] Server-owned completion for '{story_id}:{agent_name}': gates_passed={gates_passed}" ); @@ -1386,7 +1387,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), let source_path = source_dir.join(format!("{story_id}.md")); if !source_path.exists() { - eprintln!( + slog!( "[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/", source_dir.display() ); @@ -1399,7 +1400,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), std::fs::rename(&source_path, ¤t_path) .map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?; - eprintln!( + slog!( "[lifecycle] Moved '{story_id}' from {} to work/2_current/", source_dir.display() ); @@ -1446,7 +1447,7 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), } else { "work/4_merge/" }; - eprintln!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/"); + slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/"); Ok(()) } @@ -1488,7 +1489,7 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St } else { "work/3_qa/" }; - eprintln!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); + slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); Ok(()) } @@ -1519,7 +1520,7 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin std::fs::rename(¤t_path, &qa_path) .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; - eprintln!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); + slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); Ok(()) } @@ -1556,7 +1557,7 @@ pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), Str std::fs::rename(&source_path, &archive_path) .map_err(|e| format!("Failed to move bug 
'{bug_id}' to 5_archived/: {e}"))?; - eprintln!( + slog!( "[lifecycle] Closed bug '{bug_id}' → work/5_archived/" ); @@ -1956,7 +1957,7 @@ fn run_agent_pty_blocking( cmd.env_remove("CLAUDECODE"); cmd.env_remove("CLAUDE_CODE_ENTRYPOINT"); - eprintln!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}"); + slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}"); let mut child = pair .slave @@ -2050,7 +2051,7 @@ fn run_agent_pty_blocking( let _ = child.kill(); let _ = child.wait(); - eprintln!( + slog!( "[agent:{story_id}:{agent_name}] Done. Session: {:?}", session_id ); diff --git a/server/src/config.rs b/server/src/config.rs index 382f2e4..b96c291 100644 --- a/server/src/config.rs +++ b/server/src/config.rs @@ -1,3 +1,4 @@ +use crate::slog; use serde::Deserialize; use std::collections::HashSet; use std::path::Path; @@ -123,7 +124,7 @@ impl ProjectConfig { // Try legacy format. if let Ok(legacy) = toml::from_str::<LegacyProjectConfig>(content) && let Some(agent) = legacy.agent { - eprintln!( + slog!( "[config] Warning: [agent] table is deprecated. \ Use [[agent]] array format instead." ); @@ -142,7 +143,7 @@ impl ProjectConfig { let legacy: LegacyProjectConfig = toml::from_str(content).map_err(|e| format!("Parse config: {e}"))?; if let Some(agent) = legacy.agent { - eprintln!( + slog!( "[config] Warning: [agent] table is deprecated. \ Use [[agent]] array format instead."
); diff --git a/server/src/http/mcp.rs b/server/src/http/mcp.rs index 9c0649b..961827a 100644 --- a/server/src/http/mcp.rs +++ b/server/src/http/mcp.rs @@ -1,5 +1,6 @@ use crate::agents::{close_bug_to_archive, move_story_to_archived, move_story_to_merge, move_story_to_qa}; use crate::config::ProjectConfig; +use crate::log_buffer; use crate::http::context::AppContext; use crate::http::settings::get_editor_command_from_store; use crate::http::workflow::{ @@ -742,6 +743,23 @@ fn handle_tools_list(id: Option) -> JsonRpcResponse { }, "required": ["story_id"] } + }, + { + "name": "get_server_logs", + "description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.", + "inputSchema": { + "type": "object", + "properties": { + "lines": { + "type": "integer", + "description": "Number of recent lines to return (default 100, max 1000)" + }, + "filter": { + "type": "string", + "description": "Optional substring filter (e.g. 
'watcher', 'mcp', 'permission')" } } } } ] }), @@ -798,6 +816,8 @@ async fn handle_tools_call( "move_story_to_merge" => tool_move_story_to_merge(&args, ctx).await, // QA tools "request_qa" => tool_request_qa(&args, ctx).await, + // Diagnostics + "get_server_logs" => tool_get_server_logs(&args), _ => Err(format!("Unknown tool: {tool_name}")), }; @@ -1445,6 +1465,18 @@ fn parse_test_cases(value: Option<&Value>) -> Result, String .collect() } +fn tool_get_server_logs(args: &Value) -> Result<String, String> { + let lines = args + .get("lines") + .and_then(|v| v.as_u64()) + .map(|n| n.min(1000) as usize) + .unwrap_or(100); + let filter = args.get("filter").and_then(|v| v.as_str()); + + let recent = log_buffer::global().get_recent(lines, filter); + Ok(recent.join("\n")) +} + #[cfg(test)] mod tests { use super::*; @@ -1541,7 +1573,8 @@ mod tests { assert!(names.contains(&"merge_agent_work")); assert!(names.contains(&"move_story_to_merge")); assert!(names.contains(&"request_qa")); - assert_eq!(tools.len(), 26); + assert!(names.contains(&"get_server_logs")); + assert_eq!(tools.len(), 27); } #[test] diff --git a/server/src/io/search.rs b/server/src/io/search.rs index da1168e..badcfdf 100644 --- a/server/src/io/search.rs +++ b/server/src/io/search.rs @@ -1,3 +1,4 @@ +use crate::slog; use crate::state::SessionState; use ignore::WalkBuilder; use serde::Serialize; @@ -52,7 +53,7 @@ pub async fn search_files_impl(query: String, root: PathBuf) -> Result eprintln!("Error walking dir: {}", err), + Err(err) => slog!("Error walking dir: {}", err), } } diff --git a/server/src/io/watcher.rs b/server/src/io/watcher.rs index 54bf5cf..3ffcf6c 100644 --- a/server/src/io/watcher.rs +++ b/server/src/io/watcher.rs @@ -15,6 +15,7 @@ //! via exit-code inspection and silently skips the commit while still broadcasting //! the event so connected clients stay in sync.
+use crate::slog; use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher, recommended_watcher}; use serde::Serialize; use std::collections::HashMap; @@ -151,13 +152,13 @@ fn flush_pending( ("remove", item.to_string(), format!("story-kit: remove {item}")) }; - eprintln!("[watcher] flush: {commit_msg}"); + slog!("[watcher] flush: {commit_msg}"); match git_add_work_and_commit(git_root, &commit_msg) { Ok(committed) => { if committed { - eprintln!("[watcher] committed: {commit_msg}"); + slog!("[watcher] committed: {commit_msg}"); } else { - eprintln!("[watcher] skipped (already committed): {commit_msg}"); + slog!("[watcher] skipped (already committed): {commit_msg}"); } let stage = additions.first().map_or("unknown", |(_, s)| s); let evt = WatcherEvent { @@ -169,7 +170,7 @@ fn flush_pending( let _ = event_tx.send(evt); } Err(e) => { - eprintln!("[watcher] git error: {e}"); + slog!("[watcher] git error: {e}"); } } } @@ -192,17 +193,17 @@ pub fn start_watcher( }) { Ok(w) => w, Err(e) => { - eprintln!("[watcher] failed to create watcher: {e}"); + slog!("[watcher] failed to create watcher: {e}"); return; } }; if let Err(e) = watcher.watch(&work_dir, RecursiveMode::Recursive) { - eprintln!("[watcher] failed to watch {}: {e}", work_dir.display()); + slog!("[watcher] failed to watch {}: {e}", work_dir.display()); return; } - eprintln!("[watcher] watching {}", work_dir.display()); + slog!("[watcher] watching {}", work_dir.display()); const DEBOUNCE: Duration = Duration::from_millis(300); @@ -237,13 +238,13 @@ pub fn start_watcher( false } Ok(Err(e)) => { - eprintln!("[watcher] notify error: {e}"); + slog!("[watcher] notify error: {e}"); false } // Debounce window expired — time to flush. 
Err(mpsc::RecvTimeoutError::Timeout) => true, Err(mpsc::RecvTimeoutError::Disconnected) => { - eprintln!("[watcher] channel disconnected, shutting down"); + slog!("[watcher] channel disconnected, shutting down"); break; } }; diff --git a/server/src/llm/chat.rs b/server/src/llm/chat.rs index 017bdcb..21360b8 100644 --- a/server/src/llm/chat.rs +++ b/server/src/llm/chat.rs @@ -1,3 +1,4 @@ +use crate::slog; use crate::llm::prompts::SYSTEM_PROMPT; use crate::llm::providers::claude_code::ClaudeCodeResult; use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition}; @@ -208,7 +209,7 @@ where .clone() .unwrap_or_else(|| "http://localhost:11434".to_string()); - eprintln!("[chat] provider={} model={}", config.provider, config.model); + slog!("[chat] provider={} model={}", config.provider, config.model); let is_claude_code = config.provider == "claude-code"; let is_claude = !is_claude_code && config.model.starts_with("claude-"); diff --git a/server/src/llm/providers/claude_code.rs b/server/src/llm/providers/claude_code.rs index 0b63fbc..61aa64c 100644 --- a/server/src/llm/providers/claude_code.rs +++ b/server/src/llm/providers/claude_code.rs @@ -1,3 +1,4 @@ +use crate::slog; use portable_pty::{CommandBuilder, PtySize, native_pty_system}; use std::io::{BufRead, BufReader, Write}; use std::sync::Arc; @@ -152,7 +153,7 @@ fn run_pty_session( // Allow nested spawning when the server itself runs inside Claude Code cmd.env("CLAUDECODE", ""); - eprintln!( + slog!( "[pty-debug] Spawning: claude -p \"{}\" {} --output-format stream-json --verbose", user_message, resume_session_id @@ -165,7 +166,7 @@ fn run_pty_session( .spawn_command(cmd) .map_err(|e| format!("Failed to spawn claude: {e}"))?; - eprintln!( + slog!( "[pty-debug] Process spawned, pid: {:?}", child.process_id() ); @@ -187,23 +188,23 @@ fn run_pty_session( std::thread::spawn(move || { let buf_reader = BufReader::new(reader); - eprintln!("[pty-debug] Reader thread started"); + slog!("[pty-debug] 
Reader thread started"); for line in buf_reader.lines() { match line { Ok(l) => { - eprintln!("[pty-debug] raw line: {}", l); + slog!("[pty-debug] raw line: {}", l); if line_tx.send(Some(l)).is_err() { break; } } Err(e) => { - eprintln!("[pty-debug] read error: {e}"); + slog!("[pty-debug] read error: {e}"); let _ = line_tx.send(None); break; } } } - eprintln!("[pty-debug] Reader thread done"); + slog!("[pty-debug] Reader thread done"); let _ = line_tx.send(None); }); @@ -223,7 +224,7 @@ fn run_pty_session( continue; } - eprintln!( + slog!( "[pty-debug] processing: {}...", &trimmed[..trimmed.len().min(120)] ); diff --git a/server/src/log_buffer.rs b/server/src/log_buffer.rs new file mode 100644 index 0000000..5a06a3a --- /dev/null +++ b/server/src/log_buffer.rs @@ -0,0 +1,136 @@ +//! Bounded in-memory ring buffer for server log output. +//! +//! Use the [`slog!`] macro as a drop-in replacement for `eprintln!`. It writes +//! to stderr (same as before) and simultaneously appends the line to the global +//! ring buffer, making it retrievable via the `get_server_logs` MCP tool. + +use std::collections::VecDeque; +use std::sync::{Mutex, OnceLock}; + +const CAPACITY: usize = 1000; + +pub struct LogBuffer { + lines: Mutex<VecDeque<String>>, +} + +impl LogBuffer { + fn new() -> Self { + Self { + lines: Mutex::new(VecDeque::with_capacity(CAPACITY)), + } + } + + /// Append a log line, evicting the oldest entry when at capacity. + pub fn push(&self, line: String) { + if let Ok(mut buf) = self.lines.lock() { + if buf.len() >= CAPACITY { + buf.pop_front(); + } + buf.push_back(line); + } + } + + /// Return up to `count` recent lines, optionally filtered by a substring. + /// Lines are returned in chronological order (oldest first). 
+ pub fn get_recent(&self, count: usize, filter: Option<&str>) -> Vec<String> { + let buf = match self.lines.lock() { + Ok(b) => b, + Err(_) => return vec![], + }; + let filtered: Vec<&String> = buf + .iter() + .filter(|line| filter.is_none_or(|f| line.contains(f))) + .collect(); + let start = filtered.len().saturating_sub(count); + filtered[start..].iter().map(|s| (*s).clone()).collect() + } +} + +static GLOBAL: OnceLock<LogBuffer> = OnceLock::new(); + +/// Access the process-wide log ring buffer. +pub fn global() -> &'static LogBuffer { + GLOBAL.get_or_init(LogBuffer::new) +} + +/// Write a formatted message to stderr **and** capture it in the ring buffer. +/// +/// Usage is identical to `eprintln!`: +/// ```ignore +/// slog!("agent {} started", name); +/// ``` +#[macro_export] +macro_rules! slog { + ($($arg:tt)*) => {{ + let _line = format!($($arg)*); + eprintln!("{}", _line); + $crate::log_buffer::global().push(_line); + }}; +} + +#[cfg(test)] +mod tests { + use super::*; + + fn fresh_buffer() -> LogBuffer { + LogBuffer::new() + } + + #[test] + fn push_and_retrieve() { + let buf = fresh_buffer(); + buf.push("line one".into()); + buf.push("line two".into()); + let recent = buf.get_recent(10, None); + assert_eq!(recent, vec!["line one", "line two"]); + } + + #[test] + fn evicts_oldest_at_capacity() { + let buf = LogBuffer { + lines: Mutex::new(VecDeque::with_capacity(CAPACITY)), + }; + // Fill past capacity + for i in 0..=CAPACITY { + buf.push(format!("line {i}")); + } + let recent = buf.get_recent(CAPACITY + 1, None); + // Should have exactly CAPACITY lines + assert_eq!(recent.len(), CAPACITY); + // The oldest (line 0) should have been evicted + assert!(!recent.iter().any(|l| l == "line 0")); + // The newest should be present + assert!(recent.iter().any(|l| l == &format!("line {CAPACITY}"))); + } + + #[test] + fn filter_by_substring() { + let buf = fresh_buffer(); + buf.push("watcher started".into()); + buf.push("mcp call received".into()); + buf.push("watcher event".into()); + + 
let filtered = buf.get_recent(100, Some("watcher")); + assert_eq!(filtered.len(), 2); + assert_eq!(filtered[0], "watcher started"); + assert_eq!(filtered[1], "watcher event"); + } + + #[test] + fn count_limits_results() { + let buf = fresh_buffer(); + for i in 0..10 { + buf.push(format!("line {i}")); + } + let recent = buf.get_recent(3, None); + assert_eq!(recent.len(), 3); + // Most recent 3 + assert_eq!(recent, vec!["line 7", "line 8", "line 9"]); + } + + #[test] + fn empty_buffer_returns_empty() { + let buf = fresh_buffer(); + assert!(buf.get_recent(10, None).is_empty()); + } +} diff --git a/server/src/main.rs b/server/src/main.rs index 54bf849..8335280 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -3,6 +3,7 @@ mod config; mod http; mod io; mod llm; +pub mod log_buffer; mod state; mod store; mod workflow; @@ -39,7 +40,7 @@ async fn main() -> Result<(), std::io::Error> { ) .await .unwrap_or_else(|e| { - eprintln!("Warning: failed to auto-open project at {project_root:?}: {e}"); + slog!("Warning: failed to auto-open project at {project_root:?}: {e}"); project_root.to_string_lossy().to_string() }); @@ -82,7 +83,7 @@ async fn main() -> Result<(), std::io::Error> { // active pipeline stages (2_current/, 3_qa/, 4_merge/). 
if let Some(root) = startup_root { tokio::spawn(async move { - eprintln!("[auto-assign] Server startup: scanning pipeline stages for unassigned work."); + slog!("[auto-assign] Server startup: scanning pipeline stages for unassigned work."); startup_agents.auto_assign_available_work(&root).await; }); } diff --git a/server/src/worktree.rs b/server/src/worktree.rs index 136079a..0a28dab 100644 --- a/server/src/worktree.rs +++ b/server/src/worktree.rs @@ -1,3 +1,4 @@ +use crate::slog; use crate::config::ProjectConfig; use std::path::{Path, PathBuf}; use std::process::Command; @@ -245,7 +246,7 @@ fn remove_worktree_sync( if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); - eprintln!("[worktree] remove warning: {stderr}"); + slog!("[worktree] remove warning: {stderr}"); } // Delete branch (best effort) @@ -273,7 +274,7 @@ async fn run_teardown_commands(wt_path: &Path, config: &ProjectConfig) -> Result for cmd in &component.teardown { // Best effort — don't fail teardown if let Err(e) = run_shell_command(cmd, &cmd_dir).await { - eprintln!("[worktree] teardown warning for {}: {e}", component.name); + slog!("[worktree] teardown warning for {}: {e}", component.name); } } } @@ -285,7 +286,7 @@ async fn run_shell_command(cmd: &str, cwd: &Path) -> Result<(), String> { let cwd = cwd.to_path_buf(); tokio::task::spawn_blocking(move || { - eprintln!("[worktree] Running: {cmd} in {}", cwd.display()); + slog!("[worktree] Running: {cmd} in {}", cwd.display()); let output = Command::new("sh") .args(["-c", &cmd]) .current_dir(&cwd)