story-kit: merge 93_story_expose_server_logs_to_agents_via_mcp

Adds log_buffer ring buffer and slog! macro for in-memory server log
capture, plus get_server_logs MCP tool for agents to read recent logs.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Dave
2026-02-23 20:53:37 +00:00
11 changed files with 245 additions and 68 deletions

View File

@@ -20,7 +20,7 @@ export default defineConfig({
command:
"pnpm exec vite --config vite.config.ts --host 127.0.0.1 --port 41700 --strictPort",
url: "http://127.0.0.1:41700/@vite/client",
reuseExistingServer: false,
reuseExistingServer: true,
timeout: 120_000,
cwd: frontendRoot,
},

View File

@@ -1,4 +1,5 @@
use crate::agent_log::AgentLogWriter;
use crate::slog;
use crate::config::ProjectConfig;
use crate::worktree::{self, WorktreeInfo};
use portable_pty::{CommandBuilder, PtySize, native_pty_system};
@@ -384,7 +385,7 @@ impl AgentPool {
// Preserve worktree for inspection — don't destroy agent's work on stop.
if let Some(ref wt) = worktree_info {
eprintln!(
slog!(
"[agents] Worktree preserved for {story_id}:{agent_name}: {}",
wt.path.display()
);
@@ -581,7 +582,7 @@ impl AgentPool {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
eprintln!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}");
slog!("[pipeline] Failed to lock agents for '{story_id}:{agent_name}': {e}");
return;
}
};
@@ -599,14 +600,14 @@ impl AgentPool {
let completion = match completion {
Some(c) => c,
None => {
eprintln!("[pipeline] No completion report for '{story_id}:{agent_name}'");
slog!("[pipeline] No completion report for '{story_id}:{agent_name}'");
return;
}
};
let project_root = match project_root {
Some(p) => p,
None => {
eprintln!("[pipeline] No project_root for '{story_id}:{agent_name}'");
slog!("[pipeline] No project_root for '{story_id}:{agent_name}'");
return;
}
};
@@ -619,23 +620,23 @@ impl AgentPool {
}
PipelineStage::Coder => {
if completion.gates_passed {
eprintln!(
slog!(
"[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA."
);
if let Err(e) = move_story_to_qa(&project_root, story_id) {
eprintln!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
slog!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
return;
}
if let Err(e) = self
.start_agent(&project_root, story_id, Some("qa"), None)
.await
{
eprintln!("[pipeline] Failed to start qa agent for '{story_id}': {e}");
slog!("[pipeline] Failed to start qa agent for '{story_id}': {e}");
}
// Coder slot is now free — pick up any other unassigned work in 2_current/.
self.auto_assign_available_work(&project_root).await;
} else {
eprintln!(
slog!(
"[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
);
let context = format!(
@@ -648,7 +649,7 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some(agent_name), Some(&context))
.await
{
eprintln!(
slog!(
"[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}"
);
}
@@ -663,7 +664,7 @@ impl AgentPool {
tokio::task::spawn_blocking(move || run_coverage_gate(&cp))
.await
.unwrap_or_else(|e| {
eprintln!("[pipeline] Coverage gate task panicked: {e}");
slog!("[pipeline] Coverage gate task panicked: {e}");
Ok((false, format!("Coverage gate task panicked: {e}")))
});
let (coverage_passed, coverage_output) = match coverage_result {
@@ -672,23 +673,23 @@ impl AgentPool {
};
if coverage_passed {
eprintln!(
slog!(
"[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge."
);
if let Err(e) = move_story_to_merge(&project_root, story_id) {
eprintln!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}");
slog!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}");
return;
}
if let Err(e) = self
.start_agent(&project_root, story_id, Some("mergemaster"), None)
.await
{
eprintln!("[pipeline] Failed to start mergemaster for '{story_id}': {e}");
slog!("[pipeline] Failed to start mergemaster for '{story_id}': {e}");
}
// QA slot is now free — pick up any other unassigned work in 3_qa/.
self.auto_assign_available_work(&project_root).await;
} else {
eprintln!(
slog!(
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
);
let context = format!(
@@ -701,11 +702,11 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await
{
eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}");
slog!("[pipeline] Failed to restart qa for '{story_id}': {e}");
}
}
} else {
eprintln!(
slog!(
"[pipeline] QA failed gates for '{story_id}'. Restarting."
);
let context = format!(
@@ -718,20 +719,20 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await
{
eprintln!("[pipeline] Failed to restart qa for '{story_id}': {e}");
slog!("[pipeline] Failed to restart qa for '{story_id}': {e}");
}
}
}
PipelineStage::Mergemaster => {
// Run script/test on master (project_root) as the post-merge verification.
eprintln!(
slog!(
"[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master."
);
let root = project_root.clone();
let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root))
.await
.unwrap_or_else(|e| {
eprintln!("[pipeline] Post-merge test task panicked: {e}");
slog!("[pipeline] Post-merge test task panicked: {e}");
Ok((false, format!("Test task panicked: {e}")))
});
let (passed, output) = match test_result {
@@ -740,11 +741,11 @@ impl AgentPool {
};
if passed {
eprintln!(
slog!(
"[pipeline] Post-merge tests passed for '{story_id}'. Archiving."
);
if let Err(e) = move_story_to_archived(&project_root, story_id) {
eprintln!("[pipeline] Failed to archive '{story_id}': {e}");
slog!("[pipeline] Failed to archive '{story_id}': {e}");
}
// Mergemaster slot is now free — pick up any other items in 4_merge/.
self.auto_assign_available_work(&project_root).await;
@@ -756,15 +757,15 @@ impl AgentPool {
// worktree::remove_worktree_by_story_id(&project_root, story_id, &config)
// .await
// {
// eprintln!(
// slog!(
// "[pipeline] Failed to remove worktree for '{story_id}': {e}"
// );
// }
eprintln!(
slog!(
"[pipeline] Story '{story_id}' archived. Worktree preserved for inspection."
);
} else {
eprintln!(
slog!(
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
);
let context = format!(
@@ -777,7 +778,7 @@ impl AgentPool {
.start_agent(&project_root, story_id, Some("mergemaster"), Some(&context))
.await
{
eprintln!(
slog!(
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
);
}
@@ -1083,7 +1084,7 @@ impl AgentPool {
let config = match ProjectConfig::load(project_root) {
Ok(c) => c,
Err(e) => {
eprintln!("[auto-assign] Failed to load project config: {e}");
slog!("[auto-assign] Failed to load project config: {e}");
return;
}
};
@@ -1108,7 +1109,7 @@ impl AgentPool {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
eprintln!("[auto-assign] Failed to lock agents: {e}");
slog!("[auto-assign] Failed to lock agents: {e}");
break;
}
};
@@ -1129,21 +1130,21 @@ impl AgentPool {
match free_agent {
Some(agent_name) => {
eprintln!(
slog!(
"[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
);
if let Err(e) = self
.start_agent(project_root, story_id, Some(&agent_name), None)
.await
{
eprintln!(
slog!(
"[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
);
}
}
None => {
// No free agents of this type — stop scanning this stage.
eprintln!(
slog!(
"[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
stage
);
@@ -1437,7 +1438,7 @@ async fn run_server_owned_completion(
};
match lock.get(&key) {
Some(agent) if agent.completion.is_some() => {
eprintln!(
slog!(
"[agents] Completion already recorded for '{story_id}:{agent_name}'; \
skipping server-owned gates."
);
@@ -1478,7 +1479,7 @@ async fn run_server_owned_completion(
)
};
eprintln!(
slog!(
"[agents] Server-owned completion for '{story_id}:{agent_name}': gates_passed={gates_passed}"
);
@@ -1595,7 +1596,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(),
let source_path = source_dir.join(format!("{story_id}.md"));
if !source_path.exists() {
eprintln!(
slog!(
"[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/",
source_dir.display()
);
@@ -1608,7 +1609,7 @@ pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(),
std::fs::rename(&source_path, &current_path)
.map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?;
eprintln!(
slog!(
"[lifecycle] Moved '{story_id}' from {} to work/2_current/",
source_dir.display()
);
@@ -1655,7 +1656,7 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(),
} else {
"work/4_merge/"
};
eprintln!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/");
slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_archived/");
Ok(())
}
@@ -1697,7 +1698,7 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St
} else {
"work/3_qa/"
};
eprintln!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
Ok(())
}
@@ -1728,7 +1729,7 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
std::fs::rename(&current_path, &qa_path)
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
eprintln!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
Ok(())
}
@@ -1765,7 +1766,7 @@ pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), Str
std::fs::rename(&source_path, &archive_path)
.map_err(|e| format!("Failed to move bug '{bug_id}' to 5_archived/: {e}"))?;
eprintln!(
slog!(
"[lifecycle] Closed bug '{bug_id}' → work/5_archived/"
);
@@ -2221,7 +2222,7 @@ fn run_agent_pty_blocking(
cmd.env_remove("CLAUDECODE");
cmd.env_remove("CLAUDE_CODE_ENTRYPOINT");
eprintln!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}");
slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}");
let mut child = pair
.slave
@@ -2318,7 +2319,7 @@ fn run_agent_pty_blocking(
let _ = child.kill();
let _ = child.wait();
eprintln!(
slog!(
"[agent:{story_id}:{agent_name}] Done. Session: {:?}",
session_id
);

View File

@@ -1,3 +1,4 @@
use crate::slog;
use serde::Deserialize;
use std::collections::HashSet;
use std::path::Path;
@@ -123,7 +124,7 @@ impl ProjectConfig {
// Try legacy format.
if let Ok(legacy) = toml::from_str::<LegacyProjectConfig>(content)
&& let Some(agent) = legacy.agent {
eprintln!(
slog!(
"[config] Warning: [agent] table is deprecated. \
Use [[agent]] array format instead."
);
@@ -142,7 +143,7 @@ impl ProjectConfig {
let legacy: LegacyProjectConfig =
toml::from_str(content).map_err(|e| format!("Parse config: {e}"))?;
if let Some(agent) = legacy.agent {
eprintln!(
slog!(
"[config] Warning: [agent] table is deprecated. \
Use [[agent]] array format instead."
);

View File

@@ -1,5 +1,6 @@
use crate::agents::{close_bug_to_archive, move_story_to_archived, move_story_to_merge, move_story_to_qa};
use crate::config::ProjectConfig;
use crate::log_buffer;
use crate::http::context::AppContext;
use crate::http::settings::get_editor_command_from_store;
use crate::http::workflow::{
@@ -742,6 +743,23 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
},
"required": ["story_id"]
}
},
{
"name": "get_server_logs",
"description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.",
"inputSchema": {
"type": "object",
"properties": {
"lines": {
"type": "integer",
"description": "Number of recent lines to return (default 100, max 1000)"
},
"filter": {
"type": "string",
"description": "Optional substring filter (e.g. 'watcher', 'mcp', 'permission')"
}
}
}
}
]
}),
@@ -798,6 +816,8 @@ async fn handle_tools_call(
"move_story_to_merge" => tool_move_story_to_merge(&args, ctx).await,
// QA tools
"request_qa" => tool_request_qa(&args, ctx).await,
// Diagnostics
"get_server_logs" => tool_get_server_logs(&args),
_ => Err(format!("Unknown tool: {tool_name}")),
};
@@ -1518,6 +1538,18 @@ fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResult>, String
.collect()
}
fn tool_get_server_logs(args: &Value) -> Result<String, String> {
let lines = args
.get("lines")
.and_then(|v| v.as_u64())
.map(|n| n.min(1000) as usize)
.unwrap_or(100);
let filter = args.get("filter").and_then(|v| v.as_str());
let recent = log_buffer::global().get_recent(lines, filter);
Ok(recent.join("\n"))
}
#[cfg(test)]
mod tests {
use super::*;
@@ -1614,7 +1646,8 @@ mod tests {
assert!(names.contains(&"merge_agent_work"));
assert!(names.contains(&"move_story_to_merge"));
assert!(names.contains(&"request_qa"));
assert_eq!(tools.len(), 26);
assert!(names.contains(&"get_server_logs"));
assert_eq!(tools.len(), 27);
}
#[test]

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::state::SessionState;
use ignore::WalkBuilder;
use serde::Serialize;
@@ -52,7 +53,7 @@ pub async fn search_files_impl(query: String, root: PathBuf) -> Result<Vec<Searc
});
}
}
Err(err) => eprintln!("Error walking dir: {}", err),
Err(err) => slog!("Error walking dir: {}", err),
}
}

View File

@@ -15,6 +15,7 @@
//! via exit-code inspection and silently skips the commit while still broadcasting
//! the event so connected clients stay in sync.
use crate::slog;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher, recommended_watcher};
use serde::Serialize;
use std::collections::HashMap;
@@ -151,13 +152,13 @@ fn flush_pending(
("remove", item.to_string(), format!("story-kit: remove {item}"))
};
eprintln!("[watcher] flush: {commit_msg}");
slog!("[watcher] flush: {commit_msg}");
match git_add_work_and_commit(git_root, &commit_msg) {
Ok(committed) => {
if committed {
eprintln!("[watcher] committed: {commit_msg}");
slog!("[watcher] committed: {commit_msg}");
} else {
eprintln!("[watcher] skipped (already committed): {commit_msg}");
slog!("[watcher] skipped (already committed): {commit_msg}");
}
let stage = additions.first().map_or("unknown", |(_, s)| s);
let evt = WatcherEvent {
@@ -169,7 +170,7 @@ fn flush_pending(
let _ = event_tx.send(evt);
}
Err(e) => {
eprintln!("[watcher] git error: {e}");
slog!("[watcher] git error: {e}");
}
}
}
@@ -192,17 +193,17 @@ pub fn start_watcher(
}) {
Ok(w) => w,
Err(e) => {
eprintln!("[watcher] failed to create watcher: {e}");
slog!("[watcher] failed to create watcher: {e}");
return;
}
};
if let Err(e) = watcher.watch(&work_dir, RecursiveMode::Recursive) {
eprintln!("[watcher] failed to watch {}: {e}", work_dir.display());
slog!("[watcher] failed to watch {}: {e}", work_dir.display());
return;
}
eprintln!("[watcher] watching {}", work_dir.display());
slog!("[watcher] watching {}", work_dir.display());
const DEBOUNCE: Duration = Duration::from_millis(300);
@@ -237,13 +238,13 @@ pub fn start_watcher(
false
}
Ok(Err(e)) => {
eprintln!("[watcher] notify error: {e}");
slog!("[watcher] notify error: {e}");
false
}
// Debounce window expired — time to flush.
Err(mpsc::RecvTimeoutError::Timeout) => true,
Err(mpsc::RecvTimeoutError::Disconnected) => {
eprintln!("[watcher] channel disconnected, shutting down");
slog!("[watcher] channel disconnected, shutting down");
break;
}
};

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::llm::prompts::SYSTEM_PROMPT;
use crate::llm::providers::claude_code::ClaudeCodeResult;
use crate::llm::types::{Message, Role, ToolCall, ToolDefinition, ToolFunctionDefinition};
@@ -208,7 +209,7 @@ where
.clone()
.unwrap_or_else(|| "http://localhost:11434".to_string());
eprintln!("[chat] provider={} model={}", config.provider, config.model);
slog!("[chat] provider={} model={}", config.provider, config.model);
let is_claude_code = config.provider == "claude-code";
let is_claude = !is_claude_code && config.model.starts_with("claude-");

View File

@@ -1,3 +1,4 @@
use crate::slog;
use portable_pty::{CommandBuilder, PtySize, native_pty_system};
use std::io::{BufRead, BufReader, Write};
use std::sync::Arc;
@@ -152,7 +153,7 @@ fn run_pty_session(
// Allow nested spawning when the server itself runs inside Claude Code
cmd.env("CLAUDECODE", "");
eprintln!(
slog!(
"[pty-debug] Spawning: claude -p \"{}\" {} --output-format stream-json --verbose",
user_message,
resume_session_id
@@ -165,7 +166,7 @@ fn run_pty_session(
.spawn_command(cmd)
.map_err(|e| format!("Failed to spawn claude: {e}"))?;
eprintln!(
slog!(
"[pty-debug] Process spawned, pid: {:?}",
child.process_id()
);
@@ -187,23 +188,23 @@ fn run_pty_session(
std::thread::spawn(move || {
let buf_reader = BufReader::new(reader);
eprintln!("[pty-debug] Reader thread started");
slog!("[pty-debug] Reader thread started");
for line in buf_reader.lines() {
match line {
Ok(l) => {
eprintln!("[pty-debug] raw line: {}", l);
slog!("[pty-debug] raw line: {}", l);
if line_tx.send(Some(l)).is_err() {
break;
}
}
Err(e) => {
eprintln!("[pty-debug] read error: {e}");
slog!("[pty-debug] read error: {e}");
let _ = line_tx.send(None);
break;
}
}
}
eprintln!("[pty-debug] Reader thread done");
slog!("[pty-debug] Reader thread done");
let _ = line_tx.send(None);
});
@@ -223,7 +224,7 @@ fn run_pty_session(
continue;
}
eprintln!(
slog!(
"[pty-debug] processing: {}...",
&trimmed[..trimmed.len().min(120)]
);

136
server/src/log_buffer.rs Normal file
View File

@@ -0,0 +1,136 @@
//! Bounded in-memory ring buffer for server log output.
//!
//! Use the [`slog!`] macro as a drop-in replacement for `eprintln!`. It writes
//! to stderr (same as before) and simultaneously appends the line to the global
//! ring buffer, making it retrievable via the `get_server_logs` MCP tool.
use std::collections::VecDeque;
use std::sync::{Mutex, OnceLock};
/// Maximum number of log lines retained; the oldest line is evicted first.
const CAPACITY: usize = 1000;

/// Thread-safe bounded ring buffer of server log lines.
pub struct LogBuffer {
    // VecDeque gives O(1) eviction at the front and append at the back.
    lines: Mutex<VecDeque<String>>,
}

impl LogBuffer {
    /// Create an empty buffer with space for `CAPACITY` lines pre-reserved.
    fn new() -> Self {
        Self {
            lines: Mutex::new(VecDeque::with_capacity(CAPACITY)),
        }
    }

    /// Append a log line, evicting the oldest entry when at capacity.
    ///
    /// A poisoned lock (another thread panicked while logging) is recovered
    /// via `into_inner` instead of silently dropping the line: the buffer
    /// holds plain strings, so no invariant can be left half-updated, and
    /// losing diagnostics right after a panic is the worst possible time.
    pub fn push(&self, line: String) {
        let mut buf = self.lines.lock().unwrap_or_else(|p| p.into_inner());
        if buf.len() >= CAPACITY {
            buf.pop_front();
        }
        buf.push_back(line);
    }

    /// Return up to `count` recent lines, optionally filtered to lines
    /// containing `filter`. Lines come back in chronological order
    /// (oldest first). Recovers a poisoned lock for the same reason as
    /// [`LogBuffer::push`].
    pub fn get_recent(&self, count: usize, filter: Option<&str>) -> Vec<String> {
        let buf = self.lines.lock().unwrap_or_else(|p| p.into_inner());
        // Filter first, then keep only the trailing `count` matches.
        let filtered: Vec<&String> = buf
            .iter()
            .filter(|line| filter.is_none_or(|f| line.contains(f)))
            .collect();
        let start = filtered.len().saturating_sub(count);
        filtered[start..].iter().map(|s| (*s).clone()).collect()
    }
}
/// Access the process-wide log ring buffer, initializing it on first use.
pub fn global() -> &'static LogBuffer {
    // Function-local static keeps the singleton's visibility as narrow as
    // possible; OnceLock makes the lazy init race-free across threads.
    static INSTANCE: OnceLock<LogBuffer> = OnceLock::new();
    INSTANCE.get_or_init(LogBuffer::new)
}
/// Write a formatted message to stderr **and** capture it in the ring buffer.
///
/// Usage is identical to `eprintln!`:
/// ```ignore
/// slog!("agent {} started", name);
/// ```
///
/// The expansion fully qualifies every path (`::std::…`, `$crate::…`) so the
/// macro keeps working when invoked from any module, even one that shadows
/// `format!` or `eprintln!` with a local `macro_rules!` definition.
#[macro_export]
macro_rules! slog {
    ($($arg:tt)*) => {{
        let msg = ::std::format!($($arg)*);
        // Mirror to stderr first so the line is visible even if the
        // buffer push is somehow delayed behind a contended lock.
        ::std::eprintln!("{msg}");
        $crate::log_buffer::global().push(msg);
    }};
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Construct an empty buffer through the same path production code uses.
    fn fresh_buffer() -> LogBuffer {
        LogBuffer::new()
    }

    #[test]
    fn push_and_retrieve() {
        let buf = fresh_buffer();
        buf.push("line one".into());
        buf.push("line two".into());
        let recent = buf.get_recent(10, None);
        assert_eq!(recent, vec!["line one", "line two"]);
    }

    #[test]
    fn evicts_oldest_at_capacity() {
        // Use the shared helper (not a struct literal) so this test exercises
        // the same construction path as every other test and the real code.
        let buf = fresh_buffer();
        // Fill one past capacity to force exactly one eviction.
        for i in 0..=CAPACITY {
            buf.push(format!("line {i}"));
        }
        let recent = buf.get_recent(CAPACITY + 1, None);
        // Should have exactly CAPACITY lines.
        assert_eq!(recent.len(), CAPACITY);
        // The oldest (line 0) should have been evicted…
        assert!(!recent.iter().any(|l| l == "line 0"));
        // …leaving line 1 as the new oldest entry…
        assert_eq!(recent.first().map(String::as_str), Some("line 1"));
        // …and the newest line present at the end.
        assert!(recent.iter().any(|l| l == &format!("line {CAPACITY}")));
    }

    #[test]
    fn filter_by_substring() {
        let buf = fresh_buffer();
        buf.push("watcher started".into());
        buf.push("mcp call received".into());
        buf.push("watcher event".into());
        let filtered = buf.get_recent(100, Some("watcher"));
        assert_eq!(filtered.len(), 2);
        assert_eq!(filtered[0], "watcher started");
        assert_eq!(filtered[1], "watcher event");
    }

    #[test]
    fn count_limits_results() {
        let buf = fresh_buffer();
        for i in 0..10 {
            buf.push(format!("line {i}"));
        }
        let recent = buf.get_recent(3, None);
        assert_eq!(recent.len(), 3);
        // Most recent 3, in chronological order.
        assert_eq!(recent, vec!["line 7", "line 8", "line 9"]);
    }

    #[test]
    fn empty_buffer_returns_empty() {
        let buf = fresh_buffer();
        assert!(buf.get_recent(10, None).is_empty());
    }
}

View File

@@ -4,6 +4,7 @@ mod config;
mod http;
mod io;
mod llm;
pub mod log_buffer;
mod state;
mod store;
mod workflow;
@@ -40,7 +41,7 @@ async fn main() -> Result<(), std::io::Error> {
)
.await
.unwrap_or_else(|e| {
eprintln!("Warning: failed to auto-open project at {project_root:?}: {e}");
slog!("Warning: failed to auto-open project at {project_root:?}: {e}");
project_root.to_string_lossy().to_string()
});
@@ -85,11 +86,11 @@ async fn main() -> Result<(), std::io::Error> {
// 2. Auto-assign free agents to remaining unassigned work in the pipeline.
if let Some(root) = startup_root {
tokio::spawn(async move {
eprintln!(
slog!(
"[startup] Reconciling completed worktrees from previous session."
);
startup_agents.reconcile_on_startup(&root).await;
eprintln!(
slog!(
"[auto-assign] Scanning pipeline stages for unassigned work."
);
startup_agents.auto_assign_available_work(&root).await;

View File

@@ -1,3 +1,4 @@
use crate::slog;
use crate::config::ProjectConfig;
use std::path::{Path, PathBuf};
use std::process::Command;
@@ -245,7 +246,7 @@ fn remove_worktree_sync(
if !output.status.success() {
let stderr = String::from_utf8_lossy(&output.stderr);
eprintln!("[worktree] remove warning: {stderr}");
slog!("[worktree] remove warning: {stderr}");
}
// Delete branch (best effort)
@@ -273,7 +274,7 @@ async fn run_teardown_commands(wt_path: &Path, config: &ProjectConfig) -> Result
for cmd in &component.teardown {
// Best effort — don't fail teardown
if let Err(e) = run_shell_command(cmd, &cmd_dir).await {
eprintln!("[worktree] teardown warning for {}: {e}", component.name);
slog!("[worktree] teardown warning for {}: {e}", component.name);
}
}
}
@@ -285,7 +286,7 @@ async fn run_shell_command(cmd: &str, cwd: &Path) -> Result<(), String> {
let cwd = cwd.to_path_buf();
tokio::task::spawn_blocking(move || {
eprintln!("[worktree] Running: {cmd} in {}", cwd.display());
slog!("[worktree] Running: {cmd} in {}", cwd.display());
let output = Command::new("sh")
.args(["-c", &cmd])
.current_dir(&cwd)