2026-02-23 20:52:06 +00:00
|
|
|
mod agent_log;
|
2026-02-19 15:25:22 +00:00
|
|
|
mod agents;
|
2026-02-19 17:58:53 +00:00
|
|
|
mod config;
|
2026-02-16 16:24:21 +00:00
|
|
|
mod http;
|
|
|
|
|
mod io;
|
2026-02-13 12:31:36 +00:00
|
|
|
mod llm;
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
pub mod log_buffer;
|
2026-02-13 12:31:36 +00:00
|
|
|
mod state;
|
|
|
|
|
mod store;
|
2026-02-19 12:54:04 +00:00
|
|
|
mod workflow;
|
2026-02-19 17:58:53 +00:00
|
|
|
mod worktree;
|
2026-02-13 12:31:36 +00:00
|
|
|
|
2026-02-19 15:25:22 +00:00
|
|
|
use crate::agents::AgentPool;
|
2026-02-16 16:24:21 +00:00
|
|
|
use crate::http::build_routes;
|
|
|
|
|
use crate::http::context::AppContext;
|
2026-02-23 11:39:22 +00:00
|
|
|
use crate::http::{remove_port_file, resolve_port, write_port_file};
|
|
|
|
|
use crate::io::fs::find_story_kit_root;
|
2026-02-13 12:31:36 +00:00
|
|
|
use crate::state::SessionState;
|
|
|
|
|
use crate::store::JsonFileStore;
|
2026-02-19 12:54:04 +00:00
|
|
|
use crate::workflow::WorkflowState;
|
2026-02-16 16:24:21 +00:00
|
|
|
use poem::Server;
|
|
|
|
|
use poem::listener::TcpListener;
|
2026-02-23 11:39:22 +00:00
|
|
|
use std::path::PathBuf;
|
2026-02-13 12:31:36 +00:00
|
|
|
use std::sync::Arc;
|
2026-02-20 19:39:19 +00:00
|
|
|
use tokio::sync::broadcast;
|
2026-02-13 12:31:36 +00:00
|
|
|
|
|
|
|
|
/// StoryKit server entry point.
///
/// Startup sequence (order matters — later steps read state set by earlier ones):
///   1. Build shared session state and the JSON-file store.
///   2. Auto-detect and open a `.story_kit/` project (cwd or a parent); validate
///      its `project.toml`, panicking on an invalid config.
///   3. Create the watcher broadcast channel, the agent pool, the orphan
///      watchdog, the filesystem watcher, and the auto-assign subscriber task.
///   4. Build the HTTP routes, spawn the agent reaper and the startup
///      reconciliation/auto-assign task, then serve on `127.0.0.1:{port}`.
///   5. After the server exits: kill PTY children and remove the port file.
///
/// Returns the server's `std::io::Error` result; store-load failure also aborts
/// startup via `?`.
#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    // Shared session state handed (via AppContext) to every route.
    let app_state = Arc::new(SessionState::default());

    // Fall back to "." when the current directory is unreadable.
    let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));

    // Persistent JSON-backed store; a load/parse failure is fatal at startup.
    let store = Arc::new(
        JsonFileStore::from_path(PathBuf::from("store.json")).map_err(std::io::Error::other)?,
    );

    // Auto-detect a .story_kit/ project in cwd or parent directories.
    if let Some(project_root) = find_story_kit_root(&cwd) {
        // Best-effort auto-open: on failure we log a warning and continue.
        // NOTE(review): the success value is discarded, and the error arm only
        // produces a placeholder string — presumably open_project itself sets
        // app_state.project_root on success; confirm the failure path leaves
        // state consistent for the code below that re-reads project_root.
        io::fs::open_project(
            project_root.to_string_lossy().to_string(),
            &app_state,
            store.as_ref(),
        )
        .await
        .unwrap_or_else(|e| {
            slog!("Warning: failed to auto-open project at {project_root:?}: {e}");
            project_root.to_string_lossy().to_string()
        });

        // Validate agent config for the detected project root.
        // A malformed project.toml is fatal by design (panic aborts startup).
        config::ProjectConfig::load(&project_root)
            .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
    } else {
        // No .story_kit/ found — fall back to cwd so existing behaviour is preserved.
        *app_state.project_root.lock().unwrap() = Some(cwd.clone());
    }

    // Workflow state shared across handlers behind a std Mutex.
    let workflow = Arc::new(std::sync::Mutex::new(WorkflowState::default()));

    // Port selection (env/config resolution lives in http::resolve_port).
    let port = resolve_port();

    // Filesystem watcher: broadcast channel for work/ pipeline changes.
    // Created before AgentPool so the pool can emit AgentStateChanged events.
    let (watcher_tx, _) = broadcast::channel::<io::watcher::WatcherEvent>(1024);

    let agents = Arc::new(AgentPool::new(port, watcher_tx.clone()));

    // Start the background watchdog that detects and cleans up orphaned Running agents.
    // When orphans are found, auto-assign is triggered to reassign free agents.
    let watchdog_root: Option<PathBuf> = app_state.project_root.lock().unwrap().clone();
    AgentPool::spawn_watchdog(Arc::clone(&agents), watchdog_root);

    // Start the filesystem watcher only when the project actually has a
    // .story_kit/work/ directory to observe.
    if let Some(ref root) = *app_state.project_root.lock().unwrap() {
        let work_dir = root.join(".story_kit").join("work");
        if work_dir.is_dir() {
            io::watcher::start_watcher(work_dir, root.clone(), watcher_tx.clone());
        }
    }

    // Subscribe to watcher events so that auto-assign triggers when a work item
    // file is moved into an active pipeline stage (2_current/, 3_qa/, 4_merge/).
    {
        let watcher_auto_rx = watcher_tx.subscribe();
        let watcher_auto_agents = Arc::clone(&agents);
        let watcher_auto_root: Option<PathBuf> =
            app_state.project_root.lock().unwrap().clone();
        if let Some(root) = watcher_auto_root {
            tokio::spawn(async move {
                let mut rx = watcher_auto_rx;
                // Loop ends when the channel closes (all senders dropped);
                // lagged receivers also surface as Err and end the task.
                while let Ok(event) = rx.recv().await {
                    if let io::watcher::WatcherEvent::WorkItem { ref stage, .. } = event
                        && matches!(stage.as_str(), "2_current" | "3_qa" | "4_merge")
                    {
                        slog!(
                            "[auto-assign] Watcher detected work item in {stage}/; \
                            triggering auto-assign."
                        );
                        watcher_auto_agents
                            .auto_assign_available_work(&root)
                            .await;
                    }
                }
            });
        }
    }

    // Reconciliation progress channel: startup reconciliation → WebSocket clients.
    let (reconciliation_tx, _) =
        broadcast::channel::<agents::ReconciliationEvent>(64);

    // Permission channel: MCP prompt_permission → WebSocket handler.
    let (perm_tx, perm_rx) = tokio::sync::mpsc::unbounded_channel();

    // Capture project root, agents Arc, and reconciliation sender before ctx
    // is consumed by build_routes.
    let startup_root: Option<PathBuf> = app_state.project_root.lock().unwrap().clone();
    let startup_agents = Arc::clone(&agents);
    let startup_reconciliation_tx = reconciliation_tx.clone();
    // Clone for shutdown cleanup — kill orphaned PTY children before exiting.
    let agents_for_shutdown = Arc::clone(&agents);

    // Everything the HTTP layer needs, moved into the router here.
    let ctx = AppContext {
        state: app_state,
        store,
        workflow,
        agents,
        watcher_tx,
        reconciliation_tx,
        perm_tx,
        perm_rx: Arc::new(tokio::sync::Mutex::new(perm_rx)),
    };

    let app = build_routes(ctx);

    // Background reaper: periodically remove completed/failed agent entries
    // that have exceeded the TTL.
    {
        let reaper_agents = Arc::clone(&startup_agents);
        let ttl = std::time::Duration::from_secs(agents::DEFAULT_AGENT_TTL_SECS);
        tokio::spawn(async move {
            // Check every 5 minutes.
            let interval = std::time::Duration::from_secs(300);
            loop {
                tokio::time::sleep(interval).await;
                reaper_agents.reap_expired_agents(ttl);
            }
        });
    }

    // On startup:
    // 1. Reconcile any stories whose agent work was committed while the server was
    //    offline (worktree has commits ahead of master but pipeline didn't advance).
    // 2. Auto-assign free agents to remaining unassigned work in the pipeline.
    if let Some(root) = startup_root {
        tokio::spawn(async move {
            slog!(
                "[startup] Reconciling completed worktrees from previous session."
            );
            startup_agents
                .reconcile_on_startup(&root, &startup_reconciliation_tx)
                .await;
            slog!(
                "[auto-assign] Scanning pipeline stages for unassigned work."
            );
            startup_agents.auto_assign_available_work(&root).await;
        });
    }

    let addr = format!("127.0.0.1:{port}");

    // ANSI-colored startup banner plus machine-readable port line.
    println!(
        "\x1b[95;1m ____ _ _ ___ _ \n / ___|| |_ ___ _ __| | _|_ _| |_ \n \\___ \\| __/ _ \\| '__| |/ /| || __|\n ___) | || (_) | | | < | || |_ \n |____/ \\__\\___/|_| |_|\\_\\___|\\__|\n\x1b[0m"
    );
    println!("STORYKIT_PORT={port}");
    println!("\x1b[96;1mFrontend:\x1b[0m \x1b[94mhttp://{addr}\x1b[0m");
    println!("\x1b[92;1mOpenAPI Docs:\x1b[0m \x1b[94mhttp://{addr}/docs\x1b[0m");

    // Advertise the bound port on disk; cleaned up after the server exits.
    let port_file = write_port_file(&cwd, port);

    // Blocks until the server stops; the cleanup below runs regardless of
    // whether `result` is Ok or Err.
    let result = Server::new(TcpListener::bind(&addr)).run(app).await;

    // Kill all active PTY child processes before exiting to prevent orphaned
    // Claude Code processes from running after the server restarts.
    agents_for_shutdown.kill_all_children();

    if let Some(ref path) = port_file {
        remove_port_file(path);
    }

    result
}
|
|
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Loading a project.toml with two `[[agent]]` entries sharing a name
    /// must abort with the "Duplicate agent name" validation panic.
    #[test]
    #[should_panic(expected = "Invalid project.toml: Duplicate agent name")]
    fn panics_on_duplicate_agent_names() {
        // Lay out a throwaway project: <tmp>/.story_kit/project.toml with a
        // roster that repeats the agent name "coder".
        let project = tempfile::tempdir().unwrap();
        let kit_dir = project.path().join(".story_kit");
        std::fs::create_dir_all(&kit_dir).unwrap();

        let manifest = kit_dir.join("project.toml");
        let roster = r#"
[[agent]]
name = "coder"

[[agent]]
name = "coder"
"#;
        std::fs::write(&manifest, roster).unwrap();

        // Mirrors main(): an invalid config is converted into a startup panic.
        config::ProjectConfig::load(project.path())
            .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
    }
}
|