2026-02-23 20:52:06 +00:00
|
|
|
use crate::agent_log::AgentLogWriter;
|
2026-02-24 23:09:13 +00:00
|
|
|
use crate::io::watcher::WatcherEvent;
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
use crate::slog;
|
2026-02-24 13:48:25 +00:00
|
|
|
use crate::slog_error;
|
|
|
|
|
use crate::slog_warn;
|
2026-02-24 15:50:34 +00:00
|
|
|
use crate::config::{AgentConfig, ProjectConfig};
|
2026-02-19 17:58:53 +00:00
|
|
|
use crate::worktree::{self, WorktreeInfo};
|
2026-02-24 17:56:40 +00:00
|
|
|
use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system};
|
2026-02-19 17:58:53 +00:00
|
|
|
use serde::Serialize;
|
2026-02-19 15:25:22 +00:00
|
|
|
use std::collections::HashMap;
|
|
|
|
|
use std::io::{BufRead, BufReader};
|
2026-02-19 17:58:53 +00:00
|
|
|
use std::path::{Path, PathBuf};
|
2026-02-20 15:02:34 +00:00
|
|
|
use std::process::Command;
|
2026-02-19 17:58:53 +00:00
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
|
use tokio::sync::broadcast;
|
|
|
|
|
|
2026-02-23 22:50:57 +00:00
|
|
|
/// Events emitted during server startup reconciliation to broadcast real-time
/// progress to connected WebSocket clients.
#[derive(Debug, Clone, Serialize)]
pub struct ReconciliationEvent {
    /// The story being reconciled, or empty string for the overall "done" event.
    pub story_id: String,
    /// Coarse status: "checking", "gates_running", "advanced", "skipped", "failed", "done"
    pub status: String,
    /// Human-readable details.
    pub message: String,
}
|
|
|
|
|
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
/// Build the composite key used to track agents in the pool.
///
/// The key is `"{story_id}:{agent_name}"`, matching the format documented
/// on the pool's agent map.
fn composite_key(story_id: &str, agent_name: &str) -> String {
    let mut key = String::with_capacity(story_id.len() + agent_name.len() + 1);
    key.push_str(story_id);
    key.push(':');
    key.push_str(agent_name);
    key
}
|
|
|
|
|
|
2026-02-23 22:53:23 +00:00
|
|
|
/// RAII guard that removes a pending agent entry from the pool on drop.
///
/// Created after inserting a `Pending` entry into the agent HashMap.
/// If `start_agent` succeeds (the agent process is spawned and status
/// transitions to `Running`), call [`disarm`](Self::disarm) to prevent
/// cleanup. If any intermediate step fails and the guard is dropped
/// without being disarmed, the pending entry is removed so it cannot
/// block future auto-assign dispatches.
struct PendingGuard {
    /// Shared agent map this guard may clean up on drop.
    agents: Arc<Mutex<HashMap<String, StoryAgent>>>,
    /// Composite "story_id:agent_name" key of the pending entry.
    key: String,
    /// While `true`, dropping the guard removes the entry; `disarm` clears it.
    armed: bool,
}
|
|
|
|
|
|
|
|
|
|
impl PendingGuard {
|
|
|
|
|
fn new(agents: Arc<Mutex<HashMap<String, StoryAgent>>>, key: String) -> Self {
|
|
|
|
|
Self {
|
|
|
|
|
agents,
|
|
|
|
|
key,
|
|
|
|
|
armed: true,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Prevent the guard from cleaning up the entry (call after
|
|
|
|
|
/// successful spawn).
|
|
|
|
|
fn disarm(&mut self) {
|
|
|
|
|
self.armed = false;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl Drop for PendingGuard {
|
|
|
|
|
fn drop(&mut self) {
|
|
|
|
|
if self.armed
|
|
|
|
|
&& let Ok(mut agents) = self.agents.lock()
|
|
|
|
|
&& agents
|
|
|
|
|
.get(&self.key)
|
|
|
|
|
.is_some_and(|a| a.status == AgentStatus::Pending)
|
|
|
|
|
{
|
|
|
|
|
agents.remove(&self.key);
|
|
|
|
|
slog!(
|
|
|
|
|
"[agents] Cleaned up leaked Pending entry for '{}'",
|
|
|
|
|
self.key
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Events streamed from a running agent to SSE clients.
///
/// Internally tagged for serialization: each JSON object carries a
/// snake_case `type` field ("status", "output", "agent_json", "done",
/// "error", "thinking") so clients can dispatch on it.
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AgentEvent {
    /// Agent status changed.
    Status {
        story_id: String,
        agent_name: String,
        /// String form of the new status (e.g. "running", "completed").
        status: String,
    },
    /// Raw text output from the agent process.
    Output {
        story_id: String,
        agent_name: String,
        text: String,
    },
    /// Agent produced a JSON event from `--output-format stream-json`.
    AgentJson {
        story_id: String,
        agent_name: String,
        /// The parsed JSON payload, forwarded verbatim.
        data: serde_json::Value,
    },
    /// Agent finished.
    Done {
        story_id: String,
        agent_name: String,
        /// Session id, if one was captured for this run.
        session_id: Option<String>,
    },
    /// Agent errored.
    Error {
        story_id: String,
        agent_name: String,
        message: String,
    },
    /// Thinking tokens from an extended-thinking block.
    Thinking {
        story_id: String,
        agent_name: String,
        text: String,
    },
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Lifecycle states of a story agent tracked by the pool.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum AgentStatus {
    /// Registered in the pool but not yet spawned.
    Pending,
    /// Agent process is active.
    Running,
    /// Agent finished.
    Completed,
    /// Agent failed.
    Failed,
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
impl std::fmt::Display for AgentStatus {
|
|
|
|
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
|
|
|
match self {
|
|
|
|
|
Self::Pending => write!(f, "pending"),
|
|
|
|
|
Self::Running => write!(f, "running"),
|
|
|
|
|
Self::Completed => write!(f, "completed"),
|
|
|
|
|
Self::Failed => write!(f, "failed"),
|
|
|
|
|
}
|
|
|
|
|
}
|
2026-02-19 15:25:22 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
/// Pipeline stages for automatic story advancement.
///
/// Derived from agent configuration (explicit `stage` field) or from the
/// agent's name via `pipeline_stage`.
#[derive(Debug, Clone, PartialEq)]
pub enum PipelineStage {
    /// Coding agents (coder-1, coder-2, etc.)
    Coder,
    /// QA review agent
    Qa,
    /// Mergemaster agent
    Mergemaster,
    /// Supervisors and unknown agents — no automatic advancement.
    Other,
}
|
|
|
|
|
|
|
|
|
|
/// Determine the pipeline stage from an agent name.
|
|
|
|
|
pub fn pipeline_stage(agent_name: &str) -> PipelineStage {
|
|
|
|
|
match agent_name {
|
|
|
|
|
"qa" => PipelineStage::Qa,
|
|
|
|
|
"mergemaster" => PipelineStage::Mergemaster,
|
|
|
|
|
name if name.starts_with("coder") => PipelineStage::Coder,
|
|
|
|
|
_ => PipelineStage::Other,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 15:50:34 +00:00
|
|
|
/// Determine the pipeline stage for a configured agent.
|
|
|
|
|
///
|
|
|
|
|
/// Prefers the explicit `stage` config field (added in Bug 150) over the
|
|
|
|
|
/// legacy name-based heuristic so that agents with non-standard names
|
|
|
|
|
/// (e.g. `qa-2`, `coder-opus`) are assigned to the correct stage.
|
|
|
|
|
fn agent_config_stage(cfg: &AgentConfig) -> PipelineStage {
|
|
|
|
|
match cfg.stage.as_deref() {
|
|
|
|
|
Some("coder") => PipelineStage::Coder,
|
|
|
|
|
Some("qa") => PipelineStage::Qa,
|
|
|
|
|
Some("mergemaster") => PipelineStage::Mergemaster,
|
|
|
|
|
Some(_) => PipelineStage::Other,
|
|
|
|
|
None => pipeline_stage(&cfg.name),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 15:00:10 +00:00
|
|
|
/// Completion report produced when acceptance gates are run.
///
/// Created automatically by the server when an agent process exits normally,
/// or via the internal `report_completion` method.
#[derive(Debug, Serialize, Clone)]
pub struct CompletionReport {
    /// Summary of the completed work. NOTE(review): supplied by the agent
    /// or generated server-side — the producer is outside this excerpt.
    pub summary: String,
    /// Whether the acceptance gate run succeeded.
    pub gates_passed: bool,
    /// Captured output from the gate run.
    pub gate_output: String,
}
|
|
|
|
|
|
2026-02-20 13:16:04 +00:00
|
|
|
/// Serializable snapshot of one story agent's state.
///
/// Built from the internal `StoryAgent` entry by `agent_info_from_entry`.
#[derive(Debug, Serialize, Clone)]
pub struct AgentInfo {
    /// Story this agent is working on.
    pub story_id: String,
    /// Name of the configured agent.
    pub agent_name: String,
    /// Current lifecycle state.
    pub status: AgentStatus,
    /// Session id, if one was captured for this run.
    pub session_id: Option<String>,
    /// Filesystem path of the agent's worktree, if one was created.
    pub worktree_path: Option<String>,
    /// Branch the worktree was based on, if a worktree exists.
    pub base_branch: Option<String>,
    /// Completion report, present once gates have been run.
    pub completion: Option<CompletionReport>,
    /// UUID identifying the persistent log file for this session.
    pub log_session_id: Option<String>,
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Internal bookkeeping for one agent attached to one story.
///
/// Entries live in the pool's map under the composite
/// "story_id:agent_name" key.
struct StoryAgent {
    /// Name of the configured agent handling this story.
    agent_name: String,
    /// Current lifecycle state.
    status: AgentStatus,
    /// Worktree the agent runs in, once created.
    worktree_info: Option<WorktreeInfo>,
    /// Session id, if one has been captured for this run.
    session_id: Option<String>,
    /// Broadcast sender feeding live SSE subscribers with AgentEvents.
    tx: broadcast::Sender<AgentEvent>,
    /// Handle of the spawned agent task, if any.
    task_handle: Option<tokio::task::JoinHandle<()>>,
    /// Accumulated events for polling via get_agent_output.
    event_log: Arc<Mutex<Vec<AgentEvent>>>,
    /// Set when the agent calls report_completion.
    completion: Option<CompletionReport>,
    /// Project root, stored for pipeline advancement after completion.
    project_root: Option<PathBuf>,
    /// UUID identifying the log file for this session.
    log_session_id: Option<String>,
    /// Set to `true` when the agent calls `report_merge_failure`.
    /// Prevents the pipeline from blindly advancing to `5_done/` after a
    /// failed merge: the server-owned gate check runs in the feature-branch
    /// worktree (which compiles fine) and returns `gates_passed=true` even
    /// though the code was never squash-merged onto master.
    merge_failure_reported: bool,
}
|
|
|
|
|
|
2026-02-20 13:16:04 +00:00
|
|
|
/// Build an `AgentInfo` snapshot from a `StoryAgent` map entry.
|
|
|
|
|
fn agent_info_from_entry(story_id: &str, agent: &StoryAgent) -> AgentInfo {
|
|
|
|
|
AgentInfo {
|
|
|
|
|
story_id: story_id.to_string(),
|
|
|
|
|
agent_name: agent.agent_name.clone(),
|
|
|
|
|
status: agent.status.clone(),
|
|
|
|
|
session_id: agent.session_id.clone(),
|
|
|
|
|
worktree_path: agent
|
|
|
|
|
.worktree_info
|
|
|
|
|
.as_ref()
|
|
|
|
|
.map(|wt| wt.path.to_string_lossy().to_string()),
|
|
|
|
|
base_branch: agent
|
|
|
|
|
.worktree_info
|
|
|
|
|
.as_ref()
|
|
|
|
|
.map(|wt| wt.base_branch.clone()),
|
2026-02-20 15:02:34 +00:00
|
|
|
completion: agent.completion.clone(),
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: agent.log_session_id.clone(),
|
2026-02-20 13:16:04 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Manages concurrent story agents, each in its own worktree.
pub struct AgentPool {
    /// Active agents keyed by the composite "story_id:agent_name" string.
    agents: Arc<Mutex<HashMap<String, StoryAgent>>>,
    /// Server port stored at construction; consumed by spawn code outside
    /// this excerpt (presumably so agents can reach the server — confirm).
    port: u16,
    /// Registry of active PTY child process killers, keyed by "{story_id}:{agent_name}".
    /// Used to terminate child processes on server shutdown or agent stop, preventing
    /// orphaned Claude Code processes from running after the server exits.
    child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    /// Broadcast channel for notifying WebSocket clients of agent state changes.
    /// When an agent transitions state (Pending, Running, Completed, Failed, Stopped),
    /// an `AgentStateChanged` event is emitted so the frontend can refresh the
    /// pipeline board without waiting for a filesystem event.
    watcher_tx: broadcast::Sender<WatcherEvent>,
}
|
|
|
|
|
|
|
|
|
|
/// RAII guard that removes a child killer from the registry on drop.
///
/// This ensures the killer is always cleaned up when `run_agent_pty_blocking`
/// returns, regardless of the exit path (normal completion, timeout, or error).
struct ChildKillerGuard {
    /// Shared registry of PTY child killers, keyed by "story_id:agent_name".
    killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    /// Registry key for the entry this guard owns.
    key: String,
}
|
|
|
|
|
|
|
|
|
|
impl Drop for ChildKillerGuard {
|
|
|
|
|
fn drop(&mut self) {
|
|
|
|
|
if let Ok(mut killers) = self.killers.lock() {
|
|
|
|
|
killers.remove(&self.key);
|
|
|
|
|
}
|
|
|
|
|
}
|
2026-02-19 15:25:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
impl AgentPool {
|
2026-02-24 23:09:13 +00:00
|
|
|
pub fn new(port: u16, watcher_tx: broadcast::Sender<WatcherEvent>) -> Self {
|
2026-02-19 15:25:22 +00:00
|
|
|
Self {
|
2026-02-19 17:58:53 +00:00
|
|
|
agents: Arc::new(Mutex::new(HashMap::new())),
|
2026-02-20 13:24:35 +00:00
|
|
|
port,
|
2026-02-24 17:56:40 +00:00
|
|
|
child_killers: Arc::new(Mutex::new(HashMap::new())),
|
2026-02-24 23:09:13 +00:00
|
|
|
watcher_tx,
|
2026-02-24 17:56:40 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
/// Create a pool with a dummy watcher channel for unit tests.
#[cfg(test)]
pub fn new_test(port: u16) -> Self {
    // Only the sender half is needed; the receiver is dropped immediately,
    // so state-change notifications are discarded in tests.
    Self::new(port, broadcast::channel(16).0)
}
|
|
|
|
|
|
|
|
|
|
/// Notify WebSocket clients that agent state has changed, so the pipeline
|
|
|
|
|
/// board and agent panel can refresh.
|
|
|
|
|
fn notify_agent_state_changed(watcher_tx: &broadcast::Sender<WatcherEvent>) {
|
|
|
|
|
let _ = watcher_tx.send(WatcherEvent::AgentStateChanged);
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 17:56:40 +00:00
|
|
|
/// Kill all active PTY child processes.
|
|
|
|
|
///
|
|
|
|
|
/// Called on server shutdown to prevent orphaned Claude Code processes from
|
|
|
|
|
/// continuing to run after the server exits. Each registered killer is called
|
|
|
|
|
/// once, then the registry is cleared.
|
|
|
|
|
pub fn kill_all_children(&self) {
|
|
|
|
|
if let Ok(mut killers) = self.child_killers.lock() {
|
|
|
|
|
for (key, killer) in killers.iter_mut() {
|
|
|
|
|
slog!("[agents] Killing child process for {key} on shutdown");
|
|
|
|
|
let _ = killer.kill();
|
|
|
|
|
}
|
|
|
|
|
killers.clear();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Kill and deregister the child process for a specific agent key.
|
|
|
|
|
///
|
|
|
|
|
/// Used by `stop_agent` to ensure the PTY child is terminated even though
|
|
|
|
|
/// aborting a `spawn_blocking` task handle does not interrupt the blocking thread.
|
|
|
|
|
fn kill_child_for_key(&self, key: &str) {
|
|
|
|
|
if let Ok(mut killers) = self.child_killers.lock()
|
|
|
|
|
&& let Some(mut killer) = killers.remove(key)
|
|
|
|
|
{
|
|
|
|
|
slog!("[agents] Killing child process for {key} on stop");
|
|
|
|
|
let _ = killer.kill();
|
2026-02-19 15:25:22 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Start an agent for a story: load config, create worktree, spawn agent.
|
2026-02-25 16:17:38 +00:00
|
|
|
///
|
|
|
|
|
/// When `agent_name` is `None`, automatically selects the first idle coder
|
|
|
|
|
/// agent (story 190). If all coders are busy the call fails with an error
|
|
|
|
|
/// indicating the story will be picked up when one becomes available.
|
|
|
|
|
///
|
2026-02-23 13:13:41 +00:00
|
|
|
/// If `resume_context` is provided, it is appended to the rendered prompt
|
|
|
|
|
/// so the agent can pick up from a previous failed attempt.
|
2026-02-19 17:58:53 +00:00
|
|
|
pub async fn start_agent(
|
|
|
|
|
&self,
|
|
|
|
|
project_root: &Path,
|
|
|
|
|
story_id: &str,
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: Option<&str>,
|
2026-02-23 13:13:41 +00:00
|
|
|
resume_context: Option<&str>,
|
2026-02-19 17:58:53 +00:00
|
|
|
) -> Result<AgentInfo, String> {
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
let config = ProjectConfig::load(project_root)?;
|
|
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
// Validate explicit agent name early (no lock needed).
|
|
|
|
|
if let Some(name) = agent_name {
|
|
|
|
|
config
|
|
|
|
|
.find_agent(name)
|
|
|
|
|
.ok_or_else(|| format!("No agent named '{name}' in config"))?;
|
|
|
|
|
}
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
// Create name-independent shared resources before the lock so they are
|
|
|
|
|
// ready for the atomic check-and-insert (story 132).
|
2026-02-24 12:49:29 +00:00
|
|
|
let (tx, _) = broadcast::channel::<AgentEvent>(1024);
|
|
|
|
|
let event_log: Arc<Mutex<Vec<AgentEvent>>> = Arc::new(Mutex::new(Vec::new()));
|
|
|
|
|
let log_session_id = uuid::Uuid::new_v4().to_string();
|
|
|
|
|
|
2026-02-26 12:41:12 +00:00
|
|
|
// Move story from upcoming/ to current/ before checking agent
|
|
|
|
|
// availability so that auto_assign_available_work can pick it up even
|
|
|
|
|
// when all coders are currently busy (story 203). This is idempotent:
|
|
|
|
|
// if the story is already in 2_current/ or a later stage, the call is
|
|
|
|
|
// a no-op.
|
|
|
|
|
move_story_to_current(project_root, story_id)?;
|
|
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
// Atomically resolve agent name, check availability, and register as
|
|
|
|
|
// Pending. When `agent_name` is `None` the first idle coder is
|
|
|
|
|
// selected inside the lock so no TOCTOU race can occur between the
|
|
|
|
|
// availability check and the Pending insert (story 132, story 190).
|
2026-02-24 12:49:29 +00:00
|
|
|
//
|
|
|
|
|
// The `PendingGuard` ensures that if any step below fails the entry is
|
|
|
|
|
// removed from the pool so it does not permanently block auto-assign
|
|
|
|
|
// (bug 118).
|
2026-02-25 16:17:38 +00:00
|
|
|
let resolved_name: String;
|
|
|
|
|
let key: String;
|
2026-02-19 17:58:53 +00:00
|
|
|
{
|
2026-02-24 12:49:29 +00:00
|
|
|
let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
|
2026-02-25 16:17:38 +00:00
|
|
|
|
|
|
|
|
resolved_name = match agent_name {
|
|
|
|
|
Some(name) => name.to_string(),
|
|
|
|
|
None => find_free_agent_for_stage(&config, &agents, &PipelineStage::Coder)
|
|
|
|
|
.map(|s| s.to_string())
|
|
|
|
|
.ok_or_else(|| {
|
|
|
|
|
if config
|
|
|
|
|
.agent
|
|
|
|
|
.iter()
|
|
|
|
|
.any(|a| agent_config_stage(a) == PipelineStage::Coder)
|
|
|
|
|
{
|
|
|
|
|
format!(
|
2026-02-26 12:41:12 +00:00
|
|
|
"All coder agents are busy; story '{story_id}' has been \
|
|
|
|
|
queued in work/2_current/ and will be auto-assigned when \
|
|
|
|
|
one becomes available"
|
2026-02-25 16:17:38 +00:00
|
|
|
)
|
|
|
|
|
} else {
|
|
|
|
|
"No coder agent configured. Specify an agent_name explicitly."
|
|
|
|
|
.to_string()
|
|
|
|
|
}
|
|
|
|
|
})?,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
key = composite_key(story_id, &resolved_name);
|
|
|
|
|
|
|
|
|
|
// Check for duplicate assignment (same story + same agent already active).
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
if let Some(agent) = agents.get(&key)
|
|
|
|
|
&& (agent.status == AgentStatus::Running || agent.status == AgentStatus::Pending)
|
|
|
|
|
{
|
|
|
|
|
return Err(format!(
|
|
|
|
|
"Agent '{resolved_name}' for story '{story_id}' is already {}",
|
|
|
|
|
agent.status
|
|
|
|
|
));
|
|
|
|
|
}
|
2026-02-25 16:17:38 +00:00
|
|
|
// Enforce single-instance concurrency for explicitly-named agents:
|
|
|
|
|
// if this agent is already running on any other story, reject.
|
|
|
|
|
// Auto-selected agents are already guaranteed idle by
|
|
|
|
|
// find_free_agent_for_stage, so this check is only needed for
|
|
|
|
|
// explicit requests.
|
|
|
|
|
if agent_name.is_some()
|
|
|
|
|
&& let Some(busy_story) = agents.iter().find_map(|(k, a)| {
|
|
|
|
|
if a.agent_name == resolved_name
|
|
|
|
|
&& matches!(a.status, AgentStatus::Running | AgentStatus::Pending)
|
|
|
|
|
{
|
|
|
|
|
Some(
|
|
|
|
|
k.rsplit_once(':')
|
|
|
|
|
.map(|(sid, _)| sid)
|
|
|
|
|
.unwrap_or(k)
|
|
|
|
|
.to_string(),
|
|
|
|
|
)
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
}
|
|
|
|
|
})
|
|
|
|
|
{
|
2026-02-23 20:46:51 +00:00
|
|
|
return Err(format!(
|
|
|
|
|
"Agent '{resolved_name}' is already running on story '{busy_story}'; \
|
|
|
|
|
story '{story_id}' will be picked up when the agent becomes available"
|
|
|
|
|
));
|
|
|
|
|
}
|
2026-02-19 17:58:53 +00:00
|
|
|
agents.insert(
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
key.clone(),
|
2026-02-19 17:58:53 +00:00
|
|
|
StoryAgent {
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: resolved_name.clone(),
|
2026-02-19 17:58:53 +00:00
|
|
|
status: AgentStatus::Pending,
|
|
|
|
|
worktree_info: None,
|
|
|
|
|
session_id: None,
|
|
|
|
|
tx: tx.clone(),
|
|
|
|
|
task_handle: None,
|
2026-02-20 11:57:25 +00:00
|
|
|
event_log: event_log.clone(),
|
2026-02-20 15:02:34 +00:00
|
|
|
completion: None,
|
2026-02-23 13:13:41 +00:00
|
|
|
project_root: Some(project_root.to_path_buf()),
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: Some(log_session_id.clone()),
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: false,
|
2026-02-19 17:58:53 +00:00
|
|
|
},
|
|
|
|
|
);
|
2026-02-19 15:25:22 +00:00
|
|
|
}
|
2026-02-23 22:53:23 +00:00
|
|
|
let mut pending_guard = PendingGuard::new(self.agents.clone(), key.clone());
|
2026-02-19 15:25:22 +00:00
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
// Create persistent log writer (needs resolved_name, so must be after
|
|
|
|
|
// the atomic resolution above).
|
|
|
|
|
let log_writer = match AgentLogWriter::new(
|
|
|
|
|
project_root,
|
|
|
|
|
story_id,
|
|
|
|
|
&resolved_name,
|
|
|
|
|
&log_session_id,
|
|
|
|
|
) {
|
|
|
|
|
Ok(w) => Some(Arc::new(Mutex::new(w))),
|
|
|
|
|
Err(e) => {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[agents] Failed to create log writer for {story_id}:{resolved_name}: {e}"
|
|
|
|
|
);
|
|
|
|
|
None
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
// Notify WebSocket clients that a new agent is pending.
|
|
|
|
|
Self::notify_agent_state_changed(&self.watcher_tx);
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
let _ = tx.send(AgentEvent::Status {
|
|
|
|
|
story_id: story_id.to_string(),
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: resolved_name.clone(),
|
2026-02-19 17:58:53 +00:00
|
|
|
status: "pending".to_string(),
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// Extract inactivity timeout from the agent config before cloning config.
|
2026-02-24 13:13:16 +00:00
|
|
|
let inactivity_timeout_secs = config
|
|
|
|
|
.find_agent(&resolved_name)
|
|
|
|
|
.map(|a| a.inactivity_timeout_secs)
|
|
|
|
|
.unwrap_or(300);
|
|
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// Clone all values needed inside the background spawn.
|
|
|
|
|
let project_root_clone = project_root.to_path_buf();
|
|
|
|
|
let config_clone = config.clone();
|
|
|
|
|
let resume_context_owned = resume_context.map(str::to_string);
|
2026-02-19 17:58:53 +00:00
|
|
|
let sid = story_id.to_string();
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
let aname = resolved_name.clone();
|
2026-02-19 17:58:53 +00:00
|
|
|
let tx_clone = tx.clone();
|
|
|
|
|
let agents_ref = self.agents.clone();
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
let key_clone = key.clone();
|
2026-02-20 11:57:25 +00:00
|
|
|
let log_clone = event_log.clone();
|
2026-02-23 15:00:10 +00:00
|
|
|
let port_for_task = self.port;
|
2026-02-23 20:52:06 +00:00
|
|
|
let log_writer_clone = log_writer.clone();
|
2026-02-24 17:56:40 +00:00
|
|
|
let child_killers_clone = self.child_killers.clone();
|
2026-02-24 23:09:13 +00:00
|
|
|
let watcher_tx_clone = self.watcher_tx.clone();
|
2026-02-19 17:58:53 +00:00
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// Spawn the background task. Worktree creation and agent launch happen here
|
|
|
|
|
// so `start_agent` returns immediately after registering the agent as
|
|
|
|
|
// Pending — non-blocking by design (story 157).
|
2026-02-19 17:58:53 +00:00
|
|
|
let handle = tokio::spawn(async move {
|
2026-02-24 16:50:56 +00:00
|
|
|
// Step 1: create the worktree (slow — git checkout, pnpm install, etc.)
|
|
|
|
|
let wt_info = match worktree::create_worktree(
|
|
|
|
|
&project_root_clone,
|
|
|
|
|
&sid,
|
|
|
|
|
&config_clone,
|
|
|
|
|
port_for_task,
|
|
|
|
|
)
|
|
|
|
|
.await
|
|
|
|
|
{
|
|
|
|
|
Ok(wt) => wt,
|
|
|
|
|
Err(e) => {
|
|
|
|
|
// Worktree creation failed — mark agent as Failed so the UI shows the error.
|
|
|
|
|
let _ = tx_clone.send(AgentEvent::Error {
|
|
|
|
|
story_id: sid.clone(),
|
|
|
|
|
agent_name: aname.clone(),
|
|
|
|
|
message: format!("Failed to create worktree: {e}"),
|
|
|
|
|
});
|
2026-02-25 14:59:20 +00:00
|
|
|
if let Ok(mut agents) = agents_ref.lock() {
|
|
|
|
|
agents.remove(&key_clone);
|
|
|
|
|
}
|
2026-02-24 23:09:13 +00:00
|
|
|
Self::notify_agent_state_changed(&watcher_tx_clone);
|
2026-02-24 16:50:56 +00:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Step 2: store worktree info and render agent command/args/prompt.
|
|
|
|
|
let wt_path_str = wt_info.path.to_string_lossy().to_string();
|
|
|
|
|
{
|
|
|
|
|
if let Ok(mut agents) = agents_ref.lock()
|
|
|
|
|
&& let Some(agent) = agents.get_mut(&key_clone)
|
|
|
|
|
{
|
|
|
|
|
agent.worktree_info = Some(wt_info.clone());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let (command, args, mut prompt) = match config_clone.render_agent_args(
|
|
|
|
|
&wt_path_str,
|
|
|
|
|
&sid,
|
|
|
|
|
Some(&aname),
|
|
|
|
|
Some(&wt_info.base_branch),
|
|
|
|
|
) {
|
|
|
|
|
Ok(result) => result,
|
|
|
|
|
Err(e) => {
|
|
|
|
|
let _ = tx_clone.send(AgentEvent::Error {
|
|
|
|
|
story_id: sid.clone(),
|
|
|
|
|
agent_name: aname.clone(),
|
|
|
|
|
message: format!("Failed to render agent args: {e}"),
|
|
|
|
|
});
|
2026-02-25 14:59:20 +00:00
|
|
|
if let Ok(mut agents) = agents_ref.lock() {
|
|
|
|
|
agents.remove(&key_clone);
|
|
|
|
|
}
|
2026-02-24 23:09:13 +00:00
|
|
|
Self::notify_agent_state_changed(&watcher_tx_clone);
|
2026-02-24 16:50:56 +00:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Append resume context if this is a restart with failure information.
|
|
|
|
|
if let Some(ctx) = resume_context_owned {
|
|
|
|
|
prompt.push_str(&ctx);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Step 3: transition to Running now that the worktree is ready.
|
|
|
|
|
{
|
|
|
|
|
if let Ok(mut agents) = agents_ref.lock()
|
|
|
|
|
&& let Some(agent) = agents.get_mut(&key_clone)
|
|
|
|
|
{
|
|
|
|
|
agent.status = AgentStatus::Running;
|
|
|
|
|
}
|
|
|
|
|
}
|
2026-02-19 17:58:53 +00:00
|
|
|
let _ = tx_clone.send(AgentEvent::Status {
|
|
|
|
|
story_id: sid.clone(),
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: aname.clone(),
|
2026-02-19 17:58:53 +00:00
|
|
|
status: "running".to_string(),
|
|
|
|
|
});
|
2026-02-24 23:09:13 +00:00
|
|
|
Self::notify_agent_state_changed(&watcher_tx_clone);
|
2026-02-19 17:58:53 +00:00
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// Step 4: launch the agent process.
|
2026-02-20 11:57:25 +00:00
|
|
|
match run_agent_pty_streaming(
|
2026-02-24 16:50:56 +00:00
|
|
|
&sid,
|
|
|
|
|
&aname,
|
|
|
|
|
&command,
|
|
|
|
|
&args,
|
|
|
|
|
&prompt,
|
|
|
|
|
&wt_path_str,
|
|
|
|
|
&tx_clone,
|
|
|
|
|
&log_clone,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_writer_clone,
|
2026-02-24 13:13:16 +00:00
|
|
|
inactivity_timeout_secs,
|
2026-02-24 17:56:40 +00:00
|
|
|
child_killers_clone,
|
2026-02-20 11:57:25 +00:00
|
|
|
)
|
|
|
|
|
.await
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
{
|
2026-02-19 17:58:53 +00:00
|
|
|
Ok(session_id) => {
|
2026-02-23 15:00:10 +00:00
|
|
|
// Server-owned completion: run acceptance gates automatically
|
|
|
|
|
// when the agent process exits normally.
|
|
|
|
|
run_server_owned_completion(
|
|
|
|
|
&agents_ref,
|
|
|
|
|
port_for_task,
|
|
|
|
|
&sid,
|
|
|
|
|
&aname,
|
2026-02-19 17:58:53 +00:00
|
|
|
session_id,
|
2026-02-24 23:57:07 +00:00
|
|
|
watcher_tx_clone.clone(),
|
2026-02-23 15:00:10 +00:00
|
|
|
)
|
|
|
|
|
.await;
|
2026-02-24 23:09:13 +00:00
|
|
|
Self::notify_agent_state_changed(&watcher_tx_clone);
|
2026-02-19 17:58:53 +00:00
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
let _ = tx_clone.send(AgentEvent::Error {
|
|
|
|
|
story_id: sid.clone(),
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: aname.clone(),
|
2026-02-19 17:58:53 +00:00
|
|
|
message: e,
|
|
|
|
|
});
|
2026-02-25 14:59:20 +00:00
|
|
|
if let Ok(mut agents) = agents_ref.lock() {
|
|
|
|
|
agents.remove(&key_clone);
|
|
|
|
|
}
|
2026-02-24 23:09:13 +00:00
|
|
|
Self::notify_agent_state_changed(&watcher_tx_clone);
|
2026-02-19 17:58:53 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// Store the task handle while the agent is still Pending.
|
2026-02-19 17:58:53 +00:00
|
|
|
{
|
|
|
|
|
let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
if let Some(agent) = agents.get_mut(&key) {
|
2026-02-19 17:58:53 +00:00
|
|
|
agent.task_handle = Some(handle);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 22:53:23 +00:00
|
|
|
// Agent successfully spawned — prevent the guard from removing the entry.
|
|
|
|
|
pending_guard.disarm();
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
Ok(AgentInfo {
|
|
|
|
|
story_id: story_id.to_string(),
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: resolved_name,
|
2026-02-24 16:50:56 +00:00
|
|
|
status: AgentStatus::Pending,
|
2026-02-19 15:25:22 +00:00
|
|
|
session_id: None,
|
2026-02-24 16:50:56 +00:00
|
|
|
worktree_path: None,
|
|
|
|
|
base_branch: None,
|
2026-02-20 15:02:34 +00:00
|
|
|
completion: None,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: Some(log_session_id),
|
2026-02-19 17:58:53 +00:00
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 11:57:25 +00:00
|
|
|
/// Stop a running agent. Worktree is preserved for inspection.
///
/// Tears the agent down in this order: mark the pool entry `Failed`, abort
/// the background task, kill the PTY child process, emit a `"stopped"`
/// status event, remove the entry from the pool, and notify WebSocket
/// clients so the UI refreshes.
///
/// # Errors
/// Returns `Err` if the agents mutex is poisoned, or if no agent with
/// `agent_name` is registered for `story_id`.
pub async fn stop_agent(
    &self,
    // Unused: the worktree is deliberately NOT removed on stop (kept for
    // inspection); the parameter remains for API symmetry with start.
    _project_root: &Path,
    story_id: &str,
    agent_name: &str,
) -> Result<(), String> {
    // Pool entries are keyed by the composite "story_id:agent_name" key.
    let key = composite_key(story_id, agent_name);

    // Snapshot everything we need inside a short lock scope; the guard is
    // dropped at the end of this block, before any `.await` below.
    let (worktree_info, task_handle, tx) = {
        let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
        let agent = agents
            .get_mut(&key)
            .ok_or_else(|| format!("No agent '{agent_name}' for story '{story_id}'"))?;

        let wt = agent.worktree_info.clone();
        // Take the handle out so we own it (and so a second stop can't
        // abort it again).
        let handle = agent.task_handle.take();
        let tx = agent.tx.clone();
        // Mark the entry Failed while we still hold the lock; the entry is
        // removed from the map further below.
        agent.status = AgentStatus::Failed;
        (wt, handle, tx)
    };

    // Abort the task and kill the PTY child process.
    // Note: aborting a spawn_blocking task handle does not interrupt the blocking
    // thread, so we must also kill the child process directly via the killer registry.
    if let Some(handle) = task_handle {
        handle.abort();
        // Await the aborted handle so the task has fully wound down before
        // we proceed; the Result (normally a cancellation error) is ignored.
        let _ = handle.await;
    }
    self.kill_child_for_key(&key);

    // Preserve worktree for inspection — don't destroy agent's work on stop.
    if let Some(ref wt) = worktree_info {
        slog!(
            "[agents] Worktree preserved for {story_id}:{agent_name}: {}",
            wt.path.display()
        );
    }

    // Broadcast the terminal status to any SSE/event subscribers; send
    // errors (no receivers) are intentionally ignored.
    let _ = tx.send(AgentEvent::Status {
        story_id: story_id.to_string(),
        agent_name: agent_name.to_string(),
        status: "stopped".to_string(),
    });

    // Remove from map
    {
        let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
        agents.remove(&key);
    }

    // Notify WebSocket clients so pipeline board and agent panel update.
    Self::notify_agent_state_changed(&self.watcher_tx);

    Ok(())
}
|
|
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
/// Return the names of configured agents for `stage` that are not currently
|
|
|
|
|
/// running or pending.
|
|
|
|
|
pub fn available_agents_for_stage(
|
|
|
|
|
&self,
|
|
|
|
|
config: &ProjectConfig,
|
|
|
|
|
stage: &PipelineStage,
|
|
|
|
|
) -> Result<Vec<String>, String> {
|
|
|
|
|
let agents = self.agents.lock().map_err(|e| e.to_string())?;
|
|
|
|
|
Ok(config
|
|
|
|
|
.agent
|
|
|
|
|
.iter()
|
|
|
|
|
.filter(|cfg| agent_config_stage(cfg) == *stage)
|
|
|
|
|
.filter(|cfg| {
|
|
|
|
|
!agents.values().any(|a| {
|
|
|
|
|
a.agent_name == cfg.name
|
|
|
|
|
&& matches!(a.status, AgentStatus::Running | AgentStatus::Pending)
|
|
|
|
|
})
|
|
|
|
|
})
|
|
|
|
|
.map(|cfg| cfg.name.clone())
|
|
|
|
|
.collect())
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// List all agents with their status.
|
2026-02-19 15:25:22 +00:00
|
|
|
pub fn list_agents(&self) -> Result<Vec<AgentInfo>, String> {
|
|
|
|
|
let agents = self.agents.lock().map_err(|e| e.to_string())?;
|
|
|
|
|
Ok(agents
|
|
|
|
|
.iter()
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
.map(|(key, agent)| {
|
|
|
|
|
// Extract story_id from composite key "story_id:agent_name"
|
|
|
|
|
let story_id = key
|
|
|
|
|
.rsplit_once(':')
|
|
|
|
|
.map(|(sid, _)| sid.to_string())
|
|
|
|
|
.unwrap_or_else(|| key.clone());
|
2026-02-20 13:16:04 +00:00
|
|
|
agent_info_from_entry(&story_id, agent)
|
2026-02-19 15:25:22 +00:00
|
|
|
})
|
|
|
|
|
.collect())
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Subscribe to events for a story agent.
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
pub fn subscribe(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
) -> Result<broadcast::Receiver<AgentEvent>, String> {
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
2026-02-19 17:58:53 +00:00
|
|
|
let agents = self.agents.lock().map_err(|e| e.to_string())?;
|
|
|
|
|
let agent = agents
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
.get(&key)
|
|
|
|
|
.ok_or_else(|| format!("No agent '{agent_name}' for story '{story_id}'"))?;
|
2026-02-19 17:58:53 +00:00
|
|
|
Ok(agent.tx.subscribe())
|
|
|
|
|
}
|
2026-02-19 15:25:22 +00:00
|
|
|
|
2026-02-20 11:57:25 +00:00
|
|
|
/// Drain accumulated events for polling. Returns all events since the last drain.
|
|
|
|
|
pub fn drain_events(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
) -> Result<Vec<AgentEvent>, String> {
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let agents = self.agents.lock().map_err(|e| e.to_string())?;
|
|
|
|
|
let agent = agents
|
|
|
|
|
.get(&key)
|
|
|
|
|
.ok_or_else(|| format!("No agent '{agent_name}' for story '{story_id}'"))?;
|
|
|
|
|
let mut log = agent.event_log.lock().map_err(|e| e.to_string())?;
|
|
|
|
|
Ok(log.drain(..).collect())
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 13:16:04 +00:00
|
|
|
/// Block until the agent reaches a terminal state (completed, failed, stopped).
/// Returns the agent's final `AgentInfo`.
/// `timeout_ms` caps how long to wait; returns an error if the deadline passes.
///
/// Terminal detection is event-driven: `Done`, `Error`, or a `Status` event
/// whose status string is exactly "stopped" ends the wait.
pub async fn wait_for_agent(
    &self,
    story_id: &str,
    agent_name: &str,
    timeout_ms: u64,
) -> Result<AgentInfo, String> {
    // Subscribe before checking status so we don't miss the terminal event
    // if the agent completes in the window between the two operations.
    let mut rx = self.subscribe(story_id, agent_name)?;

    // Return immediately if already in a terminal state.
    // (Scoped block so the pool mutex guard drops before awaiting below.)
    {
        let agents = self.agents.lock().map_err(|e| e.to_string())?;
        let key = composite_key(story_id, agent_name);
        if let Some(agent) = agents.get(&key)
            && matches!(agent.status, AgentStatus::Completed | AgentStatus::Failed)
        {
            return Ok(agent_info_from_entry(story_id, agent));
        }
    }

    // Absolute deadline; each recv() below is bounded by what remains of it.
    let deadline =
        tokio::time::Instant::now() + std::time::Duration::from_millis(timeout_ms);

    loop {
        let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
        if remaining.is_zero() {
            return Err(format!(
                "Timed out after {timeout_ms}ms waiting for agent '{agent_name}' on story '{story_id}'"
            ));
        }

        match tokio::time::timeout(remaining, rx.recv()).await {
            Ok(Ok(event)) => {
                // Classify the event before consuming it: Done/Error always
                // end the wait; Status only when it reports "stopped".
                let is_terminal = match &event {
                    AgentEvent::Done { .. } | AgentEvent::Error { .. } => true,
                    AgentEvent::Status { status, .. } if status == "stopped" => true,
                    _ => false,
                };
                if is_terminal {
                    let agents = self.agents.lock().map_err(|e| e.to_string())?;
                    let key = composite_key(story_id, agent_name);
                    return Ok(if let Some(agent) = agents.get(&key) {
                        agent_info_from_entry(story_id, agent)
                    } else {
                        // Agent was removed from map (e.g. stop_agent removes it after
                        // the "stopped" status event is sent). Synthesize a minimal
                        // AgentInfo from the terminal event itself.
                        let (status, session_id) = match event {
                            AgentEvent::Done { session_id, .. } => {
                                (AgentStatus::Completed, session_id)
                            }
                            _ => (AgentStatus::Failed, None),
                        };
                        AgentInfo {
                            story_id: story_id.to_string(),
                            agent_name: agent_name.to_string(),
                            status,
                            session_id,
                            worktree_path: None,
                            base_branch: None,
                            completion: None,
                            log_session_id: None,
                        }
                    });
                }
            }
            Ok(Err(broadcast::error::RecvError::Lagged(_))) => {
                // Missed some buffered events — check current status before resuming.
                let agents = self.agents.lock().map_err(|e| e.to_string())?;
                let key = composite_key(story_id, agent_name);
                if let Some(agent) = agents.get(&key)
                    && matches!(agent.status, AgentStatus::Completed | AgentStatus::Failed)
                {
                    return Ok(agent_info_from_entry(story_id, agent));
                }
                // Still running — continue the loop.
            }
            Ok(Err(broadcast::error::RecvError::Closed)) => {
                // Channel closed: no more events will arrive. Return current state.
                let agents = self.agents.lock().map_err(|e| e.to_string())?;
                let key = composite_key(story_id, agent_name);
                if let Some(agent) = agents.get(&key) {
                    return Ok(agent_info_from_entry(story_id, agent));
                }
                return Err(format!(
                    "Agent '{agent_name}' for story '{story_id}' channel closed unexpectedly"
                ));
            }
            Err(_) => {
                // timeout() elapsed with no event at all.
                return Err(format!(
                    "Timed out after {timeout_ms}ms waiting for agent '{agent_name}' on story '{story_id}'"
                ));
            }
        }
    }
}
|
|
|
|
|
|
2026-02-20 14:09:52 +00:00
|
|
|
/// Create a worktree for the given story using the server port (writes .mcp.json).
|
|
|
|
|
pub async fn create_worktree(
|
|
|
|
|
&self,
|
|
|
|
|
project_root: &Path,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
) -> Result<worktree::WorktreeInfo, String> {
|
|
|
|
|
let config = ProjectConfig::load(project_root)?;
|
|
|
|
|
worktree::create_worktree(project_root, story_id, &config, self.port).await
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
/// Advance the pipeline after an agent completes.
///
/// Called internally by `report_completion` as a background task.
/// Reads the stored completion report and project_root from the agent,
/// then drives the next pipeline stage based on the agent's role:
///
/// - **Coder** + gates passed → move story to `work/3_qa/`, start `qa` agent.
/// - **Coder** + gates failed → restart the same coder agent with failure context.
/// - **QA** + gates passed + coverage passed → move story to `work/4_merge/`, start `mergemaster` agent.
/// - **QA** + gates passed + coverage failed → restart `qa` with coverage failure context.
/// - **QA** + gates failed → restart `qa` with failure context.
/// - **Mergemaster** → run `script/test` on master; if pass: archive + cleanup worktree;
///   if fail: restart `mergemaster` with failure context.
/// - **Other** (supervisor, unknown) → no automatic advancement.
async fn run_pipeline_advance(
    &self,
    story_id: &str,
    agent_name: &str,
    completion: CompletionReport,
    project_root: Option<PathBuf>,
    worktree_path: Option<PathBuf>,
    merge_failure_reported: bool,
) {
    // Without a project root there is nothing to advance against.
    let project_root = match project_root {
        Some(p) => p,
        None => {
            slog_warn!("[pipeline] No project_root for '{story_id}:{agent_name}'");
            return;
        }
    };

    // Resolve the agent's pipeline stage: prefer the configured roster entry,
    // fall back to the name-based heuristic when the agent isn't in config.
    let config = ProjectConfig::load(&project_root).unwrap_or_default();
    let stage = config
        .find_agent(agent_name)
        .map(agent_config_stage)
        .unwrap_or_else(|| pipeline_stage(agent_name));

    match stage {
        PipelineStage::Other => {
            // Supervisors and unknown agents do not advance the pipeline.
        }
        PipelineStage::Coder => {
            if completion.gates_passed {
                slog!(
                    "[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA."
                );
                if let Err(e) = move_story_to_qa(&project_root, story_id) {
                    slog_error!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
                    return;
                }
                if let Err(e) = self
                    .start_agent(&project_root, story_id, Some("qa"), None)
                    .await
                {
                    slog_error!("[pipeline] Failed to start qa agent for '{story_id}': {e}");
                }
                // Coder slot is now free — pick up any other unassigned work in 2_current/.
                self.auto_assign_available_work(&project_root).await;
            } else {
                slog!(
                    "[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
                );
                // Restart the same coder with the gate output appended as context.
                let context = format!(
                    "\n\n---\n## Previous Attempt Failed\n\
                    The acceptance gates failed with the following output:\n{}\n\n\
                    Please review the failures above, fix the issues, and try again.",
                    completion.gate_output
                );
                if let Err(e) = self
                    .start_agent(&project_root, story_id, Some(agent_name), Some(&context))
                    .await
                {
                    slog_error!(
                        "[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}"
                    );
                }
            }
        }
        PipelineStage::Qa => {
            if completion.gates_passed {
                // Run coverage gate in the QA worktree before advancing to merge.
                // Falls back to the project root when no worktree is recorded.
                let coverage_path = worktree_path.clone().unwrap_or_else(|| project_root.clone());
                let cp = coverage_path.clone();
                // spawn_blocking: the coverage gate shells out synchronously.
                let coverage_result =
                    tokio::task::spawn_blocking(move || run_coverage_gate(&cp))
                        .await
                        .unwrap_or_else(|e| {
                            slog_warn!("[pipeline] Coverage gate task panicked: {e}");
                            Ok((false, format!("Coverage gate task panicked: {e}")))
                        });
                let (coverage_passed, coverage_output) = match coverage_result {
                    Ok(pair) => pair,
                    Err(e) => (false, e),
                };

                if coverage_passed {
                    slog!(
                        "[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge."
                    );
                    if let Err(e) = move_story_to_merge(&project_root, story_id) {
                        slog_error!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}");
                        return;
                    }
                    if let Err(e) = self
                        .start_agent(&project_root, story_id, Some("mergemaster"), None)
                        .await
                    {
                        slog_error!("[pipeline] Failed to start mergemaster for '{story_id}': {e}");
                    }
                    // QA slot is now free — pick up any other unassigned work in 3_qa/.
                    self.auto_assign_available_work(&project_root).await;
                } else {
                    slog!(
                        "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
                    );
                    let context = format!(
                        "\n\n---\n## Coverage Gate Failed\n\
                        The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
                        Please improve test coverage until the coverage gate passes.",
                        coverage_output
                    );
                    if let Err(e) = self
                        .start_agent(&project_root, story_id, Some("qa"), Some(&context))
                        .await
                    {
                        slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
                    }
                }
            } else {
                slog!(
                    "[pipeline] QA failed gates for '{story_id}'. Restarting."
                );
                let context = format!(
                    "\n\n---\n## Previous QA Attempt Failed\n\
                    The acceptance gates failed with the following output:\n{}\n\n\
                    Please re-run and fix the issues.",
                    completion.gate_output
                );
                if let Err(e) = self
                    .start_agent(&project_root, story_id, Some("qa"), Some(&context))
                    .await
                {
                    slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
                }
            }
        }
        PipelineStage::Mergemaster => {
            // Block advancement if the mergemaster explicitly reported a failure.
            // The server-owned gate check runs in the feature-branch worktree (not
            // master), so `gates_passed=true` is misleading when no code was merged.
            if merge_failure_reported {
                slog!(
                    "[pipeline] Pipeline advancement blocked for '{story_id}': \
                    mergemaster explicitly reported a merge failure. \
                    Story stays in 4_merge/ for human review."
                );
                return;
            }

            // Run script/test on master (project_root) as the post-merge verification.
            slog!(
                "[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master."
            );
            let root = project_root.clone();
            let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root))
                .await
                .unwrap_or_else(|e| {
                    slog_warn!("[pipeline] Post-merge test task panicked: {e}");
                    Ok((false, format!("Test task panicked: {e}")))
                });
            let (passed, output) = match test_result {
                Ok(pair) => pair,
                Err(e) => (false, e),
            };

            if passed {
                slog!(
                    "[pipeline] Post-merge tests passed for '{story_id}'. Moving to done."
                );
                if let Err(e) = move_story_to_archived(&project_root, story_id) {
                    slog_error!("[pipeline] Failed to move '{story_id}' to done: {e}");
                }
                self.remove_agents_for_story(story_id);
                // Mergemaster slot is now free — pick up any other items in 4_merge/.
                self.auto_assign_available_work(&project_root).await;
                // TODO: Re-enable worktree cleanup once we have persistent agent logs.
                // Removing worktrees destroys evidence needed to debug empty-commit agents.
                // let config =
                //     crate::config::ProjectConfig::load(&project_root).unwrap_or_default();
                // if let Err(e) =
                //     worktree::remove_worktree_by_story_id(&project_root, story_id, &config)
                //         .await
                // {
                //     slog!(
                //         "[pipeline] Failed to remove worktree for '{story_id}': {e}"
                //     );
                // }
                slog!(
                    "[pipeline] Story '{story_id}' done. Worktree preserved for inspection."
                );
            } else {
                slog!(
                    "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
                );
                let context = format!(
                    "\n\n---\n## Post-Merge Test Failed\n\
                    The tests on master failed with the following output:\n{}\n\n\
                    Please investigate and resolve the failures, then call merge_agent_work again.",
                    output
                );
                if let Err(e) = self
                    .start_agent(&project_root, story_id, Some("mergemaster"), Some(&context))
                    .await
                {
                    slog_error!(
                        "[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
                    );
                }
            }
        }
    }
}
|
|
|
|
|
|
2026-02-23 15:00:10 +00:00
|
|
|
/// Internal: report that an agent has finished work on a story.
|
|
|
|
|
///
|
|
|
|
|
/// **Note:** This is no longer exposed as an MCP tool. The server now
|
|
|
|
|
/// automatically runs completion gates when an agent process exits
|
|
|
|
|
/// (see `run_server_owned_completion`). This method is retained for
|
|
|
|
|
/// backwards compatibility and testing.
|
2026-02-20 15:02:34 +00:00
|
|
|
///
|
|
|
|
|
/// - Rejects with an error if the worktree has uncommitted changes.
|
|
|
|
|
/// - Runs acceptance gates (cargo clippy + cargo nextest run / cargo test).
|
|
|
|
|
/// - Stores the `CompletionReport` on the agent record.
|
|
|
|
|
/// - Transitions status to `Completed` (gates passed) or `Failed` (gates failed).
|
|
|
|
|
/// - Emits a `Done` event so `wait_for_agent` unblocks.
|
2026-02-23 15:00:10 +00:00
|
|
|
    // NOTE(review): presumably only invoked via the MCP tool surface, hence
    // dead_code from rustc's point of view — TODO confirm and document the caller.
    #[allow(dead_code)]
    pub async fn report_completion(
        &self,
        story_id: &str,
        agent_name: &str,
        summary: &str,
    ) -> Result<CompletionReport, String> {
        let key = composite_key(story_id, agent_name);

        // Verify agent exists, is Running, and grab its worktree path.
        // The lock guard is confined to this block so it is dropped before any
        // `.await` below (std::sync::Mutex must not be held across awaits).
        let worktree_path = {
            let agents = self.agents.lock().map_err(|e| e.to_string())?;
            let agent = agents
                .get(&key)
                .ok_or_else(|| format!("No agent '{agent_name}' for story '{story_id}'"))?;

            if agent.status != AgentStatus::Running {
                return Err(format!(
                    "Agent '{agent_name}' for story '{story_id}' is not running (status: {}). \
                     report_completion can only be called by a running agent.",
                    agent.status
                ));
            }

            agent
                .worktree_info
                .as_ref()
                .map(|wt| wt.path.clone())
                .ok_or_else(|| {
                    format!(
                        "Agent '{agent_name}' for story '{story_id}' has no worktree. \
                         Cannot run acceptance gates."
                    )
                })?
        };

        let path = worktree_path.clone();

        // Run gate checks in a blocking thread to avoid stalling the async runtime.
        let (gates_passed, gate_output) = tokio::task::spawn_blocking(move || {
            // Step 1: Reject if worktree is dirty.
            check_uncommitted_changes(&path)?;
            // Step 2: Run clippy + tests and return (passed, output).
            run_acceptance_gates(&path)
        })
        .await
        .map_err(|e| format!("Gate check task panicked: {e}"))??;

        let report = CompletionReport {
            summary: summary.to_string(),
            gates_passed,
            gate_output,
        };

        // Extract data for pipeline advance, then remove the entry so
        // completed agents never appear in list_agents.
        // The agent may legitimately have vanished while the (long) gate check
        // ran without the lock held — hence the second lookup and error below.
        let (tx, session_id, project_root_for_advance, wt_path_for_advance, merge_failure_reported_for_advance) = {
            let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
            let agent = agents.get_mut(&key).ok_or_else(|| {
                format!("Agent '{agent_name}' for story '{story_id}' disappeared during gate check")
            })?;
            agent.completion = Some(report.clone());
            let tx = agent.tx.clone();
            let sid = agent.session_id.clone();
            let pr = agent.project_root.clone();
            let wt = agent.worktree_info.as_ref().map(|w| w.path.clone());
            let mfr = agent.merge_failure_reported;
            agents.remove(&key);
            (tx, sid, pr, wt, mfr)
        };

        // Emit Done so wait_for_agent unblocks. Send errors are ignored: a
        // broadcast send only fails when there are no subscribers, which is fine.
        let _ = tx.send(AgentEvent::Done {
            story_id: story_id.to_string(),
            agent_name: agent_name.to_string(),
            session_id,
        });

        // Notify WebSocket clients that the agent is gone.
        Self::notify_agent_state_changed(&self.watcher_tx);

        // Advance the pipeline state machine in a background task. A shallow
        // clone of the pool (Arc handles + copies) is moved into the task so
        // `self` need not be 'static.
        let pool_clone = Self {
            agents: Arc::clone(&self.agents),
            port: self.port,
            child_killers: Arc::clone(&self.child_killers),
            watcher_tx: self.watcher_tx.clone(),
        };
        let sid = story_id.to_string();
        let aname = agent_name.to_string();
        let report_for_advance = report.clone();
        tokio::spawn(async move {
            pool_clone
                .run_pipeline_advance(
                    &sid,
                    &aname,
                    report_for_advance,
                    project_root_for_advance,
                    wt_path_for_advance,
                    merge_failure_reported_for_advance,
                )
                .await;
        });

        // Return the gate report to the calling agent; pipeline advance
        // continues concurrently in the spawned task above.
        Ok(report)
    }
|
|
|
|
|
|
2026-02-20 17:36:35 +00:00
|
|
|
    /// Run the full mergemaster pipeline for a completed story:
    ///
    /// 1. Squash-merge the story's feature branch into the current branch (master).
    /// 2. If conflicts are found: abort the merge and report them.
    /// 3. Quality gates run **inside the merge worktree** before master is touched.
    /// 4. If gates pass: cherry-pick the squash commit onto master and archive the story.
    ///
    /// Returns a `MergeReport` with full details of what happened. Errors are
    /// returned only for infrastructure failures (e.g. a panicked merge task);
    /// merge/gate failures are reported as `Ok` with `success: false`.
    pub async fn merge_agent_work(
        &self,
        project_root: &Path,
        story_id: &str,
    ) -> Result<MergeReport, String> {
        // Branch naming convention shared with the worktree module.
        let branch = format!("feature/story-{story_id}");
        let wt_path = worktree::worktree_path(project_root, story_id);
        // Owned copies so the closure below can be 'static for spawn_blocking.
        let root = project_root.to_path_buf();
        let sid = story_id.to_string();
        let br = branch.clone();

        // Run blocking operations (git + cargo + quality gates) off the async runtime.
        // Quality gates now run inside run_squash_merge before the fast-forward.
        let merge_result =
            tokio::task::spawn_blocking(move || run_squash_merge(&root, &br, &sid))
                .await
                .map_err(|e| format!("Merge task panicked: {e}"))??;

        if !merge_result.success {
            // Merge or gates failed — surface every detail but touch nothing:
            // no archive, no worktree cleanup, so the story can be retried.
            return Ok(MergeReport {
                story_id: story_id.to_string(),
                success: false,
                had_conflicts: merge_result.had_conflicts,
                conflicts_resolved: merge_result.conflicts_resolved,
                conflict_details: merge_result.conflict_details,
                gates_passed: merge_result.gates_passed,
                gate_output: merge_result.output,
                worktree_cleaned_up: false,
                story_archived: false,
            });
        }

        // Merge + gates both passed — archive the story and clean up agent entries.
        let story_archived = move_story_to_archived(project_root, story_id).is_ok();
        if story_archived {
            self.remove_agents_for_story(story_id);
        }

        // Clean up the worktree if it exists. Best-effort: failure only flips
        // the report flag, it does not fail the merge.
        let worktree_cleaned_up = if wt_path.exists() {
            let config = crate::config::ProjectConfig::load(project_root)
                .unwrap_or_default();
            worktree::remove_worktree_by_story_id(project_root, story_id, &config)
                .await
                .is_ok()
        } else {
            false
        };

        // Mergemaster slot is now free — trigger auto-assign so remaining
        // items in 4_merge/ (or other stages) get picked up. The normal
        // server-owned completion handler won't run because we already
        // removed the agent entry above.
        self.auto_assign_available_work(project_root).await;

        Ok(MergeReport {
            story_id: story_id.to_string(),
            success: true,
            had_conflicts: merge_result.had_conflicts,
            conflicts_resolved: merge_result.conflicts_resolved,
            conflict_details: merge_result.conflict_details,
            gates_passed: true,
            gate_output: merge_result.output,
            worktree_cleaned_up,
            story_archived,
        })
    }
|
|
|
|
|
|
2026-02-20 14:09:52 +00:00
|
|
|
/// Return the port this server is running on.
|
|
|
|
|
pub fn port(&self) -> u16 {
|
|
|
|
|
self.port
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Get project root helper.
|
|
|
|
|
pub fn get_project_root(
|
|
|
|
|
&self,
|
|
|
|
|
state: &crate::state::SessionState,
|
|
|
|
|
) -> Result<PathBuf, String> {
|
|
|
|
|
state.get_project_root()
|
2026-02-19 15:25:22 +00:00
|
|
|
}
|
2026-02-20 13:16:04 +00:00
|
|
|
|
2026-02-23 20:52:06 +00:00
|
|
|
/// Get the log session ID and project root for an agent, if available.
|
|
|
|
|
///
|
|
|
|
|
/// Used by MCP tools to find the persistent log file for a completed agent.
|
|
|
|
|
pub fn get_log_info(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
) -> Option<(String, PathBuf)> {
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let agents = self.agents.lock().ok()?;
|
|
|
|
|
let agent = agents.get(&key)?;
|
|
|
|
|
let session_id = agent.log_session_id.clone()?;
|
|
|
|
|
let project_root = agent.project_root.clone()?;
|
|
|
|
|
Some((session_id, project_root))
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-26 16:12:23 +00:00
|
|
|
/// Record that the mergemaster agent for `story_id` explicitly reported a
|
|
|
|
|
/// merge failure via the `report_merge_failure` MCP tool.
|
|
|
|
|
///
|
|
|
|
|
/// Sets `merge_failure_reported = true` on the active mergemaster agent so
|
|
|
|
|
/// that `run_pipeline_advance` can block advancement to `5_done/` even when
|
|
|
|
|
/// the server-owned gate check returns `gates_passed=true` (those gates run
|
|
|
|
|
/// in the feature-branch worktree, not on master).
|
|
|
|
|
pub fn set_merge_failure_reported(&self, story_id: &str) {
|
|
|
|
|
match self.agents.lock() {
|
|
|
|
|
Ok(mut lock) => {
|
|
|
|
|
let found = lock.iter_mut().find(|(key, agent)| {
|
|
|
|
|
let key_story_id = key
|
|
|
|
|
.rsplit_once(':')
|
|
|
|
|
.map(|(sid, _)| sid)
|
|
|
|
|
.unwrap_or(key.as_str());
|
|
|
|
|
key_story_id == story_id
|
|
|
|
|
&& pipeline_stage(&agent.agent_name) == PipelineStage::Mergemaster
|
|
|
|
|
});
|
|
|
|
|
match found {
|
|
|
|
|
Some((_, agent)) => {
|
|
|
|
|
agent.merge_failure_reported = true;
|
|
|
|
|
slog!(
|
|
|
|
|
"[pipeline] Merge failure flag set for '{story_id}:{}'",
|
|
|
|
|
agent.agent_name
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
slog_warn!(
|
|
|
|
|
"[pipeline] set_merge_failure_reported: no running mergemaster found \
|
|
|
|
|
for story '{story_id}' — flag not set"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
slog_error!(
|
|
|
|
|
"[pipeline] set_merge_failure_reported: could not lock agents: {e}"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 13:16:04 +00:00
|
|
|
/// Test helper: inject a pre-built agent entry so unit tests can exercise
|
|
|
|
|
/// wait/subscribe logic without spawning a real process.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn inject_test_agent(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
status: AgentStatus,
|
|
|
|
|
) -> broadcast::Sender<AgentEvent> {
|
|
|
|
|
let (tx, _) = broadcast::channel::<AgentEvent>(64);
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let mut agents = self.agents.lock().unwrap();
|
|
|
|
|
agents.insert(
|
|
|
|
|
key,
|
|
|
|
|
StoryAgent {
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
status,
|
|
|
|
|
worktree_info: None,
|
|
|
|
|
session_id: None,
|
|
|
|
|
tx: tx.clone(),
|
|
|
|
|
task_handle: None,
|
|
|
|
|
event_log: Arc::new(Mutex::new(Vec::new())),
|
2026-02-20 15:02:34 +00:00
|
|
|
completion: None,
|
2026-02-23 13:13:41 +00:00
|
|
|
project_root: None,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: None,
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: false,
|
2026-02-20 15:02:34 +00:00
|
|
|
},
|
|
|
|
|
);
|
|
|
|
|
tx
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Test helper: inject an agent with a specific worktree path for testing
|
|
|
|
|
/// gate-related logic.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn inject_test_agent_with_path(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
status: AgentStatus,
|
|
|
|
|
worktree_path: PathBuf,
|
|
|
|
|
) -> broadcast::Sender<AgentEvent> {
|
|
|
|
|
let (tx, _) = broadcast::channel::<AgentEvent>(64);
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let mut agents = self.agents.lock().unwrap();
|
|
|
|
|
agents.insert(
|
|
|
|
|
key,
|
|
|
|
|
StoryAgent {
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
status,
|
|
|
|
|
worktree_info: Some(WorktreeInfo {
|
|
|
|
|
path: worktree_path,
|
|
|
|
|
branch: format!("feature/story-{story_id}"),
|
|
|
|
|
base_branch: "master".to_string(),
|
|
|
|
|
}),
|
|
|
|
|
session_id: None,
|
|
|
|
|
tx: tx.clone(),
|
|
|
|
|
task_handle: None,
|
|
|
|
|
event_log: Arc::new(Mutex::new(Vec::new())),
|
|
|
|
|
completion: None,
|
2026-02-23 13:13:41 +00:00
|
|
|
project_root: None,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: None,
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: false,
|
2026-02-23 13:13:41 +00:00
|
|
|
},
|
|
|
|
|
);
|
|
|
|
|
tx
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 18:20:24 +00:00
|
|
|
    /// Automatically assign free agents to stories waiting in the active pipeline stages.
    ///
    /// Scans `work/2_current/`, `work/3_qa/`, and `work/4_merge/` for items that have no
    /// active agent and assigns the first free agent of the appropriate role. Items in
    /// `work/1_upcoming/` are never auto-started.
    ///
    /// Respects the configured agent roster: the maximum number of concurrently active agents
    /// per role is bounded by the count of agents of that role defined in `project.toml`.
    ///
    /// Infallible by design: config/lock failures are logged and abort the scan
    /// rather than propagating an error.
    pub async fn auto_assign_available_work(&self, project_root: &Path) {
        let config = match ProjectConfig::load(project_root) {
            Ok(c) => c,
            Err(e) => {
                slog_warn!("[auto-assign] Failed to load project config: {e}");
                return;
            }
        };

        // Process each active pipeline stage in order.
        let stages: [(&str, PipelineStage); 3] = [
            ("2_current", PipelineStage::Coder),
            ("3_qa", PipelineStage::Qa),
            ("4_merge", PipelineStage::Mergemaster),
        ];

        for (stage_dir, stage) in &stages {
            let items = scan_stage_items(project_root, stage_dir);
            if items.is_empty() {
                continue;
            }

            for story_id in &items {
                // Re-acquire the lock on each iteration to see state changes
                // from previous start_agent calls in the same pass.
                // The guard lives only inside this block so it is released
                // before the `start_agent(...).await` below (std Mutex must
                // not be held across an await).
                let (already_assigned, free_agent) = {
                    let agents = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let assigned = is_story_assigned_for_stage(&config, &agents, story_id, stage);
                    let free = if assigned {
                        None
                    } else {
                        find_free_agent_for_stage(&config, &agents, stage)
                            .map(|s| s.to_string())
                    };
                    (assigned, free)
                };

                if already_assigned {
                    // Story already has an active agent — skip silently.
                    continue;
                }

                match free_agent {
                    Some(agent_name) => {
                        slog!(
                            "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
                        );
                        // A failed start is logged but does not stop the scan:
                        // later items may still find a usable agent.
                        if let Err(e) = self
                            .start_agent(project_root, story_id, Some(&agent_name), None)
                            .await
                        {
                            slog!(
                                "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
                            );
                        }
                    }
                    None => {
                        // No free agents of this type — stop scanning this stage.
                        slog!(
                            "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
                            stage
                        );
                        break;
                    }
                }
            }
        }
    }
|
|
|
|
|
|
2026-02-23 20:38:17 +00:00
|
|
|
/// Reconcile stories whose agent work was committed while the server was offline.
|
|
|
|
|
///
|
|
|
|
|
/// On server startup the in-memory agent pool is empty, so any story that an agent
|
|
|
|
|
/// completed during a previous session is stuck: the worktree has committed work but
|
|
|
|
|
/// the pipeline never advanced. This method detects those stories, re-runs the
|
|
|
|
|
/// acceptance gates, and advances the pipeline stage so that `auto_assign_available_work`
|
|
|
|
|
/// (called immediately after) picks up the right next-stage agents.
|
|
|
|
|
///
|
|
|
|
|
/// Algorithm:
|
|
|
|
|
/// 1. List all worktree directories under `{project_root}/.story_kit/worktrees/`.
|
|
|
|
|
/// 2. For each worktree, check whether its feature branch has commits ahead of the
|
|
|
|
|
/// base branch (`master` / `main`).
|
|
|
|
|
/// 3. If committed work is found AND the story is in `2_current/` or `3_qa/`:
|
|
|
|
|
/// - Run acceptance gates (uncommitted-change check + clippy + tests).
|
|
|
|
|
/// - On pass + `2_current/`: move the story to `3_qa/`.
|
|
|
|
|
/// - On pass + `3_qa/`: run the coverage gate; if that also passes move to `4_merge/`.
|
|
|
|
|
/// - On failure: leave the story where it is so `auto_assign_available_work` can
|
|
|
|
|
/// start a fresh agent to retry.
|
|
|
|
|
/// 4. Stories in `4_merge/` are left for `auto_assign_available_work` to handle via a
|
|
|
|
|
/// fresh mergemaster (squash-merge must be re-executed by the mergemaster agent).
|
2026-02-23 22:50:57 +00:00
|
|
|
pub async fn reconcile_on_startup(
|
|
|
|
|
&self,
|
|
|
|
|
project_root: &Path,
|
|
|
|
|
progress_tx: &broadcast::Sender<ReconciliationEvent>,
|
|
|
|
|
) {
|
2026-02-23 20:38:17 +00:00
|
|
|
let worktrees = match worktree::list_worktrees(project_root) {
|
|
|
|
|
Ok(wt) => wt,
|
|
|
|
|
Err(e) => {
|
|
|
|
|
eprintln!("[startup:reconcile] Failed to list worktrees: {e}");
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: String::new(),
|
|
|
|
|
status: "done".to_string(),
|
|
|
|
|
message: format!("Reconciliation failed: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
for wt_entry in &worktrees {
|
|
|
|
|
let story_id = &wt_entry.story_id;
|
|
|
|
|
let wt_path = wt_entry.path.clone();
|
|
|
|
|
|
|
|
|
|
// Determine which active stage the story is in.
|
|
|
|
|
let stage_dir = match find_active_story_stage(project_root, story_id) {
|
|
|
|
|
Some(s) => s,
|
|
|
|
|
None => continue, // Not in any active stage (upcoming/archived or unknown).
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// 4_merge/ is left for auto_assign to handle with a fresh mergemaster.
|
|
|
|
|
if stage_dir == "4_merge" {
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "checking".to_string(),
|
|
|
|
|
message: format!("Checking for committed work in {stage_dir}/"),
|
|
|
|
|
});
|
|
|
|
|
|
2026-02-23 20:38:17 +00:00
|
|
|
// Check whether the worktree has commits ahead of the base branch.
|
|
|
|
|
let wt_path_for_check = wt_path.clone();
|
|
|
|
|
let has_work = tokio::task::spawn_blocking(move || {
|
|
|
|
|
worktree_has_committed_work(&wt_path_for_check)
|
|
|
|
|
})
|
|
|
|
|
.await
|
|
|
|
|
.unwrap_or(false);
|
|
|
|
|
|
|
|
|
|
if !has_work {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] No committed work for '{story_id}' in {stage_dir}/; skipping."
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "skipped".to_string(),
|
|
|
|
|
message: "No committed work found; skipping.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Found committed work for '{story_id}' in {stage_dir}/. Running acceptance gates."
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "gates_running".to_string(),
|
|
|
|
|
message: "Running acceptance gates…".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
|
|
|
|
|
// Run acceptance gates on the worktree.
|
|
|
|
|
let wt_path_for_gates = wt_path.clone();
|
|
|
|
|
let gates_result = tokio::task::spawn_blocking(move || {
|
|
|
|
|
check_uncommitted_changes(&wt_path_for_gates)?;
|
|
|
|
|
run_acceptance_gates(&wt_path_for_gates)
|
|
|
|
|
})
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
let (gates_passed, gate_output) = match gates_result {
|
|
|
|
|
Ok(Ok(pair)) => pair,
|
|
|
|
|
Ok(Err(e)) => {
|
|
|
|
|
eprintln!("[startup:reconcile] Gate check error for '{story_id}': {e}");
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Gate error: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Gate check task panicked for '{story_id}': {e}"
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Gate task panicked: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if !gates_passed {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Gates failed for '{story_id}': {gate_output}\n\
|
|
|
|
|
Leaving in {stage_dir}/ for auto-assign to restart the agent."
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: "Gates failed; will be retried by auto-assign.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Gates passed for '{story_id}' (stage: {stage_dir}/)."
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
if stage_dir == "2_current" {
|
|
|
|
|
// Coder stage → advance to QA.
|
|
|
|
|
if let Err(e) = move_story_to_qa(project_root, story_id) {
|
|
|
|
|
eprintln!("[startup:reconcile] Failed to move '{story_id}' to 3_qa/: {e}");
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Failed to advance to QA: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
} else {
|
|
|
|
|
eprintln!("[startup:reconcile] Moved '{story_id}' → 3_qa/.");
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "advanced".to_string(),
|
|
|
|
|
message: "Gates passed — moved to QA.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
}
|
|
|
|
|
} else if stage_dir == "3_qa" {
|
|
|
|
|
// QA stage → run coverage gate before advancing to merge.
|
|
|
|
|
let wt_path_for_cov = wt_path.clone();
|
|
|
|
|
let coverage_result =
|
|
|
|
|
tokio::task::spawn_blocking(move || run_coverage_gate(&wt_path_for_cov))
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
let (coverage_passed, coverage_output) = match coverage_result {
|
|
|
|
|
Ok(Ok(pair)) => pair,
|
|
|
|
|
Ok(Err(e)) => {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Coverage gate error for '{story_id}': {e}"
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Coverage gate error: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Coverage gate panicked for '{story_id}': {e}"
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Coverage gate panicked: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if coverage_passed {
|
|
|
|
|
if let Err(e) = move_story_to_merge(project_root, story_id) {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Failed to move '{story_id}' to 4_merge/: {e}"
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: format!("Failed to advance to merge: {e}"),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
} else {
|
|
|
|
|
eprintln!("[startup:reconcile] Moved '{story_id}' → 4_merge/.");
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "advanced".to_string(),
|
|
|
|
|
message: "Gates passed — moved to merge.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
eprintln!(
|
|
|
|
|
"[startup:reconcile] Coverage gate failed for '{story_id}': {coverage_output}\n\
|
|
|
|
|
Leaving in 3_qa/ for auto-assign to restart the QA agent."
|
|
|
|
|
);
|
2026-02-23 22:50:57 +00:00
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: story_id.clone(),
|
|
|
|
|
status: "failed".to_string(),
|
|
|
|
|
message: "Coverage gate failed; will be retried.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2026-02-23 22:50:57 +00:00
|
|
|
|
|
|
|
|
// Signal that reconciliation is complete.
|
|
|
|
|
let _ = progress_tx.send(ReconciliationEvent {
|
|
|
|
|
story_id: String::new(),
|
|
|
|
|
status: "done".to_string(),
|
|
|
|
|
message: "Startup reconciliation complete.".to_string(),
|
|
|
|
|
});
|
2026-02-23 20:38:17 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
/// Test helper: inject an agent with a completion report and project_root
|
|
|
|
|
/// for testing pipeline advance logic without spawning real agents.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn inject_test_agent_with_completion(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
status: AgentStatus,
|
|
|
|
|
project_root: PathBuf,
|
|
|
|
|
completion: CompletionReport,
|
|
|
|
|
) -> broadcast::Sender<AgentEvent> {
|
|
|
|
|
let (tx, _) = broadcast::channel::<AgentEvent>(64);
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let mut agents = self.agents.lock().unwrap();
|
|
|
|
|
agents.insert(
|
|
|
|
|
key,
|
|
|
|
|
StoryAgent {
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
status,
|
|
|
|
|
worktree_info: None,
|
|
|
|
|
session_id: None,
|
|
|
|
|
tx: tx.clone(),
|
|
|
|
|
task_handle: None,
|
|
|
|
|
event_log: Arc::new(Mutex::new(Vec::new())),
|
|
|
|
|
completion: Some(completion),
|
|
|
|
|
project_root: Some(project_root),
|
2026-02-23 20:52:06 +00:00
|
|
|
log_session_id: None,
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: false,
|
2026-02-20 13:16:04 +00:00
|
|
|
},
|
|
|
|
|
);
|
|
|
|
|
tx
|
|
|
|
|
}
|
2026-02-24 13:13:16 +00:00
|
|
|
|
|
|
|
|
/// Inject a Running agent with a pre-built (possibly finished) task handle.
|
|
|
|
|
/// Used by watchdog tests to simulate an orphaned agent.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn inject_test_agent_with_handle(
|
|
|
|
|
&self,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
status: AgentStatus,
|
|
|
|
|
task_handle: tokio::task::JoinHandle<()>,
|
|
|
|
|
) -> broadcast::Sender<AgentEvent> {
|
|
|
|
|
let (tx, _) = broadcast::channel::<AgentEvent>(64);
|
|
|
|
|
let key = composite_key(story_id, agent_name);
|
|
|
|
|
let mut agents = self.agents.lock().unwrap();
|
|
|
|
|
agents.insert(
|
|
|
|
|
key,
|
|
|
|
|
StoryAgent {
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
status,
|
|
|
|
|
worktree_info: None,
|
|
|
|
|
session_id: None,
|
|
|
|
|
tx: tx.clone(),
|
|
|
|
|
task_handle: Some(task_handle),
|
|
|
|
|
event_log: Arc::new(Mutex::new(Vec::new())),
|
|
|
|
|
completion: None,
|
|
|
|
|
project_root: None,
|
|
|
|
|
log_session_id: None,
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: false,
|
2026-02-24 13:13:16 +00:00
|
|
|
},
|
|
|
|
|
);
|
|
|
|
|
tx
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 17:56:40 +00:00
|
|
|
/// Test helper: inject a child killer into the registry.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn inject_child_killer(&self, key: &str, killer: Box<dyn ChildKiller + Send + Sync>) {
|
|
|
|
|
let mut killers = self.child_killers.lock().unwrap();
|
|
|
|
|
killers.insert(key.to_string(), killer);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Test helper: return the number of registered child killers.
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn child_killer_count(&self) -> usize {
|
|
|
|
|
self.child_killers.lock().unwrap().len()
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 13:13:16 +00:00
|
|
|
/// Run a single watchdog pass synchronously (test helper).
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
pub fn run_watchdog_once(&self) {
|
|
|
|
|
check_orphaned_agents(&self.agents);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Spawn a background watchdog task that periodically checks for Running agents
|
|
|
|
|
/// whose underlying task has already finished (orphaned entries). Any such agent
|
|
|
|
|
/// is marked Failed and an Error event is emitted so that `wait_for_agent` unblocks.
|
|
|
|
|
///
|
|
|
|
|
/// The watchdog runs every 30 seconds. It is a safety net for edge cases where the
|
|
|
|
|
/// PTY read loop exits without updating the agent status (e.g. a panic in the
|
|
|
|
|
/// spawn_blocking task, or an external SIGKILL that closes the PTY fd immediately).
|
2026-02-24 17:28:45 +00:00
|
|
|
///
|
|
|
|
|
/// When orphaned agents are detected and a `project_root` is provided, auto-assign
|
|
|
|
|
/// is triggered so that free agents can pick up unassigned work.
|
|
|
|
|
pub fn spawn_watchdog(pool: Arc<AgentPool>, project_root: Option<PathBuf>) {
|
2026-02-24 13:13:16 +00:00
|
|
|
tokio::spawn(async move {
|
|
|
|
|
let mut interval =
|
|
|
|
|
tokio::time::interval(std::time::Duration::from_secs(30));
|
|
|
|
|
loop {
|
|
|
|
|
interval.tick().await;
|
2026-02-24 17:28:45 +00:00
|
|
|
let found = check_orphaned_agents(&pool.agents);
|
|
|
|
|
if found > 0
|
|
|
|
|
&& let Some(ref root) = project_root
|
|
|
|
|
{
|
|
|
|
|
slog!(
|
|
|
|
|
"[watchdog] {found} orphaned agent(s) detected; triggering auto-assign."
|
|
|
|
|
);
|
|
|
|
|
pool.auto_assign_available_work(root).await;
|
|
|
|
|
}
|
2026-02-24 13:13:16 +00:00
|
|
|
}
|
|
|
|
|
});
|
2026-02-24 13:24:39 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-24 13:20:59 +00:00
|
|
|
/// Remove all agent entries for a given story_id from the pool.
|
|
|
|
|
///
|
|
|
|
|
/// Called when a story is archived so that stale entries don't accumulate.
|
|
|
|
|
/// Returns the number of entries removed.
|
|
|
|
|
pub fn remove_agents_for_story(&self, story_id: &str) -> usize {
|
|
|
|
|
let mut agents = match self.agents.lock() {
|
|
|
|
|
Ok(a) => a,
|
|
|
|
|
Err(e) => {
|
2026-02-24 13:48:25 +00:00
|
|
|
slog_error!("[agents] Failed to lock pool for cleanup of '{story_id}': {e}");
|
2026-02-24 13:20:59 +00:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
let prefix = format!("{story_id}:");
|
|
|
|
|
let keys_to_remove: Vec<String> = agents
|
|
|
|
|
.keys()
|
|
|
|
|
.filter(|k| k.starts_with(&prefix))
|
|
|
|
|
.cloned()
|
|
|
|
|
.collect();
|
|
|
|
|
let count = keys_to_remove.len();
|
|
|
|
|
for key in &keys_to_remove {
|
|
|
|
|
agents.remove(key);
|
|
|
|
|
}
|
|
|
|
|
if count > 0 {
|
|
|
|
|
slog!("[agents] Removed {count} agent entries for archived story '{story_id}'");
|
|
|
|
|
}
|
|
|
|
|
count
|
|
|
|
|
}
|
2026-02-19 15:25:22 +00:00
|
|
|
}
/// Return the active pipeline stage directory name for `story_id`, or `None` if the
/// story is not in any active stage (`2_current/`, `3_qa/`, `4_merge/`).
fn find_active_story_stage(project_root: &Path, story_id: &str) -> Option<&'static str> {
    const STAGES: [&str; 3] = ["2_current", "3_qa", "4_merge"];
    // The story file carries the same name in every stage directory.
    let file_name = format!("{story_id}.md");
    STAGES.iter().copied().find(|&stage| {
        project_root
            .join(".story_kit")
            .join("work")
            .join(stage)
            .join(&file_name)
            .exists()
    })
}
/// Scan a work pipeline stage directory and return story IDs, sorted alphabetically.
/// Returns an empty `Vec` if the directory does not exist.
fn scan_stage_items(project_root: &Path, stage_dir: &str) -> Vec<String> {
    let dir = project_root
        .join(".story_kit")
        .join("work")
        .join(stage_dir);
    // A missing (or unreadable) stage directory simply means "no items".
    let entries = match std::fs::read_dir(&dir) {
        Ok(e) => e,
        Err(_) => return Vec::new(),
    };
    // Item IDs are the file stems of the `.md` files in the stage directory.
    let mut items: Vec<String> = entries
        .flatten()
        .filter_map(|entry| {
            let path = entry.path();
            if path.extension().and_then(|e| e.to_str()) != Some("md") {
                return None;
            }
            path.file_stem().and_then(|s| s.to_str()).map(str::to_string)
        })
        .collect();
    items.sort();
    items
}
|
|
|
|
/// Return `true` if `story_id` has any active (pending/running) agent matching `stage`.
|
2026-02-24 15:50:34 +00:00
|
|
|
///
|
|
|
|
|
/// Uses the explicit `stage` config field when the agent is found in `config`;
|
|
|
|
|
/// falls back to the legacy name-based heuristic for unlisted agents.
|
2026-02-23 18:20:24 +00:00
|
|
|
fn is_story_assigned_for_stage(
|
2026-02-24 15:50:34 +00:00
|
|
|
config: &ProjectConfig,
|
2026-02-23 18:20:24 +00:00
|
|
|
agents: &HashMap<String, StoryAgent>,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
stage: &PipelineStage,
|
|
|
|
|
) -> bool {
|
|
|
|
|
agents.iter().any(|(key, agent)| {
|
|
|
|
|
// Composite key format: "{story_id}:{agent_name}"
|
|
|
|
|
let key_story_id = key.rsplit_once(':').map(|(sid, _)| sid).unwrap_or(key);
|
2026-02-24 15:50:34 +00:00
|
|
|
let agent_stage = config
|
|
|
|
|
.find_agent(&agent.agent_name)
|
|
|
|
|
.map(agent_config_stage)
|
|
|
|
|
.unwrap_or_else(|| pipeline_stage(&agent.agent_name));
|
2026-02-23 18:20:24 +00:00
|
|
|
key_story_id == story_id
|
2026-02-24 15:50:34 +00:00
|
|
|
&& agent_stage == *stage
|
2026-02-23 18:20:24 +00:00
|
|
|
&& matches!(agent.status, AgentStatus::Running | AgentStatus::Pending)
|
|
|
|
|
})
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Find the first configured agent for `stage` that has no active (pending/running) assignment.
|
|
|
|
|
/// Returns `None` if all agents for that stage are busy or none are configured.
|
2026-02-24 15:50:34 +00:00
|
|
|
/// Uses the agent's explicit `stage` config field (preferred) or falls back to name-based detection.
|
2026-02-23 18:20:24 +00:00
|
|
|
fn find_free_agent_for_stage<'a>(
|
|
|
|
|
config: &'a ProjectConfig,
|
|
|
|
|
agents: &HashMap<String, StoryAgent>,
|
|
|
|
|
stage: &PipelineStage,
|
|
|
|
|
) -> Option<&'a str> {
|
|
|
|
|
for agent_config in &config.agent {
|
2026-02-24 15:50:34 +00:00
|
|
|
if agent_config_stage(agent_config) != *stage {
|
2026-02-23 18:20:24 +00:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
let is_busy = agents.values().any(|a| {
|
|
|
|
|
a.agent_name == agent_config.name
|
|
|
|
|
&& matches!(a.status, AgentStatus::Running | AgentStatus::Pending)
|
|
|
|
|
});
|
|
|
|
|
if !is_busy {
|
|
|
|
|
return Some(&agent_config.name);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
None
|
|
|
|
|
}
|
|
|
|
|
|
/// Scan the agent pool for Running entries whose backing tokio task has already
/// finished and mark them as Failed.
///
/// This handles the case where the PTY read loop or the spawned task exits
/// without updating the agent status — for example when the process is killed
/// externally and the PTY master fd returns EOF before our inactivity timeout
/// fires, but some other edge case prevents the normal cleanup path from running.
///
/// Returns the number of orphaned entries found (0 if the pool lock is poisoned).
fn check_orphaned_agents(agents: &Mutex<HashMap<String, StoryAgent>>) -> usize {
    // A poisoned lock means some other thread panicked while holding it; treat
    // this pass as a no-op rather than propagating the panic.
    let mut lock = match agents.lock() {
        Ok(l) => l,
        Err(_) => return 0,
    };

    // Phase 1 (read-only): collect orphaned entries — Running or Pending agents
    // whose task handle is finished. Pending agents can be orphaned if worktree
    // creation panics before setting status. Collecting first avoids mutating
    // the map while iterating it.
    let orphaned: Vec<(String, String, broadcast::Sender<AgentEvent>, AgentStatus)> = lock
        .iter()
        .filter_map(|(key, agent)| {
            if matches!(agent.status, AgentStatus::Running | AgentStatus::Pending)
                && let Some(handle) = &agent.task_handle
                && handle.is_finished()
            {
                // Composite key is "{story_id}:{agent_name}"; fall back to the
                // whole key if no separator is present.
                let story_id = key
                    .rsplit_once(':')
                    .map(|(s, _)| s.to_string())
                    .unwrap_or_else(|| key.clone())
;
                return Some((key.clone(), story_id, agent.tx.clone(), agent.status.clone()));
            }
            None
        })
        .collect();

    let count = orphaned.len();
    // Phase 2 (mutating): flip each orphan to Failed and emit an Error event so
    // any `wait_for_agent` caller unblocks.
    for (key, story_id, tx, prev_status) in orphaned {
        if let Some(agent) = lock.get_mut(&key) {
            agent.status = AgentStatus::Failed;
            slog!(
                "[watchdog] Orphaned agent '{key}': task finished but status was {prev_status}. \
                Marking Failed."
            );
            // Receiver may already be gone; a failed send is harmless.
            let _ = tx.send(AgentEvent::Error {
                story_id,
                agent_name: agent.agent_name.clone(),
                message: "Agent process terminated unexpectedly (watchdog detected orphan)"
                    .to_string(),
            });
        }
    }
    count
}
/// Server-owned completion: runs acceptance gates when an agent process exits
/// normally, and advances the pipeline based on results.
///
/// This is a **free function** (not a method on `AgentPool`) to break the
/// opaque type cycle that would otherwise arise: `start_agent` → spawned task
/// → server-owned completion → pipeline advance → `start_agent`.
///
/// If the agent already has a completion report (e.g. from a legacy
/// `report_completion` call), this is a no-op to avoid double-running gates.
///
/// Note: the pool mutex is acquired in several short, separate scopes so the
/// guard is never held across an `.await` (the gate run below is awaited).
async fn run_server_owned_completion(
    agents: &Arc<Mutex<HashMap<String, StoryAgent>>>,
    port: u16,
    story_id: &str,
    agent_name: &str,
    session_id: Option<String>,
    watcher_tx: broadcast::Sender<WatcherEvent>,
) {
    let key = composite_key(story_id, agent_name);

    // Guard: skip if completion was already recorded (legacy path).
    // Also bail out silently if the entry no longer exists or the lock is poisoned.
    {
        let lock = match agents.lock() {
            Ok(a) => a,
            Err(_) => return,
        };
        match lock.get(&key) {
            Some(agent) if agent.completion.is_some() => {
                slog!(
                    "[agents] Completion already recorded for '{story_id}:{agent_name}'; \
                    skipping server-owned gates."
                );
                return;
            }
            Some(_) => {}
            None => return,
        }
    }

    // Get worktree path for running gates (separate lock scope — the guard
    // must be dropped before the await below).
    let worktree_path = {
        let lock = match agents.lock() {
            Ok(a) => a,
            Err(_) => return,
        };
        lock.get(&key)
            .and_then(|a| a.worktree_info.as_ref().map(|wt| wt.path.clone()))
    };

    // Run acceptance gates. Gates shell out / block, so they run on the
    // blocking thread pool; uncommitted changes fail the gates up front.
    let (gates_passed, gate_output) = if let Some(wt_path) = worktree_path {
        let path = wt_path;
        match tokio::task::spawn_blocking(move || {
            check_uncommitted_changes(&path)?;
            run_acceptance_gates(&path)
        })
        .await
        {
            Ok(Ok(result)) => result,
            Ok(Err(e)) => (false, e),
            // spawn_blocking join error — the gate task itself panicked.
            Err(e) => (false, format!("Gate check task panicked: {e}")),
        }
    } else {
        // No worktree means gates cannot run; report a failure with context.
        (
            false,
            "No worktree path available to run acceptance gates".to_string(),
        )
    };

    slog!(
        "[agents] Server-owned completion for '{story_id}:{agent_name}': gates_passed={gates_passed}"
    );

    let report = CompletionReport {
        summary: "Agent process exited normally".to_string(),
        gates_passed,
        gate_output,
    };

    // Store completion report, extract data for pipeline advance, then
    // remove the entry so completed agents never appear in list_agents.
    let (tx, project_root_for_advance, wt_path_for_advance, merge_failure_reported_for_advance) = {
        let mut lock = match agents.lock() {
            Ok(a) => a,
            Err(_) => return,
        };
        let agent = match lock.get_mut(&key) {
            Some(a) => a,
            None => return,
        };
        agent.completion = Some(report.clone());
        agent.session_id = session_id.clone();
        // Clone out everything needed after the entry is removed.
        let tx = agent.tx.clone();
        let pr = agent.project_root.clone();
        let wt = agent.worktree_info.as_ref().map(|w| w.path.clone());
        let mfr = agent.merge_failure_reported;
        lock.remove(&key);
        (tx, pr, wt, mfr)
    };

    // Emit Done so wait_for_agent unblocks.
    let _ = tx.send(AgentEvent::Done {
        story_id: story_id.to_string(),
        agent_name: agent_name.to_string(),
        session_id,
    });

    // Notify WebSocket clients that the agent is gone.
    AgentPool::notify_agent_state_changed(&watcher_tx);

    // Advance the pipeline state machine in a background task.
    spawn_pipeline_advance(
        Arc::clone(agents),
        port,
        story_id,
        agent_name,
        report,
        project_root_for_advance,
        wt_path_for_advance,
        watcher_tx,
        merge_failure_reported_for_advance,
    );
}
|
|
|
|
/// Spawn pipeline advancement as a background task.
|
|
|
|
|
///
|
|
|
|
|
/// This is a **non-async** function so it does not participate in the opaque
|
|
|
|
|
/// type cycle between `start_agent` and `run_server_owned_completion`.
|
2026-02-25 14:59:20 +00:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2026-02-23 15:00:10 +00:00
|
|
|
fn spawn_pipeline_advance(
|
|
|
|
|
agents: Arc<Mutex<HashMap<String, StoryAgent>>>,
|
|
|
|
|
port: u16,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
2026-02-25 14:59:20 +00:00
|
|
|
completion: CompletionReport,
|
|
|
|
|
project_root: Option<PathBuf>,
|
|
|
|
|
worktree_path: Option<PathBuf>,
|
2026-02-24 23:57:07 +00:00
|
|
|
watcher_tx: broadcast::Sender<WatcherEvent>,
|
2026-02-26 16:12:23 +00:00
|
|
|
merge_failure_reported: bool,
|
2026-02-23 15:00:10 +00:00
|
|
|
) {
|
|
|
|
|
let sid = story_id.to_string();
|
|
|
|
|
let aname = agent_name.to_string();
|
|
|
|
|
tokio::spawn(async move {
|
2026-02-24 17:56:40 +00:00
|
|
|
let pool = AgentPool {
|
|
|
|
|
agents,
|
|
|
|
|
port,
|
|
|
|
|
child_killers: Arc::new(Mutex::new(HashMap::new())),
|
2026-02-24 23:09:13 +00:00
|
|
|
watcher_tx,
|
2026-02-24 17:56:40 +00:00
|
|
|
};
|
2026-02-26 16:12:23 +00:00
|
|
|
pool.run_pipeline_advance(
|
|
|
|
|
&sid,
|
|
|
|
|
&aname,
|
|
|
|
|
completion,
|
|
|
|
|
project_root,
|
|
|
|
|
worktree_path,
|
|
|
|
|
merge_failure_reported,
|
|
|
|
|
)
|
|
|
|
|
.await;
|
2026-02-23 15:00:10 +00:00
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
|
/// Result of a mergemaster merge operation.
#[derive(Debug, Serialize, Clone)]
pub struct MergeReport {
    /// ID of the work item the merge was performed for.
    pub story_id: String,
    /// `true` when the merge operation completed successfully.
    pub success: bool,
    /// `true` when merge conflicts were encountered.
    pub had_conflicts: bool,
    /// `true` when conflicts were detected but automatically resolved.
    pub conflicts_resolved: bool,
    /// Human-readable conflict description, when any conflicts occurred.
    pub conflict_details: Option<String>,
    /// Whether acceptance gates passed as part of the merge.
    pub gates_passed: bool,
    /// Captured output from the acceptance-gate run.
    pub gate_output: String,
    /// Whether the story's git worktree was cleaned up afterwards.
    pub worktree_cleaned_up: bool,
    /// Whether the story file was moved to its archive stage.
    pub story_archived: bool,
}
/// Determine the work item type from its ID (new naming: `{N}_{type}_{slug}`).
/// Returns "bug", "spike", or "story".
#[allow(dead_code)]
fn item_type_from_id(item_id: &str) -> &'static str {
    // New format: {digits}_{type}_{slug} — strip the numeric prefix, then
    // look at the type marker that follows. Anything unrecognized is a story.
    let rest = item_id.trim_start_matches(|c: char| c.is_ascii_digit());
    if rest.starts_with("_bug_") {
        return "bug";
    }
    if rest.starts_with("_spike_") {
        return "spike";
    }
    "story"
}
/// Return the source directory path for a work item (always work/1_upcoming/).
fn item_source_dir(project_root: &Path, _item_id: &str) -> PathBuf {
    let mut dir = project_root.to_path_buf();
    dir.push(".story_kit");
    dir.push("work");
    dir.push("1_upcoming");
    dir
}
/// Return the done directory path for a work item (always work/5_done/).
fn item_archive_dir(project_root: &Path, _item_id: &str) -> PathBuf {
    let mut dir = project_root.to_path_buf();
    dir.push(".story_kit");
    dir.push("work");
    dir.push("5_done");
    dir
}
2026-02-20 17:16:48 +00:00
|
|
|
/// Move a work item (story, bug, or spike) from `work/1_upcoming/` to `work/2_current/`.
|
2026-02-20 15:31:13 +00:00
|
|
|
///
|
2026-02-20 17:16:48 +00:00
|
|
|
/// Idempotent: if the item is already in `2_current/`, returns Ok without committing.
|
|
|
|
|
/// If the item is not found in `1_upcoming/`, logs a warning and returns Ok.
|
2026-02-20 15:31:13 +00:00
|
|
|
pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), String> {
|
2026-02-20 17:16:48 +00:00
|
|
|
let sk = project_root.join(".story_kit").join("work");
|
|
|
|
|
let current_dir = sk.join("2_current");
|
2026-02-20 16:21:30 +00:00
|
|
|
let current_path = current_dir.join(format!("{story_id}.md"));
|
2026-02-20 15:31:13 +00:00
|
|
|
|
|
|
|
|
if current_path.exists() {
|
2026-02-20 17:16:48 +00:00
|
|
|
// Already in 2_current/ — idempotent, nothing to do.
|
2026-02-20 15:31:13 +00:00
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 16:21:30 +00:00
|
|
|
let source_dir = item_source_dir(project_root, story_id);
|
|
|
|
|
let source_path = source_dir.join(format!("{story_id}.md"));
|
|
|
|
|
|
|
|
|
|
if !source_path.exists() {
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
slog!(
|
2026-02-20 17:16:48 +00:00
|
|
|
"[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/",
|
2026-02-20 16:21:30 +00:00
|
|
|
source_dir.display()
|
2026-02-20 15:31:13 +00:00
|
|
|
);
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::fs::create_dir_all(¤t_dir)
|
2026-02-20 17:16:48 +00:00
|
|
|
.map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?;
|
2026-02-20 15:31:13 +00:00
|
|
|
|
2026-02-20 16:21:30 +00:00
|
|
|
std::fs::rename(&source_path, ¤t_path)
|
2026-02-20 17:16:48 +00:00
|
|
|
.map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?;
|
2026-02-20 15:31:13 +00:00
|
|
|
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
slog!(
|
2026-02-20 17:16:48 +00:00
|
|
|
"[lifecycle] Moved '{story_id}' from {} to work/2_current/",
|
2026-02-20 16:21:30 +00:00
|
|
|
source_dir.display()
|
|
|
|
|
);
|
2026-02-20 15:31:13 +00:00
|
|
|
|
2026-02-20 19:39:19 +00:00
|
|
|
Ok(())
|
2026-02-20 15:31:13 +00:00
|
|
|
}
|
|
|
|
|
|
/// Check whether a feature branch `feature/story-{story_id}` exists and has
/// commits that are not yet on the base branch (`master`, falling back to
/// `main`). Returns `true` when there is unmerged work, `false` when there is
/// no branch, no resolvable base branch, or all the branch's commits are
/// already reachable from the base branch.
pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str) -> bool {
    let branch = format!("feature/story-{story_id}");

    // Does `name` resolve to a commit in this repository?
    let branch_exists = |name: &str| -> bool {
        Command::new("git")
            .args(["rev-parse", "--verify", name])
            .current_dir(project_root)
            .output()
            .map(|out| out.status.success())
            .unwrap_or(false)
    };

    // No feature branch → nothing to merge.
    if !branch_exists(&branch) {
        return false;
    }

    // Bug fix: the base branch was previously hard-coded to "master". On
    // repositories whose default branch is "main", `git log master..{branch}`
    // failed and the empty stdout was misread as "no unmerged commits".
    // Detect the base branch instead.
    let base = if branch_exists("master") {
        "master"
    } else if branch_exists("main") {
        "main"
    } else {
        // No resolvable base branch: nothing to compare against, so
        // conservatively report no unmerged work (matches old behavior).
        return false;
    };

    // Any commits on the feature branch not reachable from the base branch?
    // A non-success git exit is treated as "no unmerged work".
    match Command::new("git")
        .args(["log", &format!("{base}..{branch}"), "--oneline"])
        .current_dir(project_root)
        .output()
    {
        Ok(out) if out.status.success() => {
            !String::from_utf8_lossy(&out.stdout).trim().is_empty()
        }
        _ => false,
    }
}
/// Move a story from `work/2_current/` or `work/4_merge/` to `work/5_done/`.
///
/// * If the story is in `2_current/`, it is moved to `5_done/`.
/// * If the story is in `4_merge/`, it is moved to `5_done/`.
/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent).
/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned.
///
/// NOTE(review): this function only renames the file on disk — it performs no
/// git commit itself; any committing happens elsewhere.
pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), String> {
    let sk = project_root.join(".story_kit").join("work");
    let current_path = sk.join("2_current").join(format!("{story_id}.md"));
    let merge_path = sk.join("4_merge").join(format!("{story_id}.md"));
    let done_dir = sk.join("5_done");
    let done_path = done_dir.join(format!("{story_id}.md"));
    let archived_path = sk.join("6_archived").join(format!("{story_id}.md"));

    if done_path.exists() || archived_path.exists() {
        // Already in done or archived — idempotent, nothing to do.
        return Ok(());
    }

    // Check 2_current/ first, then 4_merge/
    let source_path = if current_path.exists() {
        current_path.clone()
    } else if merge_path.exists() {
        merge_path.clone()
    } else {
        return Err(format!(
            "Story '{story_id}' not found in work/2_current/ or work/4_merge/. Cannot accept story."
        ));
    };

    std::fs::create_dir_all(&done_dir)
        .map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?;
    std::fs::rename(&source_path, &done_path)
        .map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?;

    // Record which stage the story came from, for the log line below.
    let from_dir = if source_path == current_path {
        "work/2_current/"
    } else {
        "work/4_merge/"
    };
    slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_done/");

    Ok(())
}
2026-02-23 13:13:41 +00:00
|
|
|
/// Move a story/bug from `work/2_current/` or `work/3_qa/` to `work/4_merge/`.
|
2026-02-20 17:36:35 +00:00
|
|
|
///
|
|
|
|
|
/// This stages a work item as ready for the mergemaster to pick up and merge into master.
|
|
|
|
|
/// Idempotent: if already in `4_merge/`, returns Ok without committing.
|
|
|
|
|
pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), String> {
|
|
|
|
|
let sk = project_root.join(".story_kit").join("work");
|
|
|
|
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
2026-02-23 13:13:41 +00:00
|
|
|
let qa_path = sk.join("3_qa").join(format!("{story_id}.md"));
|
2026-02-20 17:36:35 +00:00
|
|
|
let merge_dir = sk.join("4_merge");
|
|
|
|
|
let merge_path = merge_dir.join(format!("{story_id}.md"));
|
|
|
|
|
|
|
|
|
|
if merge_path.exists() {
|
|
|
|
|
// Already in 4_merge/ — idempotent, nothing to do.
|
2026-02-20 15:09:39 +00:00
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
// Accept from 2_current/ (manual trigger) or 3_qa/ (pipeline advancement from QA stage).
|
|
|
|
|
let source_path = if current_path.exists() {
|
|
|
|
|
current_path.clone()
|
|
|
|
|
} else if qa_path.exists() {
|
|
|
|
|
qa_path.clone()
|
|
|
|
|
} else {
|
2026-02-20 17:36:35 +00:00
|
|
|
return Err(format!(
|
2026-02-23 13:13:41 +00:00
|
|
|
"Work item '{story_id}' not found in work/2_current/ or work/3_qa/. Cannot move to 4_merge/."
|
2026-02-20 17:36:35 +00:00
|
|
|
));
|
2026-02-23 13:13:41 +00:00
|
|
|
};
|
2026-02-20 17:36:35 +00:00
|
|
|
|
|
|
|
|
std::fs::create_dir_all(&merge_dir)
|
|
|
|
|
.map_err(|e| format!("Failed to create work/4_merge/ directory: {e}"))?;
|
2026-02-23 13:13:41 +00:00
|
|
|
std::fs::rename(&source_path, &merge_path)
|
2026-02-20 17:36:35 +00:00
|
|
|
.map_err(|e| format!("Failed to move '{story_id}' to 4_merge/: {e}"))?;
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
let from_dir = if source_path == current_path {
|
|
|
|
|
"work/2_current/"
|
|
|
|
|
} else {
|
|
|
|
|
"work/3_qa/"
|
|
|
|
|
};
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
|
2026-02-20 17:36:35 +00:00
|
|
|
|
2026-02-20 19:39:19 +00:00
|
|
|
Ok(())
|
2026-02-20 15:09:39 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-20 17:44:06 +00:00
|
|
|
/// Move a story/bug from `work/2_current/` to `work/3_qa/` and auto-commit.
|
|
|
|
|
///
|
|
|
|
|
/// This stages a work item for QA review before merging to master.
|
|
|
|
|
/// Idempotent: if already in `3_qa/`, returns Ok without committing.
|
|
|
|
|
pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), String> {
|
|
|
|
|
let sk = project_root.join(".story_kit").join("work");
|
|
|
|
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
|
|
|
|
let qa_dir = sk.join("3_qa");
|
|
|
|
|
let qa_path = qa_dir.join(format!("{story_id}.md"));
|
|
|
|
|
|
|
|
|
|
if qa_path.exists() {
|
|
|
|
|
// Already in 3_qa/ — idempotent, nothing to do.
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if !current_path.exists() {
|
|
|
|
|
return Err(format!(
|
|
|
|
|
"Work item '{story_id}' not found in work/2_current/. Cannot move to 3_qa/."
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::fs::create_dir_all(&qa_dir)
|
|
|
|
|
.map_err(|e| format!("Failed to create work/3_qa/ directory: {e}"))?;
|
|
|
|
|
std::fs::rename(¤t_path, &qa_path)
|
|
|
|
|
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
|
|
|
|
|
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
|
2026-02-20 17:44:06 +00:00
|
|
|
|
2026-02-20 19:39:19 +00:00
|
|
|
Ok(())
|
2026-02-20 17:44:06 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-24 17:01:57 +00:00
|
|
|
/// Move a bug from `work/2_current/` or `work/1_upcoming/` to `work/5_done/` and auto-commit.
|
2026-02-20 16:21:30 +00:00
|
|
|
///
|
2026-02-24 17:01:57 +00:00
|
|
|
/// * If the bug is in `2_current/`, it is moved to `5_done/` and committed.
|
|
|
|
|
/// * If the bug is still in `1_upcoming/` (never started), it is moved directly to `5_done/`.
|
|
|
|
|
/// * If the bug is already in `5_done/`, this is a no-op (idempotent).
|
2026-02-20 16:21:30 +00:00
|
|
|
/// * If the bug is not found anywhere, an error is returned.
|
|
|
|
|
pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), String> {
|
2026-02-20 17:16:48 +00:00
|
|
|
let sk = project_root.join(".story_kit").join("work");
|
|
|
|
|
let current_path = sk.join("2_current").join(format!("{bug_id}.md"));
|
|
|
|
|
let upcoming_path = sk.join("1_upcoming").join(format!("{bug_id}.md"));
|
2026-02-20 16:34:32 +00:00
|
|
|
let archive_dir = item_archive_dir(project_root, bug_id);
|
2026-02-20 16:21:30 +00:00
|
|
|
let archive_path = archive_dir.join(format!("{bug_id}.md"));
|
|
|
|
|
|
|
|
|
|
if archive_path.exists() {
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let source_path = if current_path.exists() {
|
|
|
|
|
current_path.clone()
|
2026-02-20 17:16:48 +00:00
|
|
|
} else if upcoming_path.exists() {
|
|
|
|
|
upcoming_path.clone()
|
2026-02-20 16:21:30 +00:00
|
|
|
} else {
|
|
|
|
|
return Err(format!(
|
2026-02-20 17:16:48 +00:00
|
|
|
"Bug '{bug_id}' not found in work/2_current/ or work/1_upcoming/. Cannot close bug."
|
2026-02-20 16:21:30 +00:00
|
|
|
));
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
std::fs::create_dir_all(&archive_dir)
|
2026-02-24 17:01:57 +00:00
|
|
|
.map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?;
|
2026-02-20 16:21:30 +00:00
|
|
|
std::fs::rename(&source_path, &archive_path)
|
2026-02-24 17:01:57 +00:00
|
|
|
.map_err(|e| format!("Failed to move bug '{bug_id}' to 5_done/: {e}"))?;
|
2026-02-20 16:21:30 +00:00
|
|
|
|
feat(story-93): expose server logs to agents via get_server_logs MCP tool
- Add log_buffer module: bounded 1000-line ring buffer with push/get_recent API
- Add slog! macro: drop-in for eprintln! that also captures to ring buffer
- Replace all eprintln! calls across agents, watcher, search, chat, worktree, claude_code with slog!
- Add get_server_logs MCP tool: accepts count (1-500) and optional filter params
- 5 unit tests for log_buffer covering push/retrieve, eviction, filtering, count limits, empty buffer
- 262 tests passing, clippy clean
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-02-23 20:38:19 +00:00
|
|
|
slog!(
|
2026-02-24 17:01:57 +00:00
|
|
|
"[lifecycle] Closed bug '{bug_id}' → work/5_done/"
|
2026-02-20 16:21:30 +00:00
|
|
|
);
|
|
|
|
|
|
2026-02-20 19:39:19 +00:00
|
|
|
Ok(())
|
2026-02-20 16:21:30 +00:00
|
|
|
}
|
|
|
|
|
|
2026-02-20 15:02:34 +00:00
|
|
|
// ── Acceptance-gate helpers ───────────────────────────────────────────────────
|
|
|
|
|
|
2026-02-23 20:38:17 +00:00
|
|
|
/// Detect the base branch of the git worktree at `wt_path`.
///
/// Probes the common default branch names in order (`master`, then `main`)
/// with `git rev-parse --verify`; when neither resolves — or git cannot be
/// invoked at all — defaults to `"master"`.
fn detect_worktree_base_branch(wt_path: &Path) -> String {
    // A branch name is usable when git can resolve it to a commit.
    let resolves = |name: &str| {
        Command::new("git")
            .args(["rev-parse", "--verify", name])
            .current_dir(wt_path)
            .output()
            .ok()
            .map_or(false, |out| out.status.success())
    };

    ["master", "main"]
        .into_iter()
        .find(|name| resolves(name))
        .unwrap_or("master")
        .to_string()
}
|
|
|
|
|
|
|
|
|
|
/// Return `true` if the git worktree at `wt_path` has commits on its current
|
|
|
|
|
/// branch that are not present on the base branch (`master` or `main`).
|
|
|
|
|
///
|
|
|
|
|
/// Used during server startup reconciliation to detect stories whose agent work
|
|
|
|
|
/// was committed while the server was offline.
|
|
|
|
|
fn worktree_has_committed_work(wt_path: &Path) -> bool {
|
|
|
|
|
let base_branch = detect_worktree_base_branch(wt_path);
|
|
|
|
|
let output = Command::new("git")
|
|
|
|
|
.args(["log", &format!("{base_branch}..HEAD"), "--oneline"])
|
|
|
|
|
.current_dir(wt_path)
|
|
|
|
|
.output();
|
|
|
|
|
match output {
|
|
|
|
|
Ok(out) if out.status.success() => {
|
|
|
|
|
!String::from_utf8_lossy(&out.stdout).trim().is_empty()
|
|
|
|
|
}
|
|
|
|
|
_ => false,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 15:02:34 +00:00
|
|
|
/// Check whether the given directory has any uncommitted git changes.
///
/// Returns `Err` with a descriptive message when `git status --porcelain`
/// reports pending changes, when git cannot be spawned, or when the status
/// command itself exits non-zero (e.g. `path` is not inside a git
/// repository). Previously a failed `git status` was silently treated as a
/// clean tree because only stdout was inspected; command failure is now
/// reported explicitly so callers never mistake an error for "clean".
fn check_uncommitted_changes(path: &Path) -> Result<(), String> {
    let output = Command::new("git")
        .args(["status", "--porcelain"])
        .current_dir(path)
        .output()
        .map_err(|e| format!("Failed to run git status: {e}"))?;

    // A failing `git status` (not a repo, corrupt worktree, …) must not be
    // mistaken for a clean worktree.
    if !output.status.success() {
        return Err(format!(
            "git status failed: {}",
            String::from_utf8_lossy(&output.stderr).trim()
        ));
    }

    let stdout = String::from_utf8_lossy(&output.stdout);
    if !stdout.trim().is_empty() {
        return Err(format!(
            "Worktree has uncommitted changes. Please commit all work before \
             the agent exits:\n{stdout}"
        ));
    }
    Ok(())
}
|
|
|
|
|
|
2026-02-23 12:59:55 +00:00
|
|
|
/// Run the project's test suite in `path`.
///
/// When `script/test` exists it is treated as the canonical single test
/// entry point and is run exclusively. Otherwise this falls back to
/// `cargo nextest run`, and finally to plain `cargo test` when the nextest
/// binary cannot be spawned. Returns `(tests_passed, output)` where `output`
/// is the combined stdout + stderr of whichever runner executed.
fn run_project_tests(path: &Path) -> Result<(bool, String), String> {
    // Merge stdout and stderr of a finished process into a single string.
    let combine = |out: &std::process::Output| {
        format!(
            "{}{}",
            String::from_utf8_lossy(&out.stdout),
            String::from_utf8_lossy(&out.stderr)
        )
    };

    let script_test = path.join("script").join("test");
    if script_test.exists() {
        let result = Command::new(&script_test)
            .current_dir(path)
            .output()
            .map_err(|e| format!("Failed to run script/test: {e}"))?;
        let report = format!("=== script/test ===\n{}\n", combine(&result));
        return Ok((result.status.success(), report));
    }

    // No script/test — prefer cargo nextest, degrade to cargo test.
    let nextest = Command::new("cargo")
        .args(["nextest", "run"])
        .current_dir(path)
        .output();

    let (success, test_out) = match nextest {
        Ok(o) => (o.status.success(), combine(&o)),
        Err(_) => {
            // nextest binary unavailable — run the built-in harness instead.
            let o = Command::new("cargo")
                .args(["test"])
                .current_dir(path)
                .output()
                .map_err(|e| format!("Failed to run cargo test: {e}"))?;
            (o.status.success(), combine(&o))
        }
    };

    Ok((success, format!("=== tests ===\n{test_out}\n")))
}
|
2026-02-20 15:02:34 +00:00
|
|
|
|
2026-02-23 12:59:55 +00:00
|
|
|
/// Run `cargo clippy` and the project test suite (via `script/test` if present,
|
|
|
|
|
/// otherwise `cargo nextest run` / `cargo test`) in the given directory.
|
|
|
|
|
/// Returns `(gates_passed, combined_output)`.
|
|
|
|
|
fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> {
|
|
|
|
|
let mut all_output = String::new();
|
|
|
|
|
let mut all_passed = true;
|
|
|
|
|
|
|
|
|
|
// ── cargo clippy ──────────────────────────────────────────────
|
|
|
|
|
let clippy = Command::new("cargo")
|
|
|
|
|
.args(["clippy", "--all-targets", "--all-features"])
|
|
|
|
|
.current_dir(path)
|
|
|
|
|
.output()
|
|
|
|
|
.map_err(|e| format!("Failed to run cargo clippy: {e}"))?;
|
|
|
|
|
|
|
|
|
|
all_output.push_str("=== cargo clippy ===\n");
|
|
|
|
|
let clippy_stdout = String::from_utf8_lossy(&clippy.stdout);
|
|
|
|
|
let clippy_stderr = String::from_utf8_lossy(&clippy.stderr);
|
|
|
|
|
if !clippy_stdout.is_empty() {
|
|
|
|
|
all_output.push_str(&clippy_stdout);
|
|
|
|
|
}
|
|
|
|
|
if !clippy_stderr.is_empty() {
|
|
|
|
|
all_output.push_str(&clippy_stderr);
|
|
|
|
|
}
|
2026-02-20 15:02:34 +00:00
|
|
|
all_output.push('\n');
|
|
|
|
|
|
2026-02-23 12:59:55 +00:00
|
|
|
if !clippy.status.success() {
|
|
|
|
|
all_passed = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ── tests (script/test if available, else cargo nextest/test) ─
|
|
|
|
|
let (test_success, test_out) = run_project_tests(path)?;
|
|
|
|
|
all_output.push_str(&test_out);
|
2026-02-20 15:02:34 +00:00
|
|
|
if !test_success {
|
|
|
|
|
all_passed = false;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok((all_passed, all_output))
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 13:40:12 +00:00
|
|
|
/// Run `script/test_coverage` in the given directory if the script exists.
///
/// Acts as the QA gate before a story advances from `3_qa/` to `4_merge/`.
/// Returns `(passed, output)`; a missing script is treated as a pass so
/// projects without coverage tooling are not blocked.
fn run_coverage_gate(path: &Path) -> Result<(bool, String), String> {
    let script = path.join("script").join("test_coverage");
    if !script.exists() {
        let note = "script/test_coverage not found; coverage gate skipped.\n".to_string();
        return Ok((true, note));
    }

    let result = Command::new(&script)
        .current_dir(path)
        .output()
        .map_err(|e| format!("Failed to run script/test_coverage: {e}"))?;

    // Report header followed by the script's combined stdout + stderr.
    let mut report = String::from("=== script/test_coverage ===\n");
    report.push_str(&String::from_utf8_lossy(&result.stdout));
    report.push_str(&String::from_utf8_lossy(&result.stderr));
    report.push('\n');

    Ok((result.status.success(), report))
}
|
|
|
|
|
|
2026-02-20 17:36:35 +00:00
|
|
|
// ── Mergemaster helpers ───────────────────────────────────────────────────────
|
|
|
|
|
|
2026-02-23 23:22:24 +00:00
|
|
|
/// Result of a squash-merge operation.
struct SquashMergeResult {
    // `true` only when the squash commit actually landed on master; `false`
    // on every abort path (conflicts, empty diff, gate failure, cherry-pick
    // failure).
    success: bool,
    // `true` when `git merge --squash` reported conflicts.
    had_conflicts: bool,
    /// `true` when conflicts were detected but automatically resolved.
    conflicts_resolved: bool,
    // Human-readable explanation of why the merge could not complete;
    // `None` when there is nothing to report.
    conflict_details: Option<String>,
    // Accumulated log of every git/setup/gate command run during the merge.
    output: String,
    /// Whether quality gates ran and passed. `false` when `success` is `false`
    /// due to a gate failure; callers can use this to distinguish gate failures
    /// from merge/commit/FF failures in the `MergeReport`.
    gates_passed: bool,
}
|
|
|
|
|
|
|
|
|
|
/// Squash-merge a feature branch into the current branch using a temporary
/// merge-queue worktree for quality-gate isolation.
///
/// * `project_root` — the main repository checkout (master's worktree).
/// * `branch` — the feature branch to squash-merge.
/// * `story_id` — used for the temporary branch name and the commit message.
///
/// **Flow:**
/// 1. Create a temporary `merge-queue/{story_id}` branch at current HEAD.
/// 2. Create a temporary worktree for that branch.
/// 3. Run `git merge --squash` in the temporary worktree (not the main worktree).
/// 4. If conflicts arise, attempt automatic resolution for simple additive cases.
/// 5. If clean (or resolved), commit in the temp worktree.
/// 6. Run quality gates **in the merge worktree** before touching master.
/// 7. If gates pass: cherry-pick the squash commit onto master.
/// 8. Clean up the temporary worktree and branch.
///
/// Step 7 uses `git cherry-pick` instead of `git merge --ff-only` so that
/// concurrent filesystem-watcher commits on master (pipeline file moves) do
/// not block the merge.
fn run_squash_merge(
    project_root: &Path,
    branch: &str,
    story_id: &str,
) -> Result<SquashMergeResult, String> {
    let mut all_output = String::new();
    let merge_branch = format!("merge-queue/{story_id}");
    let merge_wt_path = project_root
        .join(".story_kit")
        .join("merge_workspace");

    // Ensure we start clean: remove any leftover merge workspace.
    cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);

    // ── Create merge-queue branch at current HEAD ─────────────────
    all_output.push_str(&format!(
        "=== Creating merge-queue branch '{merge_branch}' ===\n"
    ));
    let create_branch = Command::new("git")
        .args(["branch", &merge_branch])
        .current_dir(project_root)
        .output()
        .map_err(|e| format!("Failed to create merge-queue branch: {e}"))?;
    if !create_branch.status.success() {
        let stderr = String::from_utf8_lossy(&create_branch.stderr);
        all_output.push_str(&format!("Branch creation failed: {stderr}\n"));
        return Err(format!("Failed to create merge-queue branch: {stderr}"));
    }

    // ── Create temporary worktree ─────────────────────────────────
    all_output.push_str("=== Creating temporary merge worktree ===\n");
    let wt_str = merge_wt_path.to_string_lossy().to_string();
    let create_wt = Command::new("git")
        .args(["worktree", "add", &wt_str, &merge_branch])
        .current_dir(project_root)
        .output()
        .map_err(|e| format!("Failed to create merge worktree: {e}"))?;
    if !create_wt.status.success() {
        let stderr = String::from_utf8_lossy(&create_wt.stderr);
        all_output.push_str(&format!("Worktree creation failed: {stderr}\n"));
        cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
        return Err(format!("Failed to create merge worktree: {stderr}"));
    }

    // ── Squash-merge in the temporary worktree ────────────────────
    all_output.push_str(&format!("=== git merge --squash {branch} ===\n"));
    let merge = Command::new("git")
        .args(["merge", "--squash", branch])
        .current_dir(&merge_wt_path)
        .output()
        .map_err(|e| format!("Failed to run git merge: {e}"))?;

    let merge_stdout = String::from_utf8_lossy(&merge.stdout).to_string();
    let merge_stderr = String::from_utf8_lossy(&merge.stderr).to_string();
    all_output.push_str(&merge_stdout);
    all_output.push_str(&merge_stderr);
    all_output.push('\n');

    let mut had_conflicts = false;
    let mut conflicts_resolved = false;
    let mut conflict_details: Option<String> = None;

    // A non-zero exit from `git merge --squash` indicates conflicts.
    if !merge.status.success() {
        had_conflicts = true;
        all_output.push_str("=== Conflicts detected, attempting auto-resolution ===\n");

        // Try to automatically resolve simple conflicts.
        match try_resolve_conflicts(&merge_wt_path) {
            Ok((resolved, resolution_log)) => {
                all_output.push_str(&resolution_log);
                if resolved {
                    conflicts_resolved = true;
                    all_output
                        .push_str("=== All conflicts resolved automatically ===\n");
                } else {
                    // Could not resolve — abort, clean up, and report.
                    let details = format!(
                        "Merge conflicts in branch '{branch}':\n{merge_stdout}{merge_stderr}\n{resolution_log}"
                    );
                    conflict_details = Some(details);
                    all_output
                        .push_str("=== Unresolvable conflicts, aborting merge ===\n");
                    cleanup_merge_workspace(
                        project_root,
                        &merge_wt_path,
                        &merge_branch,
                    );
                    return Ok(SquashMergeResult {
                        success: false,
                        had_conflicts: true,
                        conflicts_resolved: false,
                        conflict_details,
                        output: all_output,
                        gates_passed: false,
                    });
                }
            }
            Err(e) => {
                // The resolver itself failed (I/O, git) — treat as unresolvable.
                all_output.push_str(&format!("Auto-resolution error: {e}\n"));
                cleanup_merge_workspace(
                    project_root,
                    &merge_wt_path,
                    &merge_branch,
                );
                return Ok(SquashMergeResult {
                    success: false,
                    had_conflicts: true,
                    conflicts_resolved: false,
                    conflict_details: Some(format!(
                        "Merge conflicts in branch '{branch}' (auto-resolution failed: {e}):\n{merge_stdout}{merge_stderr}"
                    )),
                    output: all_output,
                    gates_passed: false,
                });
            }
        }
    }

    // ── Commit in the temporary worktree ──────────────────────────
    all_output.push_str("=== git commit ===\n");
    let commit_msg = format!("story-kit: merge {story_id}");
    let commit = Command::new("git")
        .args(["commit", "-m", &commit_msg])
        .current_dir(&merge_wt_path)
        .output()
        .map_err(|e| format!("Failed to run git commit: {e}"))?;

    let commit_stdout = String::from_utf8_lossy(&commit.stdout).to_string();
    let commit_stderr = String::from_utf8_lossy(&commit.stderr).to_string();
    all_output.push_str(&commit_stdout);
    all_output.push_str(&commit_stderr);
    all_output.push('\n');

    if !commit.status.success() {
        // Bug 226: "nothing to commit" means the feature branch has no changes
        // beyond what's already on master. This must NOT be treated as success
        // — it means the code was never actually merged.
        if commit_stderr.contains("nothing to commit")
            || commit_stdout.contains("nothing to commit")
        {
            all_output.push_str(
                "=== Nothing to commit — feature branch has no changes beyond master ===\n",
            );
            cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
            return Ok(SquashMergeResult {
                success: false,
                had_conflicts,
                conflicts_resolved,
                conflict_details: Some(
                    "Squash-merge resulted in an empty diff — the feature branch has no \
                     code changes to merge into master."
                        .to_string(),
                ),
                output: all_output,
                gates_passed: false,
            });
        }
        // Any other commit failure: report the raw git output.
        cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
        return Ok(SquashMergeResult {
            success: false,
            had_conflicts,
            conflicts_resolved,
            conflict_details,
            output: all_output,
            gates_passed: false,
        });
    }

    // ── Bug 226: Verify the commit contains real code changes ─────
    // If the merge only brought in .story_kit/ files (pipeline file moves),
    // there are no actual code changes to land on master. Abort.
    {
        let diff_check = Command::new("git")
            .args(["diff", "--name-only", "HEAD~1..HEAD"])
            .current_dir(&merge_wt_path)
            .output()
            .map_err(|e| format!("Failed to check merge diff: {e}"))?;
        let changed_files = String::from_utf8_lossy(&diff_check.stdout);
        let has_code_changes = changed_files
            .lines()
            .any(|f| !f.starts_with(".story_kit/"));
        if !has_code_changes {
            all_output.push_str(
                "=== Merge commit contains only .story_kit/ file moves, no code changes ===\n",
            );
            cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
            return Ok(SquashMergeResult {
                success: false,
                had_conflicts,
                conflicts_resolved,
                conflict_details: Some(
                    "Feature branch has no code changes outside .story_kit/ — only \
                     pipeline file moves were found."
                        .to_string(),
                ),
                output: all_output,
                gates_passed: false,
            });
        }
    }

    // ── Run component setup from project.toml (same as worktree creation) ──────────
    // Setup failures are logged as warnings only; the quality gates below are
    // the authoritative pass/fail signal.
    {
        let config = ProjectConfig::load(&merge_wt_path).unwrap_or_default();
        if !config.component.is_empty() {
            all_output.push_str("=== component setup (merge worktree) ===\n");
        }
        for component in &config.component {
            let cmd_dir = merge_wt_path.join(&component.path);
            for cmd in &component.setup {
                all_output.push_str(&format!("--- {}: {cmd} ---\n", component.name));
                match Command::new("sh")
                    .args(["-c", cmd])
                    .current_dir(&cmd_dir)
                    .output()
                {
                    Ok(out) => {
                        all_output.push_str(&String::from_utf8_lossy(&out.stdout));
                        all_output.push_str(&String::from_utf8_lossy(&out.stderr));
                        all_output.push('\n');
                        if !out.status.success() {
                            all_output.push_str(&format!(
                                "=== setup warning: '{}' failed: {cmd} ===\n",
                                component.name
                            ));
                        }
                    }
                    Err(e) => {
                        all_output.push_str(&format!(
                            "=== setup warning: failed to run '{cmd}': {e} ===\n"
                        ));
                    }
                }
            }
        }
    }

    // ── Quality gates in merge workspace (BEFORE fast-forward) ────
    // Run gates in the merge worktree so that failures abort before master moves.
    all_output.push_str("=== Running quality gates before fast-forward ===\n");
    match run_merge_quality_gates(&merge_wt_path) {
        Ok((true, gate_out)) => {
            all_output.push_str(&gate_out);
            all_output.push('\n');
            all_output.push_str("=== Quality gates passed ===\n");
        }
        Ok((false, gate_out)) => {
            all_output.push_str(&gate_out);
            all_output.push('\n');
            all_output
                .push_str("=== Quality gates FAILED — aborting fast-forward, master unchanged ===\n");
            cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
            return Ok(SquashMergeResult {
                success: false,
                had_conflicts,
                conflicts_resolved,
                conflict_details,
                output: all_output,
                gates_passed: false,
            });
        }
        Err(e) => {
            // Gate machinery itself broke — fail safe, master untouched.
            all_output.push_str(&format!("Gate check error: {e}\n"));
            cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
            return Ok(SquashMergeResult {
                success: false,
                had_conflicts,
                conflicts_resolved,
                conflict_details,
                output: all_output,
                gates_passed: false,
            });
        }
    }

    // ── Cherry-pick the squash commit onto master ──────────────────
    // We cherry-pick instead of fast-forward so that concurrent filesystem
    // watcher commits on master (e.g. pipeline file moves) don't block the
    // merge. Cherry-pick applies the diff of the squash commit cleanly on
    // top of master's current HEAD.
    all_output.push_str(&format!(
        "=== Cherry-picking squash commit from {merge_branch} onto master ===\n"
    ));
    let cp = Command::new("git")
        .args(["cherry-pick", &merge_branch])
        .current_dir(project_root)
        .output()
        .map_err(|e| format!("Failed to cherry-pick merge-queue commit: {e}"))?;

    let cp_stdout = String::from_utf8_lossy(&cp.stdout).to_string();
    let cp_stderr = String::from_utf8_lossy(&cp.stderr).to_string();
    all_output.push_str(&cp_stdout);
    all_output.push_str(&cp_stderr);
    all_output.push('\n');

    if !cp.status.success() {
        // Abort the cherry-pick so master is left clean.
        let _ = Command::new("git")
            .args(["cherry-pick", "--abort"])
            .current_dir(project_root)
            .output();
        all_output.push_str("=== Cherry-pick failed — aborting, master unchanged ===\n");
        cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
        return Ok(SquashMergeResult {
            success: false,
            had_conflicts,
            conflicts_resolved,
            // gates_passed stays true here: the gates DID pass; only the
            // final landing step failed.
            conflict_details: Some(format!(
                "Cherry-pick of squash commit failed (conflict with master?):\n{cp_stderr}"
            )),
            output: all_output,
            gates_passed: true,
        });
    }

    // ── Clean up ──────────────────────────────────────────────────
    cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
    all_output.push_str("=== Merge-queue cleanup complete ===\n");

    Ok(SquashMergeResult {
        success: true,
        had_conflicts,
        conflicts_resolved,
        conflict_details,
        output: all_output,
        gates_passed: true,
    })
}
|
|
|
|
|
|
|
|
|
|
/// Remove the temporary merge worktree and branch. Best-effort — errors are
/// silently ignored because this is cleanup code that runs on every exit
/// path (including error paths) and must never mask the original failure.
fn cleanup_merge_workspace(
    project_root: &Path,
    merge_wt_path: &Path,
    merge_branch: &str,
) {
    // Fire-and-forget git invocation from the project root.
    let run_git = |args: &[&str]| {
        let _ = Command::new("git")
            .args(args)
            .current_dir(project_root)
            .output();
    };

    let worktree_arg = merge_wt_path.to_string_lossy().to_string();
    run_git(&["worktree", "remove", "--force", &worktree_arg]);

    // A stale plain directory (left by a previously failed run) is not a
    // registered worktree, so `git worktree remove` leaves it behind;
    // delete it manually so the next `git worktree add` can succeed.
    if merge_wt_path.exists() {
        let _ = std::fs::remove_dir_all(merge_wt_path);
    }

    run_git(&["branch", "-D", merge_branch]);
}
|
|
|
|
|
|
|
|
|
|
/// Attempt to automatically resolve merge conflicts in the given worktree.
|
|
|
|
|
///
|
|
|
|
|
/// Finds all conflicted files and tries [`resolve_simple_conflicts`] on each.
|
|
|
|
|
/// If **all** conflicts can be resolved, stages the resolved files and returns
|
|
|
|
|
/// `Ok((true, log))`. If any file has a complex conflict that cannot be
|
|
|
|
|
/// auto-resolved, returns `Ok((false, log))` without staging anything.
|
|
|
|
|
fn try_resolve_conflicts(worktree: &Path) -> Result<(bool, String), String> {
|
|
|
|
|
let mut log = String::new();
|
|
|
|
|
|
|
|
|
|
// List conflicted files.
|
|
|
|
|
let ls = Command::new("git")
|
|
|
|
|
.args(["diff", "--name-only", "--diff-filter=U"])
|
|
|
|
|
.current_dir(worktree)
|
|
|
|
|
.output()
|
|
|
|
|
.map_err(|e| format!("Failed to list conflicted files: {e}"))?;
|
|
|
|
|
|
|
|
|
|
let file_list = String::from_utf8_lossy(&ls.stdout);
|
|
|
|
|
let conflicted_files: Vec<&str> =
|
|
|
|
|
file_list.lines().filter(|l| !l.is_empty()).collect();
|
|
|
|
|
|
|
|
|
|
if conflicted_files.is_empty() {
|
|
|
|
|
log.push_str("No conflicted files found (conflict may be index-only).\n");
|
|
|
|
|
return Ok((false, log));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
log.push_str(&format!(
|
|
|
|
|
"Conflicted files ({}):\n",
|
|
|
|
|
conflicted_files.len()
|
|
|
|
|
));
|
|
|
|
|
for f in &conflicted_files {
|
|
|
|
|
log.push_str(&format!(" - {f}\n"));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// First pass: check that all files can be resolved before touching any.
|
|
|
|
|
let mut resolutions: Vec<(&str, String)> = Vec::new();
|
|
|
|
|
for file in &conflicted_files {
|
|
|
|
|
let file_path = worktree.join(file);
|
|
|
|
|
let content = std::fs::read_to_string(&file_path)
|
|
|
|
|
.map_err(|e| format!("Failed to read conflicted file '{file}': {e}"))?;
|
|
|
|
|
|
|
|
|
|
match resolve_simple_conflicts(&content) {
|
|
|
|
|
Some(resolved) => {
|
|
|
|
|
log.push_str(&format!(" [auto-resolve] {file}\n"));
|
|
|
|
|
resolutions.push((file, resolved));
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
log.push_str(&format!(
|
|
|
|
|
" [COMPLEX — cannot auto-resolve] {file}\n"
|
|
|
|
|
));
|
|
|
|
|
return Ok((false, log));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Second pass: write resolved content and stage.
|
|
|
|
|
for (file, resolved) in &resolutions {
|
|
|
|
|
let file_path = worktree.join(file);
|
|
|
|
|
std::fs::write(&file_path, resolved)
|
|
|
|
|
.map_err(|e| format!("Failed to write resolved file '{file}': {e}"))?;
|
|
|
|
|
|
|
|
|
|
let add = Command::new("git")
|
|
|
|
|
.args(["add", file])
|
|
|
|
|
.current_dir(worktree)
|
|
|
|
|
.output()
|
|
|
|
|
.map_err(|e| format!("Failed to stage resolved file '{file}': {e}"))?;
|
|
|
|
|
if !add.status.success() {
|
|
|
|
|
return Err(format!(
|
|
|
|
|
"git add failed for '{file}': {}",
|
|
|
|
|
String::from_utf8_lossy(&add.stderr)
|
|
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok((true, log))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Try to resolve simple additive merge conflicts in a file's content.
///
/// A conflict block is "simple additive" when both sides added new,
/// non-empty content at the same location without modifying existing lines;
/// the resolution keeps both additions (ours first, then theirs).
///
/// Returns `Some(resolved)` when every conflict block in the file is simple,
/// or `None` when any block is malformed or too complex to auto-resolve.
///
/// Fix: a block where exactly one side is empty means one branch *deleted*
/// content the other branch touched. The previous implementation silently
/// kept the non-empty side — discarding the deletion intent — even though
/// the documented contract calls such blocks complex. They now return
/// `None` so the merge aborts instead of guessing.
fn resolve_simple_conflicts(content: &str) -> Option<String> {
    // Quick check: if there are no conflict markers at all, nothing to do.
    if !content.contains("<<<<<<<") {
        return Some(content.to_string());
    }

    let mut result = String::new();
    let mut lines = content.lines().peekable();

    while let Some(line) = lines.next() {
        if line.starts_with("<<<<<<<") {
            // Collect the "ours" side (between <<<<<<< and =======).
            let mut ours = Vec::new();
            let mut found_separator = false;
            for next_line in lines.by_ref() {
                if next_line.starts_with("=======") {
                    found_separator = true;
                    break;
                }
                ours.push(next_line);
            }
            if !found_separator {
                return None; // Malformed conflict block.
            }

            // Collect the "theirs" side (between ======= and >>>>>>>).
            let mut theirs = Vec::new();
            let mut found_end = false;
            for next_line in lines.by_ref() {
                if next_line.starts_with(">>>>>>>") {
                    found_end = true;
                    break;
                }
                theirs.push(next_line);
            }
            if !found_end {
                return None; // Malformed conflict block.
            }

            // Both sides empty — the block contributes nothing; drop it.
            if ours.is_empty() && theirs.is_empty() {
                continue;
            }
            // Exactly one side empty: that side deleted content the other
            // side modified. Not a simple addition — refuse to auto-resolve.
            if ours.is_empty() || theirs.is_empty() {
                return None;
            }

            // Simple additive conflict: keep both sides, ours first.
            for l in ours.iter().chain(theirs.iter()) {
                result.push_str(l);
                result.push('\n');
            }
        } else {
            result.push_str(line);
            result.push('\n');
        }
    }

    // Preserve trailing newline consistency: if original ended without
    // newline, strip the trailing one we added.
    if !content.ends_with('\n') && result.ends_with('\n') {
        result.pop();
    }

    Some(result)
}
|
|
|
|
|
|
|
|
|
|
/// Run quality gates in the merge workspace before the squash commit is
/// allowed onto master.
|
|
|
|
|
///
|
|
|
|
|
/// When `script/test` is present it is the single source of truth and is the
|
|
|
|
|
/// only gate that runs — it is expected to cover the full suite (clippy, unit
|
|
|
|
|
/// tests, frontend tests, etc.). When `script/test` is absent the function
|
|
|
|
|
/// falls back to `cargo clippy` + `cargo nextest`/`cargo test` for Rust
|
|
|
|
|
/// projects. No hardcoded references to pnpm or frontend/ are used.
|
|
|
|
|
///
|
2026-02-20 17:36:35 +00:00
|
|
|
/// Returns `(gates_passed, combined_output)`.
|
|
|
|
|
fn run_merge_quality_gates(project_root: &Path) -> Result<(bool, String), String> {
|
|
|
|
|
let mut all_output = String::new();
|
|
|
|
|
let mut all_passed = true;
|
|
|
|
|
|
2026-02-24 13:56:11 +00:00
|
|
|
let script_test = project_root.join("script").join("test");
|
2026-02-20 17:36:35 +00:00
|
|
|
|
2026-02-26 19:30:26 +00:00
|
|
|
if script_test.exists() {
|
|
|
|
|
// Delegate entirely to script/test — it is the single source of truth
|
|
|
|
|
// for the full test suite (clippy, cargo tests, frontend builds, etc.).
|
|
|
|
|
let (success, output) = run_project_tests(project_root)?;
|
|
|
|
|
all_output.push_str(&output);
|
|
|
|
|
if !success {
|
|
|
|
|
all_passed = false;
|
|
|
|
|
}
|
|
|
|
|
return Ok((all_passed, all_output));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// No script/test — fall back to cargo gates for Rust projects.
|
|
|
|
|
let cargo_toml = project_root.join("Cargo.toml");
|
2026-02-24 13:56:11 +00:00
|
|
|
if cargo_toml.exists() {
|
|
|
|
|
let clippy = Command::new("cargo")
|
|
|
|
|
.args(["clippy", "--all-targets", "--all-features"])
|
|
|
|
|
.current_dir(project_root)
|
|
|
|
|
.output()
|
|
|
|
|
.map_err(|e| format!("Failed to run cargo clippy: {e}"))?;
|
2026-02-20 17:36:35 +00:00
|
|
|
|
2026-02-24 13:56:11 +00:00
|
|
|
all_output.push_str("=== cargo clippy ===\n");
|
|
|
|
|
let clippy_out = format!(
|
|
|
|
|
"{}{}",
|
|
|
|
|
String::from_utf8_lossy(&clippy.stdout),
|
|
|
|
|
String::from_utf8_lossy(&clippy.stderr)
|
|
|
|
|
);
|
|
|
|
|
all_output.push_str(&clippy_out);
|
|
|
|
|
all_output.push('\n');
|
|
|
|
|
|
|
|
|
|
if !clippy.status.success() {
|
|
|
|
|
all_passed = false;
|
|
|
|
|
}
|
2026-02-20 17:36:35 +00:00
|
|
|
|
2026-02-24 13:56:11 +00:00
|
|
|
let (test_success, test_out) = run_project_tests(project_root)?;
|
|
|
|
|
all_output.push_str(&test_out);
|
|
|
|
|
if !test_success {
|
|
|
|
|
all_passed = false;
|
|
|
|
|
}
|
2026-02-20 17:36:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok((all_passed, all_output))
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-19 17:58:53 +00:00
|
|
|
/// Spawn claude agent in a PTY and stream events through the broadcast channel.
|
2026-02-20 11:57:25 +00:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2026-02-19 17:58:53 +00:00
|
|
|
async fn run_agent_pty_streaming(
|
|
|
|
|
story_id: &str,
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
agent_name: &str,
|
2026-02-19 17:58:53 +00:00
|
|
|
command: &str,
|
|
|
|
|
args: &[String],
|
|
|
|
|
prompt: &str,
|
2026-02-19 15:25:22 +00:00
|
|
|
cwd: &str,
|
2026-02-19 17:58:53 +00:00
|
|
|
tx: &broadcast::Sender<AgentEvent>,
|
2026-02-20 11:57:25 +00:00
|
|
|
event_log: &Arc<Mutex<Vec<AgentEvent>>>,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
|
2026-02-24 13:13:16 +00:00
|
|
|
inactivity_timeout_secs: u64,
|
2026-02-24 17:56:40 +00:00
|
|
|
child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
|
2026-02-19 17:58:53 +00:00
|
|
|
) -> Result<Option<String>, String> {
|
|
|
|
|
let sid = story_id.to_string();
|
Accept story 34: Per-Project Agent Configuration and Role Definitions
Replace single [agent] config with multi-agent [[agent]] roster system.
Each agent has name, role, model, allowed_tools, max_turns, max_budget_usd,
and system_prompt fields that map to Claude CLI flags at spawn time.
- AgentConfig expanded with structured fields, validated at startup (panics
on duplicate names, empty names, non-positive budgets/turns)
- Backwards-compatible: legacy [agent] format auto-wraps with deprecation warning
- AgentPool uses composite "story_id:agent_name" keys for concurrent agents
- agent_name added to AgentEvent variants, AgentInfo, start/stop/subscribe APIs
- GET /agents/config returns roster, POST /agents/config/reload hot-reloads
- POST /agents/start accepts optional agent_name, /agents/stop requires it
- SSE route updated to /agents/:story_id/:agent_name/stream
- Frontend: roster badges, agent selector dropdown, composite-key state
- Project root initialized to cwd at startup so config endpoints work immediately
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-19 18:46:14 +00:00
|
|
|
let aname = agent_name.to_string();
|
2026-02-19 17:58:53 +00:00
|
|
|
let cmd = command.to_string();
|
|
|
|
|
let args = args.to_vec();
|
|
|
|
|
let prompt = prompt.to_string();
|
|
|
|
|
let cwd = cwd.to_string();
|
|
|
|
|
let tx = tx.clone();
|
2026-02-20 11:57:25 +00:00
|
|
|
let event_log = event_log.clone();
|
2026-02-19 17:58:53 +00:00
|
|
|
|
|
|
|
|
tokio::task::spawn_blocking(move || {
|
2026-02-23 20:52:06 +00:00
|
|
|
run_agent_pty_blocking(
|
|
|
|
|
&sid,
|
|
|
|
|
&aname,
|
|
|
|
|
&cmd,
|
|
|
|
|
&args,
|
|
|
|
|
&prompt,
|
|
|
|
|
&cwd,
|
|
|
|
|
&tx,
|
|
|
|
|
&event_log,
|
|
|
|
|
log_writer.as_deref(),
|
2026-02-24 13:13:16 +00:00
|
|
|
inactivity_timeout_secs,
|
2026-02-24 17:56:40 +00:00
|
|
|
&child_killers,
|
2026-02-23 20:52:06 +00:00
|
|
|
)
|
2026-02-19 17:58:53 +00:00
|
|
|
})
|
|
|
|
|
.await
|
|
|
|
|
.map_err(|e| format!("Agent task panicked: {e}"))?
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-24 19:35:06 +00:00
|
|
|
/// Dispatch a `stream_event` from Claude Code's `--include-partial-messages` output.
|
|
|
|
|
///
|
|
|
|
|
/// Extracts `thinking_delta` and `text_delta` from `content_block_delta` events
|
|
|
|
|
/// and routes them as `AgentEvent::Thinking` and `AgentEvent::Output` respectively.
|
|
|
|
|
/// This ensures thinking traces flow through the dedicated `ThinkingBlock` UI
|
|
|
|
|
/// component rather than appearing as unbounded regular output.
|
|
|
|
|
fn handle_agent_stream_event(
|
|
|
|
|
event: &serde_json::Value,
|
|
|
|
|
story_id: &str,
|
|
|
|
|
agent_name: &str,
|
|
|
|
|
tx: &broadcast::Sender<AgentEvent>,
|
|
|
|
|
event_log: &Mutex<Vec<AgentEvent>>,
|
|
|
|
|
log_writer: Option<&Mutex<AgentLogWriter>>,
|
|
|
|
|
) {
|
|
|
|
|
let event_type = event.get("type").and_then(|t| t.as_str()).unwrap_or("");
|
|
|
|
|
|
|
|
|
|
if event_type == "content_block_delta"
|
|
|
|
|
&& let Some(delta) = event.get("delta")
|
|
|
|
|
{
|
|
|
|
|
let delta_type = delta.get("type").and_then(|t| t.as_str()).unwrap_or("");
|
|
|
|
|
match delta_type {
|
|
|
|
|
"thinking_delta" => {
|
|
|
|
|
if let Some(thinking) = delta.get("thinking").and_then(|t| t.as_str()) {
|
|
|
|
|
emit_event(
|
|
|
|
|
AgentEvent::Thinking {
|
|
|
|
|
story_id: story_id.to_string(),
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
text: thinking.to_string(),
|
|
|
|
|
},
|
|
|
|
|
tx,
|
|
|
|
|
event_log,
|
|
|
|
|
log_writer,
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
"text_delta" => {
|
|
|
|
|
if let Some(text) = delta.get("text").and_then(|t| t.as_str()) {
|
|
|
|
|
emit_event(
|
|
|
|
|
AgentEvent::Output {
|
|
|
|
|
story_id: story_id.to_string(),
|
|
|
|
|
agent_name: agent_name.to_string(),
|
|
|
|
|
text: text.to_string(),
|
|
|
|
|
},
|
|
|
|
|
tx,
|
|
|
|
|
event_log,
|
|
|
|
|
log_writer,
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
_ => {}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 20:52:06 +00:00
|
|
|
/// Helper to send an event to broadcast, event log, and optional persistent log file.
|
2026-02-20 11:57:25 +00:00
|
|
|
fn emit_event(
|
|
|
|
|
event: AgentEvent,
|
|
|
|
|
tx: &broadcast::Sender<AgentEvent>,
|
|
|
|
|
event_log: &Mutex<Vec<AgentEvent>>,
|
2026-02-23 20:52:06 +00:00
|
|
|
log_writer: Option<&Mutex<AgentLogWriter>>,
|
2026-02-20 11:57:25 +00:00
|
|
|
) {
|
|
|
|
|
if let Ok(mut log) = event_log.lock() {
|
|
|
|
|
log.push(event.clone());
|
|
|
|
|
}
|
2026-02-23 20:52:06 +00:00
|
|
|
if let Some(writer) = log_writer
|
|
|
|
|
&& let Ok(mut w) = writer.lock()
|
|
|
|
|
&& let Err(e) = w.write_event(&event)
|
|
|
|
|
{
|
|
|
|
|
eprintln!("[agent_log] Failed to write event to log file: {e}");
|
|
|
|
|
}
|
2026-02-20 11:57:25 +00:00
|
|
|
let _ = tx.send(event);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Blocking core of the agent runner: spawns the agent CLI inside a PTY,
/// reads its `stream-json` output line-by-line, and forwards parsed events
/// through `emit_event` / `handle_agent_stream_event`.
///
/// Runs on a `spawn_blocking` thread (see `run_agent_pty_streaming`).
/// Returns the session id captured from a `system` event (if any), or an
/// error string if the PTY/spawn fails or the inactivity timeout fires.
///
/// * `inactivity_timeout_secs` — 0 disables the timeout; otherwise the child
///   is killed when no output line arrives for that many seconds.
/// * `child_killers` — shared registry used by stop/shutdown paths to kill
///   this child from another thread; the entry is removed on function exit
///   by `ChildKillerGuard`.
#[allow(clippy::too_many_arguments)]
fn run_agent_pty_blocking(
    story_id: &str,
    agent_name: &str,
    command: &str,
    args: &[String],
    prompt: &str,
    cwd: &str,
    tx: &broadcast::Sender<AgentEvent>,
    event_log: &Mutex<Vec<AgentEvent>>,
    log_writer: Option<&Mutex<AgentLogWriter>>,
    inactivity_timeout_secs: u64,
    child_killers: &Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
) -> Result<Option<String>, String> {
    let pty_system = native_pty_system();

    // Fixed 50x200 size: large enough that the CLI doesn't wrap JSON lines.
    let pair = pty_system
        .openpty(PtySize {
            rows: 50,
            cols: 200,
            pixel_width: 0,
            pixel_height: 0,
        })
        .map_err(|e| format!("Failed to open PTY: {e}"))?;

    let mut cmd = CommandBuilder::new(command);

    // -p <prompt> must come first
    cmd.arg("-p");
    cmd.arg(prompt);

    // Add configured args (e.g., --directory /path/to/worktree, --model, etc.)
    for arg in args {
        cmd.arg(arg);
    }

    cmd.arg("--output-format");
    cmd.arg("stream-json");
    cmd.arg("--verbose");
    // Enable partial streaming so we receive thinking_delta and text_delta
    // events in real-time, rather than only complete assistant events.
    // Without this, thinking traces may not appear in the structured output
    // and instead leak as unstructured PTY text.
    cmd.arg("--include-partial-messages");

    // Supervised agents don't need interactive permission prompts
    cmd.arg("--permission-mode");
    cmd.arg("bypassPermissions");

    cmd.cwd(cwd);
    // NO_COLOR keeps ANSI escapes out of the stream we have to parse.
    cmd.env("NO_COLOR", "1");

    // Allow spawning Claude Code from within a Claude Code session
    cmd.env_remove("CLAUDECODE");
    cmd.env_remove("CLAUDE_CODE_ENTRYPOINT");

    slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}");

    let mut child = pair
        .slave
        .spawn_command(cmd)
        .map_err(|e| format!("Failed to spawn agent for {story_id}:{agent_name}: {e}"))?;

    // Register the child killer so that kill_all_children() / stop_agent() can
    // terminate this process on server shutdown, even if the blocking thread
    // cannot be interrupted. The ChildKillerGuard deregisters on function exit.
    let killer_key = composite_key(story_id, agent_name);
    {
        let killer = child.clone_killer();
        if let Ok(mut killers) = child_killers.lock() {
            killers.insert(killer_key.clone(), killer);
        }
    }
    let _killer_guard = ChildKillerGuard {
        killers: Arc::clone(child_killers),
        key: killer_key,
    };

    // Drop our copies of the PTY handles we no longer need; keeping them
    // open would prevent the reader from ever seeing EOF after the child exits.
    drop(pair.slave);

    let reader = pair
        .master
        .try_clone_reader()
        .map_err(|e| format!("Failed to clone PTY reader: {e}"))?;

    drop(pair.master);

    // Spawn a reader thread to collect PTY output lines.
    // We use a channel so the main thread can apply an inactivity deadline
    // via recv_timeout: if no output arrives within the configured window
    // the process is killed and the agent is marked Failed.
    let (line_tx, line_rx) = std::sync::mpsc::channel::<std::io::Result<String>>();
    std::thread::spawn(move || {
        let buf_reader = BufReader::new(reader);
        for line in buf_reader.lines() {
            // send() only fails when the receiver is gone — stop reading then.
            if line_tx.send(line).is_err() {
                break;
            }
        }
    });

    // 0 means "no inactivity timeout": block forever on recv().
    let timeout_dur = if inactivity_timeout_secs > 0 {
        Some(std::time::Duration::from_secs(inactivity_timeout_secs))
    } else {
        None
    };

    let mut session_id: Option<String> = None;

    loop {
        let recv_result = match timeout_dur {
            Some(dur) => line_rx.recv_timeout(dur),
            // Map recv()'s RecvError onto Disconnected so both paths share
            // one error type below.
            None => line_rx
                .recv()
                .map_err(|_| std::sync::mpsc::RecvTimeoutError::Disconnected),
        };

        let line = match recv_result {
            Ok(Ok(l)) => l,
            Ok(Err(_)) => {
                // IO error reading from PTY — treat as EOF.
                break;
            }
            Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => {
                // Reader thread exited (EOF from PTY).
                break;
            }
            Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                slog_warn!(
                    "[agent:{story_id}:{agent_name}] Inactivity timeout after \
                     {inactivity_timeout_secs}s with no output. Killing process."
                );
                let _ = child.kill();
                let _ = child.wait();
                return Err(format!(
                    "Agent inactivity timeout: no output received for {inactivity_timeout_secs}s"
                ));
            }
        };

        let trimmed = line.trim();
        if trimmed.is_empty() {
            continue;
        }

        // Try to parse as JSON
        let json: serde_json::Value = match serde_json::from_str(trimmed) {
            Ok(j) => j,
            Err(_) => {
                // Non-JSON output (terminal escapes etc.) — send as raw output
                emit_event(
                    AgentEvent::Output {
                        story_id: story_id.to_string(),
                        agent_name: agent_name.to_string(),
                        text: trimmed.to_string(),
                    },
                    tx,
                    event_log,
                    log_writer,
                );
                continue;
            }
        };

        let event_type = json.get("type").and_then(|t| t.as_str()).unwrap_or("");

        match event_type {
            // A "system" event carries the session id; remember the latest one.
            "system" => {
                session_id = json
                    .get("session_id")
                    .and_then(|s| s.as_str())
                    .map(|s| s.to_string());
            }
            // With --include-partial-messages, thinking and text arrive
            // incrementally via stream_event → content_block_delta. Handle
            // them here for real-time streaming to the frontend.
            "stream_event" => {
                if let Some(event) = json.get("event") {
                    handle_agent_stream_event(
                        event,
                        story_id,
                        agent_name,
                        tx,
                        event_log,
                        log_writer,
                    );
                }
            }
            // Complete assistant events are skipped for content extraction
            // because thinking and text already arrived via stream_event.
            // The raw JSON is still forwarded as AgentJson below.
            "assistant" | "user" | "result" => {}
            _ => {}
        }

        // Forward all JSON events
        emit_event(
            AgentEvent::AgentJson {
                story_id: story_id.to_string(),
                agent_name: agent_name.to_string(),
                data: json,
            },
            tx,
            event_log,
            log_writer,
        );
    }

    // EOF reached: make sure the child is fully reaped before returning.
    let _ = child.kill();
    let _ = child.wait();

    slog!(
        "[agent:{story_id}:{agent_name}] Done. Session: {:?}",
        session_id
    );

    Ok(session_id)
}
|
2026-02-20 13:16:04 +00:00
|
|
|
|
|
|
|
|
#[cfg(test)]
|
|
|
|
|
mod tests {
|
|
|
|
|
use super::*;
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn wait_for_agent_returns_immediately_if_completed() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 13:16:04 +00:00
|
|
|
pool.inject_test_agent("s1", "bot", AgentStatus::Completed);
|
|
|
|
|
|
|
|
|
|
let info = pool.wait_for_agent("s1", "bot", 1000).await.unwrap();
|
|
|
|
|
assert_eq!(info.status, AgentStatus::Completed);
|
|
|
|
|
assert_eq!(info.story_id, "s1");
|
|
|
|
|
assert_eq!(info.agent_name, "bot");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn wait_for_agent_returns_immediately_if_failed() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 13:16:04 +00:00
|
|
|
pool.inject_test_agent("s2", "bot", AgentStatus::Failed);
|
|
|
|
|
|
|
|
|
|
let info = pool.wait_for_agent("s2", "bot", 1000).await.unwrap();
|
|
|
|
|
assert_eq!(info.status, AgentStatus::Failed);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    #[tokio::test]
    async fn wait_for_agent_completes_on_done_event() {
        // Verifies the wait loop unblocks when a Done event arrives on the
        // broadcast channel, independent of the status stored in the map.
        let pool = AgentPool::new_test(3001);
        let tx = pool.inject_test_agent("s3", "bot", AgentStatus::Running);

        // Send Done event after a short delay
        let tx_clone = tx.clone();
        tokio::spawn(async move {
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
            // Mark status via event; real code also updates the map, but for
            // this unit test the map entry stays Running — we verify the
            // wait loop reacts to the event.
            let _ = tx_clone.send(AgentEvent::Done {
                story_id: "s3".to_string(),
                agent_name: "bot".to_string(),
                session_id: Some("sess-abc".to_string()),
            });
        });

        let info = pool.wait_for_agent("s3", "bot", 2000).await.unwrap();
        // Status comes from the map entry (Running in this unit test)
        // — the important thing is that wait_for_agent returned without timing out.
        assert_eq!(info.story_id, "s3");
    }
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn wait_for_agent_times_out() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 13:16:04 +00:00
|
|
|
pool.inject_test_agent("s4", "bot", AgentStatus::Running);
|
|
|
|
|
|
|
|
|
|
let result = pool.wait_for_agent("s4", "bot", 50).await;
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
let msg = result.unwrap_err();
|
|
|
|
|
assert!(msg.contains("Timed out"), "unexpected message: {msg}");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn wait_for_agent_errors_for_nonexistent() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 13:16:04 +00:00
|
|
|
let result = pool.wait_for_agent("no_story", "no_bot", 100).await;
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    #[tokio::test]
    async fn wait_for_agent_completes_on_stopped_status_event() {
        // A Status event with "stopped" must also unblock the wait loop,
        // mirroring the Done-event test above.
        let pool = AgentPool::new_test(3001);
        let tx = pool.inject_test_agent("s5", "bot", AgentStatus::Running);

        let tx_clone = tx.clone();
        tokio::spawn(async move {
            tokio::time::sleep(std::time::Duration::from_millis(30)).await;
            let _ = tx_clone.send(AgentEvent::Status {
                story_id: "s5".to_string(),
                agent_name: "bot".to_string(),
                status: "stopped".to_string(),
            });
        });

        // Returning before the 2000 ms deadline is the assertion here.
        let info = pool.wait_for_agent("s5", "bot", 2000).await.unwrap();
        assert_eq!(info.story_id, "s5");
    }
|
2026-02-20 15:02:34 +00:00
|
|
|
|
|
|
|
|
// ── report_completion tests ────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn report_completion_rejects_nonexistent_agent() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 15:02:34 +00:00
|
|
|
let result = pool
|
|
|
|
|
.report_completion("no_story", "no_bot", "done")
|
|
|
|
|
.await;
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
let msg = result.unwrap_err();
|
|
|
|
|
assert!(msg.contains("No agent"), "unexpected: {msg}");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn report_completion_rejects_non_running_agent() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 15:02:34 +00:00
|
|
|
pool.inject_test_agent("s6", "bot", AgentStatus::Completed);
|
|
|
|
|
|
|
|
|
|
let result = pool.report_completion("s6", "bot", "done").await;
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
let msg = result.unwrap_err();
|
|
|
|
|
assert!(
|
|
|
|
|
msg.contains("not running"),
|
|
|
|
|
"expected 'not running' in: {msg}"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
    #[tokio::test]
    async fn report_completion_rejects_dirty_worktree() {
        // Completion must be refused while the agent's worktree has
        // uncommitted changes; exercised against a real temporary git repo.
        use std::fs;
        use tempfile::tempdir;

        let tmp = tempdir().unwrap();
        let repo = tmp.path();

        // Init a real git repo and make an initial commit
        Command::new("git")
            .args(["init"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["commit", "--allow-empty", "-m", "init"])
            .current_dir(repo)
            .output()
            .unwrap();

        // Write an uncommitted file
        fs::write(repo.join("dirty.txt"), "not committed").unwrap();

        let pool = AgentPool::new_test(3001);
        pool.inject_test_agent_with_path("s7", "bot", AgentStatus::Running, repo.to_path_buf());

        let result = pool.report_completion("s7", "bot", "done").await;
        assert!(result.is_err());
        let msg = result.unwrap_err();
        assert!(
            msg.contains("uncommitted"),
            "expected 'uncommitted' in: {msg}"
        );
    }
|
2026-02-20 15:31:13 +00:00
|
|
|
|
2026-02-23 15:00:10 +00:00
|
|
|
// ── server-owned completion tests ───────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
    #[tokio::test]
    async fn server_owned_completion_skips_when_already_completed() {
        // If an agent already holds a CompletionReport, server-owned
        // completion must be a no-op: no gate re-run, no overwritten
        // summary, and no duplicate Done event.
        let pool = AgentPool::new_test(3001);
        let report = CompletionReport {
            summary: "Already done".to_string(),
            gates_passed: true,
            gate_output: String::new(),
        };
        pool.inject_test_agent_with_completion(
            "s10",
            "coder-1",
            AgentStatus::Completed,
            PathBuf::from("/tmp/nonexistent"),
            report,
        );

        // Subscribe before calling so we can check if Done event was emitted.
        let mut rx = pool.subscribe("s10", "coder-1").unwrap();

        run_server_owned_completion(&pool.agents, pool.port, "s10", "coder-1", Some("sess-1".to_string()), pool.watcher_tx.clone())
            .await;

        // Status should remain Completed (unchanged) — no gate re-run.
        let agents = pool.agents.lock().unwrap();
        let key = composite_key("s10", "coder-1");
        let agent = agents.get(&key).unwrap();
        assert_eq!(agent.status, AgentStatus::Completed);
        // Summary should still be the original, not overwritten.
        assert_eq!(
            agent.completion.as_ref().unwrap().summary,
            "Already done"
        );
        // Release the lock before polling the receiver.
        drop(agents);

        // No Done event should have been emitted.
        assert!(
            rx.try_recv().is_err(),
            "should not emit Done when completion already exists"
        );
    }
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn server_owned_completion_runs_gates_on_clean_worktree() {
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 15:00:10 +00:00
|
|
|
pool.inject_test_agent_with_path(
|
|
|
|
|
"s11",
|
|
|
|
|
"coder-1",
|
|
|
|
|
AgentStatus::Running,
|
|
|
|
|
repo.to_path_buf(),
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
let mut rx = pool.subscribe("s11", "coder-1").unwrap();
|
|
|
|
|
|
2026-02-24 23:57:07 +00:00
|
|
|
run_server_owned_completion(&pool.agents, pool.port, "s11", "coder-1", Some("sess-2".to_string()), pool.watcher_tx.clone())
|
2026-02-23 15:00:10 +00:00
|
|
|
.await;
|
|
|
|
|
|
2026-02-25 14:59:20 +00:00
|
|
|
// Agent entry should be removed from the map after completion.
|
2026-02-23 15:00:10 +00:00
|
|
|
let agents = pool.agents.lock().unwrap();
|
|
|
|
|
let key = composite_key("s11", "coder-1");
|
|
|
|
|
assert!(
|
2026-02-25 14:59:20 +00:00
|
|
|
agents.get(&key).is_none(),
|
|
|
|
|
"agent should be removed from map after completion"
|
2026-02-23 15:00:10 +00:00
|
|
|
);
|
|
|
|
|
drop(agents);
|
|
|
|
|
|
2026-02-25 14:59:20 +00:00
|
|
|
// A Done event should have been emitted with the session_id.
|
2026-02-23 15:00:10 +00:00
|
|
|
let event = rx.try_recv().expect("should emit Done event");
|
2026-02-25 14:59:20 +00:00
|
|
|
match &event {
|
|
|
|
|
AgentEvent::Done { session_id, .. } => {
|
|
|
|
|
assert_eq!(*session_id, Some("sess-2".to_string()));
|
|
|
|
|
}
|
|
|
|
|
other => panic!("expected Done event, got: {other:?}"),
|
|
|
|
|
}
|
2026-02-23 15:00:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn server_owned_completion_fails_on_dirty_worktree() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
// Create an uncommitted file.
|
|
|
|
|
fs::write(repo.join("dirty.txt"), "not committed").unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 15:00:10 +00:00
|
|
|
pool.inject_test_agent_with_path(
|
|
|
|
|
"s12",
|
|
|
|
|
"coder-1",
|
|
|
|
|
AgentStatus::Running,
|
|
|
|
|
repo.to_path_buf(),
|
|
|
|
|
);
|
|
|
|
|
|
2026-02-25 14:59:20 +00:00
|
|
|
let mut rx = pool.subscribe("s12", "coder-1").unwrap();
|
|
|
|
|
|
2026-02-24 23:57:07 +00:00
|
|
|
run_server_owned_completion(&pool.agents, pool.port, "s12", "coder-1", None, pool.watcher_tx.clone())
|
2026-02-23 15:00:10 +00:00
|
|
|
.await;
|
|
|
|
|
|
2026-02-25 14:59:20 +00:00
|
|
|
// Agent entry should be removed from the map after completion (even on failure).
|
2026-02-23 15:00:10 +00:00
|
|
|
let agents = pool.agents.lock().unwrap();
|
|
|
|
|
let key = composite_key("s12", "coder-1");
|
|
|
|
|
assert!(
|
2026-02-25 14:59:20 +00:00
|
|
|
agents.get(&key).is_none(),
|
|
|
|
|
"agent should be removed from map after failed completion"
|
|
|
|
|
);
|
|
|
|
|
drop(agents);
|
|
|
|
|
|
|
|
|
|
// A Done event should have been emitted.
|
|
|
|
|
let event = rx.try_recv().expect("should emit Done event");
|
|
|
|
|
assert!(
|
|
|
|
|
matches!(event, AgentEvent::Done { .. }),
|
|
|
|
|
"expected Done event, got: {event:?}"
|
2026-02-23 15:00:10 +00:00
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn server_owned_completion_nonexistent_agent_is_noop() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 15:00:10 +00:00
|
|
|
// Should not panic or error — just silently return.
|
2026-02-24 23:57:07 +00:00
|
|
|
run_server_owned_completion(&pool.agents, pool.port, "nonexistent", "bot", None, pool.watcher_tx.clone())
|
2026-02-23 15:00:10 +00:00
|
|
|
.await;
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 15:31:13 +00:00
|
|
|
// ── move_story_to_current tests ────────────────────────────────────────────
|
2026-02-20 19:39:19 +00:00
|
|
|
// No git repo needed: the watcher handles commits asynchronously.
|
2026-02-20 15:31:13 +00:00
|
|
|
|
|
|
|
|
/// Initialize a throwaway git repository at `repo` for tests: `git init`,
/// a local test identity, and one empty initial commit so HEAD exists.
///
/// Output and exit status of each git invocation are deliberately ignored
/// (matching the original behavior); `unwrap` only guards spawn failure.
fn init_git_repo(repo: &std::path::Path) {
    // Local helper: run one git command in `repo`, panicking if git
    // itself cannot be spawned. Avoids repeating the Command boilerplate.
    let git = |args: &[&str]| {
        Command::new("git")
            .args(args)
            .current_dir(repo)
            .output()
            .unwrap();
    };
    git(&["init"]);
    git(&["config", "user.email", "test@test.com"]);
    git(&["config", "user.name", "Test"]);
    git(&["commit", "--allow-empty", "-m", "init"]);
}
|
|
|
|
|
|
|
|
|
|
/// Moving a story out of 1_upcoming/ lands it in 2_current/.
#[test]
fn move_story_to_current_moves_file() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let upcoming = root.join(".story_kit/work/1_upcoming");
    let current = root.join(".story_kit/work/2_current");
    for stage in [&upcoming, &current] {
        fs::create_dir_all(stage).unwrap();
    }
    fs::write(upcoming.join("10_story_foo.md"), "test").unwrap();

    move_story_to_current(root, "10_story_foo").unwrap();

    assert!(current.join("10_story_foo.md").exists());
    assert!(!upcoming.join("10_story_foo.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// A story already in 2_current/ stays there; the move is idempotent.
#[test]
fn move_story_to_current_is_idempotent_when_already_current() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let current = dir.path().join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("11_story_foo.md"), "test").unwrap();

    move_story_to_current(dir.path(), "11_story_foo").unwrap();
    assert!(current.join("11_story_foo.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// A story that is nowhere to be found is not an error — the move is a no-op.
#[test]
fn move_story_to_current_noop_when_not_in_upcoming() {
    let dir = tempfile::tempdir().unwrap();
    assert!(move_story_to_current(dir.path(), "99_missing").is_ok());
}
|
|
|
|
|
|
2026-02-20 16:21:30 +00:00
|
|
|
/// Bugs use the same staging flow as stories: 1_upcoming/ → 2_current/.
#[test]
fn move_bug_to_current_moves_from_upcoming() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let upcoming = root.join(".story_kit/work/1_upcoming");
    let current = root.join(".story_kit/work/2_current");
    for stage in [&upcoming, &current] {
        fs::create_dir_all(stage).unwrap();
    }
    fs::write(upcoming.join("1_bug_test.md"), "# Bug 1\n").unwrap();

    move_story_to_current(root, "1_bug_test").unwrap();

    assert!(current.join("1_bug_test.md").exists());
    assert!(!upcoming.join("1_bug_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// Closing an in-progress bug moves it from 2_current/ to 5_done/.
#[test]
fn close_bug_moves_from_current_to_archive() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("2_bug_test.md"), "# Bug 2\n").unwrap();

    close_bug_to_archive(root, "2_bug_test").unwrap();

    assert!(root.join(".story_kit/work/5_done/2_bug_test.md").exists());
    assert!(!current.join("2_bug_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// Closing a bug that was never started moves it from 1_upcoming/ to 5_done/.
#[test]
fn close_bug_moves_from_upcoming_when_not_started() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let upcoming = root.join(".story_kit/work/1_upcoming");
    fs::create_dir_all(&upcoming).unwrap();
    fs::write(upcoming.join("3_bug_test.md"), "# Bug 3\n").unwrap();

    close_bug_to_archive(root, "3_bug_test").unwrap();

    assert!(root.join(".story_kit/work/5_done/3_bug_test.md").exists());
    assert!(!upcoming.join("3_bug_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// `item_type_from_id` classifies ids by the token after the numeric prefix.
#[test]
fn item_type_from_id_detects_types() {
    let cases = [
        ("1_bug_test", "bug"),
        ("1_spike_research", "spike"),
        ("50_story_my_story", "story"),
        ("1_story_simple", "story"),
    ];
    for (id, expected) in cases {
        assert_eq!(item_type_from_id(id), expected);
    }
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
// ── pipeline_stage tests ──────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Every `coder-N` agent name maps to the Coder stage.
#[test]
fn pipeline_stage_detects_coders() {
    for name in ["coder-1", "coder-2", "coder-3"] {
        assert_eq!(pipeline_stage(name), PipelineStage::Coder);
    }
}
|
|
|
|
|
|
|
|
|
|
/// The "qa" agent name maps to the Qa stage.
#[test]
fn pipeline_stage_detects_qa() {
    assert_eq!(pipeline_stage("qa"), PipelineStage::Qa);
}
|
|
|
|
|
|
|
|
|
|
/// The "mergemaster" agent name maps to the Mergemaster stage.
#[test]
fn pipeline_stage_detects_mergemaster() {
    assert_eq!(pipeline_stage("mergemaster"), PipelineStage::Mergemaster);
}
|
|
|
|
|
|
|
|
|
|
/// Names outside the coder/qa/mergemaster pipeline fall through to Other.
#[test]
fn pipeline_stage_supervisor_is_other() {
    for name in ["supervisor", "default", "unknown"] {
        assert_eq!(pipeline_stage(name), PipelineStage::Other);
    }
}
|
|
|
|
|
|
|
|
|
|
// ── pipeline advance tests ────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn pipeline_advance_coder_gates_pass_moves_story_to_qa() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Set up story in 2_current/
|
|
|
|
|
let current = root.join(".story_kit/work/2_current");
|
|
|
|
|
fs::create_dir_all(¤t).unwrap();
|
|
|
|
|
fs::write(current.join("50_story_test.md"), "test").unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-25 14:59:20 +00:00
|
|
|
// Call pipeline advance directly with completion data.
|
|
|
|
|
pool.run_pipeline_advance(
|
2026-02-23 13:13:41 +00:00
|
|
|
"50_story_test",
|
|
|
|
|
"coder-1",
|
|
|
|
|
CompletionReport {
|
|
|
|
|
summary: "done".to_string(),
|
|
|
|
|
gates_passed: true,
|
|
|
|
|
gate_output: String::new(),
|
|
|
|
|
},
|
2026-02-25 14:59:20 +00:00
|
|
|
Some(root.to_path_buf()),
|
|
|
|
|
None,
|
2026-02-26 16:12:23 +00:00
|
|
|
false,
|
2026-02-25 14:59:20 +00:00
|
|
|
)
|
2026-02-23 13:13:41 +00:00
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
// Story should have moved to 3_qa/ (start_agent for qa will fail in tests but
|
|
|
|
|
// the file move happens before that).
|
|
|
|
|
assert!(
|
|
|
|
|
root.join(".story_kit/work/3_qa/50_story_test.md").exists(),
|
|
|
|
|
"story should be in 3_qa/"
|
|
|
|
|
);
|
|
|
|
|
assert!(
|
|
|
|
|
!current.join("50_story_test.md").exists(),
|
|
|
|
|
"story should not still be in 2_current/"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn pipeline_advance_qa_gates_pass_moves_story_to_merge() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Set up story in 3_qa/
|
|
|
|
|
let qa_dir = root.join(".story_kit/work/3_qa");
|
|
|
|
|
fs::create_dir_all(&qa_dir).unwrap();
|
|
|
|
|
fs::write(qa_dir.join("51_story_test.md"), "test").unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-25 14:59:20 +00:00
|
|
|
pool.run_pipeline_advance(
|
2026-02-23 13:13:41 +00:00
|
|
|
"51_story_test",
|
|
|
|
|
"qa",
|
|
|
|
|
CompletionReport {
|
|
|
|
|
summary: "QA done".to_string(),
|
|
|
|
|
gates_passed: true,
|
|
|
|
|
gate_output: String::new(),
|
|
|
|
|
},
|
2026-02-25 14:59:20 +00:00
|
|
|
Some(root.to_path_buf()),
|
|
|
|
|
None,
|
2026-02-26 16:12:23 +00:00
|
|
|
false,
|
2026-02-25 14:59:20 +00:00
|
|
|
)
|
2026-02-23 13:13:41 +00:00
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
// Story should have moved to 4_merge/
|
|
|
|
|
assert!(
|
|
|
|
|
root.join(".story_kit/work/4_merge/51_story_test.md").exists(),
|
|
|
|
|
"story should be in 4_merge/"
|
|
|
|
|
);
|
|
|
|
|
assert!(
|
|
|
|
|
!qa_dir.join("51_story_test.md").exists(),
|
|
|
|
|
"story should not still be in 3_qa/"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn pipeline_advance_supervisor_does_not_advance() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
let current = root.join(".story_kit/work/2_current");
|
|
|
|
|
fs::create_dir_all(¤t).unwrap();
|
|
|
|
|
fs::write(current.join("52_story_test.md"), "test").unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-25 14:59:20 +00:00
|
|
|
pool.run_pipeline_advance(
|
2026-02-23 13:13:41 +00:00
|
|
|
"52_story_test",
|
|
|
|
|
"supervisor",
|
|
|
|
|
CompletionReport {
|
|
|
|
|
summary: "supervised".to_string(),
|
|
|
|
|
gates_passed: true,
|
|
|
|
|
gate_output: String::new(),
|
|
|
|
|
},
|
2026-02-25 14:59:20 +00:00
|
|
|
Some(root.to_path_buf()),
|
|
|
|
|
None,
|
2026-02-26 16:12:23 +00:00
|
|
|
false,
|
2026-02-25 14:59:20 +00:00
|
|
|
)
|
2026-02-23 13:13:41 +00:00
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
// Story should NOT have moved (supervisors don't advance pipeline)
|
|
|
|
|
assert!(
|
|
|
|
|
current.join("52_story_test.md").exists(),
|
|
|
|
|
"story should still be in 2_current/ for supervisor"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-20 17:36:35 +00:00
|
|
|
// ── move_story_to_merge tests ──────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Moving a story to merge lands it in 4_merge/ and removes it from 2_current/.
#[test]
fn move_story_to_merge_moves_file() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("20_story_foo.md"), "test").unwrap();

    move_story_to_merge(root, "20_story_foo").unwrap();

    assert!(root.join(".story_kit/work/4_merge/20_story_foo.md").exists());
    assert!(!current.join("20_story_foo.md").exists());
}
|
|
|
|
|
|
2026-02-23 13:13:41 +00:00
|
|
|
/// A story sitting in 3_qa/ can also be advanced straight into 4_merge/.
#[test]
fn move_story_to_merge_from_qa_dir() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let qa_dir = root.join(".story_kit/work/3_qa");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::write(qa_dir.join("40_story_test.md"), "test").unwrap();

    move_story_to_merge(root, "40_story_test").unwrap();

    assert!(root.join(".story_kit/work/4_merge/40_story_test.md").exists());
    assert!(!qa_dir.join("40_story_test.md").exists());
}
|
|
|
|
|
|
2026-02-20 17:36:35 +00:00
|
|
|
/// A story already in 4_merge/ stays there; the move is idempotent.
#[test]
fn move_story_to_merge_idempotent_when_already_in_merge() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let merge_dir = dir.path().join(".story_kit/work/4_merge");
    fs::create_dir_all(&merge_dir).unwrap();
    fs::write(merge_dir.join("21_story_test.md"), "test").unwrap();

    move_story_to_merge(dir.path(), "21_story_test").unwrap();
    assert!(merge_dir.join("21_story_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// An unknown story id yields an error naming both searched stage dirs.
#[test]
fn move_story_to_merge_errors_when_not_in_current_or_qa() {
    let dir = tempfile::tempdir().unwrap();
    let err = move_story_to_merge(dir.path(), "99_nonexistent").unwrap_err();
    assert!(err.contains("not found in work/2_current/ or work/3_qa/"));
}
|
|
|
|
|
|
2026-02-20 17:44:06 +00:00
|
|
|
// ── move_story_to_qa tests ────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Moving a story to QA lands it in 3_qa/ and removes it from 2_current/.
#[test]
fn move_story_to_qa_moves_file() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("30_story_qa.md"), "test").unwrap();

    move_story_to_qa(root, "30_story_qa").unwrap();

    assert!(root.join(".story_kit/work/3_qa/30_story_qa.md").exists());
    assert!(!current.join("30_story_qa.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// A story already in 3_qa/ stays there; the move is idempotent.
#[test]
fn move_story_to_qa_idempotent_when_already_in_qa() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let qa_dir = dir.path().join(".story_kit/work/3_qa");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::write(qa_dir.join("31_story_test.md"), "test").unwrap();

    move_story_to_qa(dir.path(), "31_story_test").unwrap();
    assert!(qa_dir.join("31_story_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// An unknown story id yields an error naming the searched stage dir.
#[test]
fn move_story_to_qa_errors_when_not_in_current() {
    let dir = tempfile::tempdir().unwrap();
    let err = move_story_to_qa(dir.path(), "99_nonexistent").unwrap_err();
    assert!(err.contains("not found in work/2_current/"));
}
|
|
|
|
|
|
2026-02-20 19:39:19 +00:00
|
|
|
// ── move_story_to_archived tests ──────────────────────────────────────────
|
2026-02-20 17:36:35 +00:00
|
|
|
|
|
|
|
|
/// Archiving looks in 4_merge/ and moves the story into 5_done/.
#[test]
fn move_story_to_archived_finds_in_merge_dir() {
    use std::fs;
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let merge_dir = root.join(".story_kit/work/4_merge");
    fs::create_dir_all(&merge_dir).unwrap();
    fs::write(merge_dir.join("22_story_test.md"), "test").unwrap();

    move_story_to_archived(root, "22_story_test").unwrap();

    assert!(root.join(".story_kit/work/5_done/22_story_test.md").exists());
    assert!(!merge_dir.join("22_story_test.md").exists());
}
|
|
|
|
|
|
|
|
|
|
/// An unknown story id yields an error mentioning the 4_merge stage.
#[test]
fn move_story_to_archived_error_when_not_in_current_or_merge() {
    let dir = tempfile::tempdir().unwrap();
    let err = move_story_to_archived(dir.path(), "99_nonexistent").unwrap_err();
    assert!(err.contains("4_merge"));
}
|
|
|
|
|
|
|
|
|
|
// ── merge_agent_work tests ────────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn merge_agent_work_returns_error_when_branch_not_found() {
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 17:36:35 +00:00
|
|
|
// branch feature/story-99_nonexistent does not exist
|
|
|
|
|
let result = pool
|
|
|
|
|
.merge_agent_work(repo, "99_nonexistent")
|
|
|
|
|
.await
|
|
|
|
|
.unwrap();
|
|
|
|
|
// Should fail (no branch) — not panic
|
|
|
|
|
assert!(!result.success, "should fail when branch missing");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn merge_agent_work_succeeds_on_clean_branch() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
|
|
|
|
|
// Create a feature branch with a commit
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["checkout", "-b", "feature/story-23_test"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
fs::write(repo.join("feature.txt"), "feature content").unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["add", "."])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["commit", "-m", "add feature"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Switch back to master (initial branch)
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["checkout", "master"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Create the story file in 4_merge/ so we can test archival
|
|
|
|
|
let merge_dir = repo.join(".story_kit/work/4_merge");
|
|
|
|
|
fs::create_dir_all(&merge_dir).unwrap();
|
|
|
|
|
let story_file = merge_dir.join("23_test.md");
|
2026-02-23 19:12:05 +00:00
|
|
|
fs::write(&story_file, "---\nname: Test\n---\n").unwrap();
|
2026-02-20 17:36:35 +00:00
|
|
|
Command::new("git")
|
|
|
|
|
.args(["add", "."])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["commit", "-m", "add story in merge"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-20 17:36:35 +00:00
|
|
|
let report = pool.merge_agent_work(repo, "23_test").await.unwrap();
|
|
|
|
|
|
|
|
|
|
// Merge should succeed (gates will run but cargo/pnpm results will depend on env)
|
|
|
|
|
// At minimum the merge itself should succeed
|
|
|
|
|
assert!(!report.had_conflicts, "should have no conflicts");
|
|
|
|
|
// Note: gates_passed may be false in test env without Rust project, that's OK
|
|
|
|
|
// The important thing is the merge itself ran
|
|
|
|
|
assert!(
|
|
|
|
|
report.success || report.gate_output.contains("Failed to run") || !report.gates_passed,
|
|
|
|
|
"report should be coherent: {report:?}"
|
|
|
|
|
);
|
2026-02-24 17:01:57 +00:00
|
|
|
// Story should be in done if gates passed
|
2026-02-20 17:36:35 +00:00
|
|
|
if report.story_archived {
|
2026-02-24 17:01:57 +00:00
|
|
|
let done = repo.join(".story_kit/work/5_done/23_test.md");
|
|
|
|
|
assert!(done.exists(), "done file should exist");
|
2026-02-20 17:36:35 +00:00
|
|
|
}
|
|
|
|
|
}
|
2026-02-23 12:59:55 +00:00
|
|
|
|
2026-02-24 13:56:11 +00:00
|
|
|
// ── quality gate ordering test ────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Regression test for bug 142: quality gates must run BEFORE the fast-forward
|
|
|
|
|
/// to master so that broken code never lands on master.
|
|
|
|
|
///
|
|
|
|
|
/// Setup: a repo with a failing `script/test`, a feature branch with one commit.
|
|
|
|
|
/// When `run_squash_merge` is called, the gates must detect failure and abort the
|
|
|
|
|
/// fast-forward, leaving master HEAD unchanged.
|
|
|
|
|
/// Regression test for bug 142: quality gates must run BEFORE the fast-forward
/// to master so that broken code never lands on master.
///
/// Setup: a repo with a failing `script/test`, a feature branch with one commit.
/// When `run_squash_merge` is called, the gates must detect failure and abort the
/// fast-forward, leaving master HEAD unchanged.
#[cfg(unix)]
#[test]
fn quality_gates_run_before_fast_forward_to_master() {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Local helpers: run a git command in the repo / read the current HEAD sha.
    // Deduplicates the Command boilerplate the original repeated six times.
    let git = |args: &[&str]| {
        Command::new("git")
            .args(args)
            .current_dir(repo)
            .output()
            .unwrap()
    };
    let head = || -> String {
        String::from_utf8(git(&["rev-parse", "HEAD"]).stdout)
            .unwrap()
            .trim()
            .to_string()
    };

    // Add a failing script/test so quality gates will fail.
    let script_dir = repo.join("script");
    fs::create_dir_all(&script_dir).unwrap();
    let script_test = script_dir.join("test");
    fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap();
    fs::set_permissions(&script_test, fs::Permissions::from_mode(0o755)).unwrap();
    git(&["add", "."]);
    git(&["commit", "-m", "add failing script/test"]);

    // Create a feature branch with a commit.
    git(&["checkout", "-b", "feature/story-142_test"]);
    fs::write(repo.join("change.txt"), "feature change").unwrap();
    git(&["add", "."]);
    git(&["commit", "-m", "feature work"]);

    // Switch back to master and record its HEAD.
    git(&["checkout", "master"]);
    let head_before = head();

    // Run the squash-merge. The failing script/test makes quality gates
    // fail → fast-forward must NOT happen.
    let result = run_squash_merge(repo, "feature/story-142_test", "142_test").unwrap();

    let head_after = head();

    // Gates must have failed (script/test exits 1) so master should be untouched.
    assert!(
        !result.success,
        "run_squash_merge must report failure when gates fail"
    );
    assert_eq!(
        head_before, head_after,
        "master HEAD must not advance when quality gates fail (bug 142)"
    );
}
|
|
|
|
|
|
2026-02-23 12:59:55 +00:00
|
|
|
// ── run_project_tests tests ───────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// When an executable script/test exists and exits 0, the gate passes and the
/// output names the script.
#[cfg(unix)]
#[test]
fn run_project_tests_uses_script_test_when_present_and_passes() {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let root = tmp.path();
    let script_test = root.join("script").join("test");
    fs::create_dir_all(script_test.parent().unwrap()).unwrap();
    fs::write(&script_test, "#!/usr/bin/env bash\necho 'all tests passed'\nexit 0\n").unwrap();
    fs::set_permissions(&script_test, fs::Permissions::from_mode(0o755)).unwrap();

    let (passed, output) = run_project_tests(root).unwrap();
    assert!(passed, "script/test exiting 0 should pass");
    assert!(output.contains("script/test"), "output should mention script/test");
}
|
|
|
|
|
|
|
|
|
|
/// When script/test exits nonzero, the gate reports failure and still names
/// the script in the output.
#[cfg(unix)]
#[test]
fn run_project_tests_reports_failure_when_script_test_exits_nonzero() {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let root = tmp.path();
    let script_test = root.join("script").join("test");
    fs::create_dir_all(script_test.parent().unwrap()).unwrap();
    fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap();
    fs::set_permissions(&script_test, fs::Permissions::from_mode(0o755)).unwrap();

    let (passed, output) = run_project_tests(root).unwrap();
    assert!(!passed, "script/test exiting 1 should fail");
    assert!(output.contains("script/test"), "output should mention script/test");
}
|
2026-02-23 13:40:12 +00:00
|
|
|
|
|
|
|
|
// ── run_coverage_gate tests ───────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// The coverage gate is optional: no script/test_coverage means an automatic pass.
#[cfg(unix)]
#[test]
fn coverage_gate_passes_when_script_absent() {
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let (passed, output) = run_coverage_gate(tmp.path()).unwrap();

    assert!(passed, "coverage gate should pass when script is absent");
    assert!(
        output.contains("not found"),
        "output should mention script not found"
    );
}
|
|
|
|
|
|
|
|
|
|
/// An executable script/test_coverage exiting 0 makes the coverage gate pass.
#[cfg(unix)]
#[test]
fn coverage_gate_passes_when_script_exits_zero() {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let root = tmp.path();
    let script = root.join("script").join("test_coverage");
    fs::create_dir_all(script.parent().unwrap()).unwrap();
    fs::write(
        &script,
        "#!/usr/bin/env bash\necho 'Rust line coverage: 85%'\necho 'PASS: Coverage 85% meets threshold 0%'\nexit 0\n",
    )
    .unwrap();
    fs::set_permissions(&script, fs::Permissions::from_mode(0o755)).unwrap();

    let (passed, output) = run_coverage_gate(root).unwrap();
    assert!(passed, "coverage gate should pass when script exits 0");
    assert!(
        output.contains("script/test_coverage"),
        "output should mention script/test_coverage"
    );
}
|
|
|
|
|
|
|
|
|
|
#[cfg(unix)]
#[test]
fn coverage_gate_fails_when_script_exits_nonzero() {
    use std::fs;
    use std::os::unix::fs::PermissionsExt;

    // Install an executable script/test_coverage that reports failure.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let script = root.join("script").join("test_coverage");
    fs::create_dir_all(script.parent().unwrap()).unwrap();
    fs::write(
        &script,
        "#!/usr/bin/env bash\necho 'FAIL: Coverage 40% is below threshold 80%'\nexit 1\n",
    )
    .unwrap();
    let mut mode = fs::metadata(&script).unwrap().permissions();
    mode.set_mode(0o755);
    fs::set_permissions(&script, mode).unwrap();

    let (passed, output) = run_coverage_gate(root).unwrap();

    assert!(!passed, "coverage gate should fail when script exits 1");
    assert!(
        output.contains("script/test_coverage"),
        "output should mention script/test_coverage"
    );
}
|
2026-02-23 18:20:24 +00:00
|
|
|
|
|
|
|
|
// ── auto-assign helper tests ───────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[test]
fn scan_stage_items_returns_empty_for_missing_dir() {
    // A stage directory that was never created yields no items.
    let tmp = tempfile::tempdir().unwrap();
    assert!(scan_stage_items(tmp.path(), "2_current").is_empty());
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn scan_stage_items_returns_sorted_story_ids() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let stage = tmp.path().join(".story_kit").join("work").join("2_current");
    fs::create_dir_all(&stage).unwrap();

    // Three stories plus one non-markdown file that must be ignored.
    for (name, body) in [
        ("42_story_foo.md", "---\nname: foo\n---"),
        ("10_story_bar.md", "---\nname: bar\n---"),
        ("5_story_baz.md", "---\nname: baz\n---"),
        ("README.txt", "ignore me"),
    ] {
        fs::write(stage.join(name), body).unwrap();
    }

    // Ids come back lexicographically sorted with the .md extension stripped.
    assert_eq!(
        scan_stage_items(tmp.path(), "2_current"),
        vec!["10_story_bar", "42_story_foo", "5_story_baz"]
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn is_story_assigned_returns_true_for_running_coder() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::default();
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("42_story_foo", "coder-1", AgentStatus::Running);
    let agents = pool.agents.lock().unwrap();

    // A Running coder on the matching story and stage counts as assigned.
    assert!(is_story_assigned_for_stage(
        &cfg,
        &agents,
        "42_story_foo",
        &PipelineStage::Coder
    ));
    // The same story queried for a different stage does not.
    assert!(!is_story_assigned_for_stage(
        &cfg,
        &agents,
        "42_story_foo",
        &PipelineStage::Qa
    ));
    // Nor does a story the agent is not working on.
    assert!(!is_story_assigned_for_stage(
        &cfg,
        &agents,
        "99_story_other",
        &PipelineStage::Coder
    ));
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn is_story_assigned_returns_false_for_completed_agent() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::default();
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("42_story_foo", "coder-1", AgentStatus::Completed);
    let agents = pool.agents.lock().unwrap();

    // A Completed agent no longer occupies the story/stage slot.
    assert!(!is_story_assigned_for_stage(
        &cfg,
        &agents,
        "42_story_foo",
        &PipelineStage::Coder
    ));
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_free_agent_returns_none_when_all_busy() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "coder-1"
[[agent]]
name = "coder-2"
"#,
    )
    .unwrap();

    // Both configured coders are occupied by running stories.
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("s1", "coder-1", AgentStatus::Running);
    pool.inject_test_agent("s2", "coder-2", AgentStatus::Running);
    let agents = pool.agents.lock().unwrap();

    let free = find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Coder);
    assert!(free.is_none(), "no free coders should be available");
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_free_agent_returns_first_free_coder() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "coder-1"
[[agent]]
name = "coder-2"
[[agent]]
name = "coder-3"
"#,
    )
    .unwrap();

    // Only coder-1 is busy; coder-2 is the first free agent in config order.
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("s1", "coder-1", AgentStatus::Running);
    let agents = pool.agents.lock().unwrap();

    assert_eq!(
        find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Coder),
        Some("coder-2"),
        "coder-2 should be the first free coder"
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_free_agent_ignores_completed_agents() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "coder-1"
"#,
    )
    .unwrap();

    // A Completed run does not occupy the agent — it is free for new work.
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("s1", "coder-1", AgentStatus::Completed);
    let agents = pool.agents.lock().unwrap();

    assert_eq!(
        find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Coder),
        Some("coder-1"),
        "completed coder-1 should be free"
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_free_agent_returns_none_for_wrong_stage() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "qa"
"#,
    )
    .unwrap();
    let agents: HashMap<String, StoryAgent> = HashMap::new();

    // Only a QA agent is configured, so no coder can be found …
    assert!(find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Coder).is_none());
    // … but a QA lookup succeeds.
    assert_eq!(
        find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Qa),
        Some("qa")
    );
}
|
2026-02-23 20:38:17 +00:00
|
|
|
|
2026-02-24 15:50:34 +00:00
|
|
|
// ── agent_config_stage / stage field tests ────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_free_agent_uses_config_stage_field_not_name() {
    // Agents named "qa-2" and "coder-opus" don't match the legacy name
    // heuristic, so they must be resolved via their explicit stage field.
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "qa-2"
stage = "qa"

[[agent]]
name = "coder-opus"
stage = "coder"
"#,
    )
    .unwrap();
    let agents: HashMap<String, StoryAgent> = HashMap::new();

    assert_eq!(
        find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Qa),
        Some("qa-2"),
        "qa-2 with stage=qa should be found"
    );
    assert_eq!(
        find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Coder),
        Some("coder-opus"),
        "coder-opus with stage=coder should be found"
    );
    // Neither agent declares the mergemaster stage.
    assert!(find_free_agent_for_stage(&cfg, &agents, &PipelineStage::Mergemaster).is_none());
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn is_story_assigned_uses_config_stage_field_for_nonstandard_names() {
    use crate::config::ProjectConfig;

    let cfg = ProjectConfig::parse(
        r#"
[[agent]]
name = "qa-2"
stage = "qa"
"#,
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("42_story_foo", "qa-2", AgentStatus::Running);
    let agents = pool.agents.lock().unwrap();

    // The explicit stage field marks qa-2 as a QA agent …
    assert!(
        is_story_assigned_for_stage(&cfg, &agents, "42_story_foo", &PipelineStage::Qa),
        "qa-2 should be detected as assigned to QA stage"
    );
    // … and keeps it from being mistaken for a coder.
    assert!(
        !is_story_assigned_for_stage(&cfg, &agents, "42_story_foo", &PipelineStage::Coder),
        "qa-2 should not be detected as a coder"
    );
}
|
|
|
|
|
|
2026-02-23 20:38:17 +00:00
|
|
|
// ── find_active_story_stage tests ─────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_active_story_stage_detects_current() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let stage_dir = tmp.path().join(".story_kit/work/2_current");
    fs::create_dir_all(&stage_dir).unwrap();
    fs::write(stage_dir.join("10_story_test.md"), "test").unwrap();

    // The story file lives in 2_current/, so that's the active stage.
    assert_eq!(
        find_active_story_stage(tmp.path(), "10_story_test"),
        Some("2_current")
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_active_story_stage_detects_qa() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let stage_dir = tmp.path().join(".story_kit/work/3_qa");
    fs::create_dir_all(&stage_dir).unwrap();
    fs::write(stage_dir.join("11_story_test.md"), "test").unwrap();

    // The story file lives in 3_qa/, so that's the active stage.
    assert_eq!(
        find_active_story_stage(tmp.path(), "11_story_test"),
        Some("3_qa")
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_active_story_stage_detects_merge() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let stage_dir = tmp.path().join(".story_kit/work/4_merge");
    fs::create_dir_all(&stage_dir).unwrap();
    fs::write(stage_dir.join("12_story_test.md"), "test").unwrap();

    // The story file lives in 4_merge/, so that's the active stage.
    assert_eq!(
        find_active_story_stage(tmp.path(), "12_story_test"),
        Some("4_merge")
    );
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn find_active_story_stage_returns_none_for_unknown_story() {
    // No stage directory exists, so no story can be located.
    let tmp = tempfile::tempdir().unwrap();
    assert_eq!(find_active_story_stage(tmp.path(), "99_nonexistent"), None);
}
|
|
|
|
|
|
2026-02-23 20:46:51 +00:00
|
|
|
// ── start_agent single-instance concurrency tests ─────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Regression test for bug 97: the agent pool must reject a second concurrent
|
|
|
|
|
/// instance of the same agent name even if it would run on a different story.
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_rejects_when_same_agent_already_running_on_another_story() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Write a minimal project.toml so ProjectConfig::load can find the "qa" agent.
|
|
|
|
|
let sk_dir = root.join(".story_kit");
|
|
|
|
|
fs::create_dir_all(&sk_dir).unwrap();
|
|
|
|
|
fs::write(
|
|
|
|
|
sk_dir.join("project.toml"),
|
|
|
|
|
"[[agent]]\nname = \"qa\"\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 20:46:51 +00:00
|
|
|
// Simulate qa already running on story-a.
|
|
|
|
|
pool.inject_test_agent("story-a", "qa", AgentStatus::Running);
|
|
|
|
|
|
|
|
|
|
// Attempt to start qa on story-b — must be rejected.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(root, "story-b", Some("qa"), None)
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
|
result.is_err(),
|
|
|
|
|
"start_agent should fail when qa is already running on another story"
|
|
|
|
|
);
|
|
|
|
|
let err = result.unwrap_err();
|
|
|
|
|
assert!(
|
|
|
|
|
err.contains("already running") || err.contains("becomes available"),
|
|
|
|
|
"error message should explain why: got '{err}'"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Verify that the concurrency guard does NOT block an agent that is merely
|
|
|
|
|
/// Completed (not Running/Pending) — completed agents are free for new work.
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_allows_new_story_when_previous_run_is_completed() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
let sk_dir = root.join(".story_kit");
|
|
|
|
|
fs::create_dir_all(&sk_dir).unwrap();
|
|
|
|
|
fs::write(
|
|
|
|
|
sk_dir.join("project.toml"),
|
|
|
|
|
"[[agent]]\nname = \"qa\"\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 20:46:51 +00:00
|
|
|
// Previous run completed — should NOT block a new story.
|
|
|
|
|
pool.inject_test_agent("story-a", "qa", AgentStatus::Completed);
|
|
|
|
|
|
|
|
|
|
// The call will fail eventually (no real worktree / Claude CLI), but it must
|
|
|
|
|
// NOT fail at the concurrency check. We detect the difference by inspecting
|
|
|
|
|
// the error message: a concurrency rejection says "already running", while a
|
|
|
|
|
// later failure (missing story file, missing claude binary, etc.) says something else.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(root, "story-b", Some("qa"), None)
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
if let Err(ref e) = result {
|
|
|
|
|
assert!(
|
|
|
|
|
!e.contains("already running") && !e.contains("becomes available"),
|
|
|
|
|
"completed agent must not trigger the concurrency guard: got '{e}'"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
// result may be Ok (unlikely in test env) or Err for infra reasons — both fine.
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-23 20:38:17 +00:00
|
|
|
// ── worktree_has_committed_work tests ─────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[test]
fn worktree_has_committed_work_false_on_fresh_repo() {
    // init_git_repo creates the initial commit on the default branch, so
    // HEAD IS the base branch — zero commits ahead means no committed work.
    let tmp = tempfile::tempdir().unwrap();
    init_git_repo(tmp.path());
    assert!(!worktree_has_committed_work(tmp.path()));
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn worktree_has_committed_work_true_after_commit_on_feature_branch() {
    use std::fs;

    // Run a git command and assert it succeeded, so a broken setup fails
    // loudly here instead of surfacing as a confusing assertion later.
    // (Previously the exit status was ignored entirely.)
    fn git(dir: &Path, args: &[&str]) {
        let out = Command::new("git")
            .args(args)
            .current_dir(dir)
            .output()
            .unwrap();
        assert!(
            out.status.success(),
            "git {args:?} failed: {}",
            String::from_utf8_lossy(&out.stderr)
        );
    }

    let tmp = tempfile::tempdir().unwrap();
    let project_root = tmp.path().join("project");
    fs::create_dir_all(&project_root).unwrap();
    init_git_repo(&project_root);

    // Create a git worktree on a feature branch.
    let wt_path = tmp.path().join("wt");
    git(
        &project_root,
        &[
            "worktree",
            "add",
            &wt_path.to_string_lossy(),
            "-b",
            "feature/story-99_test",
        ],
    );

    // No commits on the feature branch yet — same as base branch.
    assert!(!worktree_has_committed_work(&wt_path));

    // Add a commit to the feature branch in the worktree (explicit identity
    // keeps the test hermetic w.r.t. the host's git config).
    fs::write(wt_path.join("work.txt"), "done").unwrap();
    git(&wt_path, &["add", "."]);
    git(
        &wt_path,
        &[
            "-c",
            "user.email=test@test.com",
            "-c",
            "user.name=Test",
            "commit",
            "-m",
            "coder: implement story",
        ],
    );

    // Now the feature branch is ahead of the base branch.
    assert!(worktree_has_committed_work(&wt_path));
}
|
|
|
|
|
|
|
|
|
|
// ── reconcile_on_startup tests ────────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn reconcile_on_startup_noop_when_no_worktrees() {
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 22:50:57 +00:00
|
|
|
let (tx, _rx) = broadcast::channel(16);
|
2026-02-23 20:38:17 +00:00
|
|
|
// Should not panic; no worktrees to reconcile.
|
2026-02-23 22:50:57 +00:00
|
|
|
pool.reconcile_on_startup(tmp.path(), &tx).await;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn reconcile_on_startup_emits_done_event() {
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 22:50:57 +00:00
|
|
|
let (tx, mut rx) = broadcast::channel::<ReconciliationEvent>(16);
|
|
|
|
|
pool.reconcile_on_startup(tmp.path(), &tx).await;
|
|
|
|
|
|
|
|
|
|
// Collect all events; the last must be "done".
|
|
|
|
|
let mut events: Vec<ReconciliationEvent> = Vec::new();
|
|
|
|
|
while let Ok(evt) = rx.try_recv() {
|
|
|
|
|
events.push(evt);
|
|
|
|
|
}
|
|
|
|
|
assert!(
|
|
|
|
|
events.iter().any(|e| e.status == "done"),
|
|
|
|
|
"reconcile_on_startup must emit a 'done' event; got: {:?}",
|
|
|
|
|
events.iter().map(|e| &e.status).collect::<Vec<_>>()
|
|
|
|
|
);
|
2026-02-23 20:38:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn reconcile_on_startup_skips_story_without_committed_work() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Set up story in 2_current/.
|
|
|
|
|
let current = root.join(".story_kit/work/2_current");
|
|
|
|
|
fs::create_dir_all(¤t).unwrap();
|
|
|
|
|
fs::write(current.join("60_story_test.md"), "test").unwrap();
|
|
|
|
|
|
|
|
|
|
// Create a worktree directory that is a fresh git repo with no commits
|
|
|
|
|
// ahead of its own base branch (simulates a worktree where no work was done).
|
|
|
|
|
let wt_dir = root.join(".story_kit/worktrees/60_story_test");
|
|
|
|
|
fs::create_dir_all(&wt_dir).unwrap();
|
|
|
|
|
init_git_repo(&wt_dir);
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 22:50:57 +00:00
|
|
|
let (tx, _rx) = broadcast::channel(16);
|
|
|
|
|
pool.reconcile_on_startup(root, &tx).await;
|
2026-02-23 20:38:17 +00:00
|
|
|
|
|
|
|
|
// Story should still be in 2_current/ — nothing was reconciled.
|
|
|
|
|
assert!(
|
|
|
|
|
current.join("60_story_test.md").exists(),
|
|
|
|
|
"story should stay in 2_current/ when worktree has no committed work"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn reconcile_on_startup_runs_gates_on_worktree_with_committed_work() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Set up a git repo for the project root.
|
|
|
|
|
init_git_repo(root);
|
|
|
|
|
|
|
|
|
|
// Set up story in 2_current/ and commit it so the project root is clean.
|
|
|
|
|
let current = root.join(".story_kit/work/2_current");
|
|
|
|
|
fs::create_dir_all(¤t).unwrap();
|
|
|
|
|
fs::write(current.join("61_story_test.md"), "test").unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["add", "."])
|
|
|
|
|
.current_dir(root)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args([
|
|
|
|
|
"-c",
|
|
|
|
|
"user.email=test@test.com",
|
|
|
|
|
"-c",
|
|
|
|
|
"user.name=Test",
|
|
|
|
|
"commit",
|
|
|
|
|
"-m",
|
|
|
|
|
"add story",
|
|
|
|
|
])
|
|
|
|
|
.current_dir(root)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Create a real git worktree for the story.
|
|
|
|
|
let wt_dir = root.join(".story_kit/worktrees/61_story_test");
|
|
|
|
|
fs::create_dir_all(wt_dir.parent().unwrap()).unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args([
|
|
|
|
|
"worktree",
|
|
|
|
|
"add",
|
|
|
|
|
&wt_dir.to_string_lossy(),
|
|
|
|
|
"-b",
|
|
|
|
|
"feature/story-61_story_test",
|
|
|
|
|
])
|
|
|
|
|
.current_dir(root)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Add a commit to the feature branch (simulates coder completing work).
|
|
|
|
|
fs::write(wt_dir.join("implementation.txt"), "done").unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["add", "."])
|
|
|
|
|
.current_dir(&wt_dir)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args([
|
|
|
|
|
"-c",
|
|
|
|
|
"user.email=test@test.com",
|
|
|
|
|
"-c",
|
|
|
|
|
"user.name=Test",
|
|
|
|
|
"commit",
|
|
|
|
|
"-m",
|
|
|
|
|
"implement story",
|
|
|
|
|
])
|
|
|
|
|
.current_dir(&wt_dir)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
assert!(
|
|
|
|
|
worktree_has_committed_work(&wt_dir),
|
|
|
|
|
"test setup: worktree should have committed work"
|
|
|
|
|
);
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-23 22:50:57 +00:00
|
|
|
let (tx, _rx) = broadcast::channel(16);
|
|
|
|
|
pool.reconcile_on_startup(root, &tx).await;
|
2026-02-23 20:38:17 +00:00
|
|
|
|
|
|
|
|
// In the test env, cargo clippy will fail (no Cargo.toml) so gates fail
|
|
|
|
|
// and the story stays in 2_current/. The important assertion is that
|
|
|
|
|
// reconcile ran without panicking and the story is in a consistent state.
|
|
|
|
|
let in_current = current.join("61_story_test.md").exists();
|
|
|
|
|
let in_qa = root
|
|
|
|
|
.join(".story_kit/work/3_qa/61_story_test.md")
|
|
|
|
|
.exists();
|
|
|
|
|
assert!(
|
|
|
|
|
in_current || in_qa,
|
|
|
|
|
"story should be in 2_current/ or 3_qa/ after reconciliation"
|
|
|
|
|
);
|
|
|
|
|
}
|
2026-02-23 20:52:06 +00:00
|
|
|
|
|
|
|
|
#[test]
fn test_emit_event_writes_to_log_writer() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();

    // Real on-disk log writer for the session under test.
    let writer = AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-emit").unwrap();
    let log_mutex = Mutex::new(writer);

    let (tx, _rx) = broadcast::channel::<AgentEvent>(64);
    let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

    emit_event(
        AgentEvent::Status {
            story_id: "42_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            status: "running".to_string(),
        },
        &tx,
        &event_log,
        Some(&log_mutex),
    );

    // The event lands in the in-memory log …
    assert_eq!(event_log.lock().unwrap().len(), 1);

    // … and is also persisted to the session's log file.
    let log_path =
        crate::agent_log::log_file_path(root, "42_story_foo", "coder-1", "sess-emit");
    let entries = crate::agent_log::read_log(&log_path).unwrap();
    assert_eq!(entries.len(), 1);
    assert_eq!(entries[0].event["type"], "status");
    assert_eq!(entries[0].event["status"], "running");
}
|
2026-02-23 22:53:23 +00:00
|
|
|
|
2026-02-24 19:35:06 +00:00
|
|
|
// ── bug 167: handle_agent_stream_event routes thinking/text correctly ───
|
|
|
|
|
|
|
|
|
|
#[test]
fn stream_event_thinking_delta_emits_thinking_event() {
    let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
    let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

    // Raw Claude stream chunk carrying a thinking delta.
    let chunk = serde_json::json!({
        "type": "content_block_delta",
        "delta": {"type": "thinking_delta", "thinking": "Let me analyze this..."}
    });
    handle_agent_stream_event(&chunk, "s1", "coder-1", &tx, &event_log, None);

    // It must surface as a Thinking event with the ids passed through.
    match rx.try_recv().unwrap() {
        AgentEvent::Thinking {
            story_id,
            agent_name,
            text,
        } => {
            assert_eq!(story_id, "s1");
            assert_eq!(agent_name, "coder-1");
            assert_eq!(text, "Let me analyze this...");
        }
        other => panic!("Expected Thinking event, got: {other:?}"),
    }
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn stream_event_text_delta_emits_output_event() {
    let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
    let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

    // Raw Claude stream chunk carrying a plain text delta.
    let chunk = serde_json::json!({
        "type": "content_block_delta",
        "delta": {"type": "text_delta", "text": "Here is the result."}
    });
    handle_agent_stream_event(&chunk, "s1", "coder-1", &tx, &event_log, None);

    // It must surface as an Output event with the ids passed through.
    match rx.try_recv().unwrap() {
        AgentEvent::Output {
            story_id,
            agent_name,
            text,
        } => {
            assert_eq!(story_id, "s1");
            assert_eq!(agent_name, "coder-1");
            assert_eq!(text, "Here is the result.");
        }
        other => panic!("Expected Output event, got: {other:?}"),
    }
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn stream_event_input_json_delta_ignored() {
    let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
    let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

    // Tool-argument deltas carry partial JSON, not user-visible text.
    let chunk = serde_json::json!({
        "type": "content_block_delta",
        "delta": {"type": "input_json_delta", "partial_json": "{\"file\":"}
    });
    handle_agent_stream_event(&chunk, "s1", "coder-1", &tx, &event_log, None);

    // Nothing should have been broadcast for tool argument deltas.
    assert!(rx.try_recv().is_err());
}
|
|
|
|
|
|
|
|
|
|
#[test]
fn stream_event_non_delta_type_ignored() {
    let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
    let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

    // Envelope events such as message_start carry no streamable content.
    let chunk = serde_json::json!({
        "type": "message_start",
        "message": {"role": "assistant"}
    });
    handle_agent_stream_event(&chunk, "s1", "coder-1", &tx, &event_log, None);

    assert!(rx.try_recv().is_err());
}
|
|
|
|
|
|
2026-02-23 22:53:23 +00:00
|
|
|
// ── bug 118: pending entry cleanup on start_agent failure ────────────────
|
|
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
/// Regression test for bug 118: when worktree creation fails (e.g. because
|
|
|
|
|
/// there is no git repo), the Pending entry that was inserted into the agent
|
|
|
|
|
/// HashMap must not remain Pending — it must transition to Failed. This
|
|
|
|
|
/// prevents `find_free_agent_for_stage` / auto-assign from being permanently
|
|
|
|
|
/// blocked.
|
|
|
|
|
///
|
|
|
|
|
/// With story 157 the worktree creation moved into the background spawn, so
|
|
|
|
|
/// `start_agent` returns `Ok(Pending)` immediately. We use `wait_for_agent`
|
|
|
|
|
/// to block until the background task resolves.
|
2026-02-23 22:53:23 +00:00
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_cleans_up_pending_entry_on_failure() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
// Minimal project.toml with a "qa" agent.
|
|
|
|
|
let sk_dir = root.join(".story_kit");
|
|
|
|
|
fs::create_dir_all(&sk_dir).unwrap();
|
|
|
|
|
fs::write(
|
|
|
|
|
sk_dir.join("project.toml"),
|
|
|
|
|
"[[agent]]\nname = \"qa\"\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Create the story in upcoming so `move_story_to_current` succeeds,
|
2026-02-24 16:50:56 +00:00
|
|
|
// but do NOT init a git repo — `create_worktree` will fail in the spawn.
|
2026-02-23 22:53:23 +00:00
|
|
|
let upcoming = root.join(".story_kit/work/1_upcoming");
|
|
|
|
|
fs::create_dir_all(&upcoming).unwrap();
|
|
|
|
|
fs::write(
|
|
|
|
|
upcoming.join("50_story_test.md"),
|
|
|
|
|
"---\nname: Test\n---\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3099);
|
2026-02-23 22:53:23 +00:00
|
|
|
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(root, "50_story_test", Some("qa"), None)
|
|
|
|
|
.await;
|
|
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// With the non-blocking flow, start_agent returns Ok(Pending) immediately.
|
|
|
|
|
// Worktree creation failure happens asynchronously in the background.
|
|
|
|
|
assert!(
|
|
|
|
|
result.is_ok(),
|
|
|
|
|
"start_agent should return Ok(Pending) immediately: {:?}",
|
|
|
|
|
result.err()
|
|
|
|
|
);
|
|
|
|
|
assert_eq!(
|
|
|
|
|
result.unwrap().status,
|
|
|
|
|
AgentStatus::Pending,
|
|
|
|
|
"initial status must be Pending"
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
// Wait for the background task to reach a terminal state.
|
|
|
|
|
// It must fail (no git repo → create_worktree returns an error).
|
|
|
|
|
let final_info = pool
|
|
|
|
|
.wait_for_agent("50_story_test", "qa", 5000)
|
|
|
|
|
.await
|
|
|
|
|
.expect("wait_for_agent should not time out");
|
|
|
|
|
assert_eq!(
|
|
|
|
|
final_info.status,
|
|
|
|
|
AgentStatus::Failed,
|
|
|
|
|
"agent must transition to Failed after worktree creation error"
|
|
|
|
|
);
|
2026-02-23 22:53:23 +00:00
|
|
|
|
2026-02-24 16:50:56 +00:00
|
|
|
// The pool must NOT retain a Pending or Running entry for this agent.
|
2026-02-23 22:53:23 +00:00
|
|
|
let agents = pool.agents.lock().unwrap();
|
|
|
|
|
let leaked = agents.values().any(|a| {
|
|
|
|
|
a.agent_name == "qa"
|
|
|
|
|
&& matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
|
|
|
|
|
});
|
|
|
|
|
assert!(
|
|
|
|
|
!leaked,
|
2026-02-24 16:50:56 +00:00
|
|
|
"agent pool must not retain a Pending/Running entry after worktree creation fails"
|
2026-02-23 22:53:23 +00:00
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Verify that a successful start_agent keeps the Running entry (guard is
|
|
|
|
|
/// disarmed). We cannot truly spawn an agent in tests, but we verify that
|
|
|
|
|
/// the concurrency check still blocks a second concurrent start — which
|
|
|
|
|
/// proves the first entry survived the guard.
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_guard_does_not_remove_running_entry() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
let sk_dir = root.join(".story_kit");
|
|
|
|
|
fs::create_dir_all(&sk_dir).unwrap();
|
|
|
|
|
fs::write(
|
|
|
|
|
sk_dir.join("project.toml"),
|
|
|
|
|
"[[agent]]\nname = \"qa\"\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3099);
|
2026-02-23 22:53:23 +00:00
|
|
|
|
|
|
|
|
// Manually inject a Running agent (simulates successful start).
|
|
|
|
|
pool.inject_test_agent("story-x", "qa", AgentStatus::Running);
|
|
|
|
|
|
|
|
|
|
// Attempting to start the same agent on a different story must be
|
|
|
|
|
// rejected — the Running entry must still be there.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(root, "story-y", Some("qa"), None)
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
let err = result.unwrap_err();
|
|
|
|
|
assert!(
|
|
|
|
|
err.contains("already running") || err.contains("becomes available"),
|
|
|
|
|
"running entry must survive: got '{err}'"
|
|
|
|
|
);
|
|
|
|
|
}
|
2026-02-23 23:22:24 +00:00
|
|
|
|
2026-02-24 12:49:29 +00:00
|
|
|
// ── TOCTOU race-condition regression tests (story 132) ───────────────────
|
|
|
|
|
|
|
|
|
|
/// Verify that a Pending entry (not just Running) blocks a concurrent
|
|
|
|
|
/// start_agent for the same agent name on a different story. This proves
|
|
|
|
|
/// the check-and-insert is atomic: the Pending entry is visible to the
|
|
|
|
|
/// second caller because it was inserted while the lock was still held.
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn toctou_pending_entry_blocks_same_agent_on_different_story() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let root = tmp.path();
|
|
|
|
|
|
|
|
|
|
let sk_dir = root.join(".story_kit");
|
|
|
|
|
fs::create_dir_all(&sk_dir).unwrap();
|
|
|
|
|
fs::write(sk_dir.join("project.toml"), "[[agent]]\nname = \"coder-1\"\n").unwrap();
|
|
|
|
|
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3099);
|
2026-02-24 12:49:29 +00:00
|
|
|
|
|
|
|
|
// Simulate what the winning concurrent call would have done: insert a
|
|
|
|
|
// Pending entry for coder-1 on story-86.
|
|
|
|
|
pool.inject_test_agent("86_story_foo", "coder-1", AgentStatus::Pending);
|
|
|
|
|
|
|
|
|
|
// Now attempt to start coder-1 on a *different* story — must be rejected.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(root, "130_story_bar", Some("coder-1"), None)
|
|
|
|
|
.await;
|
|
|
|
|
|
|
|
|
|
assert!(result.is_err(), "second start_agent must be rejected");
|
|
|
|
|
let err = result.unwrap_err();
|
|
|
|
|
assert!(
|
|
|
|
|
err.contains("already running") || err.contains("becomes available"),
|
|
|
|
|
"expected concurrency-rejection message, got: '{err}'"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Concurrent start_agent calls for the same agent name on different stories
/// must result in exactly one rejection due to the concurrency check (not
/// due to an unrelated failure such as missing git repo).
///
/// Uses a real multi-threaded runtime so the two spawned tasks can race
/// the atomic check-and-insert for the same agent name.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn toctou_concurrent_start_agent_same_agent_exactly_one_concurrency_rejection() {
    use std::fs;
    use std::sync::Arc;

    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path().to_path_buf();

    let sk_dir = root.join(".story_kit");
    fs::create_dir_all(sk_dir.join("work/1_upcoming")).unwrap();
    fs::write(
        root.join(".story_kit/project.toml"),
        "[[agent]]\nname = \"coder-1\"\n",
    )
    .unwrap();
    // Both stories must exist in upcoming so move_story_to_current can run
    // (only the winner reaches that point, but we set both up defensively).
    fs::write(
        root.join(".story_kit/work/1_upcoming/86_story_foo.md"),
        "---\nname: Foo\n---\n",
    )
    .unwrap();
    fs::write(
        root.join(".story_kit/work/1_upcoming/130_story_bar.md"),
        "---\nname: Bar\n---\n",
    )
    .unwrap();

    let pool = Arc::new(AgentPool::new_test(3099));

    // Race two start_agent calls for the same agent on different stories.
    let pool1 = pool.clone();
    let root1 = root.clone();
    let t1 = tokio::spawn(async move {
        pool1
            .start_agent(&root1, "86_story_foo", Some("coder-1"), None)
            .await
    });

    let pool2 = pool.clone();
    let root2 = root.clone();
    let t2 = tokio::spawn(async move {
        pool2
            .start_agent(&root2, "130_story_bar", Some("coder-1"), None)
            .await
    });

    let (r1, r2) = tokio::join!(t1, t2);
    let r1 = r1.unwrap();
    let r2 = r2.unwrap();

    // The concurrency-rejection message always contains "already running" /
    // "becomes available". Any other error (e.g., missing git repo) means
    // that call *won* the atomic check-and-insert.
    let concurrency_rejections = [&r1, &r2]
        .iter()
        .filter(|r| {
            r.as_ref().is_err_and(|e| {
                e.contains("already running") || e.contains("becomes available")
            })
        })
        .count();

    assert_eq!(
        concurrency_rejections, 1,
        "exactly one call must be rejected by the concurrency check; \
        got r1={r1:?} r2={r2:?}"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Two concurrent auto_assign_available_work calls must not assign the same
/// agent to two stories simultaneously. After both complete, at most one
/// Pending/Running entry must exist per agent name.
#[tokio::test(flavor = "multi_thread", worker_threads = 2)]
async fn toctou_concurrent_auto_assign_no_duplicate_agent_assignments() {
    use std::fs;
    use std::sync::Arc;

    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path().to_path_buf();

    let sk_dir = root.join(".story_kit");
    // Two stories waiting in 2_current, one coder agent — so both racing
    // callers will try to hand the single agent a story.
    fs::create_dir_all(sk_dir.join("work/2_current")).unwrap();
    fs::write(
        sk_dir.join("project.toml"),
        "[[agent]]\nname = \"coder-1\"\n",
    )
    .unwrap();
    fs::write(
        sk_dir.join("work/2_current/86_story_foo.md"),
        "---\nname: Foo\n---\n",
    )
    .unwrap();
    fs::write(
        sk_dir.join("work/2_current/130_story_bar.md"),
        "---\nname: Bar\n---\n",
    )
    .unwrap();

    let pool = Arc::new(AgentPool::new_test(3099));

    // Run two concurrent auto_assign calls.
    let pool1 = pool.clone();
    let root1 = root.clone();
    let t1 = tokio::spawn(async move { pool1.auto_assign_available_work(&root1).await });

    let pool2 = pool.clone();
    let root2 = root.clone();
    let t2 = tokio::spawn(async move { pool2.auto_assign_available_work(&root2).await });

    let _ = tokio::join!(t1, t2);

    // At most one Pending/Running entry should exist for coder-1.
    let agents = pool.agents.lock().unwrap();
    let active_coder_count = agents
        .values()
        .filter(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        })
        .count();

    assert!(
        active_coder_count <= 1,
        "coder-1 must not be assigned to more than one story simultaneously; \
        found {active_coder_count} active entries"
    );
}
|
|
|
|
|
|
2026-02-23 23:22:24 +00:00
|
|
|
// ── resolve_simple_conflicts unit tests ──────────────────────────────────

/// Input without any conflict markers must pass through unchanged.
#[test]
fn resolve_simple_conflicts_no_markers() {
    let clean = "line 1\nline 2\nline 3\n";
    assert_eq!(resolve_simple_conflicts(clean), Some(clean.to_string()));
}
|
|
|
|
|
|
|
|
|
|
/// An "additive" conflict — both sides only append lines — must be
/// auto-resolved by keeping both sides, with ours before theirs.
///
/// Fix (review): in the checked-in text the conflict-marker lines inside the
/// input literal were missing, so `resolve_simple_conflicts` received
/// marker-free input, returned it unchanged, and every assertion below
/// passed vacuously. The markers are restored so the additive-resolution
/// path is actually exercised. Verify the exact marker text (branch name on
/// the ">>>>>>>" line) against version control if it matters.
#[test]
fn resolve_simple_conflicts_additive() {
    let input = "\
before
<<<<<<< HEAD
ours line 1
ours line 2
=======
theirs line 1
theirs line 2
>>>>>>> feature
after
";
    let result = resolve_simple_conflicts(input).unwrap();
    assert!(
        !result.contains("<<<<<<<"),
        "should not contain conflict markers"
    );
    assert!(
        !result.contains(">>>>>>>"),
        "should not contain conflict markers"
    );
    assert!(result.contains("ours line 1"));
    assert!(result.contains("ours line 2"));
    assert!(result.contains("theirs line 1"));
    assert!(result.contains("theirs line 2"));
    assert!(result.contains("before"));
    assert!(result.contains("after"));
    // Ours comes before theirs in the resolved output.
    let ours_pos = result.find("ours line 1").unwrap();
    let theirs_pos = result.find("theirs line 1").unwrap();
    assert!(
        ours_pos < theirs_pos,
        "ours should come before theirs"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Multiple independent conflict blocks in one file must all be resolved,
/// keeping both sides of each block and all surrounding context lines.
///
/// Fix (review): as with the additive test, the conflict-marker lines were
/// missing from the checked-in input literal, making the test vacuous
/// (marker-free input is returned unchanged). Markers restored for both
/// blocks so multi-block resolution is genuinely tested.
#[test]
fn resolve_simple_conflicts_multiple_blocks() {
    let input = "\
header
<<<<<<< HEAD
ours block 1
=======
theirs block 1
>>>>>>> feature
middle
<<<<<<< HEAD
ours block 2
=======
theirs block 2
>>>>>>> feature
footer
";
    let result = resolve_simple_conflicts(input).unwrap();
    assert!(!result.contains("<<<<<<<"));
    assert!(result.contains("ours block 1"));
    assert!(result.contains("theirs block 1"));
    assert!(result.contains("ours block 2"));
    assert!(result.contains("theirs block 2"));
    assert!(result.contains("header"));
    assert!(result.contains("middle"));
    assert!(result.contains("footer"));
}
|
|
|
|
|
|
|
|
|
|
/// A conflict block with start and end markers but no "=======" separator
/// is malformed and must not be auto-resolved.
#[test]
fn resolve_simple_conflicts_malformed_no_separator() {
    let input = "<<<<<<< HEAD\nours\n>>>>>>> feature\n";
    assert!(
        resolve_simple_conflicts(input).is_none(),
        "malformed conflict (no separator) should return None"
    );
}
|
|
|
|
|
|
|
|
|
|
/// A conflict block whose ">>>>>>>" end marker never appears is malformed
/// and must not be auto-resolved.
#[test]
fn resolve_simple_conflicts_malformed_no_end() {
    let input = "<<<<<<< HEAD\nours\n=======\ntheirs\n";
    assert!(
        resolve_simple_conflicts(input).is_none(),
        "malformed conflict (no end marker) should return None"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Resolution must preserve the input's lack of a trailing newline.
#[test]
fn resolve_simple_conflicts_preserves_no_trailing_newline() {
    let input = "before\n<<<<<<< HEAD\nours\n=======\ntheirs\n>>>>>>> branch\nafter";
    let resolved = resolve_simple_conflicts(input).unwrap();
    // The input ends on "after" without a newline; the output must too.
    assert!(
        !resolved.ends_with('\n'),
        "should not add trailing newline if original lacks one"
    );
    assert!(resolved.ends_with("after"));
}
|
|
|
|
|
|
|
|
|
|
// ── merge-queue squash-merge integration tests ──────────────────────────

/// End-to-end check of the merge-queue flow when feature and master have a
/// genuine textual conflict: master must never end up with conflict markers,
/// the conflict must be reported, and all temporary state (merge-queue
/// branch, merge workspace) must be cleaned up afterwards.
#[tokio::test]
async fn squash_merge_uses_merge_queue_no_conflict_markers_on_master() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create a file that will be conflicted on master.
    fs::write(repo.join("shared.txt"), "line 1\nline 2\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "initial shared file"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Create a feature branch that modifies the file.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-conflict_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("shared.txt"), "line 1\nline 2\nfeature addition\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature: add line"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Switch to master and make a conflicting change.
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("shared.txt"), "line 1\nline 2\nmaster addition\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "master: add line"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Run the squash merge.
    let result = run_squash_merge(repo, "feature/story-conflict_test", "conflict_test")
        .unwrap();

    // Master should NEVER contain conflict markers, regardless of outcome.
    let master_content = fs::read_to_string(repo.join("shared.txt")).unwrap();
    assert!(
        !master_content.contains("<<<<<<<"),
        "master must never contain conflict markers, got:\n{master_content}"
    );
    assert!(
        !master_content.contains(">>>>>>>"),
        "master must never contain conflict markers, got:\n{master_content}"
    );

    // The merge should have had conflicts.
    assert!(result.had_conflicts, "should detect conflicts");

    // Conflicts should have been auto-resolved (both are simple additions).
    // Only assert the merged content when the resolver reports success —
    // the marker-free invariant above must hold either way.
    if result.conflicts_resolved {
        assert!(result.success, "auto-resolved merge should succeed");
        assert!(
            master_content.contains("master addition"),
            "master side should be present"
        );
        assert!(
            master_content.contains("feature addition"),
            "feature side should be present"
        );
    }

    // Verify no leftover merge-queue branch.
    let branches = Command::new("git")
        .args(["branch", "--list", "merge-queue/*"])
        .current_dir(repo)
        .output()
        .unwrap();
    let branch_list = String::from_utf8_lossy(&branches.stdout);
    assert!(
        branch_list.trim().is_empty(),
        "merge-queue branch should be cleaned up, got: {branch_list}"
    );

    // Verify no leftover merge workspace directory.
    assert!(
        !repo.join(".story_kit/merge_workspace").exists(),
        "merge workspace should be cleaned up"
    );
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn squash_merge_clean_merge_succeeds() {
|
|
|
|
|
use std::fs;
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
|
|
|
|
|
// Create feature branch with a new file.
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["checkout", "-b", "feature/story-clean_test"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
fs::write(repo.join("new_file.txt"), "new content").unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["add", "."])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["commit", "-m", "add new file"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
// Switch back to master.
|
|
|
|
|
Command::new("git")
|
|
|
|
|
.args(["checkout", "master"])
|
|
|
|
|
.current_dir(repo)
|
|
|
|
|
.output()
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
let result = run_squash_merge(repo, "feature/story-clean_test", "clean_test")
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
assert!(result.success, "clean merge should succeed");
|
|
|
|
|
assert!(!result.had_conflicts, "clean merge should have no conflicts");
|
|
|
|
|
assert!(!result.conflicts_resolved, "no conflicts means nothing to resolve");
|
|
|
|
|
assert!(
|
|
|
|
|
repo.join("new_file.txt").exists(),
|
|
|
|
|
"merged file should exist on master"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn squash_merge_nonexistent_branch_fails() {
|
|
|
|
|
use tempfile::tempdir;
|
|
|
|
|
|
|
|
|
|
let tmp = tempdir().unwrap();
|
|
|
|
|
let repo = tmp.path();
|
|
|
|
|
init_git_repo(repo);
|
|
|
|
|
|
|
|
|
|
let result = run_squash_merge(repo, "feature/story-nope", "nope")
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
assert!(!result.success, "merge of nonexistent branch should fail");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Conflicting agent work routed through the full merge_agent_work flow
/// must never leave conflict markers on master, and the returned report
/// must reflect that conflicts occurred.
#[tokio::test]
async fn merge_agent_work_conflict_does_not_break_master() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create a file on master.
    fs::write(repo.join("code.rs"), "fn main() {\n println!(\"hello\");\n}\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "initial code"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Feature branch: modify the same line differently.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-42_story_foo"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("code.rs"), "fn main() {\n println!(\"hello\");\n feature_fn();\n}\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature: add fn call"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Master: add different line at same location.
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("code.rs"), "fn main() {\n println!(\"hello\");\n master_fn();\n}\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "master: add fn call"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Create story file in 4_merge so merge_agent_work can find the story.
    let merge_dir = repo.join(".story_kit/work/4_merge");
    fs::create_dir_all(&merge_dir).unwrap();
    fs::write(merge_dir.join("42_story_foo.md"), "---\nname: Test\n---\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "add story"])
        .current_dir(repo)
        .output()
        .unwrap();

    let pool = AgentPool::new_test(3001);
    let report = pool.merge_agent_work(repo, "42_story_foo").await.unwrap();

    // Master should NEVER have conflict markers, regardless of merge outcome.
    let master_code = fs::read_to_string(repo.join("code.rs")).unwrap();
    assert!(
        !master_code.contains("<<<<<<<"),
        "master must never contain conflict markers:\n{master_code}"
    );
    assert!(
        !master_code.contains(">>>>>>>"),
        "master must never contain conflict markers:\n{master_code}"
    );

    // The report should accurately reflect what happened.
    assert!(report.had_conflicts, "should report conflicts");
}
|
2026-02-24 13:13:16 +00:00
|
|
|
|
2026-02-26 14:16:35 +00:00
|
|
|
/// Verifies that `run_squash_merge` succeeds even when master has advanced
/// with unrelated commits after the merge-queue branch was created (the race
/// condition that previously caused fast-forward to fail).
#[tokio::test]
async fn squash_merge_succeeds_when_master_diverges() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create an initial file on master.
    fs::write(repo.join("base.txt"), "base content\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "initial"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Create a feature branch with a new file (clean merge, no conflicts).
    Command::new("git")
        .args(["checkout", "-b", "feature/story-diverge_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("feature.txt"), "feature content\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature: add file"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Switch back to master and simulate a filesystem watcher commit
    // (e.g. a pipeline file move) that advances master beyond the point
    // where the merge-queue branch will be created.
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();
    let sk_dir = repo.join(".story_kit/work/4_merge");
    fs::create_dir_all(&sk_dir).unwrap();
    fs::write(
        sk_dir.join("diverge_test.md"),
        "---\nname: test\n---\n",
    )
    .unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "story-kit: queue diverge_test for merge"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Run the squash merge. With the old fast-forward approach, this
    // would fail because master diverged. With cherry-pick, it succeeds.
    let result =
        run_squash_merge(repo, "feature/story-diverge_test", "diverge_test").unwrap();

    assert!(
        result.success,
        "squash merge should succeed despite diverged master: {}",
        result.output
    );
    assert!(
        !result.had_conflicts,
        "no conflicts expected"
    );

    // Verify the feature file landed on master.
    assert!(
        repo.join("feature.txt").exists(),
        "feature file should be on master after cherry-pick"
    );
    let feature_content = fs::read_to_string(repo.join("feature.txt")).unwrap();
    assert_eq!(feature_content, "feature content\n");

    // Verify the watcher commit's file is still present (the diverged
    // master commit must not be lost by the merge).
    assert!(
        sk_dir.join("diverge_test.md").exists(),
        "watcher-committed file should still be on master"
    );

    // Verify cleanup: no merge-queue branch, no merge workspace.
    let branches = Command::new("git")
        .args(["branch", "--list", "merge-queue/*"])
        .current_dir(repo)
        .output()
        .unwrap();
    let branch_list = String::from_utf8_lossy(&branches.stdout);
    assert!(
        branch_list.trim().is_empty(),
        "merge-queue branch should be cleaned up, got: {branch_list}"
    );
    assert!(
        !repo.join(".story_kit/merge_workspace").exists(),
        "merge workspace should be cleaned up"
    );
}
|
|
|
|
|
|
2026-02-27 10:37:27 +00:00
|
|
|
/// Bug 226: Verifies that `run_squash_merge` returns `success: false` when
/// the feature branch has no changes beyond what's already on master
/// (empty diff).
#[tokio::test]
async fn squash_merge_empty_diff_fails() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create a file on master.
    fs::write(repo.join("code.txt"), "content\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "add code"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Create a feature branch with NO additional changes (empty diff):
    // branch off, then immediately return to master without committing.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-empty_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();

    let result =
        run_squash_merge(repo, "feature/story-empty_test", "empty_test").unwrap();

    // Bug 226: empty diff must NOT be treated as success.
    assert!(
        !result.success,
        "empty diff merge must fail, not silently succeed: {}",
        result.output
    );

    // Cleanup should still happen even on the failure path.
    assert!(
        !repo.join(".story_kit/merge_workspace").exists(),
        "merge workspace should be cleaned up"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Bug 226: Verifies that `run_squash_merge` fails when the feature branch
/// only contains .story_kit/ file moves with no real code changes.
#[tokio::test]
async fn squash_merge_md_only_changes_fails() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create a feature branch that only moves a .story_kit/ file.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-md_only_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    let sk_dir = repo.join(".story_kit/work/2_current");
    fs::create_dir_all(&sk_dir).unwrap();
    fs::write(
        sk_dir.join("md_only_test.md"),
        "---\nname: Test\n---\n",
    )
    .unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "move story file"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();

    let result =
        run_squash_merge(repo, "feature/story-md_only_test", "md_only_test").unwrap();

    // The squash merge will commit the .story_kit/ file, but should fail because
    // there are no code changes outside .story_kit/.
    assert!(
        !result.success,
        "merge with only .story_kit/ changes must fail: {}",
        result.output
    );

    // Cleanup should still happen even on the failure path.
    assert!(
        !repo.join(".story_kit/merge_workspace").exists(),
        "merge workspace should be cleaned up"
    );
}
|
|
|
|
|
|
2026-02-27 10:37:27 +00:00
|
|
|
/// Bug 226: when a feature branch carries commits that master lacks,
/// feature_branch_has_unmerged_changes must report true.
#[test]
fn feature_branch_has_unmerged_changes_detects_unmerged_code() {
    use std::fs;
    use tempfile::tempdir;

    let workdir = tempdir().unwrap();
    let repo = workdir.path();
    init_git_repo(repo);

    // Tiny helper to run a git command inside the test repository.
    let git = |args: &[&str]| {
        Command::new("git")
            .args(args)
            .current_dir(repo)
            .output()
            .unwrap();
    };

    // Commit a code file on a feature branch, then return to master.
    git(&["checkout", "-b", "feature/story-50_story_test"]);
    fs::write(repo.join("feature.rs"), "fn main() {}").unwrap();
    git(&["add", "."]);
    git(&["commit", "-m", "add feature"]);
    git(&["checkout", "master"]);

    assert!(
        feature_branch_has_unmerged_changes(repo, "50_story_test"),
        "should detect unmerged changes on feature branch"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Bug 226: with no feature branch present at all,
/// feature_branch_has_unmerged_changes must report false.
#[test]
fn feature_branch_has_unmerged_changes_false_when_no_branch() {
    use tempfile::tempdir;

    let workdir = tempdir().unwrap();
    let repo = workdir.path();
    init_git_repo(repo);

    // Fresh repo — the feature branch for story 99 was never created.
    let has_changes = feature_branch_has_unmerged_changes(repo, "99_nonexistent");
    assert!(!has_changes, "should return false when no feature branch");
}
|
|
|
|
|
|
2026-02-26 14:16:35 +00:00
|
|
|
/// Verifies that stale merge_workspace directories from previous failed
/// merges are cleaned up before a new merge attempt, so leftovers cannot
/// block or corrupt a subsequent merge.
#[tokio::test]
async fn squash_merge_cleans_up_stale_workspace() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Create a feature branch with a file.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-stale_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("stale.txt"), "content\n").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature: stale test"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Simulate a stale merge workspace left from a previous failed merge.
    let stale_ws = repo.join(".story_kit/merge_workspace");
    fs::create_dir_all(&stale_ws).unwrap();
    fs::write(stale_ws.join("leftover.txt"), "stale").unwrap();

    // Run the merge — it should clean up the stale workspace first.
    let result =
        run_squash_merge(repo, "feature/story-stale_test", "stale_test").unwrap();

    assert!(
        result.success,
        "merge should succeed after cleaning up stale workspace: {}",
        result.output
    );
    assert!(
        !stale_ws.exists(),
        "stale merge workspace should be cleaned up"
    );
}
|
|
|
|
|
|
2026-02-24 13:13:16 +00:00
|
|
|
// ── process health monitoring tests ──────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Demonstrates that the PTY read-loop inactivity timeout fires when no output
/// is produced by the agent process within the configured window.
///
/// A `HangingReader` simulates a hung agent process that never writes to the
/// PTY master. The test verifies that `recv_timeout` fires with a `Timeout`
/// error — the signal that causes `run_agent_pty_blocking` to kill the child
/// and return `Err("Agent inactivity timeout: …")`, which the error handler
/// in `start_agent` converts into `AgentStatus::Failed`.
#[test]
fn pty_inactivity_timeout_kills_hung_agent() {
    // A reader that blocks far longer than any deadline used here, standing
    // in for a hung agent that produces no PTY output at all.
    struct HangingReader;
    impl std::io::Read for HangingReader {
        fn read(&mut self, _buf: &mut [u8]) -> std::io::Result<usize> {
            std::thread::sleep(std::time::Duration::from_secs(300));
            Ok(0)
        }
    }

    // Mirror the production read-loop: a reader thread forwards each line to
    // a consumer that polls the channel with a deadline.
    let (line_tx, line_rx) = std::sync::mpsc::channel::<std::io::Result<String>>();

    std::thread::spawn(move || {
        let reader = BufReader::new(HangingReader);
        for next_line in reader.lines() {
            if line_tx.send(next_line).is_err() {
                break;
            }
        }
    });

    // The consumer gives up after 100 ms of silence.
    let deadline = std::time::Duration::from_millis(100);
    let outcome = line_rx.recv_timeout(deadline);

    assert!(
        matches!(outcome, Err(std::sync::mpsc::RecvTimeoutError::Timeout)),
        "recv_timeout must fire when no PTY output arrives within the deadline"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Demonstrates that the background watchdog detects Running agents whose
/// backing tokio task has already finished (orphaned entries) and marks them
/// as Failed, emitting an Error event so that `wait_for_agent` unblocks.
#[tokio::test]
async fn watchdog_detects_orphaned_running_agent() {
    let pool = AgentPool::new_test(3001);

    // A task that completes immediately: its JoinHandle reports finished,
    // which is exactly the "orphaned" condition the watchdog looks for.
    let handle = tokio::spawn(async {});
    tokio::time::sleep(std::time::Duration::from_millis(20)).await;
    assert!(handle.is_finished(), "task should be finished before injection");

    // Register a Running agent backed by the already-finished handle.
    let tx =
        pool.inject_test_agent_with_handle("orphan_story", "coder", AgentStatus::Running, handle);
    // Subscribe before the watchdog runs so the emitted event is captured.
    let mut rx = tx.subscribe();

    pool.run_watchdog_once();

    // Scope the lock so it is released before polling the event channel.
    {
        let agents = pool.agents.lock().unwrap();
        let key = composite_key("orphan_story", "coder");
        let agent = agents.get(&key).unwrap();
        assert_eq!(
            agent.status,
            AgentStatus::Failed,
            "watchdog must mark an orphaned Running agent as Failed"
        );
    }

    let event = rx.try_recv().expect("watchdog must emit an Error event");
    assert!(
        matches!(event, AgentEvent::Error { .. }),
        "expected AgentEvent::Error, got: {event:?}"
    );
}
|
|
|
|
|
|
2026-02-24 13:20:59 +00:00
|
|
|
// ── remove_agents_for_story tests ────────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// All entries keyed to a story — across every agent and status — are
/// removed in one call, without touching other stories' entries.
#[test]
fn remove_agents_for_story_removes_all_entries() {
    let pool = AgentPool::new_test(3001);
    // Two agents on story_a, one on story_b, in assorted states.
    for (story, agent, status) in [
        ("story_a", "coder-1", AgentStatus::Completed),
        ("story_a", "qa", AgentStatus::Failed),
        ("story_b", "coder-1", AgentStatus::Running),
    ] {
        pool.inject_test_agent(story, agent, status);
    }

    let removed = pool.remove_agents_for_story("story_a");
    assert_eq!(removed, 2, "should remove both agents for story_a");

    let remaining = pool.list_agents().unwrap();
    assert_eq!(remaining.len(), 1, "only story_b agent should remain");
    assert_eq!(remaining[0].story_id, "story_b");
}
|
|
|
|
|
|
|
|
|
|
/// Removing a story with no registered agents is a no-op that returns 0
/// and leaves unrelated entries untouched.
#[test]
fn remove_agents_for_story_returns_zero_when_no_match() {
    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("story_a", "coder-1", AgentStatus::Running);

    let removed = pool.remove_agents_for_story("nonexistent");
    assert_eq!(removed, 0);

    let survivors = pool.list_agents().unwrap();
    assert_eq!(survivors.len(), 1, "existing agents should not be affected");
}
|
|
|
|
|
|
|
|
|
|
// ── archive + cleanup integration test ───────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Integration: archiving a story plus purging its pool entries leaves only
/// other stories' agents behind, and the story file lands in 5_done/.
#[tokio::test]
async fn archiving_story_removes_agent_entries_from_pool() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();

    // Set up story in 2_current/
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("60_story_cleanup.md"), "test").unwrap();

    let pool = AgentPool::new_test(3001);
    // Two agents belong to the story being archived; one belongs elsewhere.
    pool.inject_test_agent("60_story_cleanup", "coder-1", AgentStatus::Completed);
    pool.inject_test_agent("60_story_cleanup", "qa", AgentStatus::Completed);
    pool.inject_test_agent("61_story_other", "coder-1", AgentStatus::Running);

    // Verify all 3 agents exist.
    assert_eq!(pool.list_agents().unwrap().len(), 3);

    // Archive the story.
    move_story_to_archived(root, "60_story_cleanup").unwrap();
    pool.remove_agents_for_story("60_story_cleanup");

    // Agent entries for the archived story should be gone.
    let remaining = pool.list_agents().unwrap();
    assert_eq!(remaining.len(), 1, "only the other story's agent should remain");
    assert_eq!(remaining[0].story_id, "61_story_other");

    // Story file should be in 5_done/
    assert!(root.join(".story_kit/work/5_done/60_story_cleanup.md").exists());
}
|
2026-02-24 15:55:36 +00:00
|
|
|
|
2026-02-26 19:30:26 +00:00
|
|
|
// ── story 216: merge worktree uses project.toml component setup ───────────
|
2026-02-24 15:55:36 +00:00
|
|
|
|
2026-02-26 19:30:26 +00:00
|
|
|
/// When the project has `[[component]]` entries in `.story_kit/project.toml`,
/// `run_squash_merge` must run their setup commands in the merge worktree
/// before quality gates — matching the behaviour of `create_worktree`.
#[cfg(unix)]
#[test]
fn squash_merge_runs_component_setup_from_project_toml() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // Add a .story_kit/project.toml with a component whose setup writes a
    // sentinel file so we can confirm the command ran.
    let sk_dir = repo.join(".story_kit");
    fs::create_dir_all(&sk_dir).unwrap();
    fs::write(
        sk_dir.join("project.toml"),
        "[[component]]\nname = \"sentinel\"\npath = \".\"\nsetup = [\"touch setup_ran.txt\"]\n",
    )
    .unwrap();
    // Commit project.toml on master so the merge worktree checkout sees it.
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "add project.toml with component setup"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Create feature branch with a change.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-216_setup_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("feature.txt"), "change").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature work"])
        .current_dir(repo)
        .output()
        .unwrap();

    // Switch back to master.
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();

    let result =
        run_squash_merge(repo, "feature/story-216_setup_test", "216_setup_test").unwrap();

    // The output must mention component setup, proving the new code path ran.
    assert!(
        result.output.contains("component setup"),
        "merge output must mention component setup when project.toml has components, got:\n{}",
        result.output
    );
    // The sentinel command must appear in the output.
    assert!(
        result.output.contains("sentinel"),
        "merge output must name the component, got:\n{}",
        result.output
    );
}
|
|
|
|
|
|
2026-02-26 19:30:26 +00:00
|
|
|
/// When there are no `[[component]]` entries in project.toml (or no
/// project.toml at all), `run_squash_merge` must succeed without trying to
/// run any setup. No hardcoded pnpm or frontend/ references should appear.
#[cfg(unix)]
#[test]
fn squash_merge_succeeds_without_components_in_project_toml() {
    use std::fs;
    use tempfile::tempdir;

    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);

    // No .story_kit/project.toml — no component setup.
    fs::write(repo.join("file.txt"), "initial").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "initial commit"])
        .current_dir(repo)
        .output()
        .unwrap();

    // A feature branch carrying one change, then back to master.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-216_no_components"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("change.txt"), "change").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "feature"])
        .current_dir(repo)
        .output()
        .unwrap();

    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();

    let result =
        run_squash_merge(repo, "feature/story-216_no_components", "216_no_components")
            .unwrap();

    // No pnpm or frontend references should appear in the output.
    assert!(
        !result.output.contains("pnpm"),
        "output must not mention pnpm, got:\n{}",
        result.output
    );
    assert!(
        !result.output.contains("frontend/"),
        "output must not mention frontend/, got:\n{}",
        result.output
    );
}
|
2026-02-24 17:28:45 +00:00
|
|
|
|
|
|
|
|
// ── check_orphaned_agents return value tests (bug 161) ──────────────────
|
|
|
|
|
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn check_orphaned_agents_returns_count_of_orphaned_agents() {
|
2026-02-24 23:09:13 +00:00
|
|
|
let pool = AgentPool::new_test(3001);
|
2026-02-24 17:28:45 +00:00
|
|
|
|
|
|
|
|
// Spawn two tasks that finish immediately.
|
|
|
|
|
let h1 = tokio::spawn(async {});
|
|
|
|
|
let h2 = tokio::spawn(async {});
|
|
|
|
|
tokio::time::sleep(std::time::Duration::from_millis(20)).await;
|
|
|
|
|
assert!(h1.is_finished());
|
|
|
|
|
assert!(h2.is_finished());
|
|
|
|
|
|
|
|
|
|
pool.inject_test_agent_with_handle("story_a", "coder", AgentStatus::Running, h1);
|
|
|
|
|
pool.inject_test_agent_with_handle("story_b", "coder", AgentStatus::Running, h2);
|
|
|
|
|
|
|
|
|
|
let found = check_orphaned_agents(&pool.agents);
|
|
|
|
|
assert_eq!(found, 2, "should detect both orphaned agents");
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Agents already in terminal states are never counted as orphans.
#[test]
fn check_orphaned_agents_returns_zero_when_no_orphans() {
    let pool = AgentPool::new_test(3001);
    // Inject agents in terminal states — not orphaned.
    pool.inject_test_agent("story_a", "coder", AgentStatus::Completed);
    pool.inject_test_agent("story_b", "qa", AgentStatus::Failed);

    let orphan_count = check_orphaned_agents(&pool.agents);
    assert_eq!(orphan_count, 0, "no orphans should be detected for terminal agents");
}
|
|
|
|
|
|
|
|
|
|
/// Bug 161: the watchdog pass must both report a non-zero orphan count
/// (used to decide whether to trigger auto-assign) and flip the orphaned
/// agent to Failed as a side effect.
#[tokio::test]
async fn watchdog_orphan_detection_returns_nonzero_enabling_auto_assign() {
    // This test verifies the contract that `check_orphaned_agents` returns
    // a non-zero count when orphans exist, which the watchdog uses to
    // decide whether to trigger auto-assign (bug 161).
    let pool = AgentPool::new_test(3001);

    // An immediately-finished task makes the injected Running agent an orphan.
    let handle = tokio::spawn(async {});
    tokio::time::sleep(std::time::Duration::from_millis(20)).await;

    pool.inject_test_agent_with_handle(
        "orphan_story",
        "coder",
        AgentStatus::Running,
        handle,
    );

    // Before watchdog: agent is Running.
    {
        let agents = pool.agents.lock().unwrap();
        let key = composite_key("orphan_story", "coder");
        assert_eq!(agents.get(&key).unwrap().status, AgentStatus::Running);
    }

    // Run watchdog pass — should return 1 (orphan found).
    let found = check_orphaned_agents(&pool.agents);
    assert_eq!(
        found, 1,
        "watchdog must return 1 for a single orphaned agent"
    );

    // After watchdog: agent is Failed.
    {
        let agents = pool.agents.lock().unwrap();
        let key = composite_key("orphan_story", "coder");
        assert_eq!(
            agents.get(&key).unwrap().status,
            AgentStatus::Failed,
            "orphaned agent must be marked Failed"
        );
    }
}
|
2026-02-24 17:56:40 +00:00
|
|
|
|
|
|
|
|
// ── kill_all_children tests ────────────────────────────────────
|
|
|
|
|
|
|
|
|
|
/// Returns true if a process with the given PID is currently running.
///
/// Shells out to `ps -p <pid>`: a zero exit status means the PID exists.
/// Output is discarded, and any failure to spawn `ps` is treated as
/// "not running".
fn process_is_running(pid: u32) -> bool {
    let status = std::process::Command::new("ps")
        .args(["-p", &pid.to_string()])
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status();
    matches!(status, Ok(s) if s.success())
}
|
|
|
|
|
|
|
|
|
|
/// Killing children on a pool with no registered killers must be a
/// harmless no-op — no panic, no deadlock.
#[test]
fn kill_all_children_is_safe_on_empty_pool() {
    let pool = AgentPool::new_test(3001);
    pool.kill_all_children();
    // Nothing was ever registered, so nothing remains.
    assert_eq!(pool.child_killer_count(), 0);
}
|
|
|
|
|
|
|
|
|
|
/// End-to-end: a real PTY child registered with the pool is actually
/// terminated by `kill_all_children`, verified via `ps`.
#[test]
fn kill_all_children_kills_real_process() {
    // GIVEN: a real PTY child process (sleep 100) with its killer registered.
    let pool = AgentPool::new_test(3001);

    let pty_system = native_pty_system();
    let pair = pty_system
        .openpty(PtySize {
            rows: 24,
            cols: 80,
            pixel_width: 0,
            pixel_height: 0,
        })
        .expect("failed to open pty");

    // `sleep 100` outlives the test unless killed, so survival would be
    // detectable below.
    let mut cmd = CommandBuilder::new("sleep");
    cmd.arg("100");
    let mut child = pair
        .slave
        .spawn_command(cmd)
        .expect("failed to spawn sleep");
    let pid = child.process_id().expect("no pid");

    // Register the killer under an arbitrary composite key.
    pool.inject_child_killer("story:agent", child.clone_killer());

    // Verify the process is alive before we kill it.
    assert!(
        process_is_running(pid),
        "process {pid} should be running before kill_all_children"
    );

    // WHEN: kill_all_children() is called.
    pool.kill_all_children();

    // Collect the exit status (prevents zombie; also ensures signal was sent).
    let _ = child.wait();

    // THEN: the process should be dead.
    assert!(
        !process_is_running(pid),
        "process {pid} should have been killed by kill_all_children"
    );
}
|
|
|
|
|
|
|
|
|
|
/// After `kill_all_children`, the killer registry must be empty so stale
/// killers cannot be reused on a later shutdown.
#[test]
fn kill_all_children_clears_registry() {
    // GIVEN: a pool with one registered killer.
    let pool = AgentPool::new_test(3001);

    let pty_system = native_pty_system();
    let pair = pty_system
        .openpty(PtySize {
            rows: 24,
            cols: 80,
            pixel_width: 0,
            pixel_height: 0,
        })
        .expect("failed to open pty");

    // A short-lived child is enough — only the registry count is under test.
    let mut cmd = CommandBuilder::new("sleep");
    cmd.arg("1");
    let mut child = pair
        .slave
        .spawn_command(cmd)
        .expect("failed to spawn sleep");

    pool.inject_child_killer("story:agent", child.clone_killer());
    assert_eq!(pool.child_killer_count(), 1);

    // WHEN: kill_all_children() is called.
    pool.kill_all_children();
    // Reap the child to avoid leaving a zombie behind.
    let _ = child.wait();

    // THEN: the registry is empty.
    assert_eq!(
        pool.child_killer_count(),
        0,
        "child_killers should be cleared after kill_all_children"
    );
}
|
2026-02-24 23:57:07 +00:00
|
|
|
|
|
|
|
|
// ── Bug 173: pipeline advance sends AgentStateChanged via real watcher_tx ─
|
|
|
|
|
|
|
|
|
|
/// Bug 173: pipeline advance must emit `AgentStateChanged` through the
/// pool's real `watcher_tx` (not a dummy channel) so UI lozenges refresh
/// when agents are assigned during the advance.
#[tokio::test]
async fn pipeline_advance_sends_agent_state_changed_to_watcher_tx() {
    use std::fs;

    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();

    // Set up story in 2_current/
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("173_story_test.md"), "test").unwrap();
    // Ensure 3_qa/ exists for the move target
    fs::create_dir_all(root.join(".story_kit/work/3_qa")).unwrap();
    // Ensure 1_upcoming/ exists (start_agent calls move_story_to_current)
    fs::create_dir_all(root.join(".story_kit/work/1_upcoming")).unwrap();

    // Write a project.toml with a qa agent so start_agent can resolve it.
    fs::create_dir_all(root.join(".story_kit")).unwrap();
    fs::write(
        root.join(".story_kit/project.toml"),
        r#"
[[agent]]
name = "coder-1"
role = "Coder"
command = "echo"
args = ["noop"]
prompt = "test"
stage = "coder"

[[agent]]
name = "qa"
role = "QA"
command = "echo"
args = ["noop"]
prompt = "test"
stage = "qa"
"#,
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    // Subscribe to the watcher channel BEFORE the pipeline advance.
    let mut rx = pool.watcher_tx.subscribe();

    // Call pipeline advance directly. This will:
    // 1. Move the story to 3_qa/
    // 2. Start the QA agent (which calls notify_agent_state_changed)
    // Note: the actual agent process will fail (no real worktree), but the
    // agent insertion and notification happen before the background spawn.
    pool.run_pipeline_advance(
        "173_story_test",
        "coder-1",
        CompletionReport {
            summary: "done".to_string(),
            gates_passed: true,
            gate_output: String::new(),
        },
        Some(root.to_path_buf()),
        None,
        false,
    )
    .await;

    // The pipeline advance should have sent AgentStateChanged events via
    // the pool's watcher_tx (not a dummy channel). Collect all events.
    let mut got_agent_state_changed = false;
    while let Ok(evt) = rx.try_recv() {
        if matches!(evt, WatcherEvent::AgentStateChanged) {
            got_agent_state_changed = true;
            break;
        }
    }

    assert!(
        got_agent_state_changed,
        "pipeline advance should send AgentStateChanged through the real watcher_tx \
        (bug 173: lozenges must update when agents are assigned during pipeline advance)"
    );
}
|
2026-02-25 16:17:38 +00:00
|
|
|
|
|
|
|
|
// ── available_agents_for_stage tests (story 190) ──────────────────────────
|
|
|
|
|
|
|
|
|
|
fn make_config(toml_str: &str) -> ProjectConfig {
|
|
|
|
|
ProjectConfig::parse(toml_str).unwrap()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Per-stage availability excludes busy agents but keeps idle ones, and
/// each stage is filtered independently.
#[test]
fn available_agents_for_stage_returns_idle_agents() {
    let config = make_config(
        r#"
[[agent]]
name = "coder-1"
stage = "coder"

[[agent]]
name = "coder-2"
stage = "coder"

[[agent]]
name = "qa"
stage = "qa"
"#,
    );
    let pool = AgentPool::new_test(3001);
    // coder-1 is busy on story-1
    pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);

    // Only the free coder is offered for the coder stage…
    let idle_coders = pool
        .available_agents_for_stage(&config, &PipelineStage::Coder)
        .unwrap();
    assert_eq!(idle_coders, vec!["coder-2"]);

    // …while the qa stage is unaffected by the busy coder.
    let idle_qa = pool
        .available_agents_for_stage(&config, &PipelineStage::Qa)
        .unwrap();
    assert_eq!(idle_qa, vec!["qa"]);
}
|
|
|
|
|
|
|
|
|
|
/// When every configured agent for a stage is occupied, availability for
/// that stage is an empty list.
#[test]
fn available_agents_for_stage_returns_empty_when_all_busy() {
    let config = make_config(
        r#"
[[agent]]
name = "coder-1"
stage = "coder"
"#,
    );
    let pool = AgentPool::new_test(3001);
    // The single configured coder is occupied.
    pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);

    let idle = pool
        .available_agents_for_stage(&config, &PipelineStage::Coder)
        .unwrap();
    assert!(idle.is_empty());
}
|
|
|
|
|
|
|
|
|
|
/// An agent whose last run Completed is idle again — terminal entries must
/// not count as busy.
#[test]
fn available_agents_for_stage_ignores_completed_agents() {
    let config = make_config(
        r#"
[[agent]]
name = "coder-1"
stage = "coder"
"#,
    );
    let pool = AgentPool::new_test(3001);
    // Completed agents should not count as busy.
    pool.inject_test_agent("story-1", "coder-1", AgentStatus::Completed);

    let idle = pool
        .available_agents_for_stage(&config, &PipelineStage::Coder)
        .unwrap();
    assert_eq!(idle, vec!["coder-1"]);
}
|
|
|
|
|
|
|
|
|
|
/// When no explicit agent name is given and the first coder is busy,
/// start_agent must fall through to the next idle coder instead of
/// reporting "all busy".
#[tokio::test]
async fn start_agent_auto_selects_second_coder_when_first_busy() {
    let tmp = tempfile::tempdir().unwrap();
    let sk = tmp.path().join(".story_kit");
    std::fs::create_dir_all(&sk).unwrap();
    // Config includes a non-coder "supervisor" to confirm selection filters
    // by stage, not position.
    std::fs::write(
        sk.join("project.toml"),
        r#"
[[agent]]
name = "supervisor"
stage = "other"

[[agent]]
name = "coder-1"
stage = "coder"

[[agent]]
name = "coder-2"
stage = "coder"
"#,
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    // coder-1 is busy on another story
    pool.inject_test_agent("other-story", "coder-1", AgentStatus::Running);

    // Call start_agent without agent_name — should pick coder-2
    let result = pool
        .start_agent(tmp.path(), "42_my_story", None, None)
        .await;
    // Will fail for infrastructure reasons (no git repo), but should NOT
    // fail with "All coder agents are busy" — that would mean it didn't
    // try coder-2.
    match result {
        Ok(info) => {
            assert_eq!(info.agent_name, "coder-2");
        }
        Err(err) => {
            assert!(
                !err.contains("All coder agents are busy"),
                "should have selected coder-2 but got: {err}"
            );
            assert!(
                !err.contains("No coder agent configured"),
                "should not fail on agent selection, got: {err}"
            );
        }
    }
}
|
|
|
|
|
|
|
|
|
|
/// When every configured coder is occupied (Running or Pending both count),
/// start_agent must fail with the "All coder agents are busy" error.
#[tokio::test]
async fn start_agent_returns_busy_when_all_coders_occupied() {
    let tmp = tempfile::tempdir().unwrap();
    let sk = tmp.path().join(".story_kit");
    std::fs::create_dir_all(&sk).unwrap();
    std::fs::write(
        sk.join("project.toml"),
        r#"
[[agent]]
name = "coder-1"
stage = "coder"

[[agent]]
name = "coder-2"
stage = "coder"
"#,
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    // One Running and one Pending — both are considered occupied.
    pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
    pool.inject_test_agent("story-2", "coder-2", AgentStatus::Pending);

    let result = pool
        .start_agent(tmp.path(), "story-3", None, None)
        .await;
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert!(
        err.contains("All coder agents are busy"),
        "expected busy error, got: {err}"
    );
}
|
|
|
|
|
|
2026-02-26 12:41:12 +00:00
|
|
|
/// Story 203: when all coders are busy the story file must be moved from
/// 1_upcoming/ to 2_current/ so that auto_assign_available_work can pick
/// it up once a coder finishes.
#[tokio::test]
async fn start_agent_moves_story_to_current_when_coders_busy() {
    let tmp = tempfile::tempdir().unwrap();
    let sk = tmp.path().join(".story_kit");
    let upcoming = sk.join("work/1_upcoming");
    std::fs::create_dir_all(&upcoming).unwrap();
    // Single coder in config, occupied below — guarantees the busy path.
    std::fs::write(
        sk.join("project.toml"),
        r#"
[[agent]]
name = "coder-1"
stage = "coder"
"#,
    )
    .unwrap();
    // Place the story in 1_upcoming/.
    std::fs::write(
        upcoming.join("story-3.md"),
        "---\nname: Story 3\n---\n",
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);

    let result = pool
        .start_agent(tmp.path(), "story-3", None, None)
        .await;

    // Should fail because all coders are busy.
    assert!(result.is_err());
    let err = result.unwrap_err();
    assert!(
        err.contains("All coder agents are busy"),
        "expected busy error, got: {err}"
    );
    // The error message must also tell the caller where the story went.
    assert!(
        err.contains("queued in work/2_current/"),
        "expected story-to-current message, got: {err}"
    );

    // Story must have been moved to 2_current/.
    let current_path = sk.join("work/2_current/story-3.md");
    assert!(
        current_path.exists(),
        "story should be in 2_current/ after busy error, but was not"
    );
    let upcoming_path = upcoming.join("story-3.md");
    assert!(
        !upcoming_path.exists(),
        "story should no longer be in 1_upcoming/"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Story 203: auto_assign_available_work must detect a story in 2_current/
/// with no active agent and start an agent for it.
#[tokio::test]
async fn auto_assign_picks_up_story_queued_in_current() {
    let tmp = tempfile::tempdir().unwrap();
    let sk = tmp.path().join(".story_kit");
    let current = sk.join("work/2_current");
    std::fs::create_dir_all(&current).unwrap();
    std::fs::write(
        sk.join("project.toml"),
        "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
    )
    .unwrap();
    // Place the story in 2_current/ (simulating the "queued" state).
    std::fs::write(
        current.join("story-3.md"),
        "---\nname: Story 3\n---\n",
    )
    .unwrap();

    let pool = AgentPool::new_test(3001);
    // No agents are running — coder-1 is free.

    // auto_assign will try to call start_agent, which will attempt to create
    // a worktree (will fail without a git repo) — that is fine. We only need
    // to verify the agent is registered as Pending before the background
    // task eventually fails.
    pool.auto_assign_available_work(tmp.path()).await;

    let agents = pool.agents.lock().unwrap();
    // Accept either Pending or Running: the background task may have begun
    // by the time we inspect the pool.
    let has_pending = agents.values().any(|a| {
        a.agent_name == "coder-1"
            && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
    });
    assert!(
        has_pending,
        "auto_assign should have started coder-1 for story-3, but pool is empty"
    );
}
|
|
|
|
|
|
|
|
|
|
/// Story 203: if a story is already in 2_current/ or later, start_agent
|
|
|
|
|
/// must not fail — the move is a no-op.
|
|
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_story_already_in_current_is_noop() {
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let sk = tmp.path().join(".story_kit");
|
|
|
|
|
let current = sk.join("work/2_current");
|
|
|
|
|
std::fs::create_dir_all(¤t).unwrap();
|
|
|
|
|
std::fs::write(
|
|
|
|
|
sk.join("project.toml"),
|
|
|
|
|
"[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
// Place the story directly in 2_current/.
|
|
|
|
|
std::fs::write(
|
|
|
|
|
current.join("story-5.md"),
|
|
|
|
|
"---\nname: Story 5\n---\n",
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
let pool = AgentPool::new_test(3001);
|
|
|
|
|
|
|
|
|
|
// start_agent should attempt to assign coder-1 (no infra, so it will
|
|
|
|
|
// fail for git reasons), but must NOT fail due to the story already
|
|
|
|
|
// being in 2_current/.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(tmp.path(), "story-5", None, None)
|
|
|
|
|
.await;
|
|
|
|
|
match result {
|
|
|
|
|
Ok(_) => {}
|
|
|
|
|
Err(e) => {
|
|
|
|
|
assert!(
|
|
|
|
|
!e.contains("Failed to move"),
|
|
|
|
|
"should not fail on idempotent move, got: {e}"
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2026-02-25 16:17:38 +00:00
|
|
|
#[tokio::test]
|
|
|
|
|
async fn start_agent_explicit_name_unchanged_when_busy() {
|
|
|
|
|
let tmp = tempfile::tempdir().unwrap();
|
|
|
|
|
let sk = tmp.path().join(".story_kit");
|
|
|
|
|
std::fs::create_dir_all(&sk).unwrap();
|
|
|
|
|
std::fs::write(
|
|
|
|
|
sk.join("project.toml"),
|
|
|
|
|
r#"
|
|
|
|
|
[[agent]]
|
|
|
|
|
name = "coder-1"
|
|
|
|
|
stage = "coder"
|
|
|
|
|
|
|
|
|
|
[[agent]]
|
|
|
|
|
name = "coder-2"
|
|
|
|
|
stage = "coder"
|
|
|
|
|
"#,
|
|
|
|
|
)
|
|
|
|
|
.unwrap();
|
|
|
|
|
|
|
|
|
|
let pool = AgentPool::new_test(3001);
|
|
|
|
|
pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
|
|
|
|
|
|
|
|
|
|
// Explicit request for coder-1 (busy) should fail even though coder-2 is free.
|
|
|
|
|
let result = pool
|
|
|
|
|
.start_agent(tmp.path(), "story-2", Some("coder-1"), None)
|
|
|
|
|
.await;
|
|
|
|
|
assert!(result.is_err());
|
|
|
|
|
let err = result.unwrap_err();
|
|
|
|
|
assert!(
|
|
|
|
|
err.contains("coder-1") && err.contains("already running"),
|
|
|
|
|
"expected explicit busy error, got: {err}"
|
|
|
|
|
);
|
|
|
|
|
}
|
2026-02-20 13:16:04 +00:00
|
|
|
}
|