From 7c3a756a5cb6413f34272a1a24d8d381da14a6ce Mon Sep 17 00:00:00 2001 From: Dave Date: Mon, 16 Mar 2026 23:06:14 +0000 Subject: [PATCH] Refactor agents.rs (7631 lines) into agents/ module directory Split the monolithic agents.rs into 6 focused modules: - mod.rs: shared types (AgentEvent, AgentStatus, etc.) and re-exports - pool.rs: AgentPool struct, all methods, and helper free functions - pty.rs: PTY streaming (run_agent_pty_blocking, emit_event) - lifecycle.rs: story movement functions (move_story_to_qa, etc.) - gates.rs: acceptance gates (clippy, tests, coverage) - merge.rs: squash-merge, conflict resolution, quality gates All 121 original tests are preserved and distributed across modules. Also adds clear_front_matter_field to story_metadata.rs to strip stale merge_failure from front matter when stories move to done. Co-Authored-By: Claude Opus 4.6 --- server/src/agents/gates.rs | 393 ++ server/src/agents/lifecycle.rs | 556 +++ server/src/agents/merge.rs | 1638 +++++++ server/src/agents/mod.rs | 181 + server/src/{agents.rs => agents/pool.rs} | 5142 +++++----------------- server/src/agents/pty.rs | 490 +++ server/src/io/story_metadata.rs | 80 + 7 files changed, 4331 insertions(+), 4149 deletions(-) create mode 100644 server/src/agents/gates.rs create mode 100644 server/src/agents/lifecycle.rs create mode 100644 server/src/agents/merge.rs create mode 100644 server/src/agents/mod.rs rename server/src/{agents.rs => agents/pool.rs} (58%) create mode 100644 server/src/agents/pty.rs diff --git a/server/src/agents/gates.rs b/server/src/agents/gates.rs new file mode 100644 index 0000000..ebf20e2 --- /dev/null +++ b/server/src/agents/gates.rs @@ -0,0 +1,393 @@ +use std::path::Path; +use std::process::Command; + +/// Detect whether the base branch in a worktree is `master` or `main`. +/// Falls back to `"master"` if neither is found. 
+pub(crate) fn detect_worktree_base_branch(wt_path: &Path) -> String { + for branch in &["master", "main"] { + let ok = Command::new("git") + .args(["rev-parse", "--verify", branch]) + .current_dir(wt_path) + .output() + .map(|o| o.status.success()) + .unwrap_or(false); + if ok { + return branch.to_string(); + } + } + "master".to_string() +} + +/// Return `true` if the git worktree at `wt_path` has commits on its current +/// branch that are not present on the base branch (`master` or `main`). +/// +/// Used during server startup reconciliation to detect stories whose agent work +/// was committed while the server was offline. +pub(crate) fn worktree_has_committed_work(wt_path: &Path) -> bool { + let base_branch = detect_worktree_base_branch(wt_path); + let output = Command::new("git") + .args(["log", &format!("{base_branch}..HEAD"), "--oneline"]) + .current_dir(wt_path) + .output(); + match output { + Ok(out) if out.status.success() => { + !String::from_utf8_lossy(&out.stdout).trim().is_empty() + } + _ => false, + } +} + +/// Check whether the given directory has any uncommitted git changes. +/// Returns `Err` with a descriptive message if there are any. +pub(crate) fn check_uncommitted_changes(path: &Path) -> Result<(), String> { + let output = Command::new("git") + .args(["status", "--porcelain"]) + .current_dir(path) + .output() + .map_err(|e| format!("Failed to run git status: {e}"))?; + + let stdout = String::from_utf8_lossy(&output.stdout); + if !stdout.trim().is_empty() { + return Err(format!( + "Worktree has uncommitted changes. Please commit all work before \ + the agent exits:\n{stdout}" + )); + } + Ok(()) +} + +/// Run the project's test suite. +/// +/// Uses `script/test` if present, treating it as the canonical single test entry point. +/// Falls back to `cargo nextest run` / `cargo test` when `script/test` is absent. +/// Returns `(tests_passed, output)`. 
+pub(crate) fn run_project_tests(path: &Path) -> Result<(bool, String), String> { + let script_test = path.join("script").join("test"); + if script_test.exists() { + let mut output = String::from("=== script/test ===\n"); + let result = Command::new(&script_test) + .current_dir(path) + .output() + .map_err(|e| format!("Failed to run script/test: {e}"))?; + let out = format!( + "{}{}", + String::from_utf8_lossy(&result.stdout), + String::from_utf8_lossy(&result.stderr) + ); + output.push_str(&out); + output.push('\n'); + return Ok((result.status.success(), output)); + } + + // Fallback: cargo nextest run / cargo test + let mut output = String::from("=== tests ===\n"); + let (success, test_out) = match Command::new("cargo") + .args(["nextest", "run"]) + .current_dir(path) + .output() + { + Ok(o) => { + let combined = format!( + "{}{}", + String::from_utf8_lossy(&o.stdout), + String::from_utf8_lossy(&o.stderr) + ); + (o.status.success(), combined) + } + Err(_) => { + // nextest not available — fall back to cargo test + let o = Command::new("cargo") + .args(["test"]) + .current_dir(path) + .output() + .map_err(|e| format!("Failed to run cargo test: {e}"))?; + let combined = format!( + "{}{}", + String::from_utf8_lossy(&o.stdout), + String::from_utf8_lossy(&o.stderr) + ); + (o.status.success(), combined) + } + }; + output.push_str(&test_out); + output.push('\n'); + Ok((success, output)) +} + +/// Run `cargo clippy` and the project test suite (via `script/test` if present, +/// otherwise `cargo nextest run` / `cargo test`) in the given directory. +/// Returns `(gates_passed, combined_output)`. 
+pub(crate) fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> { + let mut all_output = String::new(); + let mut all_passed = true; + + // ── cargo clippy ────────────────────────────────────────────── + let clippy = Command::new("cargo") + .args(["clippy", "--all-targets", "--all-features"]) + .current_dir(path) + .output() + .map_err(|e| format!("Failed to run cargo clippy: {e}"))?; + + all_output.push_str("=== cargo clippy ===\n"); + let clippy_stdout = String::from_utf8_lossy(&clippy.stdout); + let clippy_stderr = String::from_utf8_lossy(&clippy.stderr); + if !clippy_stdout.is_empty() { + all_output.push_str(&clippy_stdout); + } + if !clippy_stderr.is_empty() { + all_output.push_str(&clippy_stderr); + } + all_output.push('\n'); + + if !clippy.status.success() { + all_passed = false; + } + + // ── tests (script/test if available, else cargo nextest/test) ─ + let (test_success, test_out) = run_project_tests(path)?; + all_output.push_str(&test_out); + if !test_success { + all_passed = false; + } + + Ok((all_passed, all_output)) +} + +/// Run `script/test_coverage` in the given directory if the script exists. +/// +/// Used as a QA gate before advancing a story from `3_qa/` to `4_merge/`. +/// Returns `(passed, output)`. If the script does not exist, returns `(true, …)`. 
+pub(crate) fn run_coverage_gate(path: &Path) -> Result<(bool, String), String> { + let script = path.join("script").join("test_coverage"); + if !script.exists() { + return Ok(( + true, + "script/test_coverage not found; coverage gate skipped.\n".to_string(), + )); + } + + let mut output = String::from("=== script/test_coverage ===\n"); + let result = Command::new(&script) + .current_dir(path) + .output() + .map_err(|e| format!("Failed to run script/test_coverage: {e}"))?; + + let combined = format!( + "{}{}", + String::from_utf8_lossy(&result.stdout), + String::from_utf8_lossy(&result.stderr) + ); + output.push_str(&combined); + output.push('\n'); + + Ok((result.status.success(), output)) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn init_git_repo(repo: &std::path::Path) { + Command::new("git") + .args(["init"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(repo) + .output() + .unwrap(); + } + + // ── run_project_tests tests ─────────────────────────────────── + + #[cfg(unix)] + #[test] + fn run_project_tests_uses_script_test_when_present_and_passes() { + use std::fs; + use std::os::unix::fs::PermissionsExt; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let path = tmp.path(); + let script_dir = path.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + let script_test = script_dir.join("test"); + fs::write(&script_test, "#!/usr/bin/env bash\necho 'all tests passed'\nexit 0\n").unwrap(); + let mut perms = fs::metadata(&script_test).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script_test, perms).unwrap(); + + let (passed, output) = run_project_tests(path).unwrap(); + assert!(passed, "script/test 
exiting 0 should pass"); + assert!(output.contains("script/test"), "output should mention script/test"); + } + + #[cfg(unix)] + #[test] + fn run_project_tests_reports_failure_when_script_test_exits_nonzero() { + use std::fs; + use std::os::unix::fs::PermissionsExt; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let path = tmp.path(); + let script_dir = path.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + let script_test = script_dir.join("test"); + fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap(); + let mut perms = fs::metadata(&script_test).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script_test, perms).unwrap(); + + let (passed, output) = run_project_tests(path).unwrap(); + assert!(!passed, "script/test exiting 1 should fail"); + assert!(output.contains("script/test"), "output should mention script/test"); + } + + // ── run_coverage_gate tests ─────────────────────────────────────────────── + + #[cfg(unix)] + #[test] + fn coverage_gate_passes_when_script_absent() { + use tempfile::tempdir; + let tmp = tempdir().unwrap(); + let (passed, output) = run_coverage_gate(tmp.path()).unwrap(); + assert!(passed, "coverage gate should pass when script is absent"); + assert!( + output.contains("not found"), + "output should mention script not found" + ); + } + + #[cfg(unix)] + #[test] + fn coverage_gate_passes_when_script_exits_zero() { + use std::fs; + use std::os::unix::fs::PermissionsExt; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let path = tmp.path(); + let script_dir = path.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + let script = script_dir.join("test_coverage"); + fs::write( + &script, + "#!/usr/bin/env bash\necho 'Rust line coverage: 85%'\necho 'PASS: Coverage 85% meets threshold 0%'\nexit 0\n", + ) + .unwrap(); + let mut perms = fs::metadata(&script).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script, perms).unwrap(); + + let 
(passed, output) = run_coverage_gate(path).unwrap(); + assert!(passed, "coverage gate should pass when script exits 0"); + assert!( + output.contains("script/test_coverage"), + "output should mention script/test_coverage" + ); + } + + #[cfg(unix)] + #[test] + fn coverage_gate_fails_when_script_exits_nonzero() { + use std::fs; + use std::os::unix::fs::PermissionsExt; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let path = tmp.path(); + let script_dir = path.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + let script = script_dir.join("test_coverage"); + fs::write( + &script, + "#!/usr/bin/env bash\necho 'FAIL: Coverage 40% is below threshold 80%'\nexit 1\n", + ) + .unwrap(); + let mut perms = fs::metadata(&script).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script, perms).unwrap(); + + let (passed, output) = run_coverage_gate(path).unwrap(); + assert!(!passed, "coverage gate should fail when script exits 1"); + assert!( + output.contains("script/test_coverage"), + "output should mention script/test_coverage" + ); + } + + // ── worktree_has_committed_work tests ───────────────────────────────────── + + #[test] + fn worktree_has_committed_work_false_on_fresh_repo() { + let tmp = tempfile::tempdir().unwrap(); + let repo = tmp.path(); + // init_git_repo creates the initial commit on the default branch. + // HEAD IS the base branch — no commits ahead. + init_git_repo(repo); + assert!(!worktree_has_committed_work(repo)); + } + + #[test] + fn worktree_has_committed_work_true_after_commit_on_feature_branch() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let project_root = tmp.path().join("project"); + fs::create_dir_all(&project_root).unwrap(); + init_git_repo(&project_root); + + // Create a git worktree on a feature branch. 
+ let wt_path = tmp.path().join("wt"); + Command::new("git") + .args([ + "worktree", + "add", + &wt_path.to_string_lossy(), + "-b", + "feature/story-99_test", + ]) + .current_dir(&project_root) + .output() + .unwrap(); + + // No commits on the feature branch yet — same as base branch. + assert!(!worktree_has_committed_work(&wt_path)); + + // Add a commit to the feature branch in the worktree. + fs::write(wt_path.join("work.txt"), "done").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(&wt_path) + .output() + .unwrap(); + Command::new("git") + .args([ + "-c", + "user.email=test@test.com", + "-c", + "user.name=Test", + "commit", + "-m", + "coder: implement story", + ]) + .current_dir(&wt_path) + .output() + .unwrap(); + + // Now the feature branch is ahead of the base branch. + assert!(worktree_has_committed_work(&wt_path)); + } +} diff --git a/server/src/agents/lifecycle.rs b/server/src/agents/lifecycle.rs new file mode 100644 index 0000000..dbd7b3a --- /dev/null +++ b/server/src/agents/lifecycle.rs @@ -0,0 +1,556 @@ +use std::path::{Path, PathBuf}; +use std::process::Command; + +use crate::io::story_metadata::clear_front_matter_field; +use crate::slog; + +#[allow(dead_code)] +fn item_type_from_id(item_id: &str) -> &'static str { + // New format: {digits}_{type}_{slug} + let after_num = item_id.trim_start_matches(|c: char| c.is_ascii_digit()); + if after_num.starts_with("_bug_") { + "bug" + } else if after_num.starts_with("_spike_") { + "spike" + } else { + "story" + } +} + +/// Return the source directory path for a work item (always work/1_upcoming/). +fn item_source_dir(project_root: &Path, _item_id: &str) -> PathBuf { + project_root.join(".story_kit").join("work").join("1_upcoming") +} + +/// Return the done directory path for a work item (always work/5_done/). 
+fn item_archive_dir(project_root: &Path, _item_id: &str) -> PathBuf { + project_root.join(".story_kit").join("work").join("5_done") +} + +/// Move a work item (story, bug, or spike) from `work/1_upcoming/` to `work/2_current/`. +/// +/// Idempotent: if the item is already in `2_current/`, returns Ok without committing. +/// If the item is not found in `1_upcoming/`, logs a warning and returns Ok. +pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), String> { + let sk = project_root.join(".story_kit").join("work"); + let current_dir = sk.join("2_current"); + let current_path = current_dir.join(format!("{story_id}.md")); + + if current_path.exists() { + // Already in 2_current/ — idempotent, nothing to do. + return Ok(()); + } + + let source_dir = item_source_dir(project_root, story_id); + let source_path = source_dir.join(format!("{story_id}.md")); + + if !source_path.exists() { + slog!( + "[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/", + source_dir.display() + ); + return Ok(()); + } + + std::fs::create_dir_all(¤t_dir) + .map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?; + + std::fs::rename(&source_path, ¤t_path) + .map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?; + + slog!( + "[lifecycle] Moved '{story_id}' from {} to work/2_current/", + source_dir.display() + ); + + Ok(()) +} + +/// Check whether a feature branch `feature/story-{story_id}` exists and has +/// commits that are not yet on master. Returns `true` when there is unmerged +/// work, `false` when there is no branch or all its commits are already +/// reachable from master. +pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str) -> bool { + let branch = format!("feature/story-{story_id}"); + + // Check if the branch exists. 
+ let branch_check = Command::new("git") + .args(["rev-parse", "--verify", &branch]) + .current_dir(project_root) + .output(); + match branch_check { + Ok(out) if out.status.success() => {} + _ => return false, // No feature branch → nothing to merge. + } + + // Check if the branch has commits not reachable from master. + let log = Command::new("git") + .args(["log", &format!("master..{branch}"), "--oneline"]) + .current_dir(project_root) + .output(); + match log { + Ok(out) => { + let stdout = String::from_utf8_lossy(&out.stdout); + !stdout.trim().is_empty() + } + Err(_) => false, + } +} + +/// Move a story from `work/2_current/` to `work/5_done/` and auto-commit. +/// +/// * If the story is in `2_current/`, it is moved to `5_done/` and committed. +/// * If the story is in `4_merge/`, it is moved to `5_done/` and committed. +/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent). +/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned. +pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), String> { + let sk = project_root.join(".story_kit").join("work"); + let current_path = sk.join("2_current").join(format!("{story_id}.md")); + let merge_path = sk.join("4_merge").join(format!("{story_id}.md")); + let done_dir = sk.join("5_done"); + let done_path = done_dir.join(format!("{story_id}.md")); + let archived_path = sk.join("6_archived").join(format!("{story_id}.md")); + + if done_path.exists() || archived_path.exists() { + // Already in done or archived — idempotent, nothing to do. + return Ok(()); + } + + // Check 2_current/ first, then 4_merge/ + let source_path = if current_path.exists() { + current_path.clone() + } else if merge_path.exists() { + merge_path.clone() + } else { + return Err(format!( + "Story '{story_id}' not found in work/2_current/ or work/4_merge/. Cannot accept story." 
+ )); + }; + + std::fs::create_dir_all(&done_dir) + .map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?; + std::fs::rename(&source_path, &done_path) + .map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?; + + // Strip stale merge_failure from front matter now that the story is done. + if let Err(e) = clear_front_matter_field(&done_path, "merge_failure") { + slog!("[lifecycle] Warning: could not clear merge_failure from '{story_id}': {e}"); + } + + let from_dir = if source_path == current_path { + "work/2_current/" + } else { + "work/4_merge/" + }; + slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_done/"); + + Ok(()) +} + +/// Move a story/bug from `work/2_current/` or `work/3_qa/` to `work/4_merge/`. +/// +/// This stages a work item as ready for the mergemaster to pick up and merge into master. +/// Idempotent: if already in `4_merge/`, returns Ok without committing. +pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), String> { + let sk = project_root.join(".story_kit").join("work"); + let current_path = sk.join("2_current").join(format!("{story_id}.md")); + let qa_path = sk.join("3_qa").join(format!("{story_id}.md")); + let merge_dir = sk.join("4_merge"); + let merge_path = merge_dir.join(format!("{story_id}.md")); + + if merge_path.exists() { + // Already in 4_merge/ — idempotent, nothing to do. + return Ok(()); + } + + // Accept from 2_current/ (manual trigger) or 3_qa/ (pipeline advancement from QA stage). + let source_path = if current_path.exists() { + current_path.clone() + } else if qa_path.exists() { + qa_path.clone() + } else { + return Err(format!( + "Work item '{story_id}' not found in work/2_current/ or work/3_qa/. Cannot move to 4_merge/." 
+ )); + }; + + std::fs::create_dir_all(&merge_dir) + .map_err(|e| format!("Failed to create work/4_merge/ directory: {e}"))?; + std::fs::rename(&source_path, &merge_path) + .map_err(|e| format!("Failed to move '{story_id}' to 4_merge/: {e}"))?; + + let from_dir = if source_path == current_path { + "work/2_current/" + } else { + "work/3_qa/" + }; + slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); + + Ok(()) +} + +/// Move a story/bug from `work/2_current/` to `work/3_qa/` and auto-commit. +/// +/// This stages a work item for QA review before merging to master. +/// Idempotent: if already in `3_qa/`, returns Ok without committing. +pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), String> { + let sk = project_root.join(".story_kit").join("work"); + let current_path = sk.join("2_current").join(format!("{story_id}.md")); + let qa_dir = sk.join("3_qa"); + let qa_path = qa_dir.join(format!("{story_id}.md")); + + if qa_path.exists() { + // Already in 3_qa/ — idempotent, nothing to do. + return Ok(()); + } + + if !current_path.exists() { + return Err(format!( + "Work item '{story_id}' not found in work/2_current/. Cannot move to 3_qa/." + )); + } + + std::fs::create_dir_all(&qa_dir) + .map_err(|e| format!("Failed to create work/3_qa/ directory: {e}"))?; + std::fs::rename(¤t_path, &qa_path) + .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; + + slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); + + Ok(()) +} + +/// Move a bug from `work/2_current/` or `work/1_upcoming/` to `work/5_done/` and auto-commit. +/// +/// * If the bug is in `2_current/`, it is moved to `5_done/` and committed. +/// * If the bug is still in `1_upcoming/` (never started), it is moved directly to `5_done/`. +/// * If the bug is already in `5_done/`, this is a no-op (idempotent). +/// * If the bug is not found anywhere, an error is returned. 
+pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), String> { + let sk = project_root.join(".story_kit").join("work"); + let current_path = sk.join("2_current").join(format!("{bug_id}.md")); + let upcoming_path = sk.join("1_upcoming").join(format!("{bug_id}.md")); + let archive_dir = item_archive_dir(project_root, bug_id); + let archive_path = archive_dir.join(format!("{bug_id}.md")); + + if archive_path.exists() { + return Ok(()); + } + + let source_path = if current_path.exists() { + current_path.clone() + } else if upcoming_path.exists() { + upcoming_path.clone() + } else { + return Err(format!( + "Bug '{bug_id}' not found in work/2_current/ or work/1_upcoming/. Cannot close bug." + )); + }; + + std::fs::create_dir_all(&archive_dir) + .map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?; + std::fs::rename(&source_path, &archive_path) + .map_err(|e| format!("Failed to move bug '{bug_id}' to 5_done/: {e}"))?; + + slog!( + "[lifecycle] Closed bug '{bug_id}' → work/5_done/" + ); + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + // ── move_story_to_current tests ──────────────────────────────────────────── + + #[test] + fn move_story_to_current_moves_file() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let upcoming = root.join(".story_kit/work/1_upcoming"); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(&upcoming).unwrap(); + fs::create_dir_all(¤t).unwrap(); + fs::write(upcoming.join("10_story_foo.md"), "test").unwrap(); + + move_story_to_current(root, "10_story_foo").unwrap(); + + assert!(!upcoming.join("10_story_foo.md").exists()); + assert!(current.join("10_story_foo.md").exists()); + } + + #[test] + fn move_story_to_current_is_idempotent_when_already_current() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(¤t).unwrap(); + 
fs::write(current.join("11_story_foo.md"), "test").unwrap(); + + move_story_to_current(root, "11_story_foo").unwrap(); + assert!(current.join("11_story_foo.md").exists()); + } + + #[test] + fn move_story_to_current_noop_when_not_in_upcoming() { + let tmp = tempfile::tempdir().unwrap(); + assert!(move_story_to_current(tmp.path(), "99_missing").is_ok()); + } + + #[test] + fn move_bug_to_current_moves_from_upcoming() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let upcoming = root.join(".story_kit/work/1_upcoming"); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(&upcoming).unwrap(); + fs::create_dir_all(¤t).unwrap(); + fs::write(upcoming.join("1_bug_test.md"), "# Bug 1\n").unwrap(); + + move_story_to_current(root, "1_bug_test").unwrap(); + + assert!(!upcoming.join("1_bug_test.md").exists()); + assert!(current.join("1_bug_test.md").exists()); + } + + // ── close_bug_to_archive tests ───────────────────────────────────────────── + + #[test] + fn close_bug_moves_from_current_to_archive() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(¤t).unwrap(); + fs::write(current.join("2_bug_test.md"), "# Bug 2\n").unwrap(); + + close_bug_to_archive(root, "2_bug_test").unwrap(); + + assert!(!current.join("2_bug_test.md").exists()); + assert!(root.join(".story_kit/work/5_done/2_bug_test.md").exists()); + } + + #[test] + fn close_bug_moves_from_upcoming_when_not_started() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let upcoming = root.join(".story_kit/work/1_upcoming"); + fs::create_dir_all(&upcoming).unwrap(); + fs::write(upcoming.join("3_bug_test.md"), "# Bug 3\n").unwrap(); + + close_bug_to_archive(root, "3_bug_test").unwrap(); + + assert!(!upcoming.join("3_bug_test.md").exists()); + assert!(root.join(".story_kit/work/5_done/3_bug_test.md").exists()); + } + + 
// ── item_type_from_id tests ──────────────────────────────────────────────── + + #[test] + fn item_type_from_id_detects_types() { + assert_eq!(item_type_from_id("1_bug_test"), "bug"); + assert_eq!(item_type_from_id("1_spike_research"), "spike"); + assert_eq!(item_type_from_id("50_story_my_story"), "story"); + assert_eq!(item_type_from_id("1_story_simple"), "story"); + } + + // ── move_story_to_merge tests ────────────────────────────────────────────── + + #[test] + fn move_story_to_merge_moves_file() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(¤t).unwrap(); + fs::write(current.join("20_story_foo.md"), "test").unwrap(); + + move_story_to_merge(root, "20_story_foo").unwrap(); + + assert!(!current.join("20_story_foo.md").exists()); + assert!(root.join(".story_kit/work/4_merge/20_story_foo.md").exists()); + } + + #[test] + fn move_story_to_merge_from_qa_dir() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let qa_dir = root.join(".story_kit/work/3_qa"); + fs::create_dir_all(&qa_dir).unwrap(); + fs::write(qa_dir.join("40_story_test.md"), "test").unwrap(); + + move_story_to_merge(root, "40_story_test").unwrap(); + + assert!(!qa_dir.join("40_story_test.md").exists()); + assert!(root.join(".story_kit/work/4_merge/40_story_test.md").exists()); + } + + #[test] + fn move_story_to_merge_idempotent_when_already_in_merge() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let merge_dir = root.join(".story_kit/work/4_merge"); + fs::create_dir_all(&merge_dir).unwrap(); + fs::write(merge_dir.join("21_story_test.md"), "test").unwrap(); + + move_story_to_merge(root, "21_story_test").unwrap(); + assert!(merge_dir.join("21_story_test.md").exists()); + } + + #[test] + fn move_story_to_merge_errors_when_not_in_current_or_qa() { + let tmp = tempfile::tempdir().unwrap(); + let result = 
move_story_to_merge(tmp.path(), "99_nonexistent"); + assert!(result.unwrap_err().contains("not found in work/2_current/ or work/3_qa/")); + } + + // ── move_story_to_qa tests ──────────────────────────────────────────────── + + #[test] + fn move_story_to_qa_moves_file() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(¤t).unwrap(); + fs::write(current.join("30_story_qa.md"), "test").unwrap(); + + move_story_to_qa(root, "30_story_qa").unwrap(); + + assert!(!current.join("30_story_qa.md").exists()); + assert!(root.join(".story_kit/work/3_qa/30_story_qa.md").exists()); + } + + #[test] + fn move_story_to_qa_idempotent_when_already_in_qa() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let qa_dir = root.join(".story_kit/work/3_qa"); + fs::create_dir_all(&qa_dir).unwrap(); + fs::write(qa_dir.join("31_story_test.md"), "test").unwrap(); + + move_story_to_qa(root, "31_story_test").unwrap(); + assert!(qa_dir.join("31_story_test.md").exists()); + } + + #[test] + fn move_story_to_qa_errors_when_not_in_current() { + let tmp = tempfile::tempdir().unwrap(); + let result = move_story_to_qa(tmp.path(), "99_nonexistent"); + assert!(result.unwrap_err().contains("not found in work/2_current/")); + } + + // ── move_story_to_archived tests ────────────────────────────────────────── + + #[test] + fn move_story_to_archived_finds_in_merge_dir() { + use std::fs; + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + let merge_dir = root.join(".story_kit/work/4_merge"); + fs::create_dir_all(&merge_dir).unwrap(); + fs::write(merge_dir.join("22_story_test.md"), "test").unwrap(); + + move_story_to_archived(root, "22_story_test").unwrap(); + + assert!(!merge_dir.join("22_story_test.md").exists()); + assert!(root.join(".story_kit/work/5_done/22_story_test.md").exists()); + } + + #[test] + fn 
move_story_to_archived_error_when_not_in_current_or_merge() { + let tmp = tempfile::tempdir().unwrap(); + let result = move_story_to_archived(tmp.path(), "99_nonexistent"); + assert!(result.unwrap_err().contains("4_merge")); + } + + // ── feature_branch_has_unmerged_changes tests ──────────────────────────── + + fn init_git_repo(repo: &std::path::Path) { + Command::new("git") + .args(["init"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(repo) + .output() + .unwrap(); + } + + /// Bug 226: feature_branch_has_unmerged_changes returns true when the + /// feature branch has commits not on master. + #[test] + fn feature_branch_has_unmerged_changes_detects_unmerged_code() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create a feature branch with a code commit. + Command::new("git") + .args(["checkout", "-b", "feature/story-50_story_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("feature.rs"), "fn main() {}").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add feature"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + assert!( + feature_branch_has_unmerged_changes(repo, "50_story_test"), + "should detect unmerged changes on feature branch" + ); + } + + /// Bug 226: feature_branch_has_unmerged_changes returns false when no + /// feature branch exists. 
+ #[test] + fn feature_branch_has_unmerged_changes_false_when_no_branch() { + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + assert!( + !feature_branch_has_unmerged_changes(repo, "99_nonexistent"), + "should return false when no feature branch" + ); + } +} diff --git a/server/src/agents/merge.rs b/server/src/agents/merge.rs new file mode 100644 index 0000000..0cb743a --- /dev/null +++ b/server/src/agents/merge.rs @@ -0,0 +1,1638 @@ +use std::path::Path; +use std::process::Command; + +use serde::Serialize; + +use crate::config::ProjectConfig; + +use super::gates::run_project_tests; + +/// Result of a mergemaster merge operation. +#[derive(Debug, Serialize, Clone)] +pub struct MergeReport { + pub story_id: String, + pub success: bool, + pub had_conflicts: bool, + /// `true` when conflicts were detected but automatically resolved. + pub conflicts_resolved: bool, + pub conflict_details: Option, + pub gates_passed: bool, + pub gate_output: String, + pub worktree_cleaned_up: bool, + pub story_archived: bool, +} + +/// Result of a squash-merge operation. +pub(crate) struct SquashMergeResult { + pub(crate) success: bool, + pub(crate) had_conflicts: bool, + /// `true` when conflicts were detected but automatically resolved. + pub(crate) conflicts_resolved: bool, + pub(crate) conflict_details: Option, + pub(crate) output: String, + /// Whether quality gates ran and passed. `false` when `success` is `false` + /// due to a gate failure; callers can use this to distinguish gate failures + /// from merge/commit/FF failures in the `MergeReport`. + pub(crate) gates_passed: bool, +} + +/// Squash-merge a feature branch into the current branch using a temporary +/// merge-queue worktree for quality-gate isolation. +/// +/// **Flow:** +/// 1. Create a temporary `merge-queue/{story_id}` branch at current HEAD. +/// 2. Create a temporary worktree for that branch. +/// 3. 
Run `git merge --squash` in the temporary worktree (not the main worktree). +/// 4. If conflicts arise, attempt automatic resolution for simple additive cases. +/// 5. If clean (or resolved), commit in the temp worktree. +/// 6. Run quality gates **in the merge worktree** before touching master. +/// 7. If gates pass: cherry-pick the squash commit onto master. +/// 8. Clean up the temporary worktree and branch. +/// +/// Step 7 uses `git cherry-pick` instead of `git merge --ff-only` so that +/// concurrent filesystem-watcher commits on master (pipeline file moves) do +/// not block the merge. +pub(crate) fn run_squash_merge( + project_root: &Path, + branch: &str, + story_id: &str, +) -> Result { + let mut all_output = String::new(); + let merge_branch = format!("merge-queue/{story_id}"); + let merge_wt_path = project_root + .join(".story_kit") + .join("merge_workspace"); + + // Ensure we start clean: remove any leftover merge workspace. + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + + // ── Create merge-queue branch at current HEAD ───────────────── + all_output.push_str(&format!( + "=== Creating merge-queue branch '{merge_branch}' ===\n" + )); + let create_branch = Command::new("git") + .args(["branch", &merge_branch]) + .current_dir(project_root) + .output() + .map_err(|e| format!("Failed to create merge-queue branch: {e}"))?; + if !create_branch.status.success() { + let stderr = String::from_utf8_lossy(&create_branch.stderr); + all_output.push_str(&format!("Branch creation failed: {stderr}\n")); + return Err(format!("Failed to create merge-queue branch: {stderr}")); + } + + // ── Create temporary worktree ───────────────────────────────── + all_output.push_str("=== Creating temporary merge worktree ===\n"); + let wt_str = merge_wt_path.to_string_lossy().to_string(); + let create_wt = Command::new("git") + .args(["worktree", "add", &wt_str, &merge_branch]) + .current_dir(project_root) + .output() + .map_err(|e| format!("Failed to create 
merge worktree: {e}"))?; + if !create_wt.status.success() { + let stderr = String::from_utf8_lossy(&create_wt.stderr); + all_output.push_str(&format!("Worktree creation failed: {stderr}\n")); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Err(format!("Failed to create merge worktree: {stderr}")); + } + + // ── Squash-merge in the temporary worktree ──────────────────── + all_output.push_str(&format!("=== git merge --squash {branch} ===\n")); + let merge = Command::new("git") + .args(["merge", "--squash", branch]) + .current_dir(&merge_wt_path) + .output() + .map_err(|e| format!("Failed to run git merge: {e}"))?; + + let merge_stdout = String::from_utf8_lossy(&merge.stdout).to_string(); + let merge_stderr = String::from_utf8_lossy(&merge.stderr).to_string(); + all_output.push_str(&merge_stdout); + all_output.push_str(&merge_stderr); + all_output.push('\n'); + + let mut had_conflicts = false; + let mut conflicts_resolved = false; + let mut conflict_details: Option = None; + + if !merge.status.success() { + had_conflicts = true; + all_output.push_str("=== Conflicts detected, attempting auto-resolution ===\n"); + + // Try to automatically resolve simple conflicts. + match try_resolve_conflicts(&merge_wt_path) { + Ok((resolved, resolution_log)) => { + all_output.push_str(&resolution_log); + if resolved { + conflicts_resolved = true; + all_output + .push_str("=== All conflicts resolved automatically ===\n"); + } else { + // Could not resolve — abort, clean up, and report. 
+ let details = format!( + "Merge conflicts in branch '{branch}':\n{merge_stdout}{merge_stderr}\n{resolution_log}" + ); + conflict_details = Some(details); + all_output + .push_str("=== Unresolvable conflicts, aborting merge ===\n"); + cleanup_merge_workspace( + project_root, + &merge_wt_path, + &merge_branch, + ); + return Ok(SquashMergeResult { + success: false, + had_conflicts: true, + conflicts_resolved: false, + conflict_details, + output: all_output, + gates_passed: false, + }); + } + } + Err(e) => { + all_output.push_str(&format!("Auto-resolution error: {e}\n")); + cleanup_merge_workspace( + project_root, + &merge_wt_path, + &merge_branch, + ); + return Ok(SquashMergeResult { + success: false, + had_conflicts: true, + conflicts_resolved: false, + conflict_details: Some(format!( + "Merge conflicts in branch '{branch}' (auto-resolution failed: {e}):\n{merge_stdout}{merge_stderr}" + )), + output: all_output, + gates_passed: false, + }); + } + } + } + + // ── Commit in the temporary worktree ────────────────────────── + all_output.push_str("=== git commit ===\n"); + let commit_msg = format!("story-kit: merge {story_id}"); + let commit = Command::new("git") + .args(["commit", "-m", &commit_msg]) + .current_dir(&merge_wt_path) + .output() + .map_err(|e| format!("Failed to run git commit: {e}"))?; + + let commit_stdout = String::from_utf8_lossy(&commit.stdout).to_string(); + let commit_stderr = String::from_utf8_lossy(&commit.stderr).to_string(); + all_output.push_str(&commit_stdout); + all_output.push_str(&commit_stderr); + all_output.push('\n'); + + if !commit.status.success() { + // Bug 226: "nothing to commit" means the feature branch has no changes + // beyond what's already on master. This must NOT be treated as success + // — it means the code was never actually merged. 
+ if commit_stderr.contains("nothing to commit") + || commit_stdout.contains("nothing to commit") + { + all_output.push_str( + "=== Nothing to commit — feature branch has no changes beyond master ===\n", + ); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details: Some( + "Squash-merge resulted in an empty diff — the feature branch has no \ + code changes to merge into master." + .to_string(), + ), + output: all_output, + gates_passed: false, + }); + } + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details, + output: all_output, + gates_passed: false, + }); + } + + // ── Bug 226: Verify the commit contains real code changes ───── + // If the merge only brought in .story_kit/ files (pipeline file moves), + // there are no actual code changes to land on master. Abort. + { + let diff_check = Command::new("git") + .args(["diff", "--name-only", "HEAD~1..HEAD"]) + .current_dir(&merge_wt_path) + .output() + .map_err(|e| format!("Failed to check merge diff: {e}"))?; + let changed_files = String::from_utf8_lossy(&diff_check.stdout); + let has_code_changes = changed_files + .lines() + .any(|f| !f.starts_with(".story_kit/")); + if !has_code_changes { + all_output.push_str( + "=== Merge commit contains only .story_kit/ file moves, no code changes ===\n", + ); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details: Some( + "Feature branch has no code changes outside .story_kit/ — only \ + pipeline file moves were found." 
+ .to_string(), + ), + output: all_output, + gates_passed: false, + }); + } + } + + // ── Run component setup from project.toml (same as worktree creation) ────────── + { + let config = ProjectConfig::load(&merge_wt_path).unwrap_or_default(); + if !config.component.is_empty() { + all_output.push_str("=== component setup (merge worktree) ===\n"); + } + for component in &config.component { + let cmd_dir = merge_wt_path.join(&component.path); + for cmd in &component.setup { + all_output.push_str(&format!("--- {}: {cmd} ---\n", component.name)); + match Command::new("sh") + .args(["-c", cmd]) + .current_dir(&cmd_dir) + .output() + { + Ok(out) => { + all_output.push_str(&String::from_utf8_lossy(&out.stdout)); + all_output.push_str(&String::from_utf8_lossy(&out.stderr)); + all_output.push('\n'); + if !out.status.success() { + all_output.push_str(&format!( + "=== setup warning: '{}' failed: {cmd} ===\n", + component.name + )); + } + } + Err(e) => { + all_output.push_str(&format!( + "=== setup warning: failed to run '{cmd}': {e} ===\n" + )); + } + } + } + } + } + + // ── Quality gates in merge workspace (BEFORE fast-forward) ──── + // Run gates in the merge worktree so that failures abort before master moves. 
+ all_output.push_str("=== Running quality gates before fast-forward ===\n"); + match run_merge_quality_gates(&merge_wt_path) { + Ok((true, gate_out)) => { + all_output.push_str(&gate_out); + all_output.push('\n'); + all_output.push_str("=== Quality gates passed ===\n"); + } + Ok((false, gate_out)) => { + all_output.push_str(&gate_out); + all_output.push('\n'); + all_output + .push_str("=== Quality gates FAILED — aborting fast-forward, master unchanged ===\n"); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details, + output: all_output, + gates_passed: false, + }); + } + Err(e) => { + all_output.push_str(&format!("Gate check error: {e}\n")); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details, + output: all_output, + gates_passed: false, + }); + } + } + + // ── Cherry-pick the squash commit onto master ────────────────── + // We cherry-pick instead of fast-forward so that concurrent filesystem + // watcher commits on master (e.g. pipeline file moves) don't block the + // merge. Cherry-pick applies the diff of the squash commit cleanly on + // top of master's current HEAD. + all_output.push_str(&format!( + "=== Cherry-picking squash commit from {merge_branch} onto master ===\n" + )); + let cp = Command::new("git") + .args(["cherry-pick", &merge_branch]) + .current_dir(project_root) + .output() + .map_err(|e| format!("Failed to cherry-pick merge-queue commit: {e}"))?; + + let cp_stdout = String::from_utf8_lossy(&cp.stdout).to_string(); + let cp_stderr = String::from_utf8_lossy(&cp.stderr).to_string(); + all_output.push_str(&cp_stdout); + all_output.push_str(&cp_stderr); + all_output.push('\n'); + + if !cp.status.success() { + // Abort the cherry-pick so master is left clean. 
+ let _ = Command::new("git") + .args(["cherry-pick", "--abort"]) + .current_dir(project_root) + .output(); + all_output.push_str("=== Cherry-pick failed — aborting, master unchanged ===\n"); + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + return Ok(SquashMergeResult { + success: false, + had_conflicts, + conflicts_resolved, + conflict_details: Some(format!( + "Cherry-pick of squash commit failed (conflict with master?):\n{cp_stderr}" + )), + output: all_output, + gates_passed: true, + }); + } + + // ── Clean up ────────────────────────────────────────────────── + cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); + all_output.push_str("=== Merge-queue cleanup complete ===\n"); + + Ok(SquashMergeResult { + success: true, + had_conflicts, + conflicts_resolved, + conflict_details, + output: all_output, + gates_passed: true, + }) +} + +/// Remove the temporary merge worktree and branch. Best-effort — errors are +/// silently ignored because this is cleanup code. +pub(crate) fn cleanup_merge_workspace( + project_root: &Path, + merge_wt_path: &Path, + merge_branch: &str, +) { + let wt_str = merge_wt_path.to_string_lossy().to_string(); + let _ = Command::new("git") + .args(["worktree", "remove", "--force", &wt_str]) + .current_dir(project_root) + .output(); + // If the directory still exists (e.g. it was a plain directory from a + // previous failed run, not a registered git worktree), remove it so + // the next `git worktree add` can succeed. + if merge_wt_path.exists() { + let _ = std::fs::remove_dir_all(merge_wt_path); + } + let _ = Command::new("git") + .args(["branch", "-D", merge_branch]) + .current_dir(project_root) + .output(); +} + +/// Attempt to automatically resolve merge conflicts in the given worktree. +/// +/// Finds all conflicted files and tries [`resolve_simple_conflicts`] on each. +/// If **all** conflicts can be resolved, stages the resolved files and returns +/// `Ok((true, log))`. 
If any file has a complex conflict that cannot be +/// auto-resolved, returns `Ok((false, log))` without staging anything. +fn try_resolve_conflicts(worktree: &Path) -> Result<(bool, String), String> { + let mut log = String::new(); + + // List conflicted files. + let ls = Command::new("git") + .args(["diff", "--name-only", "--diff-filter=U"]) + .current_dir(worktree) + .output() + .map_err(|e| format!("Failed to list conflicted files: {e}"))?; + + let file_list = String::from_utf8_lossy(&ls.stdout); + let conflicted_files: Vec<&str> = + file_list.lines().filter(|l| !l.is_empty()).collect(); + + if conflicted_files.is_empty() { + log.push_str("No conflicted files found (conflict may be index-only).\n"); + return Ok((false, log)); + } + + log.push_str(&format!( + "Conflicted files ({}):\n", + conflicted_files.len() + )); + for f in &conflicted_files { + log.push_str(&format!(" - {f}\n")); + } + + // First pass: check that all files can be resolved before touching any. + let mut resolutions: Vec<(&str, String)> = Vec::new(); + for file in &conflicted_files { + let file_path = worktree.join(file); + let content = std::fs::read_to_string(&file_path) + .map_err(|e| format!("Failed to read conflicted file '{file}': {e}"))?; + + match resolve_simple_conflicts(&content) { + Some(resolved) => { + log.push_str(&format!(" [auto-resolve] {file}\n")); + resolutions.push((file, resolved)); + } + None => { + log.push_str(&format!( + " [COMPLEX — cannot auto-resolve] {file}\n" + )); + return Ok((false, log)); + } + } + } + + // Second pass: write resolved content and stage. 
+ for (file, resolved) in &resolutions { + let file_path = worktree.join(file); + std::fs::write(&file_path, resolved) + .map_err(|e| format!("Failed to write resolved file '{file}': {e}"))?; + + let add = Command::new("git") + .args(["add", file]) + .current_dir(worktree) + .output() + .map_err(|e| format!("Failed to stage resolved file '{file}': {e}"))?; + if !add.status.success() { + return Err(format!( + "git add failed for '{file}': {}", + String::from_utf8_lossy(&add.stderr) + )); + } + } + + Ok((true, log)) +} + +/// Try to resolve simple additive merge conflicts in a file's content. +/// +/// A conflict is considered "simple additive" when both sides add new content +/// at the same location without modifying existing lines. In that case we keep +/// both additions (ours first, then theirs). +/// +/// Returns `Some(resolved)` if all conflict blocks in the file are simple, or +/// `None` if any block is too complex to auto-resolve. +fn resolve_simple_conflicts(content: &str) -> Option { + // Quick check: if there are no conflict markers at all, nothing to do. + if !content.contains("<<<<<<<") { + return Some(content.to_string()); + } + + let mut result = String::new(); + let mut lines = content.lines().peekable(); + + while let Some(line) = lines.next() { + if line.starts_with("<<<<<<<") { + // Collect the "ours" side (between <<<<<<< and =======). + let mut ours = Vec::new(); + let mut found_separator = false; + for next_line in lines.by_ref() { + if next_line.starts_with("=======") { + found_separator = true; + break; + } + ours.push(next_line); + } + if !found_separator { + return None; // Malformed conflict block. + } + + // Collect the "theirs" side (between ======= and >>>>>>>). + let mut theirs = Vec::new(); + let mut found_end = false; + for next_line in lines.by_ref() { + if next_line.starts_with(">>>>>>>") { + found_end = true; + break; + } + theirs.push(next_line); + } + if !found_end { + return None; // Malformed conflict block. 
+            }
+
+            // Skip only when BOTH sides are empty (nothing to add). A one-sided
+            // conflict keeps the non-empty side, via the append loops below.
+            if ours.is_empty() && theirs.is_empty() {
+                // Both empty — nothing to add, skip.
+                continue;
+            }
+
+            // Accept both: ours first, then theirs.
+            for l in &ours {
+                result.push_str(l);
+                result.push('\n');
+            }
+            for l in &theirs {
+                result.push_str(l);
+                result.push('\n');
+            }
+        } else {
+            result.push_str(line);
+            result.push('\n');
+        }
+    }
+
+    // Preserve trailing newline consistency: if original ended without
+    // newline, strip the trailing one we added.
+    if !content.ends_with('\n') && result.ends_with('\n') {
+        result.pop();
+    }
+
+    Some(result)
+}
+
+/// Run quality gates in the merge workspace before merging to master.
+///
+/// Gates run against the squash commit so failures abort before master moves.
+///
+/// When `script/test` is present it is the single source of truth and is the
+/// only gate that runs — it is expected to cover the full suite (clippy, unit
+/// tests, frontend tests, etc.). When `script/test` is absent the function
+/// falls back to `cargo clippy` + `cargo nextest`/`cargo test` for Rust
+/// projects. No hardcoded references to pnpm or frontend/ are used.
+///
+/// Returns `(gates_passed, combined_output)`.
+fn run_merge_quality_gates(project_root: &Path) -> Result<(bool, String), String> {
+    let mut all_output = String::new();
+    let mut all_passed = true;
+
+    let script_test = project_root.join("script").join("test");
+
+    if script_test.exists() {
+        // Delegate entirely to script/test — it is the single source of truth
+        // for the full test suite (clippy, cargo tests, frontend builds, etc.).
+        let (success, output) = run_project_tests(project_root)?;
+        all_output.push_str(&output);
+        if !success {
+            all_passed = false;
+        }
+        return Ok((all_passed, all_output));
+    }
+
+    // No script/test — fall back to cargo gates for Rust projects.
+ let cargo_toml = project_root.join("Cargo.toml"); + if cargo_toml.exists() { + let clippy = Command::new("cargo") + .args(["clippy", "--all-targets", "--all-features"]) + .current_dir(project_root) + .output() + .map_err(|e| format!("Failed to run cargo clippy: {e}"))?; + + all_output.push_str("=== cargo clippy ===\n"); + let clippy_out = format!( + "{}{}", + String::from_utf8_lossy(&clippy.stdout), + String::from_utf8_lossy(&clippy.stderr) + ); + all_output.push_str(&clippy_out); + all_output.push('\n'); + + if !clippy.status.success() { + all_passed = false; + } + + let (test_success, test_out) = run_project_tests(project_root)?; + all_output.push_str(&test_out); + if !test_success { + all_passed = false; + } + } + + Ok((all_passed, all_output)) +} + +#[cfg(test)] +mod tests { + use super::*; + use std::process::Command; + + fn init_git_repo(repo: &std::path::Path) { + Command::new("git") + .args(["init"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(repo) + .output() + .unwrap(); + } + + // ── resolve_simple_conflicts unit tests ────────────────────────────────── + + #[test] + fn resolve_simple_conflicts_no_markers() { + let input = "line 1\nline 2\nline 3\n"; + let result = resolve_simple_conflicts(input); + assert_eq!(result, Some(input.to_string())); + } + + #[test] + fn resolve_simple_conflicts_additive() { + let input = "\ +before +<<<<<<< HEAD +ours line 1 +ours line 2 +======= +theirs line 1 +theirs line 2 +>>>>>>> feature +after +"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!( + !result.contains("<<<<<<<"), + "should not contain conflict markers" + ); + assert!( + !result.contains(">>>>>>>"), + "should not contain 
conflict markers" + ); + assert!(result.contains("ours line 1")); + assert!(result.contains("ours line 2")); + assert!(result.contains("theirs line 1")); + assert!(result.contains("theirs line 2")); + assert!(result.contains("before")); + assert!(result.contains("after")); + // Ours comes before theirs + let ours_pos = result.find("ours line 1").unwrap(); + let theirs_pos = result.find("theirs line 1").unwrap(); + assert!( + ours_pos < theirs_pos, + "ours should come before theirs" + ); + } + + #[test] + fn resolve_simple_conflicts_multiple_blocks() { + let input = "\ +header +<<<<<<< HEAD +ours block 1 +======= +theirs block 1 +>>>>>>> feature +middle +<<<<<<< HEAD +ours block 2 +======= +theirs block 2 +>>>>>>> feature +footer +"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!(!result.contains("<<<<<<<")); + assert!(result.contains("ours block 1")); + assert!(result.contains("theirs block 1")); + assert!(result.contains("ours block 2")); + assert!(result.contains("theirs block 2")); + assert!(result.contains("header")); + assert!(result.contains("middle")); + assert!(result.contains("footer")); + } + + #[test] + fn resolve_simple_conflicts_malformed_no_separator() { + let input = "\ +<<<<<<< HEAD +ours +>>>>>>> feature +"; + let result = resolve_simple_conflicts(input); + assert!(result.is_none(), "malformed conflict (no separator) should return None"); + } + + #[test] + fn resolve_simple_conflicts_malformed_no_end() { + let input = "\ +<<<<<<< HEAD +ours +======= +theirs +"; + let result = resolve_simple_conflicts(input); + assert!(result.is_none(), "malformed conflict (no end marker) should return None"); + } + + #[test] + fn resolve_simple_conflicts_preserves_no_trailing_newline() { + let input = "before\n<<<<<<< HEAD\nours\n=======\ntheirs\n>>>>>>> branch\nafter"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!(!result.ends_with('\n'), "should not add trailing newline if original lacks one"); + 
assert!(result.ends_with("after")); + } + + // ── Additional resolve_simple_conflicts tests (real conflict markers) ──── + // + // AC1: The mergemaster reads both sides of the conflict and produces a + // resolved file that preserves changes from both branches. + + #[test] + fn resolve_simple_conflicts_real_markers_additive_both_sides() { + // The most common real-world case: both branches add different content + // (e.g. different functions) to the same region of a file. + let input = "// shared code\n\ +<<<<<<< HEAD\n\ +fn master_fn() { println!(\"from master\"); }\n\ +=======\n\ +fn feature_fn() { println!(\"from feature\"); }\n\ +>>>>>>> feature/story-42\n\ +// end\n"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!(!result.contains("<<<<<<<"), "no conflict markers in output"); + assert!(!result.contains(">>>>>>>"), "no conflict markers in output"); + assert!(!result.contains("======="), "no separator in output"); + assert!(result.contains("fn master_fn()"), "master (ours) side must be preserved"); + assert!(result.contains("fn feature_fn()"), "feature (theirs) side must be preserved"); + assert!(result.contains("// shared code"), "context before conflict preserved"); + assert!(result.contains("// end"), "context after conflict preserved"); + // ours (master) must appear before theirs (feature) + assert!( + result.find("master_fn").unwrap() < result.find("feature_fn").unwrap(), + "master side must appear before feature side" + ); + } + + #[test] + fn resolve_simple_conflicts_real_markers_multiple_conflict_blocks() { + // Two separate conflict blocks in the same file — as happens when two + // feature branches both add imports AND test suites to the same file. 
+ let input = "// imports\n\ +<<<<<<< HEAD\n\ +import { A } from './a';\n\ +=======\n\ +import { B } from './b';\n\ +>>>>>>> feature/story-43\n\ +// implementation\n\ +<<<<<<< HEAD\n\ +export function masterImpl() {}\n\ +=======\n\ +export function featureImpl() {}\n\ +>>>>>>> feature/story-43\n"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!(!result.contains("<<<<<<<"), "no conflict markers in output"); + assert!(result.contains("import { A }"), "first block ours preserved"); + assert!(result.contains("import { B }"), "first block theirs preserved"); + assert!(result.contains("masterImpl"), "second block ours preserved"); + assert!(result.contains("featureImpl"), "second block theirs preserved"); + assert!(result.contains("// imports"), "surrounding context preserved"); + assert!(result.contains("// implementation"), "surrounding context preserved"); + } + + #[test] + fn resolve_simple_conflicts_real_markers_one_side_empty() { + // Ours (master) has no content in the conflicted region; theirs (feature) + // adds new content. Resolution: keep theirs. + let input = "before\n\ +<<<<<<< HEAD\n\ +=======\n\ +feature_addition\n\ +>>>>>>> feature/story-44\n\ +after\n"; + let result = resolve_simple_conflicts(input).unwrap(); + assert!(!result.contains("<<<<<<<"), "no conflict markers"); + assert!(result.contains("feature_addition"), "non-empty side preserved"); + assert!(result.contains("before"), "context preserved"); + assert!(result.contains("after"), "context preserved"); + } + + // ── merge-queue squash-merge integration tests ────────────────────────── + + #[tokio::test] + async fn squash_merge_uses_merge_queue_no_conflict_markers_on_master() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create a file that will be conflicted on master. 
+ fs::write(repo.join("shared.txt"), "line 1\nline 2\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "initial shared file"]) + .current_dir(repo) + .output() + .unwrap(); + + // Create a feature branch that modifies the file. + Command::new("git") + .args(["checkout", "-b", "feature/story-conflict_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("shared.txt"), "line 1\nline 2\nfeature addition\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature: add line"]) + .current_dir(repo) + .output() + .unwrap(); + + // Switch to master and make a conflicting change. + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("shared.txt"), "line 1\nline 2\nmaster addition\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "master: add line"]) + .current_dir(repo) + .output() + .unwrap(); + + // Run the squash merge. + let result = run_squash_merge(repo, "feature/story-conflict_test", "conflict_test") + .unwrap(); + + // Master should NEVER contain conflict markers, regardless of outcome. + let master_content = fs::read_to_string(repo.join("shared.txt")).unwrap(); + assert!( + !master_content.contains("<<<<<<<"), + "master must never contain conflict markers, got:\n{master_content}" + ); + assert!( + !master_content.contains(">>>>>>>"), + "master must never contain conflict markers, got:\n{master_content}" + ); + + // The merge should have had conflicts. + assert!(result.had_conflicts, "should detect conflicts"); + + // Conflicts should have been auto-resolved (both are simple additions). 
+ if result.conflicts_resolved { + assert!(result.success, "auto-resolved merge should succeed"); + assert!( + master_content.contains("master addition"), + "master side should be present" + ); + assert!( + master_content.contains("feature addition"), + "feature side should be present" + ); + } + + // Verify no leftover merge-queue branch. + let branches = Command::new("git") + .args(["branch", "--list", "merge-queue/*"]) + .current_dir(repo) + .output() + .unwrap(); + let branch_list = String::from_utf8_lossy(&branches.stdout); + assert!( + branch_list.trim().is_empty(), + "merge-queue branch should be cleaned up, got: {branch_list}" + ); + + // Verify no leftover merge workspace directory. + assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace should be cleaned up" + ); + } + + #[tokio::test] + async fn squash_merge_clean_merge_succeeds() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create feature branch with a new file. + Command::new("git") + .args(["checkout", "-b", "feature/story-clean_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("new_file.txt"), "new content").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add new file"]) + .current_dir(repo) + .output() + .unwrap(); + + // Switch back to master. 
+ Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + let result = run_squash_merge(repo, "feature/story-clean_test", "clean_test") + .unwrap(); + + assert!(result.success, "clean merge should succeed"); + assert!(!result.had_conflicts, "clean merge should have no conflicts"); + assert!(!result.conflicts_resolved, "no conflicts means nothing to resolve"); + assert!( + repo.join("new_file.txt").exists(), + "merged file should exist on master" + ); + } + + #[tokio::test] + async fn squash_merge_nonexistent_branch_fails() { + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + let result = run_squash_merge(repo, "feature/story-nope", "nope") + .unwrap(); + + assert!(!result.success, "merge of nonexistent branch should fail"); + } + + /// Verifies that `run_squash_merge` succeeds even when master has advanced + /// with unrelated commits after the merge-queue branch was created (the race + /// condition that previously caused fast-forward to fail). + #[tokio::test] + async fn squash_merge_succeeds_when_master_diverges() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create an initial file on master. + fs::write(repo.join("base.txt"), "base content\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "initial"]) + .current_dir(repo) + .output() + .unwrap(); + + // Create a feature branch with a new file (clean merge, no conflicts). 
+ Command::new("git") + .args(["checkout", "-b", "feature/story-diverge_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("feature.txt"), "feature content\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature: add file"]) + .current_dir(repo) + .output() + .unwrap(); + + // Switch back to master and simulate a filesystem watcher commit + // (e.g. a pipeline file move) that advances master beyond the point + // where the merge-queue branch will be created. + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + let sk_dir = repo.join(".story_kit/work/4_merge"); + fs::create_dir_all(&sk_dir).unwrap(); + fs::write( + sk_dir.join("diverge_test.md"), + "---\nname: test\n---\n", + ) + .unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "story-kit: queue diverge_test for merge"]) + .current_dir(repo) + .output() + .unwrap(); + + // Run the squash merge. With the old fast-forward approach, this + // would fail because master diverged. With cherry-pick, it succeeds. + let result = + run_squash_merge(repo, "feature/story-diverge_test", "diverge_test").unwrap(); + + assert!( + result.success, + "squash merge should succeed despite diverged master: {}", + result.output + ); + assert!( + !result.had_conflicts, + "no conflicts expected" + ); + + // Verify the feature file landed on master. + assert!( + repo.join("feature.txt").exists(), + "feature file should be on master after cherry-pick" + ); + let feature_content = fs::read_to_string(repo.join("feature.txt")).unwrap(); + assert_eq!(feature_content, "feature content\n"); + + // Verify the watcher commit's file is still present. 
+ assert!( + sk_dir.join("diverge_test.md").exists(), + "watcher-committed file should still be on master" + ); + + // Verify cleanup: no merge-queue branch, no merge workspace. + let branches = Command::new("git") + .args(["branch", "--list", "merge-queue/*"]) + .current_dir(repo) + .output() + .unwrap(); + let branch_list = String::from_utf8_lossy(&branches.stdout); + assert!( + branch_list.trim().is_empty(), + "merge-queue branch should be cleaned up, got: {branch_list}" + ); + assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace should be cleaned up" + ); + } + + /// Bug 226: Verifies that `run_squash_merge` returns `success: false` when + /// the feature branch has no changes beyond what's already on master (empty diff). + #[tokio::test] + async fn squash_merge_empty_diff_fails() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create a file on master. + fs::write(repo.join("code.txt"), "content\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add code"]) + .current_dir(repo) + .output() + .unwrap(); + + // Create a feature branch with NO additional changes (empty diff). + Command::new("git") + .args(["checkout", "-b", "feature/story-empty_test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + let result = + run_squash_merge(repo, "feature/story-empty_test", "empty_test").unwrap(); + + // Bug 226: empty diff must NOT be treated as success. + assert!( + !result.success, + "empty diff merge must fail, not silently succeed: {}", + result.output + ); + + // Cleanup should still happen. 
+ assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace should be cleaned up" + ); + } + + /// Bug 226: Verifies that `run_squash_merge` fails when the feature branch + /// only contains .story_kit/ file moves with no real code changes. + #[tokio::test] + async fn squash_merge_md_only_changes_fails() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create a feature branch that only moves a .story_kit/ file. + Command::new("git") + .args(["checkout", "-b", "feature/story-md_only_test"]) + .current_dir(repo) + .output() + .unwrap(); + let sk_dir = repo.join(".story_kit/work/2_current"); + fs::create_dir_all(&sk_dir).unwrap(); + fs::write( + sk_dir.join("md_only_test.md"), + "---\nname: Test\n---\n", + ) + .unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "move story file"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + let result = + run_squash_merge(repo, "feature/story-md_only_test", "md_only_test").unwrap(); + + // The squash merge will commit the .story_kit/ file, but should fail because + // there are no code changes outside .story_kit/. + assert!( + !result.success, + "merge with only .story_kit/ changes must fail: {}", + result.output + ); + + // Cleanup should still happen. + assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace should be cleaned up" + ); + } + + // ── AC4: additive multi-branch conflict auto-resolution ──────────────── + // + // Verifies that when two feature branches both add different code to the + // same region of a file (the most common conflict pattern in this project), + // the mergemaster auto-resolves the conflict and preserves both additions. 
+ #[tokio::test] + async fn squash_merge_additive_conflict_both_additions_preserved() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Initial file with a shared base. + fs::write(repo.join("module.rs"), "// module\npub fn existing() {}\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "initial module"]) + .current_dir(repo) + .output() + .unwrap(); + + // Feature branch: appends feature_fn to the file. + Command::new("git") + .args(["checkout", "-b", "feature/story-238_additive"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write( + repo.join("module.rs"), + "// module\npub fn existing() {}\npub fn feature_fn() {}\n", + ) + .unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add feature_fn"]) + .current_dir(repo) + .output() + .unwrap(); + + // Simulate another branch already merged into master: appends master_fn. + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write( + repo.join("module.rs"), + "// module\npub fn existing() {}\npub fn master_fn() {}\n", + ) + .unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add master_fn (another branch merged)"]) + .current_dir(repo) + .output() + .unwrap(); + + // Squash-merge the feature branch — conflicts because both appended to the same location. + let result = + run_squash_merge(repo, "feature/story-238_additive", "238_additive").unwrap(); + + // Conflict must be detected and auto-resolved. 
+ assert!(result.had_conflicts, "additive conflict should be detected"); + assert!( + result.conflicts_resolved, + "additive conflict must be auto-resolved; output:\n{}", + result.output + ); + + // Master must contain both additions without conflict markers. + let content = fs::read_to_string(repo.join("module.rs")).unwrap(); + assert!(!content.contains("<<<<<<<"), "master must not contain conflict markers"); + assert!(!content.contains(">>>>>>>"), "master must not contain conflict markers"); + assert!( + content.contains("feature_fn"), + "feature branch addition must be preserved on master" + ); + assert!( + content.contains("master_fn"), + "master branch addition must be preserved on master" + ); + assert!(content.contains("existing"), "original function must be preserved"); + + // Cleanup: no leftover merge-queue branch or workspace. + let branches = Command::new("git") + .args(["branch", "--list", "merge-queue/*"]) + .current_dir(repo) + .output() + .unwrap(); + assert!( + String::from_utf8_lossy(&branches.stdout).trim().is_empty(), + "merge-queue branch must be cleaned up" + ); + assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace must be cleaned up" + ); + } + + // ── AC3: quality gates fail after conflict resolution ───────────────── + // + // Verifies that when conflicts are auto-resolved but the resulting code + // fails quality gates, the merge is reported as failed (not merged to master). + #[tokio::test] + async fn squash_merge_conflict_resolved_but_gates_fail_reported_as_failure() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Add a script/test that always fails (quality gate). This must be on + // master before the feature branch forks so it doesn't cause its own conflict. 
+ let script_dir = repo.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + fs::write(script_dir.join("test"), "#!/bin/sh\nexit 1\n").unwrap(); + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions( + script_dir.join("test"), + std::fs::Permissions::from_mode(0o755), + ) + .unwrap(); + } + fs::write(repo.join("code.txt"), "// base\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "initial with failing script/test"]) + .current_dir(repo) + .output() + .unwrap(); + + // Feature branch: appends feature content (creates future conflict point). + Command::new("git") + .args(["checkout", "-b", "feature/story-238_gates_fail"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("code.txt"), "// base\nfeature_addition\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature addition"]) + .current_dir(repo) + .output() + .unwrap(); + + // Master: append different content at same location (creates conflict). + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("code.txt"), "// base\nmaster_addition\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "master addition"]) + .current_dir(repo) + .output() + .unwrap(); + + // Squash-merge: conflict detected → auto-resolved → quality gates run → fail. 
+ let result = + run_squash_merge(repo, "feature/story-238_gates_fail", "238_gates_fail").unwrap(); + + assert!(result.had_conflicts, "conflict must be detected"); + assert!(result.conflicts_resolved, "additive conflict must be auto-resolved"); + assert!(!result.gates_passed, "quality gates must fail (script/test exits 1)"); + assert!(!result.success, "merge must be reported as failed when gates fail"); + assert!( + !result.output.is_empty(), + "output must contain gate failure details" + ); + + // Master must NOT have been updated (cherry-pick was blocked by gate failure). + let content = fs::read_to_string(repo.join("code.txt")).unwrap(); + assert!(!content.contains("<<<<<<<"), "master must not contain conflict markers"); + // master_addition was the last commit on master; feature_addition must NOT be there. + assert!( + !content.contains("feature_addition"), + "feature code must not land on master when gates fail" + ); + + // Cleanup must still happen. + assert!( + !repo.join(".story_kit/merge_workspace").exists(), + "merge workspace must be cleaned up even on gate failure" + ); + } + + /// Verifies that stale merge_workspace directories from previous failed + /// merges are cleaned up before a new merge attempt. + #[tokio::test] + async fn squash_merge_cleans_up_stale_workspace() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Create a feature branch with a file. 
+ Command::new("git") + .args(["checkout", "-b", "feature/story-stale_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("stale.txt"), "content\n").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature: stale test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + // Simulate a stale merge workspace left from a previous failed merge. + let stale_ws = repo.join(".story_kit/merge_workspace"); + fs::create_dir_all(&stale_ws).unwrap(); + fs::write(stale_ws.join("leftover.txt"), "stale").unwrap(); + + // Run the merge — it should clean up the stale workspace first. + let result = + run_squash_merge(repo, "feature/story-stale_test", "stale_test").unwrap(); + + assert!( + result.success, + "merge should succeed after cleaning up stale workspace: {}", + result.output + ); + assert!( + !stale_ws.exists(), + "stale merge workspace should be cleaned up" + ); + } + + // ── story 216: merge worktree uses project.toml component setup ─────────── + + /// When the project has `[[component]]` entries in `.story_kit/project.toml`, + /// `run_squash_merge` must run their setup commands in the merge worktree + /// before quality gates — matching the behaviour of `create_worktree`. + #[cfg(unix)] + #[test] + fn squash_merge_runs_component_setup_from_project_toml() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // Add a .story_kit/project.toml with a component whose setup writes a + // sentinel file so we can confirm the command ran. 
+ let sk_dir = repo.join(".story_kit"); + fs::create_dir_all(&sk_dir).unwrap(); + fs::write( + sk_dir.join("project.toml"), + "[[component]]\nname = \"sentinel\"\npath = \".\"\nsetup = [\"touch setup_ran.txt\"]\n", + ) + .unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "add project.toml with component setup"]) + .current_dir(repo) + .output() + .unwrap(); + + // Create feature branch with a change. + Command::new("git") + .args(["checkout", "-b", "feature/story-216_setup_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("feature.txt"), "change").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature work"]) + .current_dir(repo) + .output() + .unwrap(); + + // Switch back to master. + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + let result = + run_squash_merge(repo, "feature/story-216_setup_test", "216_setup_test").unwrap(); + + // The output must mention component setup, proving the new code path ran. + assert!( + result.output.contains("component setup"), + "merge output must mention component setup when project.toml has components, got:\n{}", + result.output + ); + // The sentinel command must appear in the output. + assert!( + result.output.contains("sentinel"), + "merge output must name the component, got:\n{}", + result.output + ); + } + + /// When there are no `[[component]]` entries in project.toml (or no + /// project.toml at all), `run_squash_merge` must succeed without trying to + /// run any setup. No hardcoded pnpm or frontend/ references should appear. 
+ #[cfg(unix)] + #[test] + fn squash_merge_succeeds_without_components_in_project_toml() { + use std::fs; + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + // No .story_kit/project.toml — no component setup. + fs::write(repo.join("file.txt"), "initial").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "initial commit"]) + .current_dir(repo) + .output() + .unwrap(); + + Command::new("git") + .args(["checkout", "-b", "feature/story-216_no_components"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("change.txt"), "change").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature"]) + .current_dir(repo) + .output() + .unwrap(); + + Command::new("git") + .args(["checkout", "master"]) + .current_dir(repo) + .output() + .unwrap(); + + let result = + run_squash_merge(repo, "feature/story-216_no_components", "216_no_components") + .unwrap(); + + // No pnpm or frontend references should appear in the output. 
+        assert!(
+            !result.output.contains("pnpm"),
+            "output must not mention pnpm, got:\n{}",
+            result.output
+        );
+        assert!(
+            !result.output.contains("frontend/"),
+            "output must not mention frontend/, got:\n{}",
+            result.output
+        );
+    }
+}
diff --git a/server/src/agents/mod.rs b/server/src/agents/mod.rs
new file mode 100644
index 0000000..fee1216
--- /dev/null
+++ b/server/src/agents/mod.rs
@@ -0,0 +1,181 @@
+pub mod gates;
+pub mod lifecycle;
+pub mod merge;
+mod pool;
+mod pty;
+
+use crate::config::AgentConfig;
+use serde::Serialize;
+
+pub use lifecycle::{
+    close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived,
+    move_story_to_merge, move_story_to_qa,
+};
+pub use pool::AgentPool;
+
+/// Events emitted during server startup reconciliation to broadcast real-time
+/// progress to connected WebSocket clients.
+#[derive(Debug, Clone, Serialize)]
+pub struct ReconciliationEvent {
+    /// The story being reconciled, or empty string for the overall "done" event.
+    pub story_id: String,
+    /// Coarse status: "checking", "gates_running", "advanced", "skipped", "failed", "done"
+    pub status: String,
+    /// Human-readable details.
+    pub message: String,
+}
+
+/// Events streamed from a running agent to SSE clients.
+#[derive(Debug, Clone, Serialize)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum AgentEvent {
+    /// Agent status changed.
+    Status {
+        story_id: String,
+        agent_name: String,
+        status: String,
+    },
+    /// Raw text output from the agent process.
+    Output {
+        story_id: String,
+        agent_name: String,
+        text: String,
+    },
+    /// Agent produced a JSON event from `--output-format stream-json`.
+    AgentJson {
+        story_id: String,
+        agent_name: String,
+        data: serde_json::Value,
+    },
+    /// Agent finished.
+    Done {
+        story_id: String,
+        agent_name: String,
+        session_id: Option<String>,
+    },
+    /// Agent errored.
+    Error {
+        story_id: String,
+        agent_name: String,
+        message: String,
+    },
+    /// Thinking tokens from an extended-thinking block.
+ Thinking { + story_id: String, + agent_name: String, + text: String, + }, +} + +#[derive(Debug, Clone, Serialize, PartialEq)] +#[serde(rename_all = "snake_case")] +pub enum AgentStatus { + Pending, + Running, + Completed, + Failed, +} + +impl std::fmt::Display for AgentStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Pending => write!(f, "pending"), + Self::Running => write!(f, "running"), + Self::Completed => write!(f, "completed"), + Self::Failed => write!(f, "failed"), + } + } +} + +/// Pipeline stages for automatic story advancement. +#[derive(Debug, Clone, PartialEq)] +pub enum PipelineStage { + /// Coding agents (coder-1, coder-2, etc.) + Coder, + /// QA review agent + Qa, + /// Mergemaster agent + Mergemaster, + /// Supervisors and unknown agents — no automatic advancement. + Other, +} + +/// Determine the pipeline stage from an agent name. +pub fn pipeline_stage(agent_name: &str) -> PipelineStage { + match agent_name { + "qa" => PipelineStage::Qa, + "mergemaster" => PipelineStage::Mergemaster, + name if name.starts_with("coder") => PipelineStage::Coder, + _ => PipelineStage::Other, + } +} + +/// Determine the pipeline stage for a configured agent. +/// +/// Prefers the explicit `stage` config field (added in Bug 150) over the +/// legacy name-based heuristic so that agents with non-standard names +/// (e.g. `qa-2`, `coder-opus`) are assigned to the correct stage. +pub(crate) fn agent_config_stage(cfg: &AgentConfig) -> PipelineStage { + match cfg.stage.as_deref() { + Some("coder") => PipelineStage::Coder, + Some("qa") => PipelineStage::Qa, + Some("mergemaster") => PipelineStage::Mergemaster, + Some(_) => PipelineStage::Other, + None => pipeline_stage(&cfg.name), + } +} + +/// Completion report produced when acceptance gates are run. +/// +/// Created automatically by the server when an agent process exits normally, +/// or via the internal `report_completion` method. 
+#[derive(Debug, Serialize, Clone)]
+pub struct CompletionReport {
+    pub summary: String,
+    pub gates_passed: bool,
+    pub gate_output: String,
+}
+
+#[derive(Debug, Serialize, Clone)]
+pub struct AgentInfo {
+    pub story_id: String,
+    pub agent_name: String,
+    pub status: AgentStatus,
+    pub session_id: Option<String>,
+    pub worktree_path: Option<String>,
+    pub base_branch: Option<String>,
+    pub completion: Option<CompletionReport>,
+    /// UUID identifying the persistent log file for this session.
+    pub log_session_id: Option<String>,
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // ── pipeline_stage tests ──────────────────────────────────────────────────
+
+    #[test]
+    fn pipeline_stage_detects_coders() {
+        assert_eq!(pipeline_stage("coder-1"), PipelineStage::Coder);
+        assert_eq!(pipeline_stage("coder-2"), PipelineStage::Coder);
+        assert_eq!(pipeline_stage("coder-3"), PipelineStage::Coder);
+    }
+
+    #[test]
+    fn pipeline_stage_detects_qa() {
+        assert_eq!(pipeline_stage("qa"), PipelineStage::Qa);
+    }
+
+    #[test]
+    fn pipeline_stage_detects_mergemaster() {
+        assert_eq!(pipeline_stage("mergemaster"), PipelineStage::Mergemaster);
+    }
+
+    #[test]
+    fn pipeline_stage_supervisor_is_other() {
+        assert_eq!(pipeline_stage("supervisor"), PipelineStage::Other);
+        assert_eq!(pipeline_stage("default"), PipelineStage::Other);
+        assert_eq!(pipeline_stage("unknown"), PipelineStage::Other);
+    }
+}
diff --git a/server/src/agents.rs b/server/src/agents/pool.rs
similarity index 58%
rename from server/src/agents.rs
rename to server/src/agents/pool.rs
index dd135c0..092845b 100644
--- a/server/src/agents.rs
+++ b/server/src/agents/pool.rs
@@ -1,30 +1,20 @@
 use crate::agent_log::AgentLogWriter;
+use crate::config::ProjectConfig;
 use crate::io::watcher::WatcherEvent;
 use crate::slog;
 use crate::slog_error;
 use crate::slog_warn;
-use crate::config::{AgentConfig, ProjectConfig};
 use crate::worktree::{self, WorktreeInfo};
-use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system};
-use serde::Serialize;
+use 
portable_pty::ChildKiller; use std::collections::HashMap; -use std::io::{BufRead, BufReader}; use std::path::{Path, PathBuf}; -use std::process::Command; use std::sync::{Arc, Mutex}; use tokio::sync::broadcast; -/// Events emitted during server startup reconciliation to broadcast real-time -/// progress to connected WebSocket clients. -#[derive(Debug, Clone, Serialize)] -pub struct ReconciliationEvent { - /// The story being reconciled, or empty string for the overall "done" event. - pub story_id: String, - /// Coarse status: "checking", "gates_running", "advanced", "skipped", "failed", "done" - pub status: String, - /// Human-readable details. - pub message: String, -} +use super::{ + AgentEvent, AgentInfo, AgentStatus, CompletionReport, PipelineStage, ReconciliationEvent, + agent_config_stage, pipeline_stage, +}; /// Build the composite key used to track agents in the pool. fn composite_key(story_id: &str, agent_name: &str) -> String { @@ -78,130 +68,6 @@ impl Drop for PendingGuard { } } -/// Events streamed from a running agent to SSE clients. -#[derive(Debug, Clone, Serialize)] -#[serde(tag = "type", rename_all = "snake_case")] -pub enum AgentEvent { - /// Agent status changed. - Status { - story_id: String, - agent_name: String, - status: String, - }, - /// Raw text output from the agent process. - Output { - story_id: String, - agent_name: String, - text: String, - }, - /// Agent produced a JSON event from `--output-format stream-json`. - AgentJson { - story_id: String, - agent_name: String, - data: serde_json::Value, - }, - /// Agent finished. - Done { - story_id: String, - agent_name: String, - session_id: Option, - }, - /// Agent errored. - Error { - story_id: String, - agent_name: String, - message: String, - }, - /// Thinking tokens from an extended-thinking block. 
- Thinking { - story_id: String, - agent_name: String, - text: String, - }, -} - -#[derive(Debug, Clone, Serialize, PartialEq)] -#[serde(rename_all = "snake_case")] -pub enum AgentStatus { - Pending, - Running, - Completed, - Failed, -} - -impl std::fmt::Display for AgentStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::Pending => write!(f, "pending"), - Self::Running => write!(f, "running"), - Self::Completed => write!(f, "completed"), - Self::Failed => write!(f, "failed"), - } - } -} - -/// Pipeline stages for automatic story advancement. -#[derive(Debug, Clone, PartialEq)] -pub enum PipelineStage { - /// Coding agents (coder-1, coder-2, etc.) - Coder, - /// QA review agent - Qa, - /// Mergemaster agent - Mergemaster, - /// Supervisors and unknown agents — no automatic advancement. - Other, -} - -/// Determine the pipeline stage from an agent name. -pub fn pipeline_stage(agent_name: &str) -> PipelineStage { - match agent_name { - "qa" => PipelineStage::Qa, - "mergemaster" => PipelineStage::Mergemaster, - name if name.starts_with("coder") => PipelineStage::Coder, - _ => PipelineStage::Other, - } -} - -/// Determine the pipeline stage for a configured agent. -/// -/// Prefers the explicit `stage` config field (added in Bug 150) over the -/// legacy name-based heuristic so that agents with non-standard names -/// (e.g. `qa-2`, `coder-opus`) are assigned to the correct stage. -fn agent_config_stage(cfg: &AgentConfig) -> PipelineStage { - match cfg.stage.as_deref() { - Some("coder") => PipelineStage::Coder, - Some("qa") => PipelineStage::Qa, - Some("mergemaster") => PipelineStage::Mergemaster, - Some(_) => PipelineStage::Other, - None => pipeline_stage(&cfg.name), - } -} - -/// Completion report produced when acceptance gates are run. -/// -/// Created automatically by the server when an agent process exits normally, -/// or via the internal `report_completion` method. 
-#[derive(Debug, Serialize, Clone)]
-pub struct CompletionReport {
-    pub summary: String,
-    pub gates_passed: bool,
-    pub gate_output: String,
-}
-
-#[derive(Debug, Serialize, Clone)]
-pub struct AgentInfo {
-    pub story_id: String,
-    pub agent_name: String,
-    pub status: AgentStatus,
-    pub session_id: Option<String>,
-    pub worktree_path: Option<String>,
-    pub base_branch: Option<String>,
-    pub completion: Option<CompletionReport>,
-    /// UUID identifying the persistent log file for this session.
-    pub log_session_id: Option<String>,
-}
-
 struct StoryAgent {
     agent_name: String,
     status: AgentStatus,
@@ -260,23 +126,6 @@ pub struct AgentPool {
     watcher_tx: broadcast::Sender<WatcherEvent>,
 }
 
-/// RAII guard that removes a child killer from the registry on drop.
-///
-/// This ensures the killer is always cleaned up when `run_agent_pty_blocking`
-/// returns, regardless of the exit path (normal completion, timeout, or error).
-struct ChildKillerGuard {
-    killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send>>>>,
-    key: String,
-}
-
-impl Drop for ChildKillerGuard {
-    fn drop(&mut self) {
-        if let Ok(mut killers) = self.killers.lock() {
-            killers.remove(&self.key);
-        }
-    }
-}
-
 impl AgentPool {
     pub fn new(port: u16, watcher_tx: broadcast::Sender<WatcherEvent>) -> Self {
         Self {
@@ -363,7 +212,7 @@
         // when all coders are currently busy (story 203). This is idempotent:
         // if the story is already in 2_current/ or a later stage, the call is
         // a no-op.
-        move_story_to_current(project_root, story_id)?;
+        super::lifecycle::move_story_to_current(project_root, story_id)?;
 
         // Atomically resolve agent name, check availability, and register as
         // Pending. When `agent_name` is `None` the first idle coder is
@@ -633,7 +482,7 @@
         Self::notify_agent_state_changed(&watcher_tx_clone);
 
         // Step 4: launch the agent process.
-        match run_agent_pty_streaming(
+        match super::pty::run_agent_pty_streaming(
            &sid,
             &aname,
             &command,
@@ -985,7 +834,7 @@
             slog!(
                 "[pipeline] Coder '{agent_name}' passed gates for '{story_id}'. Moving to QA."
); - if let Err(e) = move_story_to_qa(&project_root, story_id) { + if let Err(e) = super::lifecycle::move_story_to_qa(&project_root, story_id) { slog_error!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}"); return; } @@ -1023,7 +872,7 @@ impl AgentPool { let coverage_path = worktree_path.clone().unwrap_or_else(|| project_root.clone()); let cp = coverage_path.clone(); let coverage_result = - tokio::task::spawn_blocking(move || run_coverage_gate(&cp)) + tokio::task::spawn_blocking(move || super::gates::run_coverage_gate(&cp)) .await .unwrap_or_else(|e| { slog_warn!("[pipeline] Coverage gate task panicked: {e}"); @@ -1038,7 +887,7 @@ impl AgentPool { slog!( "[pipeline] QA passed gates and coverage for '{story_id}'. Moving to merge." ); - if let Err(e) = move_story_to_merge(&project_root, story_id) { + if let Err(e) = super::lifecycle::move_story_to_merge(&project_root, story_id) { slog_error!("[pipeline] Failed to move '{story_id}' to 4_merge/: {e}"); return; } @@ -1103,7 +952,7 @@ impl AgentPool { "[pipeline] Mergemaster completed for '{story_id}'. Running post-merge tests on master." ); let root = project_root.clone(); - let test_result = tokio::task::spawn_blocking(move || run_project_tests(&root)) + let test_result = tokio::task::spawn_blocking(move || super::gates::run_project_tests(&root)) .await .unwrap_or_else(|e| { slog_warn!("[pipeline] Post-merge test task panicked: {e}"); @@ -1118,7 +967,7 @@ impl AgentPool { slog!( "[pipeline] Post-merge tests passed for '{story_id}'. Moving to done." ); - if let Err(e) = move_story_to_archived(&project_root, story_id) { + if let Err(e) = super::lifecycle::move_story_to_archived(&project_root, story_id) { slog_error!("[pipeline] Failed to move '{story_id}' to done: {e}"); } self.remove_agents_for_story(story_id); @@ -1215,9 +1064,9 @@ impl AgentPool { // Run gate checks in a blocking thread to avoid stalling the async runtime. 
         let (gates_passed, gate_output) = tokio::task::spawn_blocking(move || {
             // Step 1: Reject if worktree is dirty.
-            check_uncommitted_changes(&path)?;
+            super::gates::check_uncommitted_changes(&path)?;
             // Step 2: Run clippy + tests and return (passed, output).
-            run_acceptance_gates(&path)
+            super::gates::run_acceptance_gates(&path)
         })
         .await
         .map_err(|e| format!("Gate check task panicked: {e}"))??;
@@ -1293,7 +1142,7 @@
         &self,
         project_root: &Path,
         story_id: &str,
-    ) -> Result<MergeReport, String> {
+    ) -> Result<super::merge::MergeReport, String> {
         let branch = format!("feature/story-{story_id}");
         let wt_path = worktree::worktree_path(project_root, story_id);
         let root = project_root.to_path_buf();
@@ -1303,12 +1152,12 @@
         // Run blocking operations (git + cargo + quality gates) off the async runtime.
         // Quality gates now run inside run_squash_merge before the fast-forward.
         let merge_result =
-            tokio::task::spawn_blocking(move || run_squash_merge(&root, &br, &sid))
+            tokio::task::spawn_blocking(move || super::merge::run_squash_merge(&root, &br, &sid))
                 .await
                 .map_err(|e| format!("Merge task panicked: {e}"))??;
 
         if !merge_result.success {
-            return Ok(MergeReport {
+            return Ok(super::merge::MergeReport {
                 story_id: story_id.to_string(),
                 success: false,
                 had_conflicts: merge_result.had_conflicts,
@@ -1322,7 +1171,7 @@
         }
 
         // Merge + gates both passed — archive the story and clean up agent entries.
-        let story_archived = move_story_to_archived(project_root, story_id).is_ok();
+        let story_archived = super::lifecycle::move_story_to_archived(project_root, story_id).is_ok();
         if story_archived {
             self.remove_agents_for_story(story_id);
         }
@@ -1344,7 +1193,7 @@
         // removed the agent entry above.
self.auto_assign_available_work(project_root).await; - Ok(MergeReport { + Ok(super::merge::MergeReport { story_id: story_id.to_string(), success: true, had_conflicts: merge_result.had_conflicts, @@ -1640,7 +1489,7 @@ impl AgentPool { // Check whether the worktree has commits ahead of the base branch. let wt_path_for_check = wt_path.clone(); let has_work = tokio::task::spawn_blocking(move || { - worktree_has_committed_work(&wt_path_for_check) + super::gates::worktree_has_committed_work(&wt_path_for_check) }) .await .unwrap_or(false); @@ -1669,8 +1518,8 @@ impl AgentPool { // Run acceptance gates on the worktree. let wt_path_for_gates = wt_path.clone(); let gates_result = tokio::task::spawn_blocking(move || { - check_uncommitted_changes(&wt_path_for_gates)?; - run_acceptance_gates(&wt_path_for_gates) + super::gates::check_uncommitted_changes(&wt_path_for_gates)?; + super::gates::run_acceptance_gates(&wt_path_for_gates) }) .await; @@ -1717,7 +1566,7 @@ impl AgentPool { if stage_dir == "2_current" { // Coder stage → advance to QA. - if let Err(e) = move_story_to_qa(project_root, story_id) { + if let Err(e) = super::lifecycle::move_story_to_qa(project_root, story_id) { eprintln!("[startup:reconcile] Failed to move '{story_id}' to 3_qa/: {e}"); let _ = progress_tx.send(ReconciliationEvent { story_id: story_id.clone(), @@ -1736,7 +1585,7 @@ impl AgentPool { // QA stage → run coverage gate before advancing to merge. 
let wt_path_for_cov = wt_path.clone(); let coverage_result = - tokio::task::spawn_blocking(move || run_coverage_gate(&wt_path_for_cov)) + tokio::task::spawn_blocking(move || super::gates::run_coverage_gate(&wt_path_for_cov)) .await; let (coverage_passed, coverage_output) = match coverage_result { @@ -1766,7 +1615,7 @@ impl AgentPool { }; if coverage_passed { - if let Err(e) = move_story_to_merge(project_root, story_id) { + if let Err(e) = super::lifecycle::move_story_to_merge(project_root, story_id) { eprintln!( "[startup:reconcile] Failed to move '{story_id}' to 4_merge/: {e}" ); @@ -2138,8 +1987,8 @@ async fn run_server_owned_completion( let (gates_passed, gate_output) = if let Some(wt_path) = worktree_path { let path = wt_path; match tokio::task::spawn_blocking(move || { - check_uncommitted_changes(&path)?; - run_acceptance_gates(&path) + super::gates::check_uncommitted_changes(&path)?; + super::gates::run_acceptance_gates(&path) }) .await { @@ -2246,1395 +2095,47 @@ fn spawn_pipeline_advance( }); } -/// Result of a mergemaster merge operation. -#[derive(Debug, Serialize, Clone)] -pub struct MergeReport { - pub story_id: String, - pub success: bool, - pub had_conflicts: bool, - /// `true` when conflicts were detected but automatically resolved. - pub conflicts_resolved: bool, - pub conflict_details: Option, - pub gates_passed: bool, - pub gate_output: String, - pub worktree_cleaned_up: bool, - pub story_archived: bool, -} - -/// Determine the work item type from its ID (new naming: `{N}_{type}_{slug}`). -/// Returns "bug", "spike", or "story". -#[allow(dead_code)] -fn item_type_from_id(item_id: &str) -> &'static str { - // New format: {digits}_{type}_{slug} - let after_num = item_id.trim_start_matches(|c: char| c.is_ascii_digit()); - if after_num.starts_with("_bug_") { - "bug" - } else if after_num.starts_with("_spike_") { - "spike" - } else { - "story" - } -} - -/// Return the source directory path for a work item (always work/1_upcoming/). 
-fn item_source_dir(project_root: &Path, _item_id: &str) -> PathBuf { - project_root.join(".story_kit").join("work").join("1_upcoming") -} - -/// Return the done directory path for a work item (always work/5_done/). -fn item_archive_dir(project_root: &Path, _item_id: &str) -> PathBuf { - project_root.join(".story_kit").join("work").join("5_done") -} - -/// Move a work item (story, bug, or spike) from `work/1_upcoming/` to `work/2_current/`. -/// -/// Idempotent: if the item is already in `2_current/`, returns Ok without committing. -/// If the item is not found in `1_upcoming/`, logs a warning and returns Ok. -pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), String> { - let sk = project_root.join(".story_kit").join("work"); - let current_dir = sk.join("2_current"); - let current_path = current_dir.join(format!("{story_id}.md")); - - if current_path.exists() { - // Already in 2_current/ — idempotent, nothing to do. - return Ok(()); - } - - let source_dir = item_source_dir(project_root, story_id); - let source_path = source_dir.join(format!("{story_id}.md")); - - if !source_path.exists() { - slog!( - "[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/", - source_dir.display() - ); - return Ok(()); - } - - std::fs::create_dir_all(¤t_dir) - .map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?; - - std::fs::rename(&source_path, ¤t_path) - .map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?; - - slog!( - "[lifecycle] Moved '{story_id}' from {} to work/2_current/", - source_dir.display() - ); - - Ok(()) -} - -/// Check whether a feature branch `feature/story-{story_id}` exists and has -/// commits that are not yet on master. Returns `true` when there is unmerged -/// work, `false` when there is no branch or all its commits are already -/// reachable from master. 
-pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str) -> bool { - let branch = format!("feature/story-{story_id}"); - - // Check if the branch exists. - let branch_check = Command::new("git") - .args(["rev-parse", "--verify", &branch]) - .current_dir(project_root) - .output(); - match branch_check { - Ok(out) if out.status.success() => {} - _ => return false, // No feature branch → nothing to merge. - } - - // Check if the branch has commits not reachable from master. - let log = Command::new("git") - .args(["log", &format!("master..{branch}"), "--oneline"]) - .current_dir(project_root) - .output(); - match log { - Ok(out) => { - let stdout = String::from_utf8_lossy(&out.stdout); - !stdout.trim().is_empty() - } - Err(_) => false, - } -} - -/// Move a story from `work/2_current/` to `work/5_done/` and auto-commit. -/// -/// * If the story is in `2_current/`, it is moved to `5_done/` and committed. -/// * If the story is in `4_merge/`, it is moved to `5_done/` and committed. -/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent). -/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned. -pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), String> { - let sk = project_root.join(".story_kit").join("work"); - let current_path = sk.join("2_current").join(format!("{story_id}.md")); - let merge_path = sk.join("4_merge").join(format!("{story_id}.md")); - let done_dir = sk.join("5_done"); - let done_path = done_dir.join(format!("{story_id}.md")); - let archived_path = sk.join("6_archived").join(format!("{story_id}.md")); - - if done_path.exists() || archived_path.exists() { - // Already in done or archived — idempotent, nothing to do. 
- return Ok(()); - } - - // Check 2_current/ first, then 4_merge/ - let source_path = if current_path.exists() { - current_path.clone() - } else if merge_path.exists() { - merge_path.clone() - } else { - return Err(format!( - "Story '{story_id}' not found in work/2_current/ or work/4_merge/. Cannot accept story." - )); - }; - - std::fs::create_dir_all(&done_dir) - .map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?; - std::fs::rename(&source_path, &done_path) - .map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?; - - let from_dir = if source_path == current_path { - "work/2_current/" - } else { - "work/4_merge/" - }; - slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_done/"); - - Ok(()) -} - -/// Move a story/bug from `work/2_current/` or `work/3_qa/` to `work/4_merge/`. -/// -/// This stages a work item as ready for the mergemaster to pick up and merge into master. -/// Idempotent: if already in `4_merge/`, returns Ok without committing. -pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), String> { - let sk = project_root.join(".story_kit").join("work"); - let current_path = sk.join("2_current").join(format!("{story_id}.md")); - let qa_path = sk.join("3_qa").join(format!("{story_id}.md")); - let merge_dir = sk.join("4_merge"); - let merge_path = merge_dir.join(format!("{story_id}.md")); - - if merge_path.exists() { - // Already in 4_merge/ — idempotent, nothing to do. - return Ok(()); - } - - // Accept from 2_current/ (manual trigger) or 3_qa/ (pipeline advancement from QA stage). - let source_path = if current_path.exists() { - current_path.clone() - } else if qa_path.exists() { - qa_path.clone() - } else { - return Err(format!( - "Work item '{story_id}' not found in work/2_current/ or work/3_qa/. Cannot move to 4_merge/." 
- )); - }; - - std::fs::create_dir_all(&merge_dir) - .map_err(|e| format!("Failed to create work/4_merge/ directory: {e}"))?; - std::fs::rename(&source_path, &merge_path) - .map_err(|e| format!("Failed to move '{story_id}' to 4_merge/: {e}"))?; - - let from_dir = if source_path == current_path { - "work/2_current/" - } else { - "work/3_qa/" - }; - slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); - - Ok(()) -} - -/// Move a story/bug from `work/2_current/` to `work/3_qa/` and auto-commit. -/// -/// This stages a work item for QA review before merging to master. -/// Idempotent: if already in `3_qa/`, returns Ok without committing. -pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), String> { - let sk = project_root.join(".story_kit").join("work"); - let current_path = sk.join("2_current").join(format!("{story_id}.md")); - let qa_dir = sk.join("3_qa"); - let qa_path = qa_dir.join(format!("{story_id}.md")); - - if qa_path.exists() { - // Already in 3_qa/ — idempotent, nothing to do. - return Ok(()); - } - - if !current_path.exists() { - return Err(format!( - "Work item '{story_id}' not found in work/2_current/. Cannot move to 3_qa/." - )); - } - - std::fs::create_dir_all(&qa_dir) - .map_err(|e| format!("Failed to create work/3_qa/ directory: {e}"))?; - std::fs::rename(¤t_path, &qa_path) - .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; - - slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); - - Ok(()) -} - -/// Move a bug from `work/2_current/` or `work/1_upcoming/` to `work/5_done/` and auto-commit. -/// -/// * If the bug is in `2_current/`, it is moved to `5_done/` and committed. -/// * If the bug is still in `1_upcoming/` (never started), it is moved directly to `5_done/`. -/// * If the bug is already in `5_done/`, this is a no-op (idempotent). -/// * If the bug is not found anywhere, an error is returned. 
-pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), String> { - let sk = project_root.join(".story_kit").join("work"); - let current_path = sk.join("2_current").join(format!("{bug_id}.md")); - let upcoming_path = sk.join("1_upcoming").join(format!("{bug_id}.md")); - let archive_dir = item_archive_dir(project_root, bug_id); - let archive_path = archive_dir.join(format!("{bug_id}.md")); - - if archive_path.exists() { - return Ok(()); - } - - let source_path = if current_path.exists() { - current_path.clone() - } else if upcoming_path.exists() { - upcoming_path.clone() - } else { - return Err(format!( - "Bug '{bug_id}' not found in work/2_current/ or work/1_upcoming/. Cannot close bug." - )); - }; - - std::fs::create_dir_all(&archive_dir) - .map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?; - std::fs::rename(&source_path, &archive_path) - .map_err(|e| format!("Failed to move bug '{bug_id}' to 5_done/: {e}"))?; - - slog!( - "[lifecycle] Closed bug '{bug_id}' → work/5_done/" - ); - - Ok(()) -} - -// ── Acceptance-gate helpers ─────────────────────────────────────────────────── - -/// Detect the base branch for a git worktree by checking common default branch names. -/// -/// Tries `master` then `main`; falls back to `"master"` if neither is resolvable. -fn detect_worktree_base_branch(wt_path: &Path) -> String { - for branch in &["master", "main"] { - let ok = Command::new("git") - .args(["rev-parse", "--verify", branch]) - .current_dir(wt_path) - .output() - .map(|o| o.status.success()) - .unwrap_or(false); - if ok { - return branch.to_string(); - } - } - "master".to_string() -} - -/// Return `true` if the git worktree at `wt_path` has commits on its current -/// branch that are not present on the base branch (`master` or `main`). -/// -/// Used during server startup reconciliation to detect stories whose agent work -/// was committed while the server was offline. 
-fn worktree_has_committed_work(wt_path: &Path) -> bool { - let base_branch = detect_worktree_base_branch(wt_path); - let output = Command::new("git") - .args(["log", &format!("{base_branch}..HEAD"), "--oneline"]) - .current_dir(wt_path) - .output(); - match output { - Ok(out) if out.status.success() => { - !String::from_utf8_lossy(&out.stdout).trim().is_empty() - } - _ => false, - } -} - -/// Check whether the given directory has any uncommitted git changes. -/// Returns `Err` with a descriptive message if there are any. -fn check_uncommitted_changes(path: &Path) -> Result<(), String> { - let output = Command::new("git") - .args(["status", "--porcelain"]) - .current_dir(path) - .output() - .map_err(|e| format!("Failed to run git status: {e}"))?; - - let stdout = String::from_utf8_lossy(&output.stdout); - if !stdout.trim().is_empty() { - return Err(format!( - "Worktree has uncommitted changes. Please commit all work before \ - the agent exits:\n{stdout}" - )); - } - Ok(()) -} - -/// Run the project's test suite. -/// -/// Uses `script/test` if present, treating it as the canonical single test entry point. -/// Falls back to `cargo nextest run` / `cargo test` when `script/test` is absent. -/// Returns `(tests_passed, output)`. 
-fn run_project_tests(path: &Path) -> Result<(bool, String), String> { - let script_test = path.join("script").join("test"); - if script_test.exists() { - let mut output = String::from("=== script/test ===\n"); - let result = Command::new(&script_test) - .current_dir(path) - .output() - .map_err(|e| format!("Failed to run script/test: {e}"))?; - let out = format!( - "{}{}", - String::from_utf8_lossy(&result.stdout), - String::from_utf8_lossy(&result.stderr) - ); - output.push_str(&out); - output.push('\n'); - return Ok((result.status.success(), output)); - } - - // Fallback: cargo nextest run / cargo test - let mut output = String::from("=== tests ===\n"); - let (success, test_out) = match Command::new("cargo") - .args(["nextest", "run"]) - .current_dir(path) - .output() - { - Ok(o) => { - let combined = format!( - "{}{}", - String::from_utf8_lossy(&o.stdout), - String::from_utf8_lossy(&o.stderr) - ); - (o.status.success(), combined) - } - Err(_) => { - // nextest not available — fall back to cargo test - let o = Command::new("cargo") - .args(["test"]) - .current_dir(path) - .output() - .map_err(|e| format!("Failed to run cargo test: {e}"))?; - let combined = format!( - "{}{}", - String::from_utf8_lossy(&o.stdout), - String::from_utf8_lossy(&o.stderr) - ); - (o.status.success(), combined) - } - }; - output.push_str(&test_out); - output.push('\n'); - Ok((success, output)) -} - -/// Run `cargo clippy` and the project test suite (via `script/test` if present, -/// otherwise `cargo nextest run` / `cargo test`) in the given directory. -/// Returns `(gates_passed, combined_output)`. 
-fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> { - let mut all_output = String::new(); - let mut all_passed = true; - - // ── cargo clippy ────────────────────────────────────────────── - let clippy = Command::new("cargo") - .args(["clippy", "--all-targets", "--all-features"]) - .current_dir(path) - .output() - .map_err(|e| format!("Failed to run cargo clippy: {e}"))?; - - all_output.push_str("=== cargo clippy ===\n"); - let clippy_stdout = String::from_utf8_lossy(&clippy.stdout); - let clippy_stderr = String::from_utf8_lossy(&clippy.stderr); - if !clippy_stdout.is_empty() { - all_output.push_str(&clippy_stdout); - } - if !clippy_stderr.is_empty() { - all_output.push_str(&clippy_stderr); - } - all_output.push('\n'); - - if !clippy.status.success() { - all_passed = false; - } - - // ── tests (script/test if available, else cargo nextest/test) ─ - let (test_success, test_out) = run_project_tests(path)?; - all_output.push_str(&test_out); - if !test_success { - all_passed = false; - } - - Ok((all_passed, all_output)) -} - -/// Run `script/test_coverage` in the given directory if the script exists. -/// -/// Used as a QA gate before advancing a story from `3_qa/` to `4_merge/`. -/// Returns `(passed, output)`. If the script does not exist, returns `(true, …)`. 
-fn run_coverage_gate(path: &Path) -> Result<(bool, String), String> { - let script = path.join("script").join("test_coverage"); - if !script.exists() { - return Ok(( - true, - "script/test_coverage not found; coverage gate skipped.\n".to_string(), - )); - } - - let mut output = String::from("=== script/test_coverage ===\n"); - let result = Command::new(&script) - .current_dir(path) - .output() - .map_err(|e| format!("Failed to run script/test_coverage: {e}"))?; - - let combined = format!( - "{}{}", - String::from_utf8_lossy(&result.stdout), - String::from_utf8_lossy(&result.stderr) - ); - output.push_str(&combined); - output.push('\n'); - - Ok((result.status.success(), output)) -} - -// ── Mergemaster helpers ─────────────────────────────────────────────────────── - -/// Result of a squash-merge operation. -struct SquashMergeResult { - success: bool, - had_conflicts: bool, - /// `true` when conflicts were detected but automatically resolved. - conflicts_resolved: bool, - conflict_details: Option, - output: String, - /// Whether quality gates ran and passed. `false` when `success` is `false` - /// due to a gate failure; callers can use this to distinguish gate failures - /// from merge/commit/FF failures in the `MergeReport`. - gates_passed: bool, -} - -/// Squash-merge a feature branch into the current branch using a temporary -/// merge-queue worktree for quality-gate isolation. -/// -/// **Flow:** -/// 1. Create a temporary `merge-queue/{story_id}` branch at current HEAD. -/// 2. Create a temporary worktree for that branch. -/// 3. Run `git merge --squash` in the temporary worktree (not the main worktree). -/// 4. If conflicts arise, attempt automatic resolution for simple additive cases. -/// 5. If clean (or resolved), commit in the temp worktree. -/// 6. Run quality gates **in the merge worktree** before touching master. -/// 7. If gates pass: cherry-pick the squash commit onto master. -/// 8. Clean up the temporary worktree and branch. 
-/// -/// Step 7 uses `git cherry-pick` instead of `git merge --ff-only` so that -/// concurrent filesystem-watcher commits on master (pipeline file moves) do -/// not block the merge. -fn run_squash_merge( - project_root: &Path, - branch: &str, - story_id: &str, -) -> Result { - let mut all_output = String::new(); - let merge_branch = format!("merge-queue/{story_id}"); - let merge_wt_path = project_root - .join(".story_kit") - .join("merge_workspace"); - - // Ensure we start clean: remove any leftover merge workspace. - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - - // ── Create merge-queue branch at current HEAD ───────────────── - all_output.push_str(&format!( - "=== Creating merge-queue branch '{merge_branch}' ===\n" - )); - let create_branch = Command::new("git") - .args(["branch", &merge_branch]) - .current_dir(project_root) - .output() - .map_err(|e| format!("Failed to create merge-queue branch: {e}"))?; - if !create_branch.status.success() { - let stderr = String::from_utf8_lossy(&create_branch.stderr); - all_output.push_str(&format!("Branch creation failed: {stderr}\n")); - return Err(format!("Failed to create merge-queue branch: {stderr}")); - } - - // ── Create temporary worktree ───────────────────────────────── - all_output.push_str("=== Creating temporary merge worktree ===\n"); - let wt_str = merge_wt_path.to_string_lossy().to_string(); - let create_wt = Command::new("git") - .args(["worktree", "add", &wt_str, &merge_branch]) - .current_dir(project_root) - .output() - .map_err(|e| format!("Failed to create merge worktree: {e}"))?; - if !create_wt.status.success() { - let stderr = String::from_utf8_lossy(&create_wt.stderr); - all_output.push_str(&format!("Worktree creation failed: {stderr}\n")); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Err(format!("Failed to create merge worktree: {stderr}")); - } - - // ── Squash-merge in the temporary worktree ──────────────────── - 
all_output.push_str(&format!("=== git merge --squash {branch} ===\n")); - let merge = Command::new("git") - .args(["merge", "--squash", branch]) - .current_dir(&merge_wt_path) - .output() - .map_err(|e| format!("Failed to run git merge: {e}"))?; - - let merge_stdout = String::from_utf8_lossy(&merge.stdout).to_string(); - let merge_stderr = String::from_utf8_lossy(&merge.stderr).to_string(); - all_output.push_str(&merge_stdout); - all_output.push_str(&merge_stderr); - all_output.push('\n'); - - let mut had_conflicts = false; - let mut conflicts_resolved = false; - let mut conflict_details: Option = None; - - if !merge.status.success() { - had_conflicts = true; - all_output.push_str("=== Conflicts detected, attempting auto-resolution ===\n"); - - // Try to automatically resolve simple conflicts. - match try_resolve_conflicts(&merge_wt_path) { - Ok((resolved, resolution_log)) => { - all_output.push_str(&resolution_log); - if resolved { - conflicts_resolved = true; - all_output - .push_str("=== All conflicts resolved automatically ===\n"); - } else { - // Could not resolve — abort, clean up, and report. 
- let details = format!( - "Merge conflicts in branch '{branch}':\n{merge_stdout}{merge_stderr}\n{resolution_log}" - ); - conflict_details = Some(details); - all_output - .push_str("=== Unresolvable conflicts, aborting merge ===\n"); - cleanup_merge_workspace( - project_root, - &merge_wt_path, - &merge_branch, - ); - return Ok(SquashMergeResult { - success: false, - had_conflicts: true, - conflicts_resolved: false, - conflict_details, - output: all_output, - gates_passed: false, - }); - } - } - Err(e) => { - all_output.push_str(&format!("Auto-resolution error: {e}\n")); - cleanup_merge_workspace( - project_root, - &merge_wt_path, - &merge_branch, - ); - return Ok(SquashMergeResult { - success: false, - had_conflicts: true, - conflicts_resolved: false, - conflict_details: Some(format!( - "Merge conflicts in branch '{branch}' (auto-resolution failed: {e}):\n{merge_stdout}{merge_stderr}" - )), - output: all_output, - gates_passed: false, - }); - } - } - } - - // ── Commit in the temporary worktree ────────────────────────── - all_output.push_str("=== git commit ===\n"); - let commit_msg = format!("story-kit: merge {story_id}"); - let commit = Command::new("git") - .args(["commit", "-m", &commit_msg]) - .current_dir(&merge_wt_path) - .output() - .map_err(|e| format!("Failed to run git commit: {e}"))?; - - let commit_stdout = String::from_utf8_lossy(&commit.stdout).to_string(); - let commit_stderr = String::from_utf8_lossy(&commit.stderr).to_string(); - all_output.push_str(&commit_stdout); - all_output.push_str(&commit_stderr); - all_output.push('\n'); - - if !commit.status.success() { - // Bug 226: "nothing to commit" means the feature branch has no changes - // beyond what's already on master. This must NOT be treated as success - // — it means the code was never actually merged. 
- if commit_stderr.contains("nothing to commit") - || commit_stdout.contains("nothing to commit") - { - all_output.push_str( - "=== Nothing to commit — feature branch has no changes beyond master ===\n", - ); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details: Some( - "Squash-merge resulted in an empty diff — the feature branch has no \ - code changes to merge into master." - .to_string(), - ), - output: all_output, - gates_passed: false, - }); - } - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details, - output: all_output, - gates_passed: false, - }); - } - - // ── Bug 226: Verify the commit contains real code changes ───── - // If the merge only brought in .story_kit/ files (pipeline file moves), - // there are no actual code changes to land on master. Abort. - { - let diff_check = Command::new("git") - .args(["diff", "--name-only", "HEAD~1..HEAD"]) - .current_dir(&merge_wt_path) - .output() - .map_err(|e| format!("Failed to check merge diff: {e}"))?; - let changed_files = String::from_utf8_lossy(&diff_check.stdout); - let has_code_changes = changed_files - .lines() - .any(|f| !f.starts_with(".story_kit/")); - if !has_code_changes { - all_output.push_str( - "=== Merge commit contains only .story_kit/ file moves, no code changes ===\n", - ); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details: Some( - "Feature branch has no code changes outside .story_kit/ — only \ - pipeline file moves were found." 
- .to_string(), - ), - output: all_output, - gates_passed: false, - }); - } - } - - // ── Run component setup from project.toml (same as worktree creation) ────────── - { - let config = ProjectConfig::load(&merge_wt_path).unwrap_or_default(); - if !config.component.is_empty() { - all_output.push_str("=== component setup (merge worktree) ===\n"); - } - for component in &config.component { - let cmd_dir = merge_wt_path.join(&component.path); - for cmd in &component.setup { - all_output.push_str(&format!("--- {}: {cmd} ---\n", component.name)); - match Command::new("sh") - .args(["-c", cmd]) - .current_dir(&cmd_dir) - .output() - { - Ok(out) => { - all_output.push_str(&String::from_utf8_lossy(&out.stdout)); - all_output.push_str(&String::from_utf8_lossy(&out.stderr)); - all_output.push('\n'); - if !out.status.success() { - all_output.push_str(&format!( - "=== setup warning: '{}' failed: {cmd} ===\n", - component.name - )); - } - } - Err(e) => { - all_output.push_str(&format!( - "=== setup warning: failed to run '{cmd}': {e} ===\n" - )); - } - } - } - } - } - - // ── Quality gates in merge workspace (BEFORE fast-forward) ──── - // Run gates in the merge worktree so that failures abort before master moves. 
- all_output.push_str("=== Running quality gates before fast-forward ===\n"); - match run_merge_quality_gates(&merge_wt_path) { - Ok((true, gate_out)) => { - all_output.push_str(&gate_out); - all_output.push('\n'); - all_output.push_str("=== Quality gates passed ===\n"); - } - Ok((false, gate_out)) => { - all_output.push_str(&gate_out); - all_output.push('\n'); - all_output - .push_str("=== Quality gates FAILED — aborting fast-forward, master unchanged ===\n"); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details, - output: all_output, - gates_passed: false, - }); - } - Err(e) => { - all_output.push_str(&format!("Gate check error: {e}\n")); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details, - output: all_output, - gates_passed: false, - }); - } - } - - // ── Cherry-pick the squash commit onto master ────────────────── - // We cherry-pick instead of fast-forward so that concurrent filesystem - // watcher commits on master (e.g. pipeline file moves) don't block the - // merge. Cherry-pick applies the diff of the squash commit cleanly on - // top of master's current HEAD. - all_output.push_str(&format!( - "=== Cherry-picking squash commit from {merge_branch} onto master ===\n" - )); - let cp = Command::new("git") - .args(["cherry-pick", &merge_branch]) - .current_dir(project_root) - .output() - .map_err(|e| format!("Failed to cherry-pick merge-queue commit: {e}"))?; - - let cp_stdout = String::from_utf8_lossy(&cp.stdout).to_string(); - let cp_stderr = String::from_utf8_lossy(&cp.stderr).to_string(); - all_output.push_str(&cp_stdout); - all_output.push_str(&cp_stderr); - all_output.push('\n'); - - if !cp.status.success() { - // Abort the cherry-pick so master is left clean. 
- let _ = Command::new("git") - .args(["cherry-pick", "--abort"]) - .current_dir(project_root) - .output(); - all_output.push_str("=== Cherry-pick failed — aborting, master unchanged ===\n"); - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - return Ok(SquashMergeResult { - success: false, - had_conflicts, - conflicts_resolved, - conflict_details: Some(format!( - "Cherry-pick of squash commit failed (conflict with master?):\n{cp_stderr}" - )), - output: all_output, - gates_passed: true, - }); - } - - // ── Clean up ────────────────────────────────────────────────── - cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch); - all_output.push_str("=== Merge-queue cleanup complete ===\n"); - - Ok(SquashMergeResult { - success: true, - had_conflicts, - conflicts_resolved, - conflict_details, - output: all_output, - gates_passed: true, - }) -} - -/// Remove the temporary merge worktree and branch. Best-effort — errors are -/// silently ignored because this is cleanup code. -fn cleanup_merge_workspace( - project_root: &Path, - merge_wt_path: &Path, - merge_branch: &str, -) { - let wt_str = merge_wt_path.to_string_lossy().to_string(); - let _ = Command::new("git") - .args(["worktree", "remove", "--force", &wt_str]) - .current_dir(project_root) - .output(); - // If the directory still exists (e.g. it was a plain directory from a - // previous failed run, not a registered git worktree), remove it so - // the next `git worktree add` can succeed. - if merge_wt_path.exists() { - let _ = std::fs::remove_dir_all(merge_wt_path); - } - let _ = Command::new("git") - .args(["branch", "-D", merge_branch]) - .current_dir(project_root) - .output(); -} - -/// Attempt to automatically resolve merge conflicts in the given worktree. -/// -/// Finds all conflicted files and tries [`resolve_simple_conflicts`] on each. -/// If **all** conflicts can be resolved, stages the resolved files and returns -/// `Ok((true, log))`. 
If any file has a complex conflict that cannot be -/// auto-resolved, returns `Ok((false, log))` without staging anything. -fn try_resolve_conflicts(worktree: &Path) -> Result<(bool, String), String> { - let mut log = String::new(); - - // List conflicted files. - let ls = Command::new("git") - .args(["diff", "--name-only", "--diff-filter=U"]) - .current_dir(worktree) - .output() - .map_err(|e| format!("Failed to list conflicted files: {e}"))?; - - let file_list = String::from_utf8_lossy(&ls.stdout); - let conflicted_files: Vec<&str> = - file_list.lines().filter(|l| !l.is_empty()).collect(); - - if conflicted_files.is_empty() { - log.push_str("No conflicted files found (conflict may be index-only).\n"); - return Ok((false, log)); - } - - log.push_str(&format!( - "Conflicted files ({}):\n", - conflicted_files.len() - )); - for f in &conflicted_files { - log.push_str(&format!(" - {f}\n")); - } - - // First pass: check that all files can be resolved before touching any. - let mut resolutions: Vec<(&str, String)> = Vec::new(); - for file in &conflicted_files { - let file_path = worktree.join(file); - let content = std::fs::read_to_string(&file_path) - .map_err(|e| format!("Failed to read conflicted file '{file}': {e}"))?; - - match resolve_simple_conflicts(&content) { - Some(resolved) => { - log.push_str(&format!(" [auto-resolve] {file}\n")); - resolutions.push((file, resolved)); - } - None => { - log.push_str(&format!( - " [COMPLEX — cannot auto-resolve] {file}\n" - )); - return Ok((false, log)); - } - } - } - - // Second pass: write resolved content and stage. 
- for (file, resolved) in &resolutions { - let file_path = worktree.join(file); - std::fs::write(&file_path, resolved) - .map_err(|e| format!("Failed to write resolved file '{file}': {e}"))?; - - let add = Command::new("git") - .args(["add", file]) - .current_dir(worktree) - .output() - .map_err(|e| format!("Failed to stage resolved file '{file}': {e}"))?; - if !add.status.success() { - return Err(format!( - "git add failed for '{file}': {}", - String::from_utf8_lossy(&add.stderr) - )); - } - } - - Ok((true, log)) -} - -/// Try to resolve simple additive merge conflicts in a file's content. -/// -/// A conflict is considered "simple additive" when both sides add new content -/// at the same location without modifying existing lines. In that case we keep -/// both additions (ours first, then theirs). -/// -/// Returns `Some(resolved)` if all conflict blocks in the file are simple, or -/// `None` if any block is too complex to auto-resolve. -fn resolve_simple_conflicts(content: &str) -> Option { - // Quick check: if there are no conflict markers at all, nothing to do. - if !content.contains("<<<<<<<") { - return Some(content.to_string()); - } - - let mut result = String::new(); - let mut lines = content.lines().peekable(); - - while let Some(line) = lines.next() { - if line.starts_with("<<<<<<<") { - // Collect the "ours" side (between <<<<<<< and =======). - let mut ours = Vec::new(); - let mut found_separator = false; - for next_line in lines.by_ref() { - if next_line.starts_with("=======") { - found_separator = true; - break; - } - ours.push(next_line); - } - if !found_separator { - return None; // Malformed conflict block. - } - - // Collect the "theirs" side (between ======= and >>>>>>>). - let mut theirs = Vec::new(); - let mut found_end = false; - for next_line in lines.by_ref() { - if next_line.starts_with(">>>>>>>") { - found_end = true; - break; - } - theirs.push(next_line); - } - if !found_end { - return None; // Malformed conflict block. 
- } - - // Both sides must be non-empty additions to be considered simple. - // If either side is empty, it means one side deleted something — complex. - if ours.is_empty() && theirs.is_empty() { - // Both empty — nothing to add, skip. - continue; - } - - // Accept both: ours first, then theirs. - for l in &ours { - result.push_str(l); - result.push('\n'); - } - for l in &theirs { - result.push_str(l); - result.push('\n'); - } - } else { - result.push_str(line); - result.push('\n'); - } - } - - // Preserve trailing newline consistency: if original ended without - // newline, strip the trailing one we added. - if !content.ends_with('\n') && result.ends_with('\n') { - result.pop(); - } - - Some(result) -} - -/// Run quality gates in the project root after a successful merge. -/// -/// Runs quality gates in the merge workspace. -/// -/// When `script/test` is present it is the single source of truth and is the -/// only gate that runs — it is expected to cover the full suite (clippy, unit -/// tests, frontend tests, etc.). When `script/test` is absent the function -/// falls back to `cargo clippy` + `cargo nextest`/`cargo test` for Rust -/// projects. No hardcoded references to pnpm or frontend/ are used. -/// -/// Returns `(gates_passed, combined_output)`. -fn run_merge_quality_gates(project_root: &Path) -> Result<(bool, String), String> { - let mut all_output = String::new(); - let mut all_passed = true; - - let script_test = project_root.join("script").join("test"); - - if script_test.exists() { - // Delegate entirely to script/test — it is the single source of truth - // for the full test suite (clippy, cargo tests, frontend builds, etc.). - let (success, output) = run_project_tests(project_root)?; - all_output.push_str(&output); - if !success { - all_passed = false; - } - return Ok((all_passed, all_output)); - } - - // No script/test — fall back to cargo gates for Rust projects. 
- let cargo_toml = project_root.join("Cargo.toml"); - if cargo_toml.exists() { - let clippy = Command::new("cargo") - .args(["clippy", "--all-targets", "--all-features"]) - .current_dir(project_root) - .output() - .map_err(|e| format!("Failed to run cargo clippy: {e}"))?; - - all_output.push_str("=== cargo clippy ===\n"); - let clippy_out = format!( - "{}{}", - String::from_utf8_lossy(&clippy.stdout), - String::from_utf8_lossy(&clippy.stderr) - ); - all_output.push_str(&clippy_out); - all_output.push('\n'); - - if !clippy.status.success() { - all_passed = false; - } - - let (test_success, test_out) = run_project_tests(project_root)?; - all_output.push_str(&test_out); - if !test_success { - all_passed = false; - } - } - - Ok((all_passed, all_output)) -} - -/// Spawn claude agent in a PTY and stream events through the broadcast channel. -#[allow(clippy::too_many_arguments)] -async fn run_agent_pty_streaming( - story_id: &str, - agent_name: &str, - command: &str, - args: &[String], - prompt: &str, - cwd: &str, - tx: &broadcast::Sender, - event_log: &Arc>>, - log_writer: Option>>, - inactivity_timeout_secs: u64, - child_killers: Arc>>>, -) -> Result, String> { - let sid = story_id.to_string(); - let aname = agent_name.to_string(); - let cmd = command.to_string(); - let args = args.to_vec(); - let prompt = prompt.to_string(); - let cwd = cwd.to_string(); - let tx = tx.clone(); - let event_log = event_log.clone(); - - tokio::task::spawn_blocking(move || { - run_agent_pty_blocking( - &sid, - &aname, - &cmd, - &args, - &prompt, - &cwd, - &tx, - &event_log, - log_writer.as_deref(), - inactivity_timeout_secs, - &child_killers, - ) - }) - .await - .map_err(|e| format!("Agent task panicked: {e}"))? -} - -/// Dispatch a `stream_event` from Claude Code's `--include-partial-messages` output. -/// -/// Extracts `thinking_delta` and `text_delta` from `content_block_delta` events -/// and routes them as `AgentEvent::Thinking` and `AgentEvent::Output` respectively. 
-/// This ensures thinking traces flow through the dedicated `ThinkingBlock` UI -/// component rather than appearing as unbounded regular output. -fn handle_agent_stream_event( - event: &serde_json::Value, - story_id: &str, - agent_name: &str, - tx: &broadcast::Sender, - event_log: &Mutex>, - log_writer: Option<&Mutex>, -) { - let event_type = event.get("type").and_then(|t| t.as_str()).unwrap_or(""); - - if event_type == "content_block_delta" - && let Some(delta) = event.get("delta") - { - let delta_type = delta.get("type").and_then(|t| t.as_str()).unwrap_or(""); - match delta_type { - "thinking_delta" => { - if let Some(thinking) = delta.get("thinking").and_then(|t| t.as_str()) { - emit_event( - AgentEvent::Thinking { - story_id: story_id.to_string(), - agent_name: agent_name.to_string(), - text: thinking.to_string(), - }, - tx, - event_log, - log_writer, - ); - } - } - "text_delta" => { - if let Some(text) = delta.get("text").and_then(|t| t.as_str()) { - emit_event( - AgentEvent::Output { - story_id: story_id.to_string(), - agent_name: agent_name.to_string(), - text: text.to_string(), - }, - tx, - event_log, - log_writer, - ); - } - } - _ => {} - } - } -} - -/// Helper to send an event to broadcast, event log, and optional persistent log file. 
-fn emit_event( - event: AgentEvent, - tx: &broadcast::Sender, - event_log: &Mutex>, - log_writer: Option<&Mutex>, -) { - if let Ok(mut log) = event_log.lock() { - log.push(event.clone()); - } - if let Some(writer) = log_writer - && let Ok(mut w) = writer.lock() - && let Err(e) = w.write_event(&event) - { - eprintln!("[agent_log] Failed to write event to log file: {e}"); - } - let _ = tx.send(event); -} - -#[allow(clippy::too_many_arguments)] -fn run_agent_pty_blocking( - story_id: &str, - agent_name: &str, - command: &str, - args: &[String], - prompt: &str, - cwd: &str, - tx: &broadcast::Sender, - event_log: &Mutex>, - log_writer: Option<&Mutex>, - inactivity_timeout_secs: u64, - child_killers: &Arc>>>, -) -> Result, String> { - let pty_system = native_pty_system(); - - let pair = pty_system - .openpty(PtySize { - rows: 50, - cols: 200, - pixel_width: 0, - pixel_height: 0, - }) - .map_err(|e| format!("Failed to open PTY: {e}"))?; - - let mut cmd = CommandBuilder::new(command); - - // -p must come first - cmd.arg("-p"); - cmd.arg(prompt); - - // Add configured args (e.g., --directory /path/to/worktree, --model, etc.) - for arg in args { - cmd.arg(arg); - } - - cmd.arg("--output-format"); - cmd.arg("stream-json"); - cmd.arg("--verbose"); - // Enable partial streaming so we receive thinking_delta and text_delta - // events in real-time, rather than only complete assistant events. - // Without this, thinking traces may not appear in the structured output - // and instead leak as unstructured PTY text. 
- cmd.arg("--include-partial-messages"); - - // Supervised agents don't need interactive permission prompts - cmd.arg("--permission-mode"); - cmd.arg("bypassPermissions"); - - cmd.cwd(cwd); - cmd.env("NO_COLOR", "1"); - - // Allow spawning Claude Code from within a Claude Code session - cmd.env_remove("CLAUDECODE"); - cmd.env_remove("CLAUDE_CODE_ENTRYPOINT"); - - slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}"); - - let mut child = pair - .slave - .spawn_command(cmd) - .map_err(|e| format!("Failed to spawn agent for {story_id}:{agent_name}: {e}"))?; - - // Register the child killer so that kill_all_children() / stop_agent() can - // terminate this process on server shutdown, even if the blocking thread - // cannot be interrupted. The ChildKillerGuard deregisters on function exit. - let killer_key = composite_key(story_id, agent_name); - { - let killer = child.clone_killer(); - if let Ok(mut killers) = child_killers.lock() { - killers.insert(killer_key.clone(), killer); - } - } - let _killer_guard = ChildKillerGuard { - killers: Arc::clone(child_killers), - key: killer_key, - }; - - drop(pair.slave); - - let reader = pair - .master - .try_clone_reader() - .map_err(|e| format!("Failed to clone PTY reader: {e}"))?; - - drop(pair.master); - - // Spawn a reader thread to collect PTY output lines. - // We use a channel so the main thread can apply an inactivity deadline - // via recv_timeout: if no output arrives within the configured window - // the process is killed and the agent is marked Failed. 
- let (line_tx, line_rx) = std::sync::mpsc::channel::>(); - std::thread::spawn(move || { - let buf_reader = BufReader::new(reader); - for line in buf_reader.lines() { - if line_tx.send(line).is_err() { - break; - } - } - }); - - let timeout_dur = if inactivity_timeout_secs > 0 { - Some(std::time::Duration::from_secs(inactivity_timeout_secs)) - } else { - None - }; - - let mut session_id: Option = None; - - loop { - let recv_result = match timeout_dur { - Some(dur) => line_rx.recv_timeout(dur), - None => line_rx - .recv() - .map_err(|_| std::sync::mpsc::RecvTimeoutError::Disconnected), - }; - - let line = match recv_result { - Ok(Ok(l)) => l, - Ok(Err(_)) => { - // IO error reading from PTY — treat as EOF. - break; - } - Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => { - // Reader thread exited (EOF from PTY). - break; - } - Err(std::sync::mpsc::RecvTimeoutError::Timeout) => { - slog_warn!( - "[agent:{story_id}:{agent_name}] Inactivity timeout after \ - {inactivity_timeout_secs}s with no output. Killing process." - ); - let _ = child.kill(); - let _ = child.wait(); - return Err(format!( - "Agent inactivity timeout: no output received for {inactivity_timeout_secs}s" - )); - } - }; - - let trimmed = line.trim(); - if trimmed.is_empty() { - continue; - } - - // Try to parse as JSON - let json: serde_json::Value = match serde_json::from_str(trimmed) { - Ok(j) => j, - Err(_) => { - // Non-JSON output (terminal escapes etc.) 
— send as raw output - emit_event( - AgentEvent::Output { - story_id: story_id.to_string(), - agent_name: agent_name.to_string(), - text: trimmed.to_string(), - }, - tx, - event_log, - log_writer, - ); - continue; - } - }; - - let event_type = json.get("type").and_then(|t| t.as_str()).unwrap_or(""); - - match event_type { - "system" => { - session_id = json - .get("session_id") - .and_then(|s| s.as_str()) - .map(|s| s.to_string()); - } - // With --include-partial-messages, thinking and text arrive - // incrementally via stream_event → content_block_delta. Handle - // them here for real-time streaming to the frontend. - "stream_event" => { - if let Some(event) = json.get("event") { - handle_agent_stream_event( - event, - story_id, - agent_name, - tx, - event_log, - log_writer, - ); - } - } - // Complete assistant events are skipped for content extraction - // because thinking and text already arrived via stream_event. - // The raw JSON is still forwarded as AgentJson below. - "assistant" | "user" | "result" => {} - _ => {} - } - - // Forward all JSON events - emit_event( - AgentEvent::AgentJson { - story_id: story_id.to_string(), - agent_name: agent_name.to_string(), - data: json, - }, - tx, - event_log, - log_writer, - ); - } - - let _ = child.kill(); - let _ = child.wait(); - - slog!( - "[agent:{story_id}:{agent_name}] Done. 
Session: {:?}", - session_id - ); - - Ok(session_id) -} - #[cfg(test)] mod tests { use super::*; + use crate::agents::{ + AgentEvent, AgentStatus, CompletionReport, PipelineStage, ReconciliationEvent, + lifecycle::move_story_to_archived, + }; + use crate::config::ProjectConfig; + use crate::io::watcher::WatcherEvent; + use portable_pty::{CommandBuilder, PtySize, native_pty_system}; + use std::collections::HashMap; + use std::path::PathBuf; + use std::process::Command; + use tokio::sync::broadcast; + + fn init_git_repo(repo: &std::path::Path) { + Command::new("git") + .args(["init"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.email", "test@test.com"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["config", "user.name", "Test"]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "--allow-empty", "-m", "init"]) + .current_dir(repo) + .output() + .unwrap(); + } + + fn make_config(toml_str: &str) -> ProjectConfig { + ProjectConfig::parse(toml_str).unwrap() + } #[tokio::test] async fn wait_for_agent_returns_immediately_if_completed() { @@ -3912,149 +2413,6 @@ mod tests { .await; } - // ── move_story_to_current tests ──────────────────────────────────────────── - // No git repo needed: the watcher handles commits asynchronously. 
- - fn init_git_repo(repo: &std::path::Path) { - Command::new("git") - .args(["init"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["config", "user.email", "test@test.com"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["config", "user.name", "Test"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "--allow-empty", "-m", "init"]) - .current_dir(repo) - .output() - .unwrap(); - } - - #[test] - fn move_story_to_current_moves_file() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let upcoming = root.join(".story_kit/work/1_upcoming"); - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(&upcoming).unwrap(); - fs::create_dir_all(¤t).unwrap(); - fs::write(upcoming.join("10_story_foo.md"), "test").unwrap(); - - move_story_to_current(root, "10_story_foo").unwrap(); - - assert!(!upcoming.join("10_story_foo.md").exists()); - assert!(current.join("10_story_foo.md").exists()); - } - - #[test] - fn move_story_to_current_is_idempotent_when_already_current() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("11_story_foo.md"), "test").unwrap(); - - move_story_to_current(root, "11_story_foo").unwrap(); - assert!(current.join("11_story_foo.md").exists()); - } - - #[test] - fn move_story_to_current_noop_when_not_in_upcoming() { - let tmp = tempfile::tempdir().unwrap(); - assert!(move_story_to_current(tmp.path(), "99_missing").is_ok()); - } - - #[test] - fn move_bug_to_current_moves_from_upcoming() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let upcoming = root.join(".story_kit/work/1_upcoming"); - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(&upcoming).unwrap(); - fs::create_dir_all(¤t).unwrap(); - 
fs::write(upcoming.join("1_bug_test.md"), "# Bug 1\n").unwrap(); - - move_story_to_current(root, "1_bug_test").unwrap(); - - assert!(!upcoming.join("1_bug_test.md").exists()); - assert!(current.join("1_bug_test.md").exists()); - } - - #[test] - fn close_bug_moves_from_current_to_archive() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("2_bug_test.md"), "# Bug 2\n").unwrap(); - - close_bug_to_archive(root, "2_bug_test").unwrap(); - - assert!(!current.join("2_bug_test.md").exists()); - assert!(root.join(".story_kit/work/5_done/2_bug_test.md").exists()); - } - - #[test] - fn close_bug_moves_from_upcoming_when_not_started() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let upcoming = root.join(".story_kit/work/1_upcoming"); - fs::create_dir_all(&upcoming).unwrap(); - fs::write(upcoming.join("3_bug_test.md"), "# Bug 3\n").unwrap(); - - close_bug_to_archive(root, "3_bug_test").unwrap(); - - assert!(!upcoming.join("3_bug_test.md").exists()); - assert!(root.join(".story_kit/work/5_done/3_bug_test.md").exists()); - } - - #[test] - fn item_type_from_id_detects_types() { - assert_eq!(item_type_from_id("1_bug_test"), "bug"); - assert_eq!(item_type_from_id("1_spike_research"), "spike"); - assert_eq!(item_type_from_id("50_story_my_story"), "story"); - assert_eq!(item_type_from_id("1_story_simple"), "story"); - } - - // ── pipeline_stage tests ────────────────────────────────────────────────── - - #[test] - fn pipeline_stage_detects_coders() { - assert_eq!(pipeline_stage("coder-1"), PipelineStage::Coder); - assert_eq!(pipeline_stage("coder-2"), PipelineStage::Coder); - assert_eq!(pipeline_stage("coder-3"), PipelineStage::Coder); - } - - #[test] - fn pipeline_stage_detects_qa() { - assert_eq!(pipeline_stage("qa"), PipelineStage::Qa); - } - - #[test] - fn pipeline_stage_detects_mergemaster() 
{ - assert_eq!(pipeline_stage("mergemaster"), PipelineStage::Mergemaster); - } - - #[test] - fn pipeline_stage_supervisor_is_other() { - assert_eq!(pipeline_stage("supervisor"), PipelineStage::Other); - assert_eq!(pipeline_stage("default"), PipelineStage::Other); - assert_eq!(pipeline_stage("unknown"), PipelineStage::Other); - } - // ── pipeline advance tests ──────────────────────────────────────────────── #[tokio::test] @@ -4165,425 +2523,83 @@ mod tests { ); } - // ── move_story_to_merge tests ────────────────────────────────────────────── - - #[test] - fn move_story_to_merge_moves_file() { + #[tokio::test] + async fn pipeline_advance_sends_agent_state_changed_to_watcher_tx() { use std::fs; + let tmp = tempfile::tempdir().unwrap(); let root = tmp.path(); + + // Set up story in 2_current/ let current = root.join(".story_kit/work/2_current"); fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("20_story_foo.md"), "test").unwrap(); + fs::write(current.join("173_story_test.md"), "test").unwrap(); + // Ensure 3_qa/ exists for the move target + fs::create_dir_all(root.join(".story_kit/work/3_qa")).unwrap(); + // Ensure 1_upcoming/ exists (start_agent calls move_story_to_current) + fs::create_dir_all(root.join(".story_kit/work/1_upcoming")).unwrap(); - move_story_to_merge(root, "20_story_foo").unwrap(); + // Write a project.toml with a qa agent so start_agent can resolve it. 
+ fs::create_dir_all(root.join(".story_kit")).unwrap(); + fs::write( + root.join(".story_kit/project.toml"), + r#" +[[agent]] +name = "coder-1" +role = "Coder" +command = "echo" +args = ["noop"] +prompt = "test" +stage = "coder" - assert!(!current.join("20_story_foo.md").exists()); - assert!(root.join(".story_kit/work/4_merge/20_story_foo.md").exists()); - } - - #[test] - fn move_story_to_merge_from_qa_dir() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let qa_dir = root.join(".story_kit/work/3_qa"); - fs::create_dir_all(&qa_dir).unwrap(); - fs::write(qa_dir.join("40_story_test.md"), "test").unwrap(); - - move_story_to_merge(root, "40_story_test").unwrap(); - - assert!(!qa_dir.join("40_story_test.md").exists()); - assert!(root.join(".story_kit/work/4_merge/40_story_test.md").exists()); - } - - #[test] - fn move_story_to_merge_idempotent_when_already_in_merge() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let merge_dir = root.join(".story_kit/work/4_merge"); - fs::create_dir_all(&merge_dir).unwrap(); - fs::write(merge_dir.join("21_story_test.md"), "test").unwrap(); - - move_story_to_merge(root, "21_story_test").unwrap(); - assert!(merge_dir.join("21_story_test.md").exists()); - } - - #[test] - fn move_story_to_merge_errors_when_not_in_current_or_qa() { - let tmp = tempfile::tempdir().unwrap(); - let result = move_story_to_merge(tmp.path(), "99_nonexistent"); - assert!(result.unwrap_err().contains("not found in work/2_current/ or work/3_qa/")); - } - - // ── move_story_to_qa tests ──────────────────────────────────────────────── - - #[test] - fn move_story_to_qa_moves_file() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("30_story_qa.md"), "test").unwrap(); - - move_story_to_qa(root, "30_story_qa").unwrap(); - - 
assert!(!current.join("30_story_qa.md").exists()); - assert!(root.join(".story_kit/work/3_qa/30_story_qa.md").exists()); - } - - #[test] - fn move_story_to_qa_idempotent_when_already_in_qa() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let qa_dir = root.join(".story_kit/work/3_qa"); - fs::create_dir_all(&qa_dir).unwrap(); - fs::write(qa_dir.join("31_story_test.md"), "test").unwrap(); - - move_story_to_qa(root, "31_story_test").unwrap(); - assert!(qa_dir.join("31_story_test.md").exists()); - } - - #[test] - fn move_story_to_qa_errors_when_not_in_current() { - let tmp = tempfile::tempdir().unwrap(); - let result = move_story_to_qa(tmp.path(), "99_nonexistent"); - assert!(result.unwrap_err().contains("not found in work/2_current/")); - } - - // ── move_story_to_archived tests ────────────────────────────────────────── - - #[test] - fn move_story_to_archived_finds_in_merge_dir() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - let merge_dir = root.join(".story_kit/work/4_merge"); - fs::create_dir_all(&merge_dir).unwrap(); - fs::write(merge_dir.join("22_story_test.md"), "test").unwrap(); - - move_story_to_archived(root, "22_story_test").unwrap(); - - assert!(!merge_dir.join("22_story_test.md").exists()); - assert!(root.join(".story_kit/work/5_done/22_story_test.md").exists()); - } - - #[test] - fn move_story_to_archived_error_when_not_in_current_or_merge() { - let tmp = tempfile::tempdir().unwrap(); - let result = move_story_to_archived(tmp.path(), "99_nonexistent"); - assert!(result.unwrap_err().contains("4_merge")); - } - - // ── merge_agent_work tests ──────────────────────────────────────────────── - - #[tokio::test] - async fn merge_agent_work_returns_error_when_branch_not_found() { - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); +[[agent]] +name = "qa" +role = "QA" +command = "echo" +args = ["noop"] +prompt = "test" +stage = "qa" 
+"#, + ) + .unwrap(); let pool = AgentPool::new_test(3001); - // branch feature/story-99_nonexistent does not exist - let result = pool - .merge_agent_work(repo, "99_nonexistent") - .await - .unwrap(); - // Should fail (no branch) — not panic - assert!(!result.success, "should fail when branch missing"); - } + // Subscribe to the watcher channel BEFORE the pipeline advance. + let mut rx = pool.watcher_tx.subscribe(); - #[tokio::test] - async fn merge_agent_work_succeeds_on_clean_branch() { - use std::fs; - use tempfile::tempdir; + // Call pipeline advance directly. This will: + // 1. Move the story to 3_qa/ + // 2. Start the QA agent (which calls notify_agent_state_changed) + // Note: the actual agent process will fail (no real worktree), but the + // agent insertion and notification happen before the background spawn. + pool.run_pipeline_advance( + "173_story_test", + "coder-1", + CompletionReport { + summary: "done".to_string(), + gates_passed: true, + gate_output: String::new(), + }, + Some(root.to_path_buf()), + None, + false, + ) + .await; - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create a feature branch with a commit - Command::new("git") - .args(["checkout", "-b", "feature/story-23_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("feature.txt"), "feature content").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add feature"]) - .current_dir(repo) - .output() - .unwrap(); - - // Switch back to master (initial branch) - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - // Create the story file in 4_merge/ so we can test archival - let merge_dir = repo.join(".story_kit/work/4_merge"); - fs::create_dir_all(&merge_dir).unwrap(); - let story_file = merge_dir.join("23_test.md"); - fs::write(&story_file, "---\nname: Test\n---\n").unwrap(); - 
Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add story in merge"]) - .current_dir(repo) - .output() - .unwrap(); - - let pool = AgentPool::new_test(3001); - let report = pool.merge_agent_work(repo, "23_test").await.unwrap(); - - // Merge should succeed (gates will run but cargo/pnpm results will depend on env) - // At minimum the merge itself should succeed - assert!(!report.had_conflicts, "should have no conflicts"); - // Note: gates_passed may be false in test env without Rust project, that's OK - // The important thing is the merge itself ran - assert!( - report.success || report.gate_output.contains("Failed to run") || !report.gates_passed, - "report should be coherent: {report:?}" - ); - // Story should be in done if gates passed - if report.story_archived { - let done = repo.join(".story_kit/work/5_done/23_test.md"); - assert!(done.exists(), "done file should exist"); + // The pipeline advance should have sent AgentStateChanged events via + // the pool's watcher_tx (not a dummy channel). Collect all events. + let mut got_agent_state_changed = false; + while let Ok(evt) = rx.try_recv() { + if matches!(evt, WatcherEvent::AgentStateChanged) { + got_agent_state_changed = true; + break; + } } - } - // ── quality gate ordering test ──────────────────────────────── - - /// Regression test for bug 142: quality gates must run BEFORE the fast-forward - /// to master so that broken code never lands on master. - /// - /// Setup: a repo with a failing `script/test`, a feature branch with one commit. - /// When `run_squash_merge` is called, the gates must detect failure and abort the - /// fast-forward, leaving master HEAD unchanged. 
- #[cfg(unix)] - #[test] - fn quality_gates_run_before_fast_forward_to_master() { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Add a failing script/test so quality gates will fail. - let script_dir = repo.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - let script_test = script_dir.join("test"); - fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap(); - let mut perms = fs::metadata(&script_test).unwrap().permissions(); - perms.set_mode(0o755); - fs::set_permissions(&script_test, perms).unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add failing script/test"]) - .current_dir(repo) - .output() - .unwrap(); - - // Create a feature branch with a commit. - Command::new("git") - .args(["checkout", "-b", "feature/story-142_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("change.txt"), "feature change").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature work"]) - .current_dir(repo) - .output() - .unwrap(); - - // Switch back to master and record its HEAD. - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - let head_before = String::from_utf8( - Command::new("git") - .args(["rev-parse", "HEAD"]) - .current_dir(repo) - .output() - .unwrap() - .stdout, - ) - .unwrap() - .trim() - .to_string(); - - // Run the squash-merge. The failing script/test makes quality gates - // fail → fast-forward must NOT happen. 
- let result = run_squash_merge(repo, "feature/story-142_test", "142_test").unwrap(); - - let head_after = String::from_utf8( - Command::new("git") - .args(["rev-parse", "HEAD"]) - .current_dir(repo) - .output() - .unwrap() - .stdout, - ) - .unwrap() - .trim() - .to_string(); - - // Gates must have failed (script/test exits 1) so master should be untouched. assert!( - !result.success, - "run_squash_merge must report failure when gates fail" - ); - assert_eq!( - head_before, head_after, - "master HEAD must not advance when quality gates fail (bug 142)" - ); - } - - // ── run_project_tests tests ─────────────────────────────────── - - #[cfg(unix)] - #[test] - fn run_project_tests_uses_script_test_when_present_and_passes() { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let path = tmp.path(); - let script_dir = path.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - let script_test = script_dir.join("test"); - fs::write(&script_test, "#!/usr/bin/env bash\necho 'all tests passed'\nexit 0\n").unwrap(); - let mut perms = fs::metadata(&script_test).unwrap().permissions(); - perms.set_mode(0o755); - fs::set_permissions(&script_test, perms).unwrap(); - - let (passed, output) = run_project_tests(path).unwrap(); - assert!(passed, "script/test exiting 0 should pass"); - assert!(output.contains("script/test"), "output should mention script/test"); - } - - #[cfg(unix)] - #[test] - fn run_project_tests_reports_failure_when_script_test_exits_nonzero() { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let path = tmp.path(); - let script_dir = path.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - let script_test = script_dir.join("test"); - fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap(); - let mut perms = fs::metadata(&script_test).unwrap().permissions(); - perms.set_mode(0o755); - 
fs::set_permissions(&script_test, perms).unwrap(); - - let (passed, output) = run_project_tests(path).unwrap(); - assert!(!passed, "script/test exiting 1 should fail"); - assert!(output.contains("script/test"), "output should mention script/test"); - } - - // ── run_coverage_gate tests ─────────────────────────────────────────────── - - #[cfg(unix)] - #[test] - fn coverage_gate_passes_when_script_absent() { - use tempfile::tempdir; - let tmp = tempdir().unwrap(); - let (passed, output) = run_coverage_gate(tmp.path()).unwrap(); - assert!(passed, "coverage gate should pass when script is absent"); - assert!( - output.contains("not found"), - "output should mention script not found" - ); - } - - #[cfg(unix)] - #[test] - fn coverage_gate_passes_when_script_exits_zero() { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let path = tmp.path(); - let script_dir = path.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - let script = script_dir.join("test_coverage"); - fs::write( - &script, - "#!/usr/bin/env bash\necho 'Rust line coverage: 85%'\necho 'PASS: Coverage 85% meets threshold 0%'\nexit 0\n", - ) - .unwrap(); - let mut perms = fs::metadata(&script).unwrap().permissions(); - perms.set_mode(0o755); - fs::set_permissions(&script, perms).unwrap(); - - let (passed, output) = run_coverage_gate(path).unwrap(); - assert!(passed, "coverage gate should pass when script exits 0"); - assert!( - output.contains("script/test_coverage"), - "output should mention script/test_coverage" - ); - } - - #[cfg(unix)] - #[test] - fn coverage_gate_fails_when_script_exits_nonzero() { - use std::fs; - use std::os::unix::fs::PermissionsExt; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let path = tmp.path(); - let script_dir = path.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - let script = script_dir.join("test_coverage"); - fs::write( - &script, - "#!/usr/bin/env bash\necho 'FAIL: 
Coverage 40% is below threshold 80%'\nexit 1\n", - ) - .unwrap(); - let mut perms = fs::metadata(&script).unwrap().permissions(); - perms.set_mode(0o755); - fs::set_permissions(&script, perms).unwrap(); - - let (passed, output) = run_coverage_gate(path).unwrap(); - assert!(!passed, "coverage gate should fail when script exits 1"); - assert!( - output.contains("script/test_coverage"), - "output should mention script/test_coverage" + got_agent_state_changed, + "pipeline advance should send AgentStateChanged through the real watcher_tx \ + (bug 173: lozenges must update when agents are assigned during pipeline advance)" ); } @@ -4614,7 +2630,6 @@ mod tests { #[test] fn is_story_assigned_returns_true_for_running_coder() { - use crate::config::ProjectConfig; let config = ProjectConfig::default(); let pool = AgentPool::new_test(3001); pool.inject_test_agent("42_story_foo", "coder-1", AgentStatus::Running); @@ -4644,7 +2659,6 @@ mod tests { #[test] fn is_story_assigned_returns_false_for_completed_agent() { - use crate::config::ProjectConfig; let config = ProjectConfig::default(); let pool = AgentPool::new_test(3001); pool.inject_test_agent("42_story_foo", "coder-1", AgentStatus::Completed); @@ -4659,9 +2673,40 @@ mod tests { )); } + #[test] + fn is_story_assigned_uses_config_stage_field_for_nonstandard_names() { + let config = ProjectConfig::parse( + r#" +[[agent]] +name = "qa-2" +stage = "qa" +"#, + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("42_story_foo", "qa-2", AgentStatus::Running); + + let agents = pool.agents.lock().unwrap(); + // qa-2 with stage=qa should be recognised as a QA agent + assert!( + is_story_assigned_for_stage(&config, &agents, "42_story_foo", &PipelineStage::Qa), + "qa-2 should be detected as assigned to QA stage" + ); + // Should NOT appear as a coder + assert!( + !is_story_assigned_for_stage( + &config, + &agents, + "42_story_foo", + &PipelineStage::Coder + ), + "qa-2 should not be detected as a coder" + ); + 
} + #[test] fn find_free_agent_returns_none_when_all_busy() { - use crate::config::ProjectConfig; let config = ProjectConfig::parse( r#" [[agent]] @@ -4683,7 +2728,6 @@ name = "coder-2" #[test] fn find_free_agent_returns_first_free_coder() { - use crate::config::ProjectConfig; let config = ProjectConfig::parse( r#" [[agent]] @@ -4707,7 +2751,6 @@ name = "coder-3" #[test] fn find_free_agent_ignores_completed_agents() { - use crate::config::ProjectConfig; let config = ProjectConfig::parse( r#" [[agent]] @@ -4727,7 +2770,6 @@ name = "coder-1" #[test] fn find_free_agent_returns_none_for_wrong_stage() { - use crate::config::ProjectConfig; let config = ProjectConfig::parse( r#" [[agent]] @@ -4745,13 +2787,10 @@ name = "qa" assert_eq!(free_qa, Some("qa")); } - // ── agent_config_stage / stage field tests ──────────────────────────────── - #[test] fn find_free_agent_uses_config_stage_field_not_name() { // Agents named "qa-2" and "coder-opus" don't match the legacy name heuristic // but should be picked up via their explicit stage field. 
- use crate::config::ProjectConfig; let config = ProjectConfig::parse( r#" [[agent]] @@ -4784,39 +2823,6 @@ stage = "coder" assert!(free_merge.is_none()); } - #[test] - fn is_story_assigned_uses_config_stage_field_for_nonstandard_names() { - use crate::config::ProjectConfig; - let config = ProjectConfig::parse( - r#" -[[agent]] -name = "qa-2" -stage = "qa" -"#, - ) - .unwrap(); - - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("42_story_foo", "qa-2", AgentStatus::Running); - - let agents = pool.agents.lock().unwrap(); - // qa-2 with stage=qa should be recognised as a QA agent - assert!( - is_story_assigned_for_stage(&config, &agents, "42_story_foo", &PipelineStage::Qa), - "qa-2 should be detected as assigned to QA stage" - ); - // Should NOT appear as a coder - assert!( - !is_story_assigned_for_stage( - &config, - &agents, - "42_story_foo", - &PipelineStage::Coder - ), - "qa-2 should not be detected as a coder" - ); - } - // ── find_active_story_stage tests ───────────────────────────────────────── #[test] @@ -4870,6 +2876,605 @@ stage = "qa" assert_eq!(find_active_story_stage(tmp.path(), "99_nonexistent"), None); } + // ── check_orphaned_agents return value tests (bug 161) ────────────────── + + #[tokio::test] + async fn check_orphaned_agents_returns_count_of_orphaned_agents() { + let pool = AgentPool::new_test(3001); + + // Spawn two tasks that finish immediately. 
+ let h1 = tokio::spawn(async {}); + let h2 = tokio::spawn(async {}); + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + assert!(h1.is_finished()); + assert!(h2.is_finished()); + + pool.inject_test_agent_with_handle("story_a", "coder", AgentStatus::Running, h1); + pool.inject_test_agent_with_handle("story_b", "coder", AgentStatus::Running, h2); + + let found = check_orphaned_agents(&pool.agents); + assert_eq!(found, 2, "should detect both orphaned agents"); + } + + #[test] + fn check_orphaned_agents_returns_zero_when_no_orphans() { + let pool = AgentPool::new_test(3001); + // Inject agents in terminal states — not orphaned. + pool.inject_test_agent("story_a", "coder", AgentStatus::Completed); + pool.inject_test_agent("story_b", "qa", AgentStatus::Failed); + + let found = check_orphaned_agents(&pool.agents); + assert_eq!(found, 0, "no orphans should be detected for terminal agents"); + } + + #[tokio::test] + async fn watchdog_detects_orphaned_running_agent() { + let pool = AgentPool::new_test(3001); + + let handle = tokio::spawn(async {}); + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + assert!(handle.is_finished(), "task should be finished before injection"); + + let tx = + pool.inject_test_agent_with_handle("orphan_story", "coder", AgentStatus::Running, handle); + let mut rx = tx.subscribe(); + + pool.run_watchdog_once(); + + { + let agents = pool.agents.lock().unwrap(); + let key = composite_key("orphan_story", "coder"); + let agent = agents.get(&key).unwrap(); + assert_eq!( + agent.status, + AgentStatus::Failed, + "watchdog must mark an orphaned Running agent as Failed" + ); + } + + let event = rx.try_recv().expect("watchdog must emit an Error event"); + assert!( + matches!(event, AgentEvent::Error { .. 
}), + "expected AgentEvent::Error, got: {event:?}" + ); + } + + #[tokio::test] + async fn watchdog_orphan_detection_returns_nonzero_enabling_auto_assign() { + // This test verifies the contract that `check_orphaned_agents` returns + // a non-zero count when orphans exist, which the watchdog uses to + // decide whether to trigger auto-assign (bug 161). + let pool = AgentPool::new_test(3001); + + let handle = tokio::spawn(async {}); + tokio::time::sleep(std::time::Duration::from_millis(20)).await; + + pool.inject_test_agent_with_handle( + "orphan_story", + "coder", + AgentStatus::Running, + handle, + ); + + // Before watchdog: agent is Running. + { + let agents = pool.agents.lock().unwrap(); + let key = composite_key("orphan_story", "coder"); + assert_eq!(agents.get(&key).unwrap().status, AgentStatus::Running); + } + + // Run watchdog pass — should return 1 (orphan found). + let found = check_orphaned_agents(&pool.agents); + assert_eq!( + found, 1, + "watchdog must return 1 for a single orphaned agent" + ); + + // After watchdog: agent is Failed. 
+ { + let agents = pool.agents.lock().unwrap(); + let key = composite_key("orphan_story", "coder"); + assert_eq!( + agents.get(&key).unwrap().status, + AgentStatus::Failed, + "orphaned agent must be marked Failed" + ); + } + } + + // ── remove_agents_for_story tests ──────────────────────────────────────── + + #[test] + fn remove_agents_for_story_removes_all_entries() { + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story_a", "coder-1", AgentStatus::Completed); + pool.inject_test_agent("story_a", "qa", AgentStatus::Failed); + pool.inject_test_agent("story_b", "coder-1", AgentStatus::Running); + + let removed = pool.remove_agents_for_story("story_a"); + assert_eq!(removed, 2, "should remove both agents for story_a"); + + let agents = pool.list_agents().unwrap(); + assert_eq!(agents.len(), 1, "only story_b agent should remain"); + assert_eq!(agents[0].story_id, "story_b"); + } + + #[test] + fn remove_agents_for_story_returns_zero_when_no_match() { + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story_a", "coder-1", AgentStatus::Running); + + let removed = pool.remove_agents_for_story("nonexistent"); + assert_eq!(removed, 0); + + let agents = pool.list_agents().unwrap(); + assert_eq!(agents.len(), 1, "existing agents should not be affected"); + } + + // ── archive + cleanup integration test ─────────────────────────────────── + + #[tokio::test] + async fn archiving_story_removes_agent_entries_from_pool() { + use std::fs; + + let tmp = tempfile::tempdir().unwrap(); + let root = tmp.path(); + + // Set up story in 2_current/ + let current = root.join(".story_kit/work/2_current"); + fs::create_dir_all(¤t).unwrap(); + fs::write(current.join("60_story_cleanup.md"), "test").unwrap(); + + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("60_story_cleanup", "coder-1", AgentStatus::Completed); + pool.inject_test_agent("60_story_cleanup", "qa", AgentStatus::Completed); + pool.inject_test_agent("61_story_other", "coder-1", 
AgentStatus::Running); + + // Verify all 3 agents exist. + assert_eq!(pool.list_agents().unwrap().len(), 3); + + // Archive the story. + move_story_to_archived(root, "60_story_cleanup").unwrap(); + pool.remove_agents_for_story("60_story_cleanup"); + + // Agent entries for the archived story should be gone. + let remaining = pool.list_agents().unwrap(); + assert_eq!(remaining.len(), 1, "only the other story's agent should remain"); + assert_eq!(remaining[0].story_id, "61_story_other"); + + // Story file should be in 5_done/ + assert!(root.join(".story_kit/work/5_done/60_story_cleanup.md").exists()); + } + + // ── kill_all_children tests ──────────────────────────────────── + + /// Returns true if a process with the given PID is currently running. + fn process_is_running(pid: u32) -> bool { + std::process::Command::new("ps") + .arg("-p") + .arg(pid.to_string()) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .map(|s| s.success()) + .unwrap_or(false) + } + + #[test] + fn kill_all_children_is_safe_on_empty_pool() { + let pool = AgentPool::new_test(3001); + // Should not panic or deadlock on an empty registry. + pool.kill_all_children(); + assert_eq!(pool.child_killer_count(), 0); + } + + #[test] + fn kill_all_children_kills_real_process() { + // GIVEN: a real PTY child process (sleep 100) with its killer registered. + let pool = AgentPool::new_test(3001); + + let pty_system = native_pty_system(); + let pair = pty_system + .openpty(PtySize { + rows: 24, + cols: 80, + pixel_width: 0, + pixel_height: 0, + }) + .expect("failed to open pty"); + + let mut cmd = CommandBuilder::new("sleep"); + cmd.arg("100"); + let mut child = pair + .slave + .spawn_command(cmd) + .expect("failed to spawn sleep"); + let pid = child.process_id().expect("no pid"); + + pool.inject_child_killer("story:agent", child.clone_killer()); + + // Verify the process is alive before we kill it. 
+ assert!( + process_is_running(pid), + "process {pid} should be running before kill_all_children" + ); + + // WHEN: kill_all_children() is called. + pool.kill_all_children(); + + // Collect the exit status (prevents zombie; also ensures signal was sent). + let _ = child.wait(); + + // THEN: the process should be dead. + assert!( + !process_is_running(pid), + "process {pid} should have been killed by kill_all_children" + ); + } + + #[test] + fn kill_all_children_clears_registry() { + // GIVEN: a pool with one registered killer. + let pool = AgentPool::new_test(3001); + + let pty_system = native_pty_system(); + let pair = pty_system + .openpty(PtySize { + rows: 24, + cols: 80, + pixel_width: 0, + pixel_height: 0, + }) + .expect("failed to open pty"); + + let mut cmd = CommandBuilder::new("sleep"); + cmd.arg("1"); + let mut child = pair + .slave + .spawn_command(cmd) + .expect("failed to spawn sleep"); + + pool.inject_child_killer("story:agent", child.clone_killer()); + assert_eq!(pool.child_killer_count(), 1); + + // WHEN: kill_all_children() is called. + pool.kill_all_children(); + let _ = child.wait(); + + // THEN: the registry is empty. 
+ assert_eq!( + pool.child_killer_count(), + 0, + "child_killers should be cleared after kill_all_children" + ); + } + + // ── available_agents_for_stage tests (story 190) ────────────────────────── + + #[test] + fn available_agents_for_stage_returns_idle_agents() { + let config = make_config( + r#" +[[agent]] +name = "coder-1" +stage = "coder" + +[[agent]] +name = "coder-2" +stage = "coder" + +[[agent]] +name = "qa" +stage = "qa" +"#, + ); + let pool = AgentPool::new_test(3001); + // coder-1 is busy on story-1 + pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); + + let available = pool + .available_agents_for_stage(&config, &PipelineStage::Coder) + .unwrap(); + assert_eq!(available, vec!["coder-2"]); + + let available_qa = pool + .available_agents_for_stage(&config, &PipelineStage::Qa) + .unwrap(); + assert_eq!(available_qa, vec!["qa"]); + } + + #[test] + fn available_agents_for_stage_returns_empty_when_all_busy() { + let config = make_config( + r#" +[[agent]] +name = "coder-1" +stage = "coder" +"#, + ); + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); + + let available = pool + .available_agents_for_stage(&config, &PipelineStage::Coder) + .unwrap(); + assert!(available.is_empty()); + } + + #[test] + fn available_agents_for_stage_ignores_completed_agents() { + let config = make_config( + r#" +[[agent]] +name = "coder-1" +stage = "coder" +"#, + ); + let pool = AgentPool::new_test(3001); + // Completed agents should not count as busy. 
+ pool.inject_test_agent("story-1", "coder-1", AgentStatus::Completed); + + let available = pool + .available_agents_for_stage(&config, &PipelineStage::Coder) + .unwrap(); + assert_eq!(available, vec!["coder-1"]); + } + + #[tokio::test] + async fn start_agent_auto_selects_second_coder_when_first_busy() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + std::fs::create_dir_all(&sk).unwrap(); + std::fs::write( + sk.join("project.toml"), + r#" +[[agent]] +name = "supervisor" +stage = "other" + +[[agent]] +name = "coder-1" +stage = "coder" + +[[agent]] +name = "coder-2" +stage = "coder" +"#, + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + // coder-1 is busy on another story + pool.inject_test_agent("other-story", "coder-1", AgentStatus::Running); + + // Call start_agent without agent_name — should pick coder-2 + let result = pool + .start_agent(tmp.path(), "42_my_story", None, None) + .await; + // Will fail for infrastructure reasons (no git repo), but should NOT + // fail with "All coder agents are busy" — that would mean it didn't + // try coder-2. 
+ match result { + Ok(info) => { + assert_eq!(info.agent_name, "coder-2"); + } + Err(err) => { + assert!( + !err.contains("All coder agents are busy"), + "should have selected coder-2 but got: {err}" + ); + assert!( + !err.contains("No coder agent configured"), + "should not fail on agent selection, got: {err}" + ); + } + } + } + + #[tokio::test] + async fn start_agent_returns_busy_when_all_coders_occupied() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + std::fs::create_dir_all(&sk).unwrap(); + std::fs::write( + sk.join("project.toml"), + r#" +[[agent]] +name = "coder-1" +stage = "coder" + +[[agent]] +name = "coder-2" +stage = "coder" +"#, + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); + pool.inject_test_agent("story-2", "coder-2", AgentStatus::Pending); + + let result = pool + .start_agent(tmp.path(), "story-3", None, None) + .await; + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + err.contains("All coder agents are busy"), + "expected busy error, got: {err}" + ); + } + + /// Story 203: when all coders are busy the story file must be moved from + /// 1_upcoming/ to 2_current/ so that auto_assign_available_work can pick + /// it up once a coder finishes. + #[tokio::test] + async fn start_agent_moves_story_to_current_when_coders_busy() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + let upcoming = sk.join("work/1_upcoming"); + std::fs::create_dir_all(&upcoming).unwrap(); + std::fs::write( + sk.join("project.toml"), + r#" +[[agent]] +name = "coder-1" +stage = "coder" +"#, + ) + .unwrap(); + // Place the story in 1_upcoming/. 
+ std::fs::write( + upcoming.join("story-3.md"), + "---\nname: Story 3\n---\n", + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); + + let result = pool + .start_agent(tmp.path(), "story-3", None, None) + .await; + + // Should fail because all coders are busy. + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + err.contains("All coder agents are busy"), + "expected busy error, got: {err}" + ); + assert!( + err.contains("queued in work/2_current/"), + "expected story-to-current message, got: {err}" + ); + + // Story must have been moved to 2_current/. + let current_path = sk.join("work/2_current/story-3.md"); + assert!( + current_path.exists(), + "story should be in 2_current/ after busy error, but was not" + ); + let upcoming_path = upcoming.join("story-3.md"); + assert!( + !upcoming_path.exists(), + "story should no longer be in 1_upcoming/" + ); + } + + /// Story 203: auto_assign_available_work must detect a story in 2_current/ + /// with no active agent and start an agent for it. + #[tokio::test] + async fn auto_assign_picks_up_story_queued_in_current() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + let current = sk.join("work/2_current"); + std::fs::create_dir_all(¤t).unwrap(); + std::fs::write( + sk.join("project.toml"), + "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n", + ) + .unwrap(); + // Place the story in 2_current/ (simulating the "queued" state). + std::fs::write( + current.join("story-3.md"), + "---\nname: Story 3\n---\n", + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + // No agents are running — coder-1 is free. + + // auto_assign will try to call start_agent, which will attempt to create + // a worktree (will fail without a git repo) — that is fine. We only need + // to verify the agent is registered as Pending before the background + // task eventually fails. 
+ pool.auto_assign_available_work(tmp.path()).await; + + let agents = pool.agents.lock().unwrap(); + let has_pending = agents.values().any(|a| { + a.agent_name == "coder-1" + && matches!(a.status, AgentStatus::Pending | AgentStatus::Running) + }); + assert!( + has_pending, + "auto_assign should have started coder-1 for story-3, but pool is empty" + ); + } + + /// Story 203: if a story is already in 2_current/ or later, start_agent + /// must not fail — the move is a no-op. + #[tokio::test] + async fn start_agent_story_already_in_current_is_noop() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + let current = sk.join("work/2_current"); + std::fs::create_dir_all(¤t).unwrap(); + std::fs::write( + sk.join("project.toml"), + "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n", + ) + .unwrap(); + // Place the story directly in 2_current/. + std::fs::write( + current.join("story-5.md"), + "---\nname: Story 5\n---\n", + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + + // start_agent should attempt to assign coder-1 (no infra, so it will + // fail for git reasons), but must NOT fail due to the story already + // being in 2_current/. + let result = pool + .start_agent(tmp.path(), "story-5", None, None) + .await; + match result { + Ok(_) => {} + Err(e) => { + assert!( + !e.contains("Failed to move"), + "should not fail on idempotent move, got: {e}" + ); + } + } + } + + #[tokio::test] + async fn start_agent_explicit_name_unchanged_when_busy() { + let tmp = tempfile::tempdir().unwrap(); + let sk = tmp.path().join(".story_kit"); + std::fs::create_dir_all(&sk).unwrap(); + std::fs::write( + sk.join("project.toml"), + r#" +[[agent]] +name = "coder-1" +stage = "coder" + +[[agent]] +name = "coder-2" +stage = "coder" +"#, + ) + .unwrap(); + + let pool = AgentPool::new_test(3001); + pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); + + // Explicit request for coder-1 (busy) should fail even though coder-2 is free. 
+ let result = pool + .start_agent(tmp.path(), "story-2", Some("coder-1"), None) + .await; + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + err.contains("coder-1") && err.contains("already running"), + "expected explicit busy error, got: {err}" + ); + } + // ── start_agent single-instance concurrency tests ───────────────────────── /// Regression test for bug 97: the agent pool must reject a second concurrent @@ -4948,337 +3553,6 @@ stage = "qa" // result may be Ok (unlikely in test env) or Err for infra reasons — both fine. } - // ── worktree_has_committed_work tests ───────────────────────────────────── - - #[test] - fn worktree_has_committed_work_false_on_fresh_repo() { - let tmp = tempfile::tempdir().unwrap(); - let repo = tmp.path(); - // init_git_repo creates the initial commit on the default branch. - // HEAD IS the base branch — no commits ahead. - init_git_repo(repo); - assert!(!worktree_has_committed_work(repo)); - } - - #[test] - fn worktree_has_committed_work_true_after_commit_on_feature_branch() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let project_root = tmp.path().join("project"); - fs::create_dir_all(&project_root).unwrap(); - init_git_repo(&project_root); - - // Create a git worktree on a feature branch. - let wt_path = tmp.path().join("wt"); - Command::new("git") - .args([ - "worktree", - "add", - &wt_path.to_string_lossy(), - "-b", - "feature/story-99_test", - ]) - .current_dir(&project_root) - .output() - .unwrap(); - - // No commits on the feature branch yet — same as base branch. - assert!(!worktree_has_committed_work(&wt_path)); - - // Add a commit to the feature branch in the worktree. 
- fs::write(wt_path.join("work.txt"), "done").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(&wt_path) - .output() - .unwrap(); - Command::new("git") - .args([ - "-c", - "user.email=test@test.com", - "-c", - "user.name=Test", - "commit", - "-m", - "coder: implement story", - ]) - .current_dir(&wt_path) - .output() - .unwrap(); - - // Now the feature branch is ahead of the base branch. - assert!(worktree_has_committed_work(&wt_path)); - } - - // ── reconcile_on_startup tests ──────────────────────────────────────────── - - #[tokio::test] - async fn reconcile_on_startup_noop_when_no_worktrees() { - let tmp = tempfile::tempdir().unwrap(); - let pool = AgentPool::new_test(3001); - let (tx, _rx) = broadcast::channel(16); - // Should not panic; no worktrees to reconcile. - pool.reconcile_on_startup(tmp.path(), &tx).await; - } - - #[tokio::test] - async fn reconcile_on_startup_emits_done_event() { - let tmp = tempfile::tempdir().unwrap(); - let pool = AgentPool::new_test(3001); - let (tx, mut rx) = broadcast::channel::(16); - pool.reconcile_on_startup(tmp.path(), &tx).await; - - // Collect all events; the last must be "done". - let mut events: Vec = Vec::new(); - while let Ok(evt) = rx.try_recv() { - events.push(evt); - } - assert!( - events.iter().any(|e| e.status == "done"), - "reconcile_on_startup must emit a 'done' event; got: {:?}", - events.iter().map(|e| &e.status).collect::>() - ); - } - - #[tokio::test] - async fn reconcile_on_startup_skips_story_without_committed_work() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - - // Set up story in 2_current/. - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("60_story_test.md"), "test").unwrap(); - - // Create a worktree directory that is a fresh git repo with no commits - // ahead of its own base branch (simulates a worktree where no work was done). 
- let wt_dir = root.join(".story_kit/worktrees/60_story_test"); - fs::create_dir_all(&wt_dir).unwrap(); - init_git_repo(&wt_dir); - - let pool = AgentPool::new_test(3001); - let (tx, _rx) = broadcast::channel(16); - pool.reconcile_on_startup(root, &tx).await; - - // Story should still be in 2_current/ — nothing was reconciled. - assert!( - current.join("60_story_test.md").exists(), - "story should stay in 2_current/ when worktree has no committed work" - ); - } - - #[tokio::test] - async fn reconcile_on_startup_runs_gates_on_worktree_with_committed_work() { - use std::fs; - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - - // Set up a git repo for the project root. - init_git_repo(root); - - // Set up story in 2_current/ and commit it so the project root is clean. - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("61_story_test.md"), "test").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(root) - .output() - .unwrap(); - Command::new("git") - .args([ - "-c", - "user.email=test@test.com", - "-c", - "user.name=Test", - "commit", - "-m", - "add story", - ]) - .current_dir(root) - .output() - .unwrap(); - - // Create a real git worktree for the story. - let wt_dir = root.join(".story_kit/worktrees/61_story_test"); - fs::create_dir_all(wt_dir.parent().unwrap()).unwrap(); - Command::new("git") - .args([ - "worktree", - "add", - &wt_dir.to_string_lossy(), - "-b", - "feature/story-61_story_test", - ]) - .current_dir(root) - .output() - .unwrap(); - - // Add a commit to the feature branch (simulates coder completing work). 
- fs::write(wt_dir.join("implementation.txt"), "done").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(&wt_dir) - .output() - .unwrap(); - Command::new("git") - .args([ - "-c", - "user.email=test@test.com", - "-c", - "user.name=Test", - "commit", - "-m", - "implement story", - ]) - .current_dir(&wt_dir) - .output() - .unwrap(); - - assert!( - worktree_has_committed_work(&wt_dir), - "test setup: worktree should have committed work" - ); - - let pool = AgentPool::new_test(3001); - let (tx, _rx) = broadcast::channel(16); - pool.reconcile_on_startup(root, &tx).await; - - // In the test env, cargo clippy will fail (no Cargo.toml) so gates fail - // and the story stays in 2_current/. The important assertion is that - // reconcile ran without panicking and the story is in a consistent state. - let in_current = current.join("61_story_test.md").exists(); - let in_qa = root - .join(".story_kit/work/3_qa/61_story_test.md") - .exists(); - assert!( - in_current || in_qa, - "story should be in 2_current/ or 3_qa/ after reconciliation" - ); - } - - #[test] - fn test_emit_event_writes_to_log_writer() { - let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - - let log_writer = - AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-emit").unwrap(); - let log_mutex = Mutex::new(log_writer); - - let (tx, _rx) = broadcast::channel::(64); - let event_log: Mutex> = Mutex::new(Vec::new()); - - let event = AgentEvent::Status { - story_id: "42_story_foo".to_string(), - agent_name: "coder-1".to_string(), - status: "running".to_string(), - }; - - emit_event(event, &tx, &event_log, Some(&log_mutex)); - - // Verify event was added to in-memory log - let mem_events = event_log.lock().unwrap(); - assert_eq!(mem_events.len(), 1); - drop(mem_events); - - // Verify event was written to the log file - let log_path = - crate::agent_log::log_file_path(root, "42_story_foo", "coder-1", "sess-emit"); - let entries = crate::agent_log::read_log(&log_path).unwrap(); - 
assert_eq!(entries.len(), 1); - assert_eq!(entries[0].event["type"], "status"); - assert_eq!(entries[0].event["status"], "running"); - } - - // ── bug 167: handle_agent_stream_event routes thinking/text correctly ─── - - #[test] - fn stream_event_thinking_delta_emits_thinking_event() { - let (tx, mut rx) = broadcast::channel::(64); - let event_log: Mutex> = Mutex::new(Vec::new()); - - let event = serde_json::json!({ - "type": "content_block_delta", - "delta": {"type": "thinking_delta", "thinking": "Let me analyze this..."} - }); - - handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None); - - let received = rx.try_recv().unwrap(); - match received { - AgentEvent::Thinking { - story_id, - agent_name, - text, - } => { - assert_eq!(story_id, "s1"); - assert_eq!(agent_name, "coder-1"); - assert_eq!(text, "Let me analyze this..."); - } - other => panic!("Expected Thinking event, got: {other:?}"), - } - } - - #[test] - fn stream_event_text_delta_emits_output_event() { - let (tx, mut rx) = broadcast::channel::(64); - let event_log: Mutex> = Mutex::new(Vec::new()); - - let event = serde_json::json!({ - "type": "content_block_delta", - "delta": {"type": "text_delta", "text": "Here is the result."} - }); - - handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None); - - let received = rx.try_recv().unwrap(); - match received { - AgentEvent::Output { - story_id, - agent_name, - text, - } => { - assert_eq!(story_id, "s1"); - assert_eq!(agent_name, "coder-1"); - assert_eq!(text, "Here is the result."); - } - other => panic!("Expected Output event, got: {other:?}"), - } - } - - #[test] - fn stream_event_input_json_delta_ignored() { - let (tx, mut rx) = broadcast::channel::(64); - let event_log: Mutex> = Mutex::new(Vec::new()); - - let event = serde_json::json!({ - "type": "content_block_delta", - "delta": {"type": "input_json_delta", "partial_json": "{\"file\":"} - }); - - handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, 
None); - - // No event should be emitted for tool argument deltas - assert!(rx.try_recv().is_err()); - } - - #[test] - fn stream_event_non_delta_type_ignored() { - let (tx, mut rx) = broadcast::channel::(64); - let event_log: Mutex> = Mutex::new(Vec::new()); - - let event = serde_json::json!({ - "type": "message_start", - "message": {"role": "assistant"} - }); - - handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None); - - assert!(rx.try_recv().is_err()); - } - // ── bug 118: pending entry cleanup on start_agent failure ──────────────── /// Regression test for bug 118: when worktree creation fails (e.g. because @@ -5765,180 +4039,28 @@ stage = "qa" ); } - // ── resolve_simple_conflicts unit tests ────────────────────────────────── - - #[test] - fn resolve_simple_conflicts_no_markers() { - let input = "line 1\nline 2\nline 3\n"; - let result = resolve_simple_conflicts(input); - assert_eq!(result, Some(input.to_string())); - } - - #[test] - fn resolve_simple_conflicts_additive() { - let input = "\ -before -ours line 1 -ours line 2 -theirs line 1 -theirs line 2 -after -"; - let result = resolve_simple_conflicts(input).unwrap(); - assert!( - !result.contains("<<<<<<<"), - "should not contain conflict markers" - ); - assert!( - !result.contains(">>>>>>>"), - "should not contain conflict markers" - ); - assert!(result.contains("ours line 1")); - assert!(result.contains("ours line 2")); - assert!(result.contains("theirs line 1")); - assert!(result.contains("theirs line 2")); - assert!(result.contains("before")); - assert!(result.contains("after")); - // Ours comes before theirs - let ours_pos = result.find("ours line 1").unwrap(); - let theirs_pos = result.find("theirs line 1").unwrap(); - assert!( - ours_pos < theirs_pos, - "ours should come before theirs" - ); - } - - #[test] - fn resolve_simple_conflicts_multiple_blocks() { - let input = "\ -header -ours block 1 -theirs block 1 -middle -ours block 2 -theirs block 2 -footer -"; - let result = 
resolve_simple_conflicts(input).unwrap(); - assert!(!result.contains("<<<<<<<")); - assert!(result.contains("ours block 1")); - assert!(result.contains("theirs block 1")); - assert!(result.contains("ours block 2")); - assert!(result.contains("theirs block 2")); - assert!(result.contains("header")); - assert!(result.contains("middle")); - assert!(result.contains("footer")); - } - - #[test] - fn resolve_simple_conflicts_malformed_no_separator() { - let input = "\ -<<<<<<< HEAD -ours ->>>>>>> feature -"; - let result = resolve_simple_conflicts(input); - assert!(result.is_none(), "malformed conflict (no separator) should return None"); - } - - #[test] - fn resolve_simple_conflicts_malformed_no_end() { - let input = "\ -<<<<<<< HEAD -ours -======= -theirs -"; - let result = resolve_simple_conflicts(input); - assert!(result.is_none(), "malformed conflict (no end marker) should return None"); - } - - #[test] - fn resolve_simple_conflicts_preserves_no_trailing_newline() { - let input = "before\n<<<<<<< HEAD\nours\n=======\ntheirs\n>>>>>>> branch\nafter"; - let result = resolve_simple_conflicts(input).unwrap(); - assert!(!result.ends_with('\n'), "should not add trailing newline if original lacks one"); - assert!(result.ends_with("after")); - } - - // ── Additional resolve_simple_conflicts tests (real conflict markers) ──── - // - // AC1: The mergemaster reads both sides of the conflict and produces a - // resolved file that preserves changes from both branches. - - #[test] - fn resolve_simple_conflicts_real_markers_additive_both_sides() { - // The most common real-world case: both branches add different content - // (e.g. different functions) to the same region of a file. 
- let input = "// shared code\n\ -<<<<<<< HEAD\n\ -fn master_fn() { println!(\"from master\"); }\n\ -=======\n\ -fn feature_fn() { println!(\"from feature\"); }\n\ ->>>>>>> feature/story-42\n\ -// end\n"; - let result = resolve_simple_conflicts(input).unwrap(); - assert!(!result.contains("<<<<<<<"), "no conflict markers in output"); - assert!(!result.contains(">>>>>>>"), "no conflict markers in output"); - assert!(!result.contains("======="), "no separator in output"); - assert!(result.contains("fn master_fn()"), "master (ours) side must be preserved"); - assert!(result.contains("fn feature_fn()"), "feature (theirs) side must be preserved"); - assert!(result.contains("// shared code"), "context before conflict preserved"); - assert!(result.contains("// end"), "context after conflict preserved"); - // ours (master) must appear before theirs (feature) - assert!( - result.find("master_fn").unwrap() < result.find("feature_fn").unwrap(), - "master side must appear before feature side" - ); - } - - #[test] - fn resolve_simple_conflicts_real_markers_multiple_conflict_blocks() { - // Two separate conflict blocks in the same file — as happens when two - // feature branches both add imports AND test suites to the same file. 
- let input = "// imports\n\ -<<<<<<< HEAD\n\ -import { A } from './a';\n\ -=======\n\ -import { B } from './b';\n\ ->>>>>>> feature/story-43\n\ -// implementation\n\ -<<<<<<< HEAD\n\ -export function masterImpl() {}\n\ -=======\n\ -export function featureImpl() {}\n\ ->>>>>>> feature/story-43\n"; - let result = resolve_simple_conflicts(input).unwrap(); - assert!(!result.contains("<<<<<<<"), "no conflict markers in output"); - assert!(result.contains("import { A }"), "first block ours preserved"); - assert!(result.contains("import { B }"), "first block theirs preserved"); - assert!(result.contains("masterImpl"), "second block ours preserved"); - assert!(result.contains("featureImpl"), "second block theirs preserved"); - assert!(result.contains("// imports"), "surrounding context preserved"); - assert!(result.contains("// implementation"), "surrounding context preserved"); - } - - #[test] - fn resolve_simple_conflicts_real_markers_one_side_empty() { - // Ours (master) has no content in the conflicted region; theirs (feature) - // adds new content. Resolution: keep theirs. 
- let input = "before\n\ -<<<<<<< HEAD\n\ -=======\n\ -feature_addition\n\ ->>>>>>> feature/story-44\n\ -after\n"; - let result = resolve_simple_conflicts(input).unwrap(); - assert!(!result.contains("<<<<<<<"), "no conflict markers"); - assert!(result.contains("feature_addition"), "non-empty side preserved"); - assert!(result.contains("before"), "context preserved"); - assert!(result.contains("after"), "context preserved"); - } - - // ── merge-queue squash-merge integration tests ────────────────────────── + // ── merge_agent_work tests ──────────────────────────────────────────────── #[tokio::test] - async fn squash_merge_uses_merge_queue_no_conflict_markers_on_master() { + async fn merge_agent_work_returns_error_when_branch_not_found() { + use tempfile::tempdir; + + let tmp = tempdir().unwrap(); + let repo = tmp.path(); + init_git_repo(repo); + + let pool = AgentPool::new_test(3001); + // branch feature/story-99_nonexistent does not exist + let result = pool + .merge_agent_work(repo, "99_nonexistent") + .await + .unwrap(); + // Should fail (no branch) — not panic + assert!(!result.success, "should fail when branch missing"); + } + + #[tokio::test] + async fn merge_agent_work_succeeds_on_clean_branch() { use std::fs; use tempfile::tempdir; @@ -5946,163 +4068,165 @@ after\n"; let repo = tmp.path(); init_git_repo(repo); - // Create a file that will be conflicted on master. - fs::write(repo.join("shared.txt"), "line 1\nline 2\n").unwrap(); + // Create a feature branch with a commit + Command::new("git") + .args(["checkout", "-b", "feature/story-23_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("feature.txt"), "feature content").unwrap(); Command::new("git") .args(["add", "."]) .current_dir(repo) .output() .unwrap(); Command::new("git") - .args(["commit", "-m", "initial shared file"]) + .args(["commit", "-m", "add feature"]) .current_dir(repo) .output() .unwrap(); - // Create a feature branch that modifies the file. 
- Command::new("git") - .args(["checkout", "-b", "feature/story-conflict_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("shared.txt"), "line 1\nline 2\nfeature addition\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature: add line"]) - .current_dir(repo) - .output() - .unwrap(); - - // Switch to master and make a conflicting change. + // Switch back to master (initial branch) Command::new("git") .args(["checkout", "master"]) .current_dir(repo) .output() .unwrap(); - fs::write(repo.join("shared.txt"), "line 1\nline 2\nmaster addition\n").unwrap(); + + // Create the story file in 4_merge/ so we can test archival + let merge_dir = repo.join(".story_kit/work/4_merge"); + fs::create_dir_all(&merge_dir).unwrap(); + let story_file = merge_dir.join("23_test.md"); + fs::write(&story_file, "---\nname: Test\n---\n").unwrap(); Command::new("git") .args(["add", "."]) .current_dir(repo) .output() .unwrap(); Command::new("git") - .args(["commit", "-m", "master: add line"]) + .args(["commit", "-m", "add story in merge"]) .current_dir(repo) .output() .unwrap(); - // Run the squash merge. - let result = run_squash_merge(repo, "feature/story-conflict_test", "conflict_test") - .unwrap(); + let pool = AgentPool::new_test(3001); + let report = pool.merge_agent_work(repo, "23_test").await.unwrap(); - // Master should NEVER contain conflict markers, regardless of outcome. 
- let master_content = fs::read_to_string(repo.join("shared.txt")).unwrap(); + // Merge should succeed (gates will run but cargo/pnpm results will depend on env) + // At minimum the merge itself should succeed + assert!(!report.had_conflicts, "should have no conflicts"); + // Note: gates_passed may be false in test env without Rust project, that's OK + // The important thing is the merge itself ran assert!( - !master_content.contains("<<<<<<<"), - "master must never contain conflict markers, got:\n{master_content}" + report.success || report.gate_output.contains("Failed to run") || !report.gates_passed, + "report should be coherent: {report:?}" ); - assert!( - !master_content.contains(">>>>>>>"), - "master must never contain conflict markers, got:\n{master_content}" - ); - - // The merge should have had conflicts. - assert!(result.had_conflicts, "should detect conflicts"); - - // Conflicts should have been auto-resolved (both are simple additions). - if result.conflicts_resolved { - assert!(result.success, "auto-resolved merge should succeed"); - assert!( - master_content.contains("master addition"), - "master side should be present" - ); - assert!( - master_content.contains("feature addition"), - "feature side should be present" - ); + // Story should be in done if gates passed + if report.story_archived { + let done = repo.join(".story_kit/work/5_done/23_test.md"); + assert!(done.exists(), "done file should exist"); } - - // Verify no leftover merge-queue branch. - let branches = Command::new("git") - .args(["branch", "--list", "merge-queue/*"]) - .current_dir(repo) - .output() - .unwrap(); - let branch_list = String::from_utf8_lossy(&branches.stdout); - assert!( - branch_list.trim().is_empty(), - "merge-queue branch should be cleaned up, got: {branch_list}" - ); - - // Verify no leftover merge workspace directory. 
- assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace should be cleaned up" - ); } - #[tokio::test] - async fn squash_merge_clean_merge_succeeds() { + // ── quality gate ordering test ──────────────────────────────── + + /// Regression test for bug 142: quality gates must run BEFORE the fast-forward + /// to master so that broken code never lands on master. + /// + /// Setup: a repo with a failing `script/test`, a feature branch with one commit. + /// When `run_squash_merge` is called, the gates must detect failure and abort the + /// fast-forward, leaving master HEAD unchanged. + #[cfg(unix)] + #[test] + fn quality_gates_run_before_fast_forward_to_master() { use std::fs; + use std::os::unix::fs::PermissionsExt; use tempfile::tempdir; let tmp = tempdir().unwrap(); let repo = tmp.path(); init_git_repo(repo); - // Create feature branch with a new file. - Command::new("git") - .args(["checkout", "-b", "feature/story-clean_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("new_file.txt"), "new content").unwrap(); + // Add a failing script/test so quality gates will fail. + let script_dir = repo.join("script"); + fs::create_dir_all(&script_dir).unwrap(); + let script_test = script_dir.join("test"); + fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap(); + let mut perms = fs::metadata(&script_test).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script_test, perms).unwrap(); Command::new("git") .args(["add", "."]) .current_dir(repo) .output() .unwrap(); Command::new("git") - .args(["commit", "-m", "add new file"]) + .args(["commit", "-m", "add failing script/test"]) .current_dir(repo) .output() .unwrap(); - // Switch back to master. + // Create a feature branch with a commit. 
+ Command::new("git") + .args(["checkout", "-b", "feature/story-142_test"]) + .current_dir(repo) + .output() + .unwrap(); + fs::write(repo.join("change.txt"), "feature change").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(repo) + .output() + .unwrap(); + Command::new("git") + .args(["commit", "-m", "feature work"]) + .current_dir(repo) + .output() + .unwrap(); + + // Switch back to master and record its HEAD. Command::new("git") .args(["checkout", "master"]) .current_dir(repo) .output() .unwrap(); + let head_before = String::from_utf8( + Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(repo) + .output() + .unwrap() + .stdout, + ) + .unwrap() + .trim() + .to_string(); - let result = run_squash_merge(repo, "feature/story-clean_test", "clean_test") - .unwrap(); + // Run the squash-merge. The failing script/test makes quality gates + // fail → fast-forward must NOT happen. + let result = crate::agents::merge::run_squash_merge(repo, "feature/story-142_test", "142_test").unwrap(); - assert!(result.success, "clean merge should succeed"); - assert!(!result.had_conflicts, "clean merge should have no conflicts"); - assert!(!result.conflicts_resolved, "no conflicts means nothing to resolve"); + let head_after = String::from_utf8( + Command::new("git") + .args(["rev-parse", "HEAD"]) + .current_dir(repo) + .output() + .unwrap() + .stdout, + ) + .unwrap() + .trim() + .to_string(); + + // Gates must have failed (script/test exits 1) so master should be untouched. 
assert!( - repo.join("new_file.txt").exists(), - "merged file should exist on master" + !result.success, + "run_squash_merge must report failure when gates fail" + ); + assert_eq!( + head_before, head_after, + "master HEAD must not advance when quality gates fail (bug 142)" ); - } - - #[tokio::test] - async fn squash_merge_nonexistent_branch_fails() { - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - let result = run_squash_merge(repo, "feature/story-nope", "nope") - .unwrap(); - - assert!(!result.success, "merge of nonexistent branch should fail"); } #[tokio::test] @@ -6196,1431 +4320,151 @@ after\n"; assert!(report.had_conflicts, "should report conflicts"); } - /// Verifies that `run_squash_merge` succeeds even when master has advanced - /// with unrelated commits after the merge-queue branch was created (the race - /// condition that previously caused fast-forward to fail). - #[tokio::test] - async fn squash_merge_succeeds_when_master_diverges() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create an initial file on master. - fs::write(repo.join("base.txt"), "base content\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "initial"]) - .current_dir(repo) - .output() - .unwrap(); - - // Create a feature branch with a new file (clean merge, no conflicts). 
- Command::new("git") - .args(["checkout", "-b", "feature/story-diverge_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("feature.txt"), "feature content\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature: add file"]) - .current_dir(repo) - .output() - .unwrap(); - - // Switch back to master and simulate a filesystem watcher commit - // (e.g. a pipeline file move) that advances master beyond the point - // where the merge-queue branch will be created. - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - let sk_dir = repo.join(".story_kit/work/4_merge"); - fs::create_dir_all(&sk_dir).unwrap(); - fs::write( - sk_dir.join("diverge_test.md"), - "---\nname: test\n---\n", - ) - .unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "story-kit: queue diverge_test for merge"]) - .current_dir(repo) - .output() - .unwrap(); - - // Run the squash merge. With the old fast-forward approach, this - // would fail because master diverged. With cherry-pick, it succeeds. - let result = - run_squash_merge(repo, "feature/story-diverge_test", "diverge_test").unwrap(); - - assert!( - result.success, - "squash merge should succeed despite diverged master: {}", - result.output - ); - assert!( - !result.had_conflicts, - "no conflicts expected" - ); - - // Verify the feature file landed on master. - assert!( - repo.join("feature.txt").exists(), - "feature file should be on master after cherry-pick" - ); - let feature_content = fs::read_to_string(repo.join("feature.txt")).unwrap(); - assert_eq!(feature_content, "feature content\n"); - - // Verify the watcher commit's file is still present. 
- assert!( - sk_dir.join("diverge_test.md").exists(), - "watcher-committed file should still be on master" - ); - - // Verify cleanup: no merge-queue branch, no merge workspace. - let branches = Command::new("git") - .args(["branch", "--list", "merge-queue/*"]) - .current_dir(repo) - .output() - .unwrap(); - let branch_list = String::from_utf8_lossy(&branches.stdout); - assert!( - branch_list.trim().is_empty(), - "merge-queue branch should be cleaned up, got: {branch_list}" - ); - assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace should be cleaned up" - ); - } - - /// Bug 226: Verifies that `run_squash_merge` returns `success: false` when - /// the feature branch has no changes beyond what's already on master (empty diff). - #[tokio::test] - async fn squash_merge_empty_diff_fails() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create a file on master. - fs::write(repo.join("code.txt"), "content\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add code"]) - .current_dir(repo) - .output() - .unwrap(); - - // Create a feature branch with NO additional changes (empty diff). - Command::new("git") - .args(["checkout", "-b", "feature/story-empty_test"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - let result = - run_squash_merge(repo, "feature/story-empty_test", "empty_test").unwrap(); - - // Bug 226: empty diff must NOT be treated as success. - assert!( - !result.success, - "empty diff merge must fail, not silently succeed: {}", - result.output - ); - - // Cleanup should still happen. 
- assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace should be cleaned up" - ); - } - - /// Bug 226: Verifies that `run_squash_merge` fails when the feature branch - /// only contains .story_kit/ file moves with no real code changes. - #[tokio::test] - async fn squash_merge_md_only_changes_fails() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create a feature branch that only moves a .story_kit/ file. - Command::new("git") - .args(["checkout", "-b", "feature/story-md_only_test"]) - .current_dir(repo) - .output() - .unwrap(); - let sk_dir = repo.join(".story_kit/work/2_current"); - fs::create_dir_all(&sk_dir).unwrap(); - fs::write( - sk_dir.join("md_only_test.md"), - "---\nname: Test\n---\n", - ) - .unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "move story file"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - let result = - run_squash_merge(repo, "feature/story-md_only_test", "md_only_test").unwrap(); - - // The squash merge will commit the .story_kit/ file, but should fail because - // there are no code changes outside .story_kit/. - assert!( - !result.success, - "merge with only .story_kit/ changes must fail: {}", - result.output - ); - - // Cleanup should still happen. - assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace should be cleaned up" - ); - } - - // ── AC4: additive multi-branch conflict auto-resolution ──────────────── - // - // Verifies that when two feature branches both add different code to the - // same region of a file (the most common conflict pattern in this project), - // the mergemaster auto-resolves the conflict and preserves both additions. 
- #[tokio::test] - async fn squash_merge_additive_conflict_both_additions_preserved() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Initial file with a shared base. - fs::write(repo.join("module.rs"), "// module\npub fn existing() {}\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "initial module"]) - .current_dir(repo) - .output() - .unwrap(); - - // Feature branch: appends feature_fn to the file. - Command::new("git") - .args(["checkout", "-b", "feature/story-238_additive"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write( - repo.join("module.rs"), - "// module\npub fn existing() {}\npub fn feature_fn() {}\n", - ) - .unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add feature_fn"]) - .current_dir(repo) - .output() - .unwrap(); - - // Simulate another branch already merged into master: appends master_fn. - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write( - repo.join("module.rs"), - "// module\npub fn existing() {}\npub fn master_fn() {}\n", - ) - .unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add master_fn (another branch merged)"]) - .current_dir(repo) - .output() - .unwrap(); - - // Squash-merge the feature branch — conflicts because both appended to the same location. - let result = - run_squash_merge(repo, "feature/story-238_additive", "238_additive").unwrap(); - - // Conflict must be detected and auto-resolved. 
- assert!(result.had_conflicts, "additive conflict should be detected"); - assert!( - result.conflicts_resolved, - "additive conflict must be auto-resolved; output:\n{}", - result.output - ); - - // Master must contain both additions without conflict markers. - let content = fs::read_to_string(repo.join("module.rs")).unwrap(); - assert!(!content.contains("<<<<<<<"), "master must not contain conflict markers"); - assert!(!content.contains(">>>>>>>"), "master must not contain conflict markers"); - assert!( - content.contains("feature_fn"), - "feature branch addition must be preserved on master" - ); - assert!( - content.contains("master_fn"), - "master branch addition must be preserved on master" - ); - assert!(content.contains("existing"), "original function must be preserved"); - - // Cleanup: no leftover merge-queue branch or workspace. - let branches = Command::new("git") - .args(["branch", "--list", "merge-queue/*"]) - .current_dir(repo) - .output() - .unwrap(); - assert!( - String::from_utf8_lossy(&branches.stdout).trim().is_empty(), - "merge-queue branch must be cleaned up" - ); - assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace must be cleaned up" - ); - } - - // ── AC3: quality gates fail after conflict resolution ───────────────── - // - // Verifies that when conflicts are auto-resolved but the resulting code - // fails quality gates, the merge is reported as failed (not merged to master). - #[tokio::test] - async fn squash_merge_conflict_resolved_but_gates_fail_reported_as_failure() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Add a script/test that always fails (quality gate). This must be on - // master before the feature branch forks so it doesn't cause its own conflict. 
- let script_dir = repo.join("script"); - fs::create_dir_all(&script_dir).unwrap(); - fs::write(script_dir.join("test"), "#!/bin/sh\nexit 1\n").unwrap(); - #[cfg(unix)] - { - use std::os::unix::fs::PermissionsExt; - std::fs::set_permissions( - script_dir.join("test"), - std::fs::Permissions::from_mode(0o755), - ) - .unwrap(); - } - fs::write(repo.join("code.txt"), "// base\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "initial with failing script/test"]) - .current_dir(repo) - .output() - .unwrap(); - - // Feature branch: appends feature content (creates future conflict point). - Command::new("git") - .args(["checkout", "-b", "feature/story-238_gates_fail"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("code.txt"), "// base\nfeature_addition\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature addition"]) - .current_dir(repo) - .output() - .unwrap(); - - // Master: append different content at same location (creates conflict). - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("code.txt"), "// base\nmaster_addition\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "master addition"]) - .current_dir(repo) - .output() - .unwrap(); - - // Squash-merge: conflict detected → auto-resolved → quality gates run → fail. 
- let result = - run_squash_merge(repo, "feature/story-238_gates_fail", "238_gates_fail").unwrap(); - - assert!(result.had_conflicts, "conflict must be detected"); - assert!(result.conflicts_resolved, "additive conflict must be auto-resolved"); - assert!(!result.gates_passed, "quality gates must fail (script/test exits 1)"); - assert!(!result.success, "merge must be reported as failed when gates fail"); - assert!( - !result.output.is_empty(), - "output must contain gate failure details" - ); - - // Master must NOT have been updated (cherry-pick was blocked by gate failure). - let content = fs::read_to_string(repo.join("code.txt")).unwrap(); - assert!(!content.contains("<<<<<<<"), "master must not contain conflict markers"); - // master_addition was the last commit on master; feature_addition must NOT be there. - assert!( - !content.contains("feature_addition"), - "feature code must not land on master when gates fail" - ); - - // Cleanup must still happen. - assert!( - !repo.join(".story_kit/merge_workspace").exists(), - "merge workspace must be cleaned up even on gate failure" - ); - } - - /// Bug 226: feature_branch_has_unmerged_changes returns true when the - /// feature branch has commits not on master. - #[test] - fn feature_branch_has_unmerged_changes_detects_unmerged_code() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create a feature branch with a code commit. 
- Command::new("git") - .args(["checkout", "-b", "feature/story-50_story_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("feature.rs"), "fn main() {}").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add feature"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - assert!( - feature_branch_has_unmerged_changes(repo, "50_story_test"), - "should detect unmerged changes on feature branch" - ); - } - - /// Bug 226: feature_branch_has_unmerged_changes returns false when no - /// feature branch exists. - #[test] - fn feature_branch_has_unmerged_changes_false_when_no_branch() { - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - assert!( - !feature_branch_has_unmerged_changes(repo, "99_nonexistent"), - "should return false when no feature branch" - ); - } - - /// Verifies that stale merge_workspace directories from previous failed - /// merges are cleaned up before a new merge attempt. - #[tokio::test] - async fn squash_merge_cleans_up_stale_workspace() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Create a feature branch with a file. - Command::new("git") - .args(["checkout", "-b", "feature/story-stale_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("stale.txt"), "content\n").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature: stale test"]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - // Simulate a stale merge workspace left from a previous failed merge. 
- let stale_ws = repo.join(".story_kit/merge_workspace"); - fs::create_dir_all(&stale_ws).unwrap(); - fs::write(stale_ws.join("leftover.txt"), "stale").unwrap(); - - // Run the merge — it should clean up the stale workspace first. - let result = - run_squash_merge(repo, "feature/story-stale_test", "stale_test").unwrap(); - - assert!( - result.success, - "merge should succeed after cleaning up stale workspace: {}", - result.output - ); - assert!( - !stale_ws.exists(), - "stale merge workspace should be cleaned up" - ); - } - - // ── process health monitoring tests ────────────────────────────────────── - - /// Demonstrates that the PTY read-loop inactivity timeout fires when no output - /// is produced by the agent process within the configured window. - /// - /// A `HangingReader` simulates a hung agent process that never writes to the - /// PTY master. The test verifies that `recv_timeout` fires with a `Timeout` - /// error — the signal that causes `run_agent_pty_blocking` to kill the child - /// and return `Err("Agent inactivity timeout: …")`, which the error handler - /// in `start_agent` converts into `AgentStatus::Failed`. 
- #[test] - fn pty_inactivity_timeout_kills_hung_agent() { - struct HangingReader; - impl std::io::Read for HangingReader { - fn read(&mut self, _buf: &mut [u8]) -> std::io::Result { - std::thread::sleep(std::time::Duration::from_secs(300)); - Ok(0) - } - } - - let (line_tx, line_rx) = - std::sync::mpsc::channel::>(); - - std::thread::spawn(move || { - let buf_reader = BufReader::new(HangingReader); - for line in buf_reader.lines() { - if line_tx.send(line).is_err() { - break; - } - } - }); - - let timeout_dur = std::time::Duration::from_millis(100); - let result = line_rx.recv_timeout(timeout_dur); - - assert!( - matches!( - result, - Err(std::sync::mpsc::RecvTimeoutError::Timeout) - ), - "recv_timeout must fire when no PTY output arrives within the deadline" - ); - } - - /// Demonstrates that the background watchdog detects Running agents whose - /// backing tokio task has already finished (orphaned entries) and marks them - /// as Failed, emitting an Error event so that `wait_for_agent` unblocks. - #[tokio::test] - async fn watchdog_detects_orphaned_running_agent() { - let pool = AgentPool::new_test(3001); - - let handle = tokio::spawn(async {}); - tokio::time::sleep(std::time::Duration::from_millis(20)).await; - assert!(handle.is_finished(), "task should be finished before injection"); - - let tx = - pool.inject_test_agent_with_handle("orphan_story", "coder", AgentStatus::Running, handle); - let mut rx = tx.subscribe(); - - pool.run_watchdog_once(); - - { - let agents = pool.agents.lock().unwrap(); - let key = composite_key("orphan_story", "coder"); - let agent = agents.get(&key).unwrap(); - assert_eq!( - agent.status, - AgentStatus::Failed, - "watchdog must mark an orphaned Running agent as Failed" - ); - } - - let event = rx.try_recv().expect("watchdog must emit an Error event"); - assert!( - matches!(event, AgentEvent::Error { .. 
}), - "expected AgentEvent::Error, got: {event:?}" - ); - } - - // ── remove_agents_for_story tests ──────────────────────────────────────── - - #[test] - fn remove_agents_for_story_removes_all_entries() { - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("story_a", "coder-1", AgentStatus::Completed); - pool.inject_test_agent("story_a", "qa", AgentStatus::Failed); - pool.inject_test_agent("story_b", "coder-1", AgentStatus::Running); - - let removed = pool.remove_agents_for_story("story_a"); - assert_eq!(removed, 2, "should remove both agents for story_a"); - - let agents = pool.list_agents().unwrap(); - assert_eq!(agents.len(), 1, "only story_b agent should remain"); - assert_eq!(agents[0].story_id, "story_b"); - } - - #[test] - fn remove_agents_for_story_returns_zero_when_no_match() { - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("story_a", "coder-1", AgentStatus::Running); - - let removed = pool.remove_agents_for_story("nonexistent"); - assert_eq!(removed, 0); - - let agents = pool.list_agents().unwrap(); - assert_eq!(agents.len(), 1, "existing agents should not be affected"); - } - - // ── archive + cleanup integration test ─────────────────────────────────── + // ── reconcile_on_startup tests ──────────────────────────────────────────── #[tokio::test] - async fn archiving_story_removes_agent_entries_from_pool() { - use std::fs; - + async fn reconcile_on_startup_noop_when_no_worktrees() { let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - - // Set up story in 2_current/ - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("60_story_cleanup.md"), "test").unwrap(); - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("60_story_cleanup", "coder-1", AgentStatus::Completed); - pool.inject_test_agent("60_story_cleanup", "qa", AgentStatus::Completed); - pool.inject_test_agent("61_story_other", "coder-1", AgentStatus::Running); - - // Verify all 3 
agents exist. - assert_eq!(pool.list_agents().unwrap().len(), 3); - - // Archive the story. - move_story_to_archived(root, "60_story_cleanup").unwrap(); - pool.remove_agents_for_story("60_story_cleanup"); - - // Agent entries for the archived story should be gone. - let remaining = pool.list_agents().unwrap(); - assert_eq!(remaining.len(), 1, "only the other story's agent should remain"); - assert_eq!(remaining[0].story_id, "61_story_other"); - - // Story file should be in 5_done/ - assert!(root.join(".story_kit/work/5_done/60_story_cleanup.md").exists()); - } - - // ── story 216: merge worktree uses project.toml component setup ─────────── - - /// When the project has `[[component]]` entries in `.story_kit/project.toml`, - /// `run_squash_merge` must run their setup commands in the merge worktree - /// before quality gates — matching the behaviour of `create_worktree`. - #[cfg(unix)] - #[test] - fn squash_merge_runs_component_setup_from_project_toml() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // Add a .story_kit/project.toml with a component whose setup writes a - // sentinel file so we can confirm the command ran. - let sk_dir = repo.join(".story_kit"); - fs::create_dir_all(&sk_dir).unwrap(); - fs::write( - sk_dir.join("project.toml"), - "[[component]]\nname = \"sentinel\"\npath = \".\"\nsetup = [\"touch setup_ran.txt\"]\n", - ) - .unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "add project.toml with component setup"]) - .current_dir(repo) - .output() - .unwrap(); - - // Create feature branch with a change. 
- Command::new("git") - .args(["checkout", "-b", "feature/story-216_setup_test"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("feature.txt"), "change").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature work"]) - .current_dir(repo) - .output() - .unwrap(); - - // Switch back to master. - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - let result = - run_squash_merge(repo, "feature/story-216_setup_test", "216_setup_test").unwrap(); - - // The output must mention component setup, proving the new code path ran. - assert!( - result.output.contains("component setup"), - "merge output must mention component setup when project.toml has components, got:\n{}", - result.output - ); - // The sentinel command must appear in the output. - assert!( - result.output.contains("sentinel"), - "merge output must name the component, got:\n{}", - result.output - ); - } - - /// When there are no `[[component]]` entries in project.toml (or no - /// project.toml at all), `run_squash_merge` must succeed without trying to - /// run any setup. No hardcoded pnpm or frontend/ references should appear. - #[cfg(unix)] - #[test] - fn squash_merge_succeeds_without_components_in_project_toml() { - use std::fs; - use tempfile::tempdir; - - let tmp = tempdir().unwrap(); - let repo = tmp.path(); - init_git_repo(repo); - - // No .story_kit/project.toml — no component setup. 
- fs::write(repo.join("file.txt"), "initial").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "initial commit"]) - .current_dir(repo) - .output() - .unwrap(); - - Command::new("git") - .args(["checkout", "-b", "feature/story-216_no_components"]) - .current_dir(repo) - .output() - .unwrap(); - fs::write(repo.join("change.txt"), "change").unwrap(); - Command::new("git") - .args(["add", "."]) - .current_dir(repo) - .output() - .unwrap(); - Command::new("git") - .args(["commit", "-m", "feature"]) - .current_dir(repo) - .output() - .unwrap(); - - Command::new("git") - .args(["checkout", "master"]) - .current_dir(repo) - .output() - .unwrap(); - - let result = - run_squash_merge(repo, "feature/story-216_no_components", "216_no_components") - .unwrap(); - - // No pnpm or frontend references should appear in the output. - assert!( - !result.output.contains("pnpm"), - "output must not mention pnpm, got:\n{}", - result.output - ); - assert!( - !result.output.contains("frontend/"), - "output must not mention frontend/, got:\n{}", - result.output - ); - } - - // ── check_orphaned_agents return value tests (bug 161) ────────────────── - - #[tokio::test] - async fn check_orphaned_agents_returns_count_of_orphaned_agents() { - let pool = AgentPool::new_test(3001); - - // Spawn two tasks that finish immediately. 
- let h1 = tokio::spawn(async {}); - let h2 = tokio::spawn(async {}); - tokio::time::sleep(std::time::Duration::from_millis(20)).await; - assert!(h1.is_finished()); - assert!(h2.is_finished()); - - pool.inject_test_agent_with_handle("story_a", "coder", AgentStatus::Running, h1); - pool.inject_test_agent_with_handle("story_b", "coder", AgentStatus::Running, h2); - - let found = check_orphaned_agents(&pool.agents); - assert_eq!(found, 2, "should detect both orphaned agents"); - } - - #[test] - fn check_orphaned_agents_returns_zero_when_no_orphans() { - let pool = AgentPool::new_test(3001); - // Inject agents in terminal states — not orphaned. - pool.inject_test_agent("story_a", "coder", AgentStatus::Completed); - pool.inject_test_agent("story_b", "qa", AgentStatus::Failed); - - let found = check_orphaned_agents(&pool.agents); - assert_eq!(found, 0, "no orphans should be detected for terminal agents"); + let (tx, _rx) = broadcast::channel(16); + // Should not panic; no worktrees to reconcile. + pool.reconcile_on_startup(tmp.path(), &tx).await; } #[tokio::test] - async fn watchdog_orphan_detection_returns_nonzero_enabling_auto_assign() { - // This test verifies the contract that `check_orphaned_agents` returns - // a non-zero count when orphans exist, which the watchdog uses to - // decide whether to trigger auto-assign (bug 161). - let pool = AgentPool::new_test(3001); - - let handle = tokio::spawn(async {}); - tokio::time::sleep(std::time::Duration::from_millis(20)).await; - - pool.inject_test_agent_with_handle( - "orphan_story", - "coder", - AgentStatus::Running, - handle, - ); - - // Before watchdog: agent is Running. - { - let agents = pool.agents.lock().unwrap(); - let key = composite_key("orphan_story", "coder"); - assert_eq!(agents.get(&key).unwrap().status, AgentStatus::Running); - } - - // Run watchdog pass — should return 1 (orphan found). 
- let found = check_orphaned_agents(&pool.agents); - assert_eq!( - found, 1, - "watchdog must return 1 for a single orphaned agent" - ); - - // After watchdog: agent is Failed. - { - let agents = pool.agents.lock().unwrap(); - let key = composite_key("orphan_story", "coder"); - assert_eq!( - agents.get(&key).unwrap().status, - AgentStatus::Failed, - "orphaned agent must be marked Failed" - ); - } - } - - // ── kill_all_children tests ──────────────────────────────────── - - /// Returns true if a process with the given PID is currently running. - fn process_is_running(pid: u32) -> bool { - std::process::Command::new("ps") - .arg("-p") - .arg(pid.to_string()) - .stdout(std::process::Stdio::null()) - .stderr(std::process::Stdio::null()) - .status() - .map(|s| s.success()) - .unwrap_or(false) - } - - #[test] - fn kill_all_children_is_safe_on_empty_pool() { - let pool = AgentPool::new_test(3001); - // Should not panic or deadlock on an empty registry. - pool.kill_all_children(); - assert_eq!(pool.child_killer_count(), 0); - } - - #[test] - fn kill_all_children_kills_real_process() { - // GIVEN: a real PTY child process (sleep 100) with its killer registered. - let pool = AgentPool::new_test(3001); - - let pty_system = native_pty_system(); - let pair = pty_system - .openpty(PtySize { - rows: 24, - cols: 80, - pixel_width: 0, - pixel_height: 0, - }) - .expect("failed to open pty"); - - let mut cmd = CommandBuilder::new("sleep"); - cmd.arg("100"); - let mut child = pair - .slave - .spawn_command(cmd) - .expect("failed to spawn sleep"); - let pid = child.process_id().expect("no pid"); - - pool.inject_child_killer("story:agent", child.clone_killer()); - - // Verify the process is alive before we kill it. - assert!( - process_is_running(pid), - "process {pid} should be running before kill_all_children" - ); - - // WHEN: kill_all_children() is called. - pool.kill_all_children(); - - // Collect the exit status (prevents zombie; also ensures signal was sent). 
- let _ = child.wait(); - - // THEN: the process should be dead. - assert!( - !process_is_running(pid), - "process {pid} should have been killed by kill_all_children" - ); - } - - #[test] - fn kill_all_children_clears_registry() { - // GIVEN: a pool with one registered killer. - let pool = AgentPool::new_test(3001); - - let pty_system = native_pty_system(); - let pair = pty_system - .openpty(PtySize { - rows: 24, - cols: 80, - pixel_width: 0, - pixel_height: 0, - }) - .expect("failed to open pty"); - - let mut cmd = CommandBuilder::new("sleep"); - cmd.arg("1"); - let mut child = pair - .slave - .spawn_command(cmd) - .expect("failed to spawn sleep"); - - pool.inject_child_killer("story:agent", child.clone_killer()); - assert_eq!(pool.child_killer_count(), 1); - - // WHEN: kill_all_children() is called. - pool.kill_all_children(); - let _ = child.wait(); - - // THEN: the registry is empty. - assert_eq!( - pool.child_killer_count(), - 0, - "child_killers should be cleared after kill_all_children" - ); - } - - // ── Bug 173: pipeline advance sends AgentStateChanged via real watcher_tx ─ - - #[tokio::test] - async fn pipeline_advance_sends_agent_state_changed_to_watcher_tx() { - use std::fs; - + async fn reconcile_on_startup_emits_done_event() { let tmp = tempfile::tempdir().unwrap(); - let root = tmp.path(); - - // Set up story in 2_current/ - let current = root.join(".story_kit/work/2_current"); - fs::create_dir_all(¤t).unwrap(); - fs::write(current.join("173_story_test.md"), "test").unwrap(); - // Ensure 3_qa/ exists for the move target - fs::create_dir_all(root.join(".story_kit/work/3_qa")).unwrap(); - // Ensure 1_upcoming/ exists (start_agent calls move_story_to_current) - fs::create_dir_all(root.join(".story_kit/work/1_upcoming")).unwrap(); - - // Write a project.toml with a qa agent so start_agent can resolve it. 
- fs::create_dir_all(root.join(".story_kit")).unwrap(); - fs::write( - root.join(".story_kit/project.toml"), - r#" -[[agent]] -name = "coder-1" -role = "Coder" -command = "echo" -args = ["noop"] -prompt = "test" -stage = "coder" - -[[agent]] -name = "qa" -role = "QA" -command = "echo" -args = ["noop"] -prompt = "test" -stage = "qa" -"#, - ) - .unwrap(); - let pool = AgentPool::new_test(3001); - // Subscribe to the watcher channel BEFORE the pipeline advance. - let mut rx = pool.watcher_tx.subscribe(); + let (tx, mut rx) = broadcast::channel::(16); + pool.reconcile_on_startup(tmp.path(), &tx).await; - // Call pipeline advance directly. This will: - // 1. Move the story to 3_qa/ - // 2. Start the QA agent (which calls notify_agent_state_changed) - // Note: the actual agent process will fail (no real worktree), but the - // agent insertion and notification happen before the background spawn. - pool.run_pipeline_advance( - "173_story_test", - "coder-1", - CompletionReport { - summary: "done".to_string(), - gates_passed: true, - gate_output: String::new(), - }, - Some(root.to_path_buf()), - None, - false, - ) - .await; - - // The pipeline advance should have sent AgentStateChanged events via - // the pool's watcher_tx (not a dummy channel). Collect all events. - let mut got_agent_state_changed = false; + // Collect all events; the last must be "done". 
+        let mut events = Vec::new();
         while let Ok(evt) = rx.try_recv() {
-            if matches!(evt, WatcherEvent::AgentStateChanged) {
-                got_agent_state_changed = true;
-                break;
-            }
+            events.push(evt);
         }
-
         assert!(
-            got_agent_state_changed,
-            "pipeline advance should send AgentStateChanged through the real watcher_tx \
-             (bug 173: lozenges must update when agents are assigned during pipeline advance)"
+            events.iter().any(|e| e.status == "done"),
+            "reconcile_on_startup must emit a 'done' event; got: {:?}",
+            events.iter().map(|e| &e.status).collect::<Vec<_>>()
         );
     }
 
-    // ── available_agents_for_stage tests (story 190) ──────────────────────────
+    #[tokio::test]
+    async fn reconcile_on_startup_skips_story_without_committed_work() {
+        use std::fs;
+        let tmp = tempfile::tempdir().unwrap();
+        let root = tmp.path();
 
-    fn make_config(toml_str: &str) -> ProjectConfig {
-        ProjectConfig::parse(toml_str).unwrap()
-    }
+        // Set up story in 2_current/.
+        let current = root.join(".story_kit/work/2_current");
+        fs::create_dir_all(&current).unwrap();
+        fs::write(current.join("60_story_test.md"), "test").unwrap();
 
-    #[test]
-    fn available_agents_for_stage_returns_idle_agents() {
-        let config = make_config(
-            r#"
-[[agent]]
-name = "coder-1"
-stage = "coder"
+        // Create a worktree directory that is a fresh git repo with no commits
+        // ahead of its own base branch (simulates a worktree where no work was done).
+        let wt_dir = root.join(".story_kit/worktrees/60_story_test");
+        fs::create_dir_all(&wt_dir).unwrap();
+        init_git_repo(&wt_dir);
 
-[[agent]]
-name = "coder-2"
-stage = "coder"
-
-[[agent]]
-name = "qa"
-stage = "qa"
-"#,
-        );
         let pool = AgentPool::new_test(3001);
-        // coder-1 is busy on story-1
-        pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
+        let (tx, _rx) = broadcast::channel(16);
+        pool.reconcile_on_startup(root, &tx).await;
 
-        let available = pool
-            .available_agents_for_stage(&config, &PipelineStage::Coder)
+        // Story should still be in 2_current/ — nothing was reconciled. 
+        assert!(
+            current.join("60_story_test.md").exists(),
+            "story should stay in 2_current/ when worktree has no committed work"
+        );
+    }
+
+    #[tokio::test]
+    async fn reconcile_on_startup_runs_gates_on_worktree_with_committed_work() {
+        use std::fs;
+        let tmp = tempfile::tempdir().unwrap();
+        let root = tmp.path();
+
+        // Set up a git repo for the project root.
+        init_git_repo(root);
+
+        // Set up story in 2_current/ and commit it so the project root is clean.
+        let current = root.join(".story_kit/work/2_current");
+        fs::create_dir_all(&current).unwrap();
+        fs::write(current.join("61_story_test.md"), "test").unwrap();
+        Command::new("git")
+            .args(["add", "."])
+            .current_dir(root)
+            .output()
             .unwrap();
-        assert_eq!(available, vec!["coder-2"]);
-
-        let available_qa = pool
-            .available_agents_for_stage(&config, &PipelineStage::Qa)
+        Command::new("git")
+            .args([
+                "-c",
+                "user.email=test@test.com",
+                "-c",
+                "user.name=Test",
+                "commit",
+                "-m",
+                "add story",
+            ])
+            .current_dir(root)
+            .output()
             .unwrap();
-        assert_eq!(available_qa, vec!["qa"]);
-    }
-
-    #[test]
-    fn available_agents_for_stage_returns_empty_when_all_busy() {
-        let config = make_config(
-            r#"
-[[agent]]
-name = "coder-1"
-stage = "coder"
-"#,
-        );
-        let pool = AgentPool::new_test(3001);
-        pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
-
-        let available = pool
-            .available_agents_for_stage(&config, &PipelineStage::Coder)
+        // Create a real git worktree for the story. 
+ let wt_dir = root.join(".story_kit/worktrees/61_story_test"); + fs::create_dir_all(wt_dir.parent().unwrap()).unwrap(); + Command::new("git") + .args([ + "worktree", + "add", + &wt_dir.to_string_lossy(), + "-b", + "feature/story-61_story_test", + ]) + .current_dir(root) + .output() .unwrap(); - assert!(available.is_empty()); - } - #[test] - fn available_agents_for_stage_ignores_completed_agents() { - let config = make_config( - r#" -[[agent]] -name = "coder-1" -stage = "coder" -"#, - ); - let pool = AgentPool::new_test(3001); - // Completed agents should not count as busy. - pool.inject_test_agent("story-1", "coder-1", AgentStatus::Completed); - - let available = pool - .available_agents_for_stage(&config, &PipelineStage::Coder) + // Add a commit to the feature branch (simulates coder completing work). + fs::write(wt_dir.join("implementation.txt"), "done").unwrap(); + Command::new("git") + .args(["add", "."]) + .current_dir(&wt_dir) + .output() + .unwrap(); + Command::new("git") + .args([ + "-c", + "user.email=test@test.com", + "-c", + "user.name=Test", + "commit", + "-m", + "implement story", + ]) + .current_dir(&wt_dir) + .output() .unwrap(); - assert_eq!(available, vec!["coder-1"]); - } - #[tokio::test] - async fn start_agent_auto_selects_second_coder_when_first_busy() { - let tmp = tempfile::tempdir().unwrap(); - let sk = tmp.path().join(".story_kit"); - std::fs::create_dir_all(&sk).unwrap(); - std::fs::write( - sk.join("project.toml"), - r#" -[[agent]] -name = "supervisor" -stage = "other" - -[[agent]] -name = "coder-1" -stage = "coder" - -[[agent]] -name = "coder-2" -stage = "coder" -"#, - ) - .unwrap(); - - let pool = AgentPool::new_test(3001); - // coder-1 is busy on another story - pool.inject_test_agent("other-story", "coder-1", AgentStatus::Running); - - // Call start_agent without agent_name — should pick coder-2 - let result = pool - .start_agent(tmp.path(), "42_my_story", None, None) - .await; - // Will fail for infrastructure reasons (no git repo), 
but should NOT - // fail with "All coder agents are busy" — that would mean it didn't - // try coder-2. - match result { - Ok(info) => { - assert_eq!(info.agent_name, "coder-2"); - } - Err(err) => { - assert!( - !err.contains("All coder agents are busy"), - "should have selected coder-2 but got: {err}" - ); - assert!( - !err.contains("No coder agent configured"), - "should not fail on agent selection, got: {err}" - ); - } - } - } - - #[tokio::test] - async fn start_agent_returns_busy_when_all_coders_occupied() { - let tmp = tempfile::tempdir().unwrap(); - let sk = tmp.path().join(".story_kit"); - std::fs::create_dir_all(&sk).unwrap(); - std::fs::write( - sk.join("project.toml"), - r#" -[[agent]] -name = "coder-1" -stage = "coder" - -[[agent]] -name = "coder-2" -stage = "coder" -"#, - ) - .unwrap(); - - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); - pool.inject_test_agent("story-2", "coder-2", AgentStatus::Pending); - - let result = pool - .start_agent(tmp.path(), "story-3", None, None) - .await; - assert!(result.is_err()); - let err = result.unwrap_err(); assert!( - err.contains("All coder agents are busy"), - "expected busy error, got: {err}" - ); - } - - /// Story 203: when all coders are busy the story file must be moved from - /// 1_upcoming/ to 2_current/ so that auto_assign_available_work can pick - /// it up once a coder finishes. - #[tokio::test] - async fn start_agent_moves_story_to_current_when_coders_busy() { - let tmp = tempfile::tempdir().unwrap(); - let sk = tmp.path().join(".story_kit"); - let upcoming = sk.join("work/1_upcoming"); - std::fs::create_dir_all(&upcoming).unwrap(); - std::fs::write( - sk.join("project.toml"), - r#" -[[agent]] -name = "coder-1" -stage = "coder" -"#, - ) - .unwrap(); - // Place the story in 1_upcoming/. 
- std::fs::write( - upcoming.join("story-3.md"), - "---\nname: Story 3\n---\n", - ) - .unwrap(); - - let pool = AgentPool::new_test(3001); - pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running); - - let result = pool - .start_agent(tmp.path(), "story-3", None, None) - .await; - - // Should fail because all coders are busy. - assert!(result.is_err()); - let err = result.unwrap_err(); - assert!( - err.contains("All coder agents are busy"), - "expected busy error, got: {err}" - ); - assert!( - err.contains("queued in work/2_current/"), - "expected story-to-current message, got: {err}" + crate::agents::gates::worktree_has_committed_work(&wt_dir), + "test setup: worktree should have committed work" ); - // Story must have been moved to 2_current/. - let current_path = sk.join("work/2_current/story-3.md"); - assert!( - current_path.exists(), - "story should be in 2_current/ after busy error, but was not" - ); - let upcoming_path = upcoming.join("story-3.md"); - assert!( - !upcoming_path.exists(), - "story should no longer be in 1_upcoming/" - ); - } - - /// Story 203: auto_assign_available_work must detect a story in 2_current/ - /// with no active agent and start an agent for it. - #[tokio::test] - async fn auto_assign_picks_up_story_queued_in_current() { - let tmp = tempfile::tempdir().unwrap(); - let sk = tmp.path().join(".story_kit"); - let current = sk.join("work/2_current"); - std::fs::create_dir_all(¤t).unwrap(); - std::fs::write( - sk.join("project.toml"), - "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n", - ) - .unwrap(); - // Place the story in 2_current/ (simulating the "queued" state). - std::fs::write( - current.join("story-3.md"), - "---\nname: Story 3\n---\n", - ) - .unwrap(); - let pool = AgentPool::new_test(3001); - // No agents are running — coder-1 is free. 
+ let (tx, _rx) = broadcast::channel(16); + pool.reconcile_on_startup(root, &tx).await; - // auto_assign will try to call start_agent, which will attempt to create - // a worktree (will fail without a git repo) — that is fine. We only need - // to verify the agent is registered as Pending before the background - // task eventually fails. - pool.auto_assign_available_work(tmp.path()).await; - - let agents = pool.agents.lock().unwrap(); - let has_pending = agents.values().any(|a| { - a.agent_name == "coder-1" - && matches!(a.status, AgentStatus::Pending | AgentStatus::Running) - }); + // In the test env, cargo clippy will fail (no Cargo.toml) so gates fail + // and the story stays in 2_current/. The important assertion is that + // reconcile ran without panicking and the story is in a consistent state. + let in_current = current.join("61_story_test.md").exists(); + let in_qa = root + .join(".story_kit/work/3_qa/61_story_test.md") + .exists(); assert!( - has_pending, - "auto_assign should have started coder-1 for story-3, but pool is empty" - ); - } - - /// Story 203: if a story is already in 2_current/ or later, start_agent - /// must not fail — the move is a no-op. - #[tokio::test] - async fn start_agent_story_already_in_current_is_noop() { - let tmp = tempfile::tempdir().unwrap(); - let sk = tmp.path().join(".story_kit"); - let current = sk.join("work/2_current"); - std::fs::create_dir_all(¤t).unwrap(); - std::fs::write( - sk.join("project.toml"), - "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n", - ) - .unwrap(); - // Place the story directly in 2_current/. - std::fs::write( - current.join("story-5.md"), - "---\nname: Story 5\n---\n", - ) - .unwrap(); - - let pool = AgentPool::new_test(3001); - - // start_agent should attempt to assign coder-1 (no infra, so it will - // fail for git reasons), but must NOT fail due to the story already - // being in 2_current/. 
-        let result = pool
-            .start_agent(tmp.path(), "story-5", None, None)
-            .await;
-        match result {
-            Ok(_) => {}
-            Err(e) => {
-                assert!(
-                    !e.contains("Failed to move"),
-                    "should not fail on idempotent move, got: {e}"
-                );
-            }
-        }
-    }
-
-    #[tokio::test]
-    async fn start_agent_explicit_name_unchanged_when_busy() {
-        let tmp = tempfile::tempdir().unwrap();
-        let sk = tmp.path().join(".story_kit");
-        std::fs::create_dir_all(&sk).unwrap();
-        std::fs::write(
-            sk.join("project.toml"),
-            r#"
-[[agent]]
-name = "coder-1"
-stage = "coder"
-
-[[agent]]
-name = "coder-2"
-stage = "coder"
-"#,
-        )
-        .unwrap();
-
-        let pool = AgentPool::new_test(3001);
-        pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
-
-        // Explicit request for coder-1 (busy) should fail even though coder-2 is free.
-        let result = pool
-            .start_agent(tmp.path(), "story-2", Some("coder-1"), None)
-            .await;
-        assert!(result.is_err());
-        let err = result.unwrap_err();
         assert!(
-            err.contains("coder-1") && err.contains("already running"),
-            "expected explicit busy error, got: {err}"
+            in_current || in_qa,
+            "story should be in 2_current/ or 3_qa/ after reconciliation"
         );
     }
 }
diff --git a/server/src/agents/pty.rs b/server/src/agents/pty.rs
new file mode 100644
index 0000000..4702f4e
--- /dev/null
+++ b/server/src/agents/pty.rs
@@ -0,0 +1,490 @@
+use std::collections::HashMap;
+use std::io::{BufRead, BufReader};
+use std::sync::{Arc, Mutex};
+
+use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system};
+use tokio::sync::broadcast;
+
+use super::AgentEvent;
+use crate::agent_log::AgentLogWriter;
+use crate::slog;
+use crate::slog_warn;
+
+fn composite_key(story_id: &str, agent_name: &str) -> String {
+    format!("{story_id}:{agent_name}")
+}
+
+struct ChildKillerGuard {
+    killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
+    key: String,
+}
+
+impl Drop for ChildKillerGuard {
+    fn drop(&mut self) {
+        if let Ok(mut killers) = self.killers.lock() {
+            killers.remove(&self.key);
+        }
+    }
+}
+
+/// Spawn claude agent 
in a PTY and stream events through the broadcast channel.
+#[allow(clippy::too_many_arguments)]
+pub(super) async fn run_agent_pty_streaming(
+    story_id: &str,
+    agent_name: &str,
+    command: &str,
+    args: &[String],
+    prompt: &str,
+    cwd: &str,
+    tx: &broadcast::Sender<AgentEvent>,
+    event_log: &Arc<Mutex<Vec<AgentEvent>>>,
+    log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
+    inactivity_timeout_secs: u64,
+    child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
+) -> Result<Option<String>, String> {
+    let sid = story_id.to_string();
+    let aname = agent_name.to_string();
+    let cmd = command.to_string();
+    let args = args.to_vec();
+    let prompt = prompt.to_string();
+    let cwd = cwd.to_string();
+    let tx = tx.clone();
+    let event_log = event_log.clone();
+
+    tokio::task::spawn_blocking(move || {
+        run_agent_pty_blocking(
+            &sid,
+            &aname,
+            &cmd,
+            &args,
+            &prompt,
+            &cwd,
+            &tx,
+            &event_log,
+            log_writer.as_deref(),
+            inactivity_timeout_secs,
+            &child_killers,
+        )
+    })
+    .await
+    .map_err(|e| format!("Agent task panicked: {e}"))?
+}
+
+/// Dispatch a `stream_event` from Claude Code's `--include-partial-messages` output.
+///
+/// Extracts `thinking_delta` and `text_delta` from `content_block_delta` events
+/// and routes them as `AgentEvent::Thinking` and `AgentEvent::Output` respectively.
+/// This ensures thinking traces flow through the dedicated `ThinkingBlock` UI
+/// component rather than appearing as unbounded regular output. 
+fn handle_agent_stream_event(
+    event: &serde_json::Value,
+    story_id: &str,
+    agent_name: &str,
+    tx: &broadcast::Sender<AgentEvent>,
+    event_log: &Mutex<Vec<AgentEvent>>,
+    log_writer: Option<&Mutex<AgentLogWriter>>,
+) {
+    let event_type = event.get("type").and_then(|t| t.as_str()).unwrap_or("");
+
+    if event_type == "content_block_delta"
+        && let Some(delta) = event.get("delta")
+    {
+        let delta_type = delta.get("type").and_then(|t| t.as_str()).unwrap_or("");
+        match delta_type {
+            "thinking_delta" => {
+                if let Some(thinking) = delta.get("thinking").and_then(|t| t.as_str()) {
+                    emit_event(
+                        AgentEvent::Thinking {
+                            story_id: story_id.to_string(),
+                            agent_name: agent_name.to_string(),
+                            text: thinking.to_string(),
+                        },
+                        tx,
+                        event_log,
+                        log_writer,
+                    );
+                }
+            }
+            "text_delta" => {
+                if let Some(text) = delta.get("text").and_then(|t| t.as_str()) {
+                    emit_event(
+                        AgentEvent::Output {
+                            story_id: story_id.to_string(),
+                            agent_name: agent_name.to_string(),
+                            text: text.to_string(),
+                        },
+                        tx,
+                        event_log,
+                        log_writer,
+                    );
+                }
+            }
+            _ => {}
+        }
+    }
+}
+
+/// Helper to send an event to broadcast, event log, and optional persistent log file. 
+pub(super) fn emit_event(
+    event: AgentEvent,
+    tx: &broadcast::Sender<AgentEvent>,
+    event_log: &Mutex<Vec<AgentEvent>>,
+    log_writer: Option<&Mutex<AgentLogWriter>>,
+) {
+    if let Ok(mut log) = event_log.lock() {
+        log.push(event.clone());
+    }
+    if let Some(writer) = log_writer
+        && let Ok(mut w) = writer.lock()
+        && let Err(e) = w.write_event(&event)
+    {
+        eprintln!("[agent_log] Failed to write event to log file: {e}");
+    }
+    let _ = tx.send(event);
+}
+
+#[allow(clippy::too_many_arguments)]
+fn run_agent_pty_blocking(
+    story_id: &str,
+    agent_name: &str,
+    command: &str,
+    args: &[String],
+    prompt: &str,
+    cwd: &str,
+    tx: &broadcast::Sender<AgentEvent>,
+    event_log: &Mutex<Vec<AgentEvent>>,
+    log_writer: Option<&Mutex<AgentLogWriter>>,
+    inactivity_timeout_secs: u64,
+    child_killers: &Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
+) -> Result<Option<String>, String> {
+    let pty_system = native_pty_system();
+
+    let pair = pty_system
+        .openpty(PtySize {
+            rows: 50,
+            cols: 200,
+            pixel_width: 0,
+            pixel_height: 0,
+        })
+        .map_err(|e| format!("Failed to open PTY: {e}"))?;
+
+    let mut cmd = CommandBuilder::new(command);
+
+    // -p must come first
+    cmd.arg("-p");
+    cmd.arg(prompt);
+
+    // Add configured args (e.g., --directory /path/to/worktree, --model, etc.)
+    for arg in args {
+        cmd.arg(arg);
+    }
+
+    cmd.arg("--output-format");
+    cmd.arg("stream-json");
+    cmd.arg("--verbose");
+    // Enable partial streaming so we receive thinking_delta and text_delta
+    // events in real-time, rather than only complete assistant events.
+    // Without this, thinking traces may not appear in the structured output
+    // and instead leak as unstructured PTY text. 
+ cmd.arg("--include-partial-messages"); + + // Supervised agents don't need interactive permission prompts + cmd.arg("--permission-mode"); + cmd.arg("bypassPermissions"); + + cmd.cwd(cwd); + cmd.env("NO_COLOR", "1"); + + // Allow spawning Claude Code from within a Claude Code session + cmd.env_remove("CLAUDECODE"); + cmd.env_remove("CLAUDE_CODE_ENTRYPOINT"); + + slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}"); + + let mut child = pair + .slave + .spawn_command(cmd) + .map_err(|e| format!("Failed to spawn agent for {story_id}:{agent_name}: {e}"))?; + + // Register the child killer so that kill_all_children() / stop_agent() can + // terminate this process on server shutdown, even if the blocking thread + // cannot be interrupted. The ChildKillerGuard deregisters on function exit. + let killer_key = composite_key(story_id, agent_name); + { + let killer = child.clone_killer(); + if let Ok(mut killers) = child_killers.lock() { + killers.insert(killer_key.clone(), killer); + } + } + let _killer_guard = ChildKillerGuard { + killers: Arc::clone(child_killers), + key: killer_key, + }; + + drop(pair.slave); + + let reader = pair + .master + .try_clone_reader() + .map_err(|e| format!("Failed to clone PTY reader: {e}"))?; + + drop(pair.master); + + // Spawn a reader thread to collect PTY output lines. + // We use a channel so the main thread can apply an inactivity deadline + // via recv_timeout: if no output arrives within the configured window + // the process is killed and the agent is marked Failed. 
+    let (line_tx, line_rx) = std::sync::mpsc::channel::<std::io::Result<String>>();
+    std::thread::spawn(move || {
+        let buf_reader = BufReader::new(reader);
+        for line in buf_reader.lines() {
+            if line_tx.send(line).is_err() {
+                break;
+            }
+        }
+    });
+
+    let timeout_dur = if inactivity_timeout_secs > 0 {
+        Some(std::time::Duration::from_secs(inactivity_timeout_secs))
+    } else {
+        None
+    };
+
+    let mut session_id: Option<String> = None;
+
+    loop {
+        let recv_result = match timeout_dur {
+            Some(dur) => line_rx.recv_timeout(dur),
+            None => line_rx
+                .recv()
+                .map_err(|_| std::sync::mpsc::RecvTimeoutError::Disconnected),
+        };
+
+        let line = match recv_result {
+            Ok(Ok(l)) => l,
+            Ok(Err(_)) => {
+                // IO error reading from PTY — treat as EOF.
+                break;
+            }
+            Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => {
+                // Reader thread exited (EOF from PTY).
+                break;
+            }
+            Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
+                slog_warn!(
+                    "[agent:{story_id}:{agent_name}] Inactivity timeout after \
+                     {inactivity_timeout_secs}s with no output. Killing process."
+                );
+                let _ = child.kill();
+                let _ = child.wait();
+                return Err(format!(
+                    "Agent inactivity timeout: no output received for {inactivity_timeout_secs}s"
+                ));
+            }
+        };
+
+        let trimmed = line.trim();
+        if trimmed.is_empty() {
+            continue;
+        }
+
+        // Try to parse as JSON
+        let json: serde_json::Value = match serde_json::from_str(trimmed) {
+            Ok(j) => j,
+            Err(_) => {
+                // Non-JSON output (terminal escapes etc.) 
— send as raw output + emit_event( + AgentEvent::Output { + story_id: story_id.to_string(), + agent_name: agent_name.to_string(), + text: trimmed.to_string(), + }, + tx, + event_log, + log_writer, + ); + continue; + } + }; + + let event_type = json.get("type").and_then(|t| t.as_str()).unwrap_or(""); + + match event_type { + "system" => { + session_id = json + .get("session_id") + .and_then(|s| s.as_str()) + .map(|s| s.to_string()); + } + // With --include-partial-messages, thinking and text arrive + // incrementally via stream_event → content_block_delta. Handle + // them here for real-time streaming to the frontend. + "stream_event" => { + if let Some(event) = json.get("event") { + handle_agent_stream_event( + event, + story_id, + agent_name, + tx, + event_log, + log_writer, + ); + } + } + // Complete assistant events are skipped for content extraction + // because thinking and text already arrived via stream_event. + // The raw JSON is still forwarded as AgentJson below. + "assistant" | "user" | "result" => {} + _ => {} + } + + // Forward all JSON events + emit_event( + AgentEvent::AgentJson { + story_id: story_id.to_string(), + agent_name: agent_name.to_string(), + data: json, + }, + tx, + event_log, + log_writer, + ); + } + + let _ = child.kill(); + let _ = child.wait(); + + slog!( + "[agent:{story_id}:{agent_name}] Done. 
Session: {:?}",
+        session_id
+    );
+
+    Ok(session_id)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::agents::AgentEvent;
+
+    #[test]
+    fn test_emit_event_writes_to_log_writer() {
+        let tmp = tempfile::tempdir().unwrap();
+        let root = tmp.path();
+
+        let log_writer =
+            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-emit").unwrap();
+        let log_mutex = Mutex::new(log_writer);
+
+        let (tx, _rx) = broadcast::channel::<AgentEvent>(64);
+        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());
+
+        let event = AgentEvent::Status {
+            story_id: "42_story_foo".to_string(),
+            agent_name: "coder-1".to_string(),
+            status: "running".to_string(),
+        };
+
+        emit_event(event, &tx, &event_log, Some(&log_mutex));
+
+        // Verify event was added to in-memory log
+        let mem_events = event_log.lock().unwrap();
+        assert_eq!(mem_events.len(), 1);
+        drop(mem_events);
+
+        // Verify event was written to the log file
+        let log_path =
+            crate::agent_log::log_file_path(root, "42_story_foo", "coder-1", "sess-emit");
+        let entries = crate::agent_log::read_log(&log_path).unwrap();
+        assert_eq!(entries.len(), 1);
+        assert_eq!(entries[0].event["type"], "status");
+        assert_eq!(entries[0].event["status"], "running");
+    }
+
+    // ── bug 167: handle_agent_stream_event routes thinking/text correctly ───
+
+    #[test]
+    fn stream_event_thinking_delta_emits_thinking_event() {
+        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
+        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());
+
+        let event = serde_json::json!({
+            "type": "content_block_delta",
+            "delta": {"type": "thinking_delta", "thinking": "Let me analyze this..."}
+        });
+
+        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);
+
+        let received = rx.try_recv().unwrap();
+        match received {
+            AgentEvent::Thinking {
+                story_id,
+                agent_name,
+                text,
+            } => {
+                assert_eq!(story_id, "s1");
+                assert_eq!(agent_name, "coder-1");
+                assert_eq!(text, "Let me analyze this...");
+            }
+            other => panic!("Expected Thinking event, got: {other:?}"),
+        }
+    }
+
+
+    #[test]
+    fn stream_event_text_delta_emits_output_event() {
+        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
+        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());
+
+        let event = serde_json::json!({
+            "type": "content_block_delta",
+            "delta": {"type": "text_delta", "text": "Here is the result."}
+        });
+
+        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);
+
+        let received = rx.try_recv().unwrap();
+        match received {
+            AgentEvent::Output {
+                story_id,
+                agent_name,
+                text,
+            } => {
+                assert_eq!(story_id, "s1");
+                assert_eq!(agent_name, "coder-1");
+                assert_eq!(text, "Here is the result.");
+            }
+            other => panic!("Expected Output event, got: {other:?}"),
+        }
+    }
+
+    #[test]
+    fn stream_event_input_json_delta_ignored() {
+        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
+        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());
+
+        let event = serde_json::json!({
+            "type": "content_block_delta",
+            "delta": {"type": "input_json_delta", "partial_json": "{\"file\":"}
+        });
+
+        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);
+
+        // No event should be emitted for tool argument deltas
+        assert!(rx.try_recv().is_err());
+    }
+
+    #[test]
+    fn stream_event_non_delta_type_ignored() {
+        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
+        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());
+
+        let event = serde_json::json!({
+            "type": "message_start",
+            "message": {"role": "assistant"}
+        });
+
+        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);
+
+        assert!(rx.try_recv().is_err());
+    }
+}
diff --git a/server/src/io/story_metadata.rs b/server/src/io/story_metadata.rs
index c360c0d..40b00c2 100644
--- a/server/src/io/story_metadata.rs
+++ b/server/src/io/story_metadata.rs
@@ -95,6 +95,52 @@ pub fn write_merge_failure(path: &Path, reason: &str) -> Result<(), String> {
     Ok(())
 }
 
+/// Remove a key from the YAML front matter of a story file on disk.
+///
+/// If front matter is present and contains the key, the line is removed.
+/// If no front matter or key is not found, the file is left unchanged.
+pub fn clear_front_matter_field(path: &Path, key: &str) -> Result<(), String> {
+    let contents =
+        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
+    let updated = remove_front_matter_field(&contents, key);
+    if updated != contents {
+        fs::write(path, &updated).map_err(|e| format!("Failed to write story file: {e}"))?;
+    }
+    Ok(())
+}
+
+/// Remove a key: value line from the YAML front matter of a markdown string.
+///
+/// If no front matter (opening `---`) is found or the key is absent, returns content unchanged.
+fn remove_front_matter_field(contents: &str, key: &str) -> String {
+    let mut lines: Vec<String> = contents.lines().map(String::from).collect();
+    if lines.is_empty() || lines[0].trim() != "---" {
+        return contents.to_string();
+    }
+
+    let close_idx = match lines[1..].iter().position(|l| l.trim() == "---") {
+        Some(i) => i + 1,
+        None => return contents.to_string(),
+    };
+
+    let key_prefix = format!("{key}:");
+    if let Some(idx) = lines[1..close_idx]
+        .iter()
+        .position(|l| l.trim_start().starts_with(&key_prefix))
+        .map(|i| i + 1)
+    {
+        lines.remove(idx);
+    } else {
+        return contents.to_string();
+    }
+
+    let mut result = lines.join("\n");
+    if contents.ends_with('\n') {
+        result.push('\n');
+    }
+    result
+}
+
 /// Insert or update a key: value pair in the YAML front matter of a markdown string.
 ///
 /// If no front matter (opening `---`) is found, returns the content unchanged.
@@ -219,6 +265,40 @@ workflow: tdd )); } + #[test] + fn remove_front_matter_field_removes_key() { + let input = "---\nname: My Story\nmerge_failure: \"something broke\"\n---\n# Body\n"; + let output = remove_front_matter_field(input, "merge_failure"); + assert!(!output.contains("merge_failure")); + assert!(output.contains("name: My Story")); + assert!(output.ends_with('\n')); + } + + #[test] + fn remove_front_matter_field_no_op_when_absent() { + let input = "---\nname: My Story\n---\n# Body\n"; + let output = remove_front_matter_field(input, "merge_failure"); + assert_eq!(output, input); + } + + #[test] + fn remove_front_matter_field_no_op_without_front_matter() { + let input = "# No front matter\n"; + let output = remove_front_matter_field(input, "merge_failure"); + assert_eq!(output, input); + } + + #[test] + fn clear_front_matter_field_updates_file() { + let tmp = tempfile::tempdir().unwrap(); + let path = tmp.path().join("story.md"); + std::fs::write(&path, "---\nname: Test\nmerge_failure: \"bad\"\n---\n# Story\n").unwrap(); + clear_front_matter_field(&path, "merge_failure").unwrap(); + let contents = std::fs::read_to_string(&path).unwrap(); + assert!(!contents.contains("merge_failure")); + assert!(contents.contains("name: Test")); + } + #[test] fn parse_unchecked_todos_mixed() { let input = "## AC\n- [ ] First thing\n- [x] Done thing\n- [ ] Second thing\n";