//! Story lifecycle helpers — archival and stage transitions for pipeline items.
//!
//! All pipeline state lives in the CRDT. These functions never consult the
//! filesystem for work-item data — CRDT lookup failures propagate as errors.
//!
//! Every lifecycle function routes through the typed state machine
//! ([`crate::pipeline_state::apply_transition`]) so that illegal transitions
//! are rejected and every stage change emits a [`TransitionFired`] event.

use std::num::NonZeroU32;
use std::path::Path;
use std::process::Command;

use crate::pipeline_state::{
    ApplyError, ArchiveReason, BranchName, GitSha, PipelineEvent, Stage, TransitionFired,
    apply_transition, stage_label,
};
use crate::slog;

/// Determine the item type ("story", "bug", "spike", or "refactor") from the item ID.
///
/// For slug-format IDs (e.g. `"4_bug_login_crash"`), the type is embedded in the ID.
/// For numeric-only IDs (e.g. `"4"`), the type is read from the typed CRDT
/// `item_type` register (story 933). Falls back to `"story"` if not found.
pub(crate) fn item_type_from_id(item_id: &str) -> &'static str {
    // Strip the numeric prefix; what remains is either a `_<type>_…` slug tail
    // or empty (numeric-only ID).
    let after_num = item_id.trim_start_matches(|c: char| c.is_ascii_digit());
    if after_num.starts_with("_bug_") {
        return "bug";
    } else if after_num.starts_with("_spike_") {
        return "spike";
    } else if after_num.starts_with("_refactor_") {
        return "refactor";
    }
    // Numeric-only ID: consult the typed CRDT register.
    if after_num.is_empty()
        && let Some(view) = crate::crdt_state::read_item(item_id)
        && let Some(t) = view.item_type()
    {
        use crate::io::story_metadata::ItemType;
        return match t {
            ItemType::Bug => "bug",
            ItemType::Spike => "spike",
            ItemType::Refactor => "refactor",
            // Any other variant is treated as a plain story.
            _ => "story",
        };
    }
    // `_story_` slugs, unrecognised slugs, and missing CRDT entries all
    // default to "story".
    "story"
}

/// Move a work item (story, bug, or spike) from `1_backlog` to `work/2_current/`.
///
/// Only promotes from `1_backlog` — stories already in later stages (3_qa, 4_merge,
/// etc.) are left untouched. This prevents coders from accidentally demoting a story
/// that has already advanced past the coding stage.
/// Idempotent: if already in `2_current/`, returns Ok. If not found, logs and returns Ok.
pub fn move_story_to_current(story_id: &str) -> Result<(), String> {
    match apply_transition(story_id, PipelineEvent::DepsMet, None) {
        Ok(_) => Ok(()),
        Err(ApplyError::NotFound(_)) => {
            // Missing item is a logged no-op — promotion is best-effort.
            slog!("[lifecycle] Work item '{story_id}' not found; skipping move to work/2_current/");
            Ok(())
        }
        Err(ApplyError::InvalidTransition(_)) => {
            // Already promoted or in a later stage — idempotent no-op.
            Ok(())
        }
        // Projection failures are likewise swallowed for this best-effort call.
        Err(ApplyError::Projection(_)) => Ok(()),
    }
}

/// Check whether a feature branch `feature/story-{story_id}` exists and has
/// commits that are not yet on master. Returns `true` when there is unmerged
/// work, `false` when there is no branch or all its commits are already
/// reachable from master.
pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str) -> bool {
    let branch = format!("feature/story-{story_id}");
    // Check if the branch exists.
    let branch_check = Command::new("git")
        .args(["rev-parse", "--verify", &branch])
        .current_dir(project_root)
        .output();
    match branch_check {
        Ok(out) if out.status.success() => {}
        _ => return false, // No feature branch → nothing to merge.
    }
    // Check if the branch has commits not reachable from master.
    // `master..{branch}` lists commits on the branch that master lacks.
    let log = Command::new("git")
        .args(["log", &format!("master..{branch}"), "--oneline"])
        .current_dir(project_root)
        .output();
    match log {
        Ok(out) => {
            // Non-empty output ⇒ at least one unmerged commit. A failed
            // `git log` yields empty stdout, which also reads as "no work".
            let stdout = String::from_utf8_lossy(&out.stdout);
            !stdout.trim().is_empty()
        }
        Err(_) => false,
    }
}

/// Move a story from `work/2_current/`, `work/3_qa/`, or `work/4_merge/` to `work/5_done/`.
///
/// Idempotent if already in `5_done/` or `6_archived/`. Errors if not found in any earlier stage.
/// Spikes may transition directly from `3_qa/` to `5_done/`, skipping the merge stage.
pub fn move_story_to_done(story_id: &str) -> Result<(), String> {
    let item = read_typed_or_err(story_id)?;
    // Idempotent: already at or past done.
    if matches!(item.stage, Stage::Done { .. } | Stage::Archived { .. }) {
        return Ok(());
    }
    // Map the current stage to the event that legally reaches Done.
    let event = match &item.stage {
        Stage::Merge { .. } => PipelineEvent::MergeSucceeded {
            // Sentinel SHA: the merge is being accepted manually, not by git.
            merge_commit: GitSha("accepted".to_string()),
        },
        Stage::MergeFailure { .. } => PipelineEvent::Accepted,
        Stage::Coding | Stage::Qa | Stage::Backlog => PipelineEvent::Close,
        _ => {
            return Err(format!(
                "Work item '{story_id}' is in {} — cannot move to done.",
                stage_label(&item.stage)
            ));
        }
    };
    apply_transition(story_id, event, None)
        .map(|_| ())
        .map_err(|e| e.to_string())
}

/// Move a story/bug from `work/2_current/` or `work/3_qa/` to `work/4_merge/`.
///
/// Idempotent if already in `4_merge/`. Errors when the item is in any stage
/// other than `2_current/` or `3_qa/`.
pub fn move_story_to_merge(story_id: &str) -> Result<(), String> {
    let item = read_typed_or_err(story_id)?;
    // Idempotent: already at or past merge.
    if matches!(
        item.stage,
        Stage::Merge { .. } | Stage::MergeFailure { .. } | Stage::Done { .. } | Stage::Archived { .. }
    ) {
        return Ok(());
    }
    let branch = BranchName(format!("feature/story-{story_id}"));
    let commits = NonZeroU32::new(1).expect("1 is non-zero");
    let event = match &item.stage {
        // From coding: QA is skipped entirely.
        Stage::Coding => PipelineEvent::QaSkipped {
            feature_branch: branch,
            commits_ahead: commits,
        },
        // From QA: the gates passed.
        Stage::Qa => PipelineEvent::GatesPassed {
            feature_branch: branch,
            commits_ahead: commits,
        },
        _ => {
            // The item exists but is in a stage that cannot reach merge.
            // Report the actual stage (consistent with move_story_to_done)
            // rather than the misleading "not found".
            return Err(format!(
                "Work item '{story_id}' is in {} — cannot move to merge.",
                stage_label(&item.stage)
            ));
        }
    };
    apply_transition(story_id, event, None)
        .map(|_| ())
        .map_err(|e| e.to_string())
}

/// Move a story/bug from `work/2_current/` to `work/3_qa/`.
///
/// Idempotent if already in `3_qa/`. Errors if not found in `2_current/`.
pub fn move_story_to_qa(story_id: &str) -> Result<(), String> {
    let item = read_typed_or_err(story_id)?;
    // Idempotent: already at or past qa.
    if matches!(
        item.stage,
        Stage::Qa
            | Stage::Merge { .. }
            | Stage::MergeFailure { .. }
            | Stage::Done { .. }
            | Stage::Archived { .. }
    ) {
        return Ok(());
    }
    // Any other stage: let the state machine decide (Coding → Qa succeeds;
    // everything else is rejected as an invalid transition).
    apply_transition(story_id, PipelineEvent::GatesStarted, None)
        .map(|_| ())
        .map_err(|e| e.to_string())
}

/// Move a story from `work/3_qa/` back to `work/2_current/`, appending
/// rejection notes. Story 945: the legacy `review_hold` flag is gone; if a
/// story is in `Stage::ReviewHold`, the `GatesFailed` event simply fails to
/// transition, which is the correct behaviour (you cannot reject from QA a
/// story that is currently parked in review hold).
pub fn reject_story_from_qa(story_id: &str, notes: &str) -> Result<(), String> {
    // The event is identical in both branches; only the content transform
    // (appending a rejection-notes section) depends on `notes` being non-empty.
    let event = PipelineEvent::GatesFailed {
        reason: notes.to_string(),
    };
    let result = if notes.is_empty() {
        apply_transition(story_id, event, None)
    } else {
        let notes_owned = notes.to_string();
        let transform = move |content: &str| -> String {
            format!("{content}\n\n## QA Rejection Notes\n\n{notes_owned}\n")
        };
        apply_transition(story_id, event, Some(&transform))
    };
    result.map(|_| ()).map_err(|e| e.to_string())
}

/// Transition a story to the `Blocked` stage via the state machine.
///
/// Builds a `PipelineEvent::Block { reason }`, validates the transition, and
/// writes the resulting `Stage::Blocked` to the CRDT. Returns `Err` on
/// `TransitionError` — callers must NOT fall back to direct register writes.
pub fn transition_to_blocked(story_id: &str, reason: &str) -> Result<(), String> {
    apply_transition(
        story_id,
        PipelineEvent::Block {
            reason: reason.to_string(),
        },
        None,
    )
    .map(|_| ())
    .map_err(|e| e.to_string())
}

/// Transition a story from `Stage::Merge` (or `Stage::MergeFailure`) to
/// `Stage::MergeFailure` via the state machine.
/// /// Builds a `PipelineEvent::MergeFailed { reason }`, validates the transition, /// writes the resulting `Stage::MergeFailure` to the CRDT, and persists the /// reason to the typed `MergeJob.error` CRDT register so it survives server /// restarts (story 929: the legacy YAML write of `merge_failure: "..."` is gone). /// /// When the story is already in `MergeFailure`, this is a silent self-loop: the /// returned `TransitionFired::before` will be `Stage::MergeFailure`. Callers /// should suppress re-notification in that case to avoid duplicate chat messages. /// /// Returns `Err` on `TransitionError` — callers must NOT fall back to direct register writes. pub fn transition_to_merge_failure( story_id: &str, reason: &str, ) -> Result { let fired = apply_transition( story_id, PipelineEvent::MergeFailed { reason: reason.to_string(), }, None, ) .map_err(|e| e.to_string())?; // Persist the failure reason on the MergeJob CRDT entry so display tools // (status_tools, chat status renderer, pipeline.rs::load_pipeline_state) // can surface it without re-parsing YAML. crate::crdt_state::write_merge_job( story_id, "failed", chrono::Utc::now().timestamp() as f64, None, Some(reason), ); Ok(fired) } /// Transition a story out of a blocked state via the state machine. /// /// Builds a `PipelineEvent::Unblock`, validates the transition, writes the /// result to the CRDT, and resets `retry_count` to 0. The destination stage /// depends on the current stage: `Blocked` → `Coding`; `MergeFailure` → `Merge`. /// Returns `Err` on `TransitionError` — callers must NOT fall back to direct /// register writes. pub fn transition_to_unblocked(story_id: &str) -> Result<(), String> { apply_transition(story_id, PipelineEvent::Unblock, None) .map(|_| ()) .map_err(|e| e.to_string())?; // Story 945: the legacy `blocked` boolean flag is gone — `Stage::Blocked` // is the single source of truth. We still reset `retry_count` so a fresh // attempt at the resumed stage starts at zero. 
crate::crdt_state::set_retry_count(story_id, 0); Ok(()) } /// Map a (current stage, target stage name) pair to the appropriate PipelineEvent. fn map_stage_move_to_event( from: &Stage, target: &str, story_id: &str, ) -> Result { let branch = || BranchName(format!("feature/story-{story_id}")); let nz1 = || NonZeroU32::new(1).expect("1 is non-zero"); match (from, target) { (Stage::Upcoming, "backlog") => Ok(PipelineEvent::Triage), (Stage::Backlog, "current") => Ok(PipelineEvent::DepsMet), (Stage::Coding, "qa") => Ok(PipelineEvent::GatesStarted), (Stage::Coding, "merge") => Ok(PipelineEvent::QaSkipped { feature_branch: branch(), commits_ahead: nz1(), }), (Stage::Qa, "merge") => Ok(PipelineEvent::GatesPassed { feature_branch: branch(), commits_ahead: nz1(), }), (Stage::Coding, "backlog") | (Stage::Qa, "backlog") | (Stage::Merge { .. }, "backlog") | (Stage::Blocked { .. }, "backlog") => Ok(PipelineEvent::Demote), (Stage::Qa, "current") => Ok(PipelineEvent::GatesFailed { reason: "manual move".to_string(), }), (Stage::Merge { .. }, "done") => Ok(PipelineEvent::MergeSucceeded { merge_commit: GitSha("manual".to_string()), }), (Stage::Coding | Stage::Qa | Stage::Backlog, "done") => Ok(PipelineEvent::Close), (Stage::Blocked { .. }, "current") => Ok(PipelineEvent::Unblock), // Story 919: MergeFailure + Unblock goes to Merge (re-attempt); manual // demotion to backlog uses Demote to park it without a retry. (Stage::MergeFailure { .. }, "backlog") => Ok(PipelineEvent::Demote), ( Stage::Archived { reason: ArchiveReason::Blocked { .. }, .. }, "backlog", ) | ( Stage::Archived { reason: ArchiveReason::MergeFailed { .. }, .. }, "backlog", ) => Ok(PipelineEvent::Unblock), _ => Err(format!( "Invalid target_stage '{target}'. Cannot transition from {} to {target}.", stage_label(from), )), } } /// Move any work item to an arbitrary pipeline stage by searching all stages. /// /// Accepts `target_stage` as one of: `backlog`, `current`, `qa`, `merge`, `done`. 
/// (`current` is the user-facing alias for the `coding` stage.)
/// Idempotent: if the item is already in the target stage, returns Ok.
/// Returns `(from_stage, to_stage)` on success.
pub fn move_story_to_stage(story_id: &str, target_stage: &str) -> Result<(String, String), String> {
    // Validate target. We accept the user-facing aliases (which include
    // "current" as the historical alias for "coding") and normalise to the
    // canonical clean wire form for the idempotency check.
    let target_wire = match target_stage {
        "backlog" => "backlog",
        "current" => "coding",
        "qa" => "qa",
        "merge" => "merge",
        "done" => "done",
        _ => {
            return Err(format!(
                "Invalid target_stage '{target_stage}'. Must be one of: backlog, current, qa, merge, done"
            ));
        }
    };
    let item = read_typed_or_err(story_id)?;
    // Capture the source stage name before transitioning, for the return value.
    let from_name = stage_to_name(&item.stage);
    // Idempotent: already in the target stage.
    if item.stage.dir_name() == target_wire {
        return Ok((target_stage.to_string(), target_stage.to_string()));
    }
    let event = map_stage_move_to_event(&item.stage, target_stage, story_id)?;
    apply_transition(story_id, event, None).map_err(|e| e.to_string())?;
    Ok((from_name.to_string(), target_stage.to_string()))
}

/// Move a bug from `work/2_current/` or `work/1_backlog/` to `work/5_done/`.
///
/// Idempotent if already in `5_done/`. Errors if not found in `2_current/` or `1_backlog/`.
pub fn close_bug_to_archive(bug_id: &str) -> Result<(), String> {
    let item = read_typed_or_err(bug_id)?;
    // Idempotent: already at or past done.
    if matches!(item.stage, Stage::Done { .. } | Stage::Archived { .. }) {
        return Ok(());
    }
    apply_transition(bug_id, PipelineEvent::Close, None)
        .map(|_| ())
        .map_err(|e| e.to_string())
}

/// Read a typed pipeline item or return a user-facing error.
// NOTE(review): the return type reads bare `Result` with no type parameters —
// the generics appear to have been lost in transit (presumably
// `Result<{typed item}, String>` given `read_typed`'s Result<Option<_>, _>
// shape below); confirm against `crate::pipeline_state::read_typed`.
fn read_typed_or_err(story_id: &str) -> Result {
    crate::pipeline_state::read_typed(story_id)
        .map_err(|e| format!("Work item '{story_id}': {e}"))?
.ok_or_else(|| format!("Work item '{story_id}' not found in any pipeline stage."))
}

/// Map a Stage variant to the short name used by `move_story_to_stage` return values.
fn stage_to_name(s: &Stage) -> &'static str {
    match s {
        Stage::Upcoming => "upcoming",
        Stage::Backlog => "backlog",
        // "current" is the historical user-facing alias for the coding stage.
        Stage::Coding => "current",
        Stage::Blocked { .. } => "blocked",
        Stage::Qa => "qa",
        Stage::Merge { .. } => "merge",
        Stage::MergeFailure { .. } => "merge_failure",
        Stage::MergeFailureFinal { .. } => "merge_failure_final",
        Stage::Frozen { .. } => "frozen",
        Stage::ReviewHold { .. } => "review_hold",
        Stage::Done { .. } => "done",
        Stage::Archived { .. } => "archived",
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // ── move_story_to_current tests ────────────────────────────────────────────

    #[test]
    fn move_story_to_current_from_crdt() {
        // Seed via CRDT — the sole source of truth for pipeline state.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "99950_story_lifecycle",
            "1_backlog",
            "---\nname: Lifecycle Test\n---\n# Story\n",
            crate::db::ItemMeta::named("Lifecycle Test"),
        );
        move_story_to_current("99950_story_lifecycle").unwrap();
        // Verify the CRDT now has the item in 2_current.
        let item = crate::pipeline_state::read_typed("99950_story_lifecycle")
            .expect("CRDT read should succeed")
            .expect("item should exist in CRDT after move");
        assert_eq!(
            item.stage.dir_name(),
            "coding",
            "item should be in coding after move"
        );
    }

    #[test]
    fn move_story_to_current_noop_when_not_found() {
        // Missing items are a logged no-op, not an error.
        assert!(move_story_to_current("99_missing").is_ok());
    }

    /// Lifecycle operation runs to completion using only CRDT state;
    /// no `.huskies/work/` tree is consulted because no `project_root`
    /// is passed — the functions operate purely on the CRDT.
#[test] fn move_story_uses_only_crdt_no_fs_shadow() { crate::db::ensure_content_store(); crate::db::write_item_with_content( "99951_story_crdt_only", "2_current", "---\nname: CRDT Only Test\n---\n# Story\n", crate::db::ItemMeta::named("CRDT Only Test"), ); // No filesystem path is involved — lifecycle functions no longer // accept a project_root, proving they never touch the filesystem. move_story_to_done("99951_story_crdt_only").unwrap(); let item = crate::pipeline_state::read_typed("99951_story_crdt_only") .expect("CRDT read should succeed") .expect("item should exist in CRDT"); assert_eq!( item.stage.dir_name(), "done", "item should be in done after move" ); } // ── item_type_from_id tests ──────────────────────────────────────────────── #[test] fn item_type_from_id_detects_types() { assert_eq!(item_type_from_id("1_bug_test"), "bug"); assert_eq!(item_type_from_id("1_spike_research"), "spike"); assert_eq!(item_type_from_id("50_story_my_story"), "story"); assert_eq!(item_type_from_id("1_story_simple"), "story"); assert_eq!(item_type_from_id("1_refactor_cleanup"), "refactor"); } #[test] fn item_type_from_id_uses_crdt_register_for_numeric_ids() { crate::crdt_state::init_for_test(); crate::db::ensure_content_store(); // Story 933: numeric-only IDs read item_type from the CRDT register. for (id, t) in [ ("9999", "bug"), ("9998", "spike"), ("9997", "refactor"), ("9996", "story"), ] { crate::db::write_item_with_content( id, "1_backlog", &format!("# Test {t}\n"), crate::db::ItemMeta::named(format!("Test {t}")), ); crate::crdt_state::set_item_type(id, crate::io::story_metadata::ItemType::from_str(t)); } assert_eq!(item_type_from_id("9999"), "bug"); assert_eq!(item_type_from_id("9998"), "spike"); assert_eq!(item_type_from_id("9997"), "refactor"); assert_eq!(item_type_from_id("9996"), "story"); // No CRDT entry → defaults to "story". 
assert_eq!(item_type_from_id("99999"), "story"); } // ── Story 866: block/unblock round-trip regression test ────────────────── /// Regression test (story 866): block a story via the new state-machine path, /// verify it lands in `Stage::Blocked`, then unblock and verify it returns /// to `Stage::Coding`. #[test] fn block_unblock_round_trip_via_state_machine() { crate::db::ensure_content_store(); crate::db::write_item_with_content( "99866_story_block_test", "2_current", "---\nname: Block Round Trip\n---\n# Story\n", crate::db::ItemMeta::named("Block Round Trip"), ); // Verify starting state is Coding. let item = crate::pipeline_state::read_typed("99866_story_block_test") .expect("read should succeed") .expect("item should exist"); assert_eq!(item.stage.dir_name(), "coding", "should start in coding"); // Block via the state machine. transition_to_blocked("99866_story_block_test", "retry limit exceeded") .expect("transition_to_blocked should succeed"); // Verify the CRDT now shows Stage::Blocked. let item = crate::pipeline_state::read_typed("99866_story_block_test") .expect("read should succeed") .expect("item should exist after block"); assert_eq!( item.stage.dir_name(), "blocked", "should be in blocked after transition_to_blocked" ); assert!(item.stage.is_blocked(), "is_blocked() should return true"); assert!( matches!(item.stage, Stage::Blocked { .. }), "stage should be Stage::Blocked variant" ); // Unblock via the state machine. transition_to_unblocked("99866_story_block_test") .expect("transition_to_unblocked should succeed"); // Verify the story returned to Coding. 
let item = crate::pipeline_state::read_typed("99866_story_block_test")
    .expect("read should succeed")
    .expect("item should exist after unblock");
assert_eq!(
    item.stage.dir_name(),
    "coding",
    "should return to coding after unblock"
);
assert!(
    matches!(item.stage, Stage::Coding),
    "stage should be Stage::Coding after unblock"
);
}

// ── Story 919: MergeFailure unblock → Merge regression ──────────────────

/// Regression test (story 919): unblocking a story in `MergeFailure` via
/// `transition_to_unblocked` transitions it to `Stage::Merge`, not `Coding`
/// or `Backlog`. After the unblock, the merge pipeline re-attempts the
/// squash-merge immediately.
#[test]
fn unblock_merge_failure_story_lands_in_merge() {
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "99893_story_merge_failure_unblock",
        "merge_failure",
        "---\nname: MergeFailure Unblock Test\n---\n# Story\n",
        crate::db::ItemMeta::named("MergeFailure Unblock Test"),
    );
    // Verify starting state is MergeFailure.
    let item = crate::pipeline_state::read_typed("99893_story_merge_failure_unblock")
        .expect("CRDT read should succeed")
        .expect("item should exist");
    assert!(
        matches!(item.stage, Stage::MergeFailure { .. }),
        "should start in MergeFailure: {:?}",
        item.stage
    );
    // Unblock routes through transition_to_unblocked (same path as unblock_story MCP).
    transition_to_unblocked("99893_story_merge_failure_unblock")
        .expect("transition_to_unblocked should succeed for MergeFailure story");
    // Story must land in Merge — the mergemaster re-attempts the squash.
    let item = crate::pipeline_state::read_typed("99893_story_merge_failure_unblock")
        .expect("CRDT read should succeed")
        .expect("item should exist after unblock");
    assert_eq!(
        item.stage.dir_name(),
        "merge",
        "MergeFailure story should land in Merge after unblock for immediate re-attempt: {:?}",
        item.stage
    );
    assert!(
        matches!(item.stage, Stage::Merge { .. }),
        "stage should be Stage::Merge after unblock, got: {:?}",
        item.stage
    );
    // auto_assign checks is_active() — Merge satisfies it.
    assert!(
        item.stage.is_active(),
        "Merge satisfies is_active() so auto_assign can pick it up: {:?}",
        item.stage
    );
}

// ── feature_branch_has_unmerged_changes tests ──────────────────────────── 

// Create a fresh git repo with one empty commit, so tests have a master branch
// to compare against.
fn init_git_repo(repo: &std::path::Path) {
    Command::new("git")
        .args(["init"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["config", "user.email", "test@test.com"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["config", "user.name", "Test"])
        .current_dir(repo)
        .output()
        .unwrap();
    // --allow-empty: no files needed, just a root commit on master.
    Command::new("git")
        .args(["commit", "--allow-empty", "-m", "init"])
        .current_dir(repo)
        .output()
        .unwrap();
}

/// Bug 226: feature_branch_has_unmerged_changes returns true when the
/// feature branch has commits not on master.
#[test]
fn feature_branch_has_unmerged_changes_detects_unmerged_code() {
    use std::fs;
    use tempfile::tempdir;
    let tmp = tempdir().unwrap();
    let repo = tmp.path();
    init_git_repo(repo);
    // Create a feature branch with a code commit.
    Command::new("git")
        .args(["checkout", "-b", "feature/story-50_story_test"])
        .current_dir(repo)
        .output()
        .unwrap();
    fs::write(repo.join("feature.rs"), "fn main() {}").unwrap();
    Command::new("git")
        .args(["add", "."])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["commit", "-m", "add feature"])
        .current_dir(repo)
        .output()
        .unwrap();
    Command::new("git")
        .args(["checkout", "master"])
        .current_dir(repo)
        .output()
        .unwrap();
    assert!(
        feature_branch_has_unmerged_changes(repo, "50_story_test"),
        "should detect unmerged changes on feature branch"
    );
}

/// Bug 226: feature_branch_has_unmerged_changes returns false when no
/// feature branch exists.
#[test]
fn feature_branch_has_unmerged_changes_false_when_no_branch() {
    use tempfile::tempdir;

    // A freshly-initialised repo has a master branch but no feature/story-*
    // branch at all, so there can be nothing to merge.
    let dir = tempdir().unwrap();
    let root = dir.path();
    init_git_repo(root);

    let has_unmerged = feature_branch_has_unmerged_changes(root, "99_nonexistent");
    assert!(!has_unmerged, "should return false when no feature branch");
}
}