huskies: merge 492_story_remove_filesystem_pipeline_state_and_store_story_content_in_database

This commit is contained in:
dave
2026-04-08 03:03:59 +00:00
parent f43d30bdae
commit 8fd49d563e
27 changed files with 1663 additions and 1295 deletions
+121 -370
View File
@@ -1,9 +1,11 @@
use std::path::Path;
use std::process::Command;
use crate::io::story_metadata::{clear_front_matter_field, write_rejection_notes};
use crate::io::story_metadata::clear_front_matter_field_in_content;
use crate::slog;
type ContentTransform = Option<Box<dyn Fn(&str) -> String>>;
pub(super) fn item_type_from_id(item_id: &str) -> &'static str {
// New format: {digits}_{type}_{slug}
let after_num = item_id.trim_start_matches(|c: char| c.is_ascii_digit());
@@ -16,8 +18,11 @@ pub(super) fn item_type_from_id(item_id: &str) -> &'static str {
}
}
/// Move `{story_id}.md` from the first matching `sources` dir to `target_dir`, clearing
/// `fields_to_clear`. Returns `Ok(Some(src_dir))` on move, `Ok(None)` if idempotent or missing_ok.
/// Move a work item to a new pipeline stage via the database.
///
/// Looks up the item in the CRDT to verify it exists in one of the expected
/// `sources` stages, then updates the stage. Optionally clears front-matter
/// fields from the stored content. Returns the source stage on success.
fn move_item<'a>(
project_root: &Path,
story_id: &str,
@@ -27,50 +32,97 @@ fn move_item<'a>(
missing_ok: bool,
fields_to_clear: &[&str],
) -> Result<Option<&'a str>, String> {
let sk = project_root.join(".huskies").join("work");
let target_dir_path = sk.join(target_dir);
let target_path = target_dir_path.join(format!("{story_id}.md"));
// Check if the item is already in the target stage or a done stage.
if let Some(item) = crate::crdt_state::read_item(story_id) {
if item.stage == target_dir
|| extra_done_dirs.iter().any(|d| item.stage == *d)
{
return Ok(None); // Idempotent: already there.
}
if target_path.exists()
|| extra_done_dirs
.iter()
.any(|d| sk.join(d).join(format!("{story_id}.md")).exists())
// Verify it's in one of the expected source stages.
let src_dir = sources.iter().find(|&&s| item.stage == s).copied();
if src_dir.is_none() && !missing_ok {
let locs = sources
.iter()
.map(|s| format!("work/{s}/"))
.collect::<Vec<_>>()
.join(" or ");
return Err(format!("Work item '{story_id}' not found in {locs}."));
}
let src_dir = src_dir.unwrap_or(sources[0]);
// Optionally clear front-matter fields from the stored content.
let transform: ContentTransform = if fields_to_clear.is_empty() {
None
} else {
let fields: Vec<String> = fields_to_clear.iter().map(|s| s.to_string()).collect();
Some(Box::new(move |content: &str| {
let mut result = content.to_string();
for field in &fields {
result = clear_front_matter_field_in_content(&result, field);
}
result
}))
};
crate::db::move_item_stage(
story_id,
target_dir,
transform.as_ref().map(|f| f.as_ref()),
);
slog!("[lifecycle] Moved '{story_id}' from work/{src_dir}/ to work/{target_dir}/");
return Ok(Some(src_dir));
}
// Item not found in CRDT — check the content store as fallback.
if crate::db::read_content(story_id).is_some() {
// Content exists but not in CRDT yet — write it through.
let content = crate::db::read_content(story_id).unwrap();
crate::db::write_item_with_content(story_id, target_dir, &content);
slog!("[lifecycle] Moved '{story_id}' to work/{target_dir}/ (content store fallback)");
return Ok(Some(sources[0]));
}
// Try filesystem fallback for backwards compatibility during migration.
{
let sk = project_root.join(".huskies").join("work");
if let Some((src_dir, src_path)) = sources.iter().find_map(|&s| {
let p = sk.join(s).join(format!("{story_id}.md"));
p.exists().then_some((s, p))
}) && let Ok(mut content) = std::fs::read_to_string(&src_path) {
// Optionally clear front-matter fields.
for field in fields_to_clear {
content = clear_front_matter_field_in_content(&content, field);
}
// Import to DB.
crate::db::write_item_with_content(story_id, target_dir, &content);
// Also move on filesystem for backwards compat.
let target_path = sk.join(target_dir).join(format!("{story_id}.md"));
let _ = std::fs::create_dir_all(sk.join(target_dir));
let _ = std::fs::write(&target_path, &content);
// Only remove the source if it differs from the target (avoid
// deleting the file when src and target are the same directory).
if src_dir != target_dir {
let _ = std::fs::remove_file(&src_path);
}
slog!("[lifecycle] Moved '{story_id}' from work/{src_dir}/ to work/{target_dir}/");
return Ok(Some(src_dir));
}
}
if missing_ok {
slog!("[lifecycle] Work item '{story_id}' not found; skipping move to work/{target_dir}/");
return Ok(None);
}
let (src_dir, src_path) = match sources.iter().find_map(|&s| {
let p = sk.join(s).join(format!("{story_id}.md"));
p.exists().then_some((s, p))
}) {
Some(t) => t,
None if missing_ok => {
slog!("[lifecycle] Work item '{story_id}' not found; skipping move to work/{target_dir}/");
return Ok(None);
}
None => {
let locs = sources.iter().map(|s| format!("work/{s}/")).collect::<Vec<_>>().join(" or ");
return Err(format!("Work item '{story_id}' not found in {locs}."));
}
};
std::fs::create_dir_all(&target_dir_path)
.map_err(|e| format!("Failed to create work/{target_dir}/ directory: {e}"))?;
std::fs::rename(&src_path, &target_path)
.map_err(|e| format!("Failed to move '{story_id}' to work/{target_dir}/: {e}"))?;
for field in fields_to_clear {
if let Err(e) = clear_front_matter_field(&target_path, field) {
slog!("[lifecycle] Warning: could not clear {field} from '{story_id}': {e}");
}
}
// Write state through CRDT ops (and legacy shadow table) so subscribers
// are notified of the stage transition without relying on the filesystem watcher.
crate::db::shadow_write(story_id, target_dir, &target_path);
slog!("[lifecycle] Moved '{story_id}' from work/{src_dir}/ to work/{target_dir}/");
Ok(Some(src_dir))
let locs = sources
.iter()
.map(|s| format!("work/{s}/"))
.collect::<Vec<_>>()
.join(" or ");
Err(format!("Work item '{story_id}' not found in {locs}."))
}
/// Move a work item (story, bug, or spike) from `work/1_backlog/` to `work/2_current/`.
@@ -163,9 +215,12 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
pub fn reject_story_from_qa(project_root: &Path, story_id: &str, notes: &str) -> Result<(), String> {
let moved = move_item(project_root, story_id, &["3_qa"], "2_current", &[], false, &["review_hold"])?;
if moved.is_some() && !notes.is_empty() {
let path = project_root.join(".huskies/work/2_current").join(format!("{story_id}.md"));
if let Err(e) = write_rejection_notes(&path, notes) {
slog!("[lifecycle] Warning: could not write rejection notes to '{story_id}': {e}");
// Append rejection notes to the stored content.
if let Some(content) = crate::db::read_content(story_id) {
let updated = crate::io::story_metadata::write_rejection_notes_to_content(&content, notes);
crate::db::write_content(story_id, &updated);
// Re-sync to DB.
crate::db::write_item_with_content(story_id, "2_current", &updated);
}
}
Ok(())
@@ -241,90 +296,37 @@ mod tests {
// ── move_story_to_current tests ────────────────────────────────────────────
#[test]
fn move_story_to_current_moves_file() {
use std::fs;
fn move_story_to_current_from_filesystem() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
let backlog = root.join(".huskies/work/1_backlog");
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&backlog).unwrap();
fs::create_dir_all(&current).unwrap();
fs::write(backlog.join("10_story_foo.md"), "test").unwrap();
let backlog = tmp.path().join(".huskies/work/1_backlog");
let current = tmp.path().join(".huskies/work/2_current");
std::fs::create_dir_all(&backlog).unwrap();
std::fs::create_dir_all(&current).unwrap();
std::fs::write(
backlog.join("10_story_foo.md"),
"---\nname: Test\n---\n# Story\n",
)
.unwrap();
move_story_to_current(root, "10_story_foo").unwrap();
move_story_to_current(tmp.path(), "10_story_foo").unwrap();
assert!(!backlog.join("10_story_foo.md").exists());
assert!(current.join("10_story_foo.md").exists());
// Verify the story was moved to current.
assert!(
current.join("10_story_foo.md").exists(),
"story should be in 2_current/"
);
assert!(
!backlog.join("10_story_foo.md").exists(),
"story should not still be in 1_backlog/"
);
}
#[test]
fn move_story_to_current_is_idempotent_when_already_current() {
    use std::fs;
    // Arrange: the story already lives in 2_current/ (no 1_backlog/ copy).
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("11_story_foo.md"), "test").unwrap();
    // Act: moving to current again must succeed (idempotent no-op) …
    move_story_to_current(root, "11_story_foo").unwrap();
    // … and must not delete or relocate the existing file.
    assert!(current.join("11_story_foo.md").exists());
}
#[test]
fn move_story_to_current_noop_when_not_in_backlog() {
fn move_story_to_current_noop_when_not_found() {
let tmp = tempfile::tempdir().unwrap();
assert!(move_story_to_current(tmp.path(), "99_missing").is_ok());
}
#[test]
fn move_bug_to_current_moves_from_backlog() {
    use std::fs;
    // Bugs share the same lifecycle as stories: 1_backlog/ -> 2_current/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let backlog = root.join(".huskies/work/1_backlog");
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&backlog).unwrap();
    fs::create_dir_all(&current).unwrap();
    fs::write(backlog.join("1_bug_test.md"), "# Bug 1\n").unwrap();
    move_story_to_current(root, "1_bug_test").unwrap();
    // The file must be moved, not copied: gone from the source stage.
    assert!(!backlog.join("1_bug_test.md").exists());
    assert!(current.join("1_bug_test.md").exists());
}
// ── close_bug_to_archive tests ─────────────────────────────────────────────
#[test]
fn close_bug_moves_from_current_to_archive() {
    use std::fs;
    // Closing an in-progress bug sends it from 2_current/ to 5_done/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("2_bug_test.md"), "# Bug 2\n").unwrap();
    close_bug_to_archive(root, "2_bug_test").unwrap();
    assert!(!current.join("2_bug_test.md").exists());
    assert!(root.join(".huskies/work/5_done/2_bug_test.md").exists());
}
#[test]
fn close_bug_moves_from_backlog_when_not_started() {
    use std::fs;
    // A bug that was never started can be closed straight from 1_backlog/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let backlog = root.join(".huskies/work/1_backlog");
    fs::create_dir_all(&backlog).unwrap();
    fs::write(backlog.join("3_bug_test.md"), "# Bug 3\n").unwrap();
    close_bug_to_archive(root, "3_bug_test").unwrap();
    assert!(!backlog.join("3_bug_test.md").exists());
    assert!(root.join(".huskies/work/5_done/3_bug_test.md").exists());
}
// ── item_type_from_id tests ────────────────────────────────────────────────
#[test]
@@ -335,119 +337,6 @@ mod tests {
assert_eq!(item_type_from_id("1_story_simple"), "story");
}
// ── move_story_to_merge tests ──────────────────────────────────────────────
#[test]
fn move_story_to_merge_moves_file() {
    use std::fs;
    // Happy path: a story in 2_current/ is moved into 4_merge/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("20_story_foo.md"), "test").unwrap();
    move_story_to_merge(root, "20_story_foo").unwrap();
    assert!(!current.join("20_story_foo.md").exists());
    assert!(root.join(".huskies/work/4_merge/20_story_foo.md").exists());
}
#[test]
fn move_story_to_merge_from_qa_dir() {
    use std::fs;
    // 3_qa/ is also an accepted source stage for the merge transition.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".huskies/work/3_qa");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::write(qa_dir.join("40_story_test.md"), "test").unwrap();
    move_story_to_merge(root, "40_story_test").unwrap();
    assert!(!qa_dir.join("40_story_test.md").exists());
    assert!(root.join(".huskies/work/4_merge/40_story_test.md").exists());
}
#[test]
fn move_story_to_merge_idempotent_when_already_in_merge() {
    use std::fs;
    // Re-running the transition when already in 4_merge/ is a no-op success.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let merge_dir = root.join(".huskies/work/4_merge");
    fs::create_dir_all(&merge_dir).unwrap();
    fs::write(merge_dir.join("21_story_test.md"), "test").unwrap();
    move_story_to_merge(root, "21_story_test").unwrap();
    assert!(merge_dir.join("21_story_test.md").exists());
}
#[test]
fn move_story_to_merge_errors_when_not_in_current_or_qa() {
    // A story absent from every accepted source stage yields a descriptive error.
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_merge(tmp.path(), "99_nonexistent");
    assert!(result.unwrap_err().contains("not found in work/2_current/ or work/3_qa/"));
}
// ── move_story_to_qa tests ────────────────────────────────────────────────
#[test]
fn move_story_to_qa_moves_file() {
    use std::fs;
    // Happy path: a story in 2_current/ is moved into 3_qa/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("30_story_qa.md"), "test").unwrap();
    move_story_to_qa(root, "30_story_qa").unwrap();
    assert!(!current.join("30_story_qa.md").exists());
    assert!(root.join(".huskies/work/3_qa/30_story_qa.md").exists());
}
#[test]
fn move_story_to_qa_idempotent_when_already_in_qa() {
    use std::fs;
    // Already in 3_qa/: the transition succeeds without touching the file.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".huskies/work/3_qa");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::write(qa_dir.join("31_story_test.md"), "test").unwrap();
    move_story_to_qa(root, "31_story_test").unwrap();
    assert!(qa_dir.join("31_story_test.md").exists());
}
#[test]
fn move_story_to_qa_errors_when_not_in_current() {
    // Missing from the only accepted source stage (2_current/) is an error.
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_qa(tmp.path(), "99_nonexistent");
    assert!(result.unwrap_err().contains("not found in work/2_current/"));
}
// ── move_story_to_done tests ──────────────────────────────────────────
#[test]
fn move_story_to_done_finds_in_merge_dir() {
    use std::fs;
    // A story in 4_merge/ can be completed into 5_done/.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let merge_dir = root.join(".huskies/work/4_merge");
    fs::create_dir_all(&merge_dir).unwrap();
    fs::write(merge_dir.join("22_story_test.md"), "test").unwrap();
    move_story_to_done(root, "22_story_test").unwrap();
    assert!(!merge_dir.join("22_story_test.md").exists());
    assert!(root.join(".huskies/work/5_done/22_story_test.md").exists());
}
#[test]
fn move_story_to_done_error_when_not_in_current_or_merge() {
    // The error message names the expected source stage(s), e.g. 4_merge.
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_done(tmp.path(), "99_nonexistent");
    assert!(result.unwrap_err().contains("4_merge"));
}
// ── feature_branch_has_unmerged_changes tests ────────────────────────────
fn init_git_repo(repo: &std::path::Path) {
@@ -528,142 +417,4 @@ mod tests {
"should return false when no feature branch"
);
}
// ── reject_story_from_qa tests ────────────────────────────────────────────
#[test]
fn reject_story_from_qa_moves_to_current() {
    use std::fs;
    // Rejection moves 3_qa/ -> 2_current/, appends the rejection notes,
    // and clears the `review_hold` front-matter field.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".huskies/work/3_qa");
    let current_dir = root.join(".huskies/work/2_current");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::create_dir_all(&current_dir).unwrap();
    fs::write(
        qa_dir.join("50_story_test.md"),
        "---\nname: Test\nreview_hold: true\n---\n# Story\n",
    )
    .unwrap();
    reject_story_from_qa(root, "50_story_test", "Button color wrong").unwrap();
    assert!(!qa_dir.join("50_story_test.md").exists());
    assert!(current_dir.join("50_story_test.md").exists());
    let contents = fs::read_to_string(current_dir.join("50_story_test.md")).unwrap();
    // Notes are written under a dedicated section heading.
    assert!(contents.contains("Button color wrong"));
    assert!(contents.contains("## QA Rejection Notes"));
    // The hold flag must be stripped so the story can be picked up again.
    assert!(!contents.contains("review_hold"));
}
#[test]
fn reject_story_from_qa_errors_when_not_in_qa() {
    // Rejecting a story that is not in 3_qa/ is an error.
    let tmp = tempfile::tempdir().unwrap();
    let result = reject_story_from_qa(tmp.path(), "99_nonexistent", "notes");
    assert!(result.unwrap_err().contains("not found in work/3_qa/"));
}
#[test]
fn reject_story_from_qa_idempotent_when_in_current() {
    use std::fs;
    // Already back in 2_current/: rejection is a harmless no-op.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current_dir = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current_dir).unwrap();
    fs::write(current_dir.join("51_story_test.md"), "---\nname: Test\n---\n# Story\n").unwrap();
    reject_story_from_qa(root, "51_story_test", "notes").unwrap();
    assert!(current_dir.join("51_story_test.md").exists());
}
// ── move_story_to_stage tests ─────────────────────────────────
#[test]
fn move_story_to_stage_moves_from_backlog_to_current() {
    use std::fs;
    // The generic stage mover returns the (from, to) stage names it used.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let backlog = root.join(".huskies/work/1_backlog");
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&backlog).unwrap();
    fs::create_dir_all(&current).unwrap();
    fs::write(backlog.join("60_story_move.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "60_story_move", "current").unwrap();
    assert_eq!(from, "backlog");
    assert_eq!(to, "current");
    assert!(!backlog.join("60_story_move.md").exists());
    assert!(current.join("60_story_move.md").exists());
}
#[test]
fn move_story_to_stage_moves_from_current_to_backlog() {
    use std::fs;
    // Backwards moves (demotion) are also supported.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    let backlog = root.join(".huskies/work/1_backlog");
    fs::create_dir_all(&current).unwrap();
    fs::create_dir_all(&backlog).unwrap();
    fs::write(current.join("61_story_back.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "61_story_back", "backlog").unwrap();
    assert_eq!(from, "current");
    assert_eq!(to, "backlog");
    assert!(!current.join("61_story_back.md").exists());
    assert!(backlog.join("61_story_back.md").exists());
}
#[test]
fn move_story_to_stage_idempotent_when_already_in_target() {
    use std::fs;
    // Moving to the stage the story is already in reports from == to.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".huskies/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("62_story_idem.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "62_story_idem", "current").unwrap();
    assert_eq!(from, "current");
    assert_eq!(to, "current");
    assert!(current.join("62_story_idem.md").exists());
}
#[test]
fn move_story_to_stage_invalid_target_returns_error() {
    // Unknown stage names are rejected up front.
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_stage(tmp.path(), "1_story_test", "invalid");
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("Invalid target_stage"));
}
#[test]
fn move_story_to_stage_not_found_returns_error() {
    // A story missing from every stage is a distinct "not found" error.
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_stage(tmp.path(), "99_story_ghost", "current");
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("not found in any pipeline stage"));
}
#[test]
fn move_story_to_stage_finds_in_qa_dir() {
    use std::fs;
    // The mover scans all stages to locate the story (here: 3_qa/).
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".huskies/work/3_qa");
    let backlog = root.join(".huskies/work/1_backlog");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::create_dir_all(&backlog).unwrap();
    fs::write(qa_dir.join("63_story_qa.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "63_story_qa", "backlog").unwrap();
    assert_eq!(from, "qa");
    assert_eq!(to, "backlog");
    assert!(!qa_dir.join("63_story_qa.md").exists());
    assert!(backlog.join("63_story_qa.md").exists());
}
}
@@ -30,11 +30,14 @@ impl AgentPool {
let items = scan_stage_items(project_root, "1_backlog");
for story_id in &items {
// Only promote stories that explicitly declare dependencies.
let story_path = project_root
.join(".huskies/work/1_backlog")
.join(format!("{story_id}.md"));
let has_deps = std::fs::read_to_string(&story_path)
.ok()
// Try content store first, fall back to filesystem.
let contents = crate::db::read_content(story_id).or_else(|| {
let story_path = project_root
.join(".huskies/work/1_backlog")
.join(format!("{story_id}.md"));
std::fs::read_to_string(&story_path).ok()
});
let has_deps = contents
.and_then(|c| parse_front_matter(&c).ok())
.and_then(|m| m.depends_on)
.map(|d| !d.is_empty())
@@ -121,17 +124,29 @@ impl AgentPool {
"[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
on feature branch. Writing merge_failure and blocking."
);
let story_path = project_root
.join(".huskies/work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let empty_diff_reason = "Feature branch has no code changes — the coder agent \
did not produce any commits.";
let _ = crate::io::story_metadata::write_merge_failure(
&story_path,
empty_diff_reason,
);
let _ = crate::io::story_metadata::write_blocked(&story_path);
// Write merge_failure and blocked to content store.
if let Some(contents) = crate::db::read_content(story_id) {
let updated = crate::io::story_metadata::write_merge_failure_in_content(
&contents,
empty_diff_reason,
);
let blocked = crate::io::story_metadata::write_blocked_in_content(&updated);
crate::db::write_content(story_id, &blocked);
crate::db::write_item_with_content(story_id, stage_dir, &blocked);
} else {
// Fallback: filesystem.
let story_path = project_root
.join(".huskies/work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let _ = crate::io::story_metadata::write_merge_failure(
&story_path,
empty_diff_reason,
);
let _ = crate::io::story_metadata::write_blocked(&story_path);
}
let _ = self.watcher_tx.send(crate::io::watcher::WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason: empty_diff_reason.to_string(),
+17 -8
View File
@@ -19,23 +19,32 @@ pub(in crate::agents::pool) fn is_agent_free(
}
pub(super) fn scan_stage_items(project_root: &Path, stage_dir: &str) -> Vec<String> {
let dir = project_root.join(".huskies").join("work").join(stage_dir);
if !dir.is_dir() {
return Vec::new();
use std::collections::BTreeSet;
let mut items = BTreeSet::new();
// Include CRDT items — the primary source of truth for pipeline state.
if let Some(all) = crate::crdt_state::read_all_items() {
for item in &all {
if item.stage == stage_dir {
items.insert(item.story_id.clone());
}
}
}
let mut items = Vec::new();
if let Ok(entries) = std::fs::read_dir(&dir) {
// Also include filesystem items (backwards compat / migration fallback).
let dir = project_root.join(".huskies").join("work").join(stage_dir);
if dir.is_dir() && let Ok(entries) = std::fs::read_dir(&dir) {
for entry in entries.flatten() {
let path = entry.path();
if path.extension().and_then(|e| e.to_str()) == Some("md")
&& let Some(stem) = path.file_stem().and_then(|s| s.to_str())
{
items.push(stem.to_string());
items.insert(stem.to_string());
}
}
}
items.sort();
items
items.into_iter().collect()
}
/// Return `true` if `story_id` has any active (pending/running) agent matching `stage`.
@@ -2,36 +2,45 @@
use std::path::Path;
/// Read story contents, preferring the DB content store over the filesystem.
///
/// Returns `None` only when the story is in neither the store nor any
/// pipeline stage directory on disk.
fn read_story_contents(project_root: &Path, story_id: &str) -> Option<String> {
    // Primary source of truth: the in-memory content store (backed by SQLite).
    crate::db::read_content(story_id).or_else(|| {
        // Migration fallback: look for `{story_id}.md` in each stage directory.
        ["1_backlog", "2_current", "3_qa", "4_merge", "5_done", "6_archived"]
            .iter()
            .find_map(|stage| {
                let path = project_root
                    .join(".huskies/work")
                    .join(stage)
                    .join(format!("{story_id}.md"));
                std::fs::read_to_string(&path).ok()
            })
    })
}
/// Read the optional `agent:` field from the front matter of a story file.
///
/// Returns `Some(agent_name)` if the front matter specifies an agent, or `None`
/// if the field is absent or the file cannot be read / parsed.
pub(super) fn read_story_front_matter_agent(
project_root: &Path,
stage_dir: &str,
_stage_dir: &str,
story_id: &str,
) -> Option<String> {
use crate::io::story_metadata::parse_front_matter;
let path = project_root
.join(".huskies")
.join("work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let contents = std::fs::read_to_string(path).ok()?;
let contents = read_story_contents(project_root, story_id)?;
parse_front_matter(&contents).ok()?.agent
}
/// Return `true` if the story file in the given stage has `review_hold: true` in its front matter.
pub(super) fn has_review_hold(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
pub(super) fn has_review_hold(project_root: &Path, _stage_dir: &str, story_id: &str) -> bool {
use crate::io::story_metadata::parse_front_matter;
let path = project_root
.join(".huskies")
.join("work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let contents = match std::fs::read_to_string(path) {
Ok(c) => c,
Err(_) => return false,
let contents = match read_story_contents(project_root, story_id) {
Some(c) => c,
None => return false,
};
parse_front_matter(&contents)
.ok()
@@ -40,16 +49,11 @@ pub(super) fn has_review_hold(project_root: &Path, stage_dir: &str, story_id: &s
}
/// Return `true` if the story file has `blocked: true` in its front matter.
pub(super) fn is_story_blocked(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
pub(super) fn is_story_blocked(project_root: &Path, _stage_dir: &str, story_id: &str) -> bool {
use crate::io::story_metadata::parse_front_matter;
let path = project_root
.join(".huskies")
.join("work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let contents = match std::fs::read_to_string(path) {
Ok(c) => c,
Err(_) => return false,
let contents = match read_story_contents(project_root, story_id) {
Some(c) => c,
None => return false,
};
parse_front_matter(&contents)
.ok()
@@ -81,16 +85,11 @@ pub(super) fn has_unmet_dependencies(
}
/// Return `true` if the story file has a `merge_failure` field in its front matter.
pub(super) fn has_merge_failure(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
pub(super) fn has_merge_failure(project_root: &Path, _stage_dir: &str, story_id: &str) -> bool {
use crate::io::story_metadata::parse_front_matter;
let path = project_root
.join(".huskies")
.join("work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let contents = match std::fs::read_to_string(path) {
Ok(c) => c,
Err(_) => return false,
let contents = match read_story_contents(project_root, story_id) {
Some(c) => c,
None => return false,
};
parse_front_matter(&contents)
.ok()
+131 -135
View File
@@ -53,11 +53,7 @@ impl AgentPool {
crate::io::story_metadata::QaMode::Human
} else {
let default_qa = config.default_qa_mode();
// Story is in 2_current/ when a coder completes.
let story_path = project_root
.join(".huskies/work/2_current")
.join(format!("{story_id}.md"));
crate::io::story_metadata::resolve_qa_mode(&story_path, default_qa)
resolve_qa_mode_from_store(&project_root, story_id, default_qa)
}
};
@@ -104,24 +100,13 @@ impl AgentPool {
if let Err(e) = crate::agents::lifecycle::move_story_to_qa(&project_root, story_id) {
slog_error!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
} else {
let qa_dir = project_root.join(".huskies/work/3_qa");
let story_path = qa_dir.join(format!("{story_id}.md"));
if let Err(e) =
crate::io::story_metadata::write_review_hold(&story_path)
{
slog_error!(
"[pipeline] Failed to set review_hold on '{story_id}': {e}"
);
}
write_review_hold_to_store(story_id);
}
}
}
} else {
// Increment retry count and check if blocked.
let story_path = project_root
.join(".huskies/work/2_current")
.join(format!("{story_id}.md"));
if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "coder") {
if let Some(reason) = should_block_story(story_id, config.max_retries, "coder") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
@@ -174,11 +159,9 @@ impl AgentPool {
if item_type == "spike" {
true // Spikes always need human review.
} else {
let qa_dir = project_root.join(".huskies/work/3_qa");
let story_path = qa_dir.join(format!("{story_id}.md"));
let default_qa = config.default_qa_mode();
matches!(
crate::io::story_metadata::resolve_qa_mode(&story_path, default_qa),
resolve_qa_mode_from_store(&project_root, story_id, default_qa),
crate::io::story_metadata::QaMode::Human
)
}
@@ -186,15 +169,7 @@ impl AgentPool {
if needs_human_review {
// Hold in 3_qa/ for human review.
let qa_dir = project_root.join(".huskies/work/3_qa");
let story_path = qa_dir.join(format!("{story_id}.md"));
if let Err(e) =
crate::io::story_metadata::write_review_hold(&story_path)
{
slog_error!(
"[pipeline] Failed to set review_hold on '{story_id}': {e}"
);
}
write_review_hold_to_store(story_id);
slog!(
"[pipeline] QA passed for '{story_id}'. \
Holding for human review. \
@@ -220,51 +195,21 @@ impl AgentPool {
);
}
}
} else {
let story_path = project_root
.join(".huskies/work/3_qa")
.join(format!("{story_id}.md"));
if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "qa-coverage") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason,
});
} else {
slog!(
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
);
let context = format!(
"\n\n---\n## Coverage Gate Failed\n\
The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
Please improve test coverage until the coverage gate passes.",
coverage_output
);
if let Err(e) = self
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await
{
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
}
}
}
} else {
let story_path = project_root
.join(".huskies/work/3_qa")
.join(format!("{story_id}.md"));
if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "qa") {
} else if let Some(reason) = should_block_story(story_id, config.max_retries, "qa-coverage") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason,
});
} else {
slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
slog!(
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
);
let context = format!(
"\n\n---\n## Previous QA Attempt Failed\n\
The acceptance gates failed with the following output:\n{}\n\n\
Please re-run and fix the issues.",
completion.gate_output
"\n\n---\n## Coverage Gate Failed\n\
The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
Please improve test coverage until the coverage gate passes.",
coverage_output
);
if let Err(e) = self
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
@@ -273,6 +218,26 @@ impl AgentPool {
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
}
}
} else if let Some(reason) = should_block_story(story_id, config.max_retries, "qa") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason,
});
} else {
slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
let context = format!(
"\n\n---\n## Previous QA Attempt Failed\n\
The acceptance gates failed with the following output:\n{}\n\n\
Please re-run and fix the issues.",
completion.gate_output
);
if let Err(e) = self
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
.await
{
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
}
}
}
PipelineStage::Mergemaster => {
@@ -328,39 +293,34 @@ impl AgentPool {
slog!(
"[pipeline] Story '{story_id}' done. Worktree preserved for inspection."
);
} else if let Some(reason) = should_block_story(story_id, config.max_retries, "mergemaster") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason,
});
} else {
let story_path = project_root
.join(".huskies/work/4_merge")
.join(format!("{story_id}.md"));
if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "mergemaster") {
// Story has exceeded retry limit — do not restart.
let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason,
});
} else {
slog!(
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
slog!(
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
);
let context = format!(
"\n\n---\n## Post-Merge Test Failed\n\
The tests on master failed with the following output:\n{}\n\n\
Please investigate and resolve the failures, then call merge_agent_work again.",
output
);
if let Err(e) = self
.start_agent(
&project_root,
story_id,
Some("mergemaster"),
Some(&context),
)
.await
{
slog_error!(
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
);
let context = format!(
"\n\n---\n## Post-Merge Test Failed\n\
The tests on master failed with the following output:\n{}\n\n\
Please investigate and resolve the failures, then call merge_agent_work again.",
output
);
if let Err(e) = self
.start_agent(
&project_root,
story_id,
Some("mergemaster"),
Some(&context),
)
.await
{
slog_error!(
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
);
}
}
}
}
@@ -413,43 +373,77 @@ pub(super) fn spawn_pipeline_advance(
});
}
/// Resolve QA mode from the content store (or filesystem fallback).
///
/// Prefers the story content held in the database; if absent, falls back to
/// locating the story file on disk. Returns `default` when neither is found.
fn resolve_qa_mode_from_store(
    project_root: &Path,
    story_id: &str,
    default: crate::io::story_metadata::QaMode,
) -> crate::io::story_metadata::QaMode {
    if let Some(contents) = crate::db::read_content(story_id) {
        // Primary source of truth: content stored in the database.
        crate::io::story_metadata::resolve_qa_mode_from_content(&contents, default)
    } else if let Ok(path) = crate::http::workflow::find_story_file_on_disk(project_root, story_id) {
        // Backwards-compat fallback: read front matter from the filesystem.
        crate::io::story_metadata::resolve_qa_mode(&path, default)
    } else {
        default
    }
}
/// Write review_hold to the content store.
///
/// Reads the story's stored content, applies the review-hold front-matter
/// update, and writes it back — both to the content store and, as a shadow
/// write, to SQLite under the story's current stage. Logs an error and does
/// nothing if the story has no stored content.
fn write_review_hold_to_store(story_id: &str) {
    let Some(contents) = crate::db::read_content(story_id) else {
        slog_error!("[pipeline] Cannot write review_hold for '{story_id}': no content in store");
        return;
    };
    let updated = crate::io::story_metadata::write_review_hold_in_content(&contents);
    crate::db::write_content(story_id, &updated);
    // Also persist to SQLite via shadow write; default the stage to "3_qa"
    // when the CRDT has no entry for this story.
    let stage = crate::crdt_state::read_item(story_id)
        .map(|i| i.stage)
        .unwrap_or_else(|| "3_qa".to_string());
    crate::db::write_item_with_content(story_id, &stage, &updated);
}
/// Increment retry_count and block the story if it exceeds `max_retries`.
///
/// Returns `Some(reason)` if the story is now blocked (caller should NOT restart the agent).
/// Returns `None` if the story may be retried.
/// When `max_retries` is 0, retry limits are disabled.
fn should_block_story(story_path: &Path, max_retries: u32, story_id: &str, stage_label: &str) -> Option<String> {
use crate::io::story_metadata::{increment_retry_count, write_blocked};
fn should_block_story(story_id: &str, max_retries: u32, stage_label: &str) -> Option<String> {
use crate::io::story_metadata::{increment_retry_count_in_content, write_blocked_in_content};
if max_retries == 0 {
// Retry limits disabled.
return None;
}
match increment_retry_count(story_path) {
Ok(new_count) => {
if new_count >= max_retries {
slog_warn!(
"[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
at {stage_label} stage. Marking as blocked."
);
if let Err(e) = write_blocked(story_path) {
slog_error!("[pipeline] Failed to write blocked flag for '{story_id}': {e}");
}
Some(format!(
"Retry limit exceeded ({new_count}/{max_retries}) at {stage_label} stage"
))
} else {
slog!(
"[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
);
None
}
}
Err(e) => {
slog_error!("[pipeline] Failed to increment retry_count for '{story_id}': {e}");
None // Don't block on error — allow retry.
if let Some(contents) = crate::db::read_content(story_id) {
let (updated, new_count) = increment_retry_count_in_content(&contents);
crate::db::write_content(story_id, &updated);
let stage = crate::crdt_state::read_item(story_id)
.map(|i| i.stage)
.unwrap_or_else(|| "2_current".to_string());
crate::db::write_item_with_content(story_id, &stage, &updated);
if new_count >= max_retries {
slog_warn!(
"[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
at {stage_label} stage. Marking as blocked."
);
let blocked = write_blocked_in_content(&updated);
crate::db::write_content(story_id, &blocked);
crate::db::write_item_with_content(story_id, &stage, &blocked);
Some(format!(
"Retry limit exceeded ({new_count}/{max_retries}) at {stage_label} stage"
))
} else {
slog!(
"[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
);
None
}
} else {
slog_error!("[pipeline] Failed to read content for '{story_id}' to increment retry_count");
None
}
}
@@ -468,14 +462,15 @@ mod tests {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
// Set up story in 2_current/ (no qa frontmatter → uses project default "server")
// Set up story in 2_current/ (no qa frontmatter → uses project default "server").
// Use a unique high-numbered ID to avoid collision with the agent_qa test.
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(current.join("50_story_test.md"), "test").unwrap();
fs::write(current.join("9908_story_server_qa.md"), "test").unwrap();
let pool = AgentPool::new_test(3001);
pool.run_pipeline_advance(
"50_story_test",
"9908_story_server_qa",
"coder-1",
CompletionReport {
summary: "done".to_string(),
@@ -490,12 +485,12 @@ mod tests {
// With default qa: server, story skips QA and goes straight to 4_merge/
assert!(
root.join(".huskies/work/4_merge/50_story_test.md")
root.join(".huskies/work/4_merge/9908_story_server_qa.md")
.exists(),
"story should be in 4_merge/"
);
assert!(
!current.join("50_story_test.md").exists(),
!current.join("9908_story_server_qa.md").exists(),
"story should not still be in 2_current/"
);
}
@@ -506,18 +501,19 @@ mod tests {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
// Set up story in 2_current/ with qa: agent frontmatter
// Set up story in 2_current/ with qa: agent frontmatter.
// Use a unique high-numbered ID to avoid collision with the server_qa test.
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("50_story_test.md"),
current.join("9909_story_agent_qa.md"),
"---\nname: Test\nqa: agent\n---\ntest",
)
.unwrap();
let pool = AgentPool::new_test(3001);
pool.run_pipeline_advance(
"50_story_test",
"9909_story_agent_qa",
"coder-1",
CompletionReport {
summary: "done".to_string(),
@@ -532,11 +528,11 @@ mod tests {
// With qa: agent, story should move to 3_qa/
assert!(
root.join(".huskies/work/3_qa/50_story_test.md").exists(),
root.join(".huskies/work/3_qa/9909_story_agent_qa.md").exists(),
"story should be in 3_qa/"
);
assert!(
!current.join("50_story_test.md").exists(),
!current.join("9909_story_agent_qa.md").exists(),
"story should not still be in 2_current/"
);
}
+10 -1
View File
@@ -1440,7 +1440,9 @@ stage = "coder"
let tmp = tempfile::tempdir().unwrap();
let sk = tmp.path().join(".huskies");
let backlog = sk.join("work/1_backlog");
let current = sk.join("work/2_current");
std::fs::create_dir_all(&backlog).unwrap();
std::fs::create_dir_all(&current).unwrap();
std::fs::write(
sk.join("project.toml"),
r#"
@@ -1454,11 +1456,18 @@ stage = "coder"
"#,
)
.unwrap();
let story_content = "---\nname: Test Story\nagent: coder-opus\n---\n# Story 368\n";
std::fs::write(
backlog.join("368_story_test.md"),
"---\nname: Test Story\nagent: coder-opus\n---\n# Story 368\n",
story_content,
)
.unwrap();
// Also write to the filesystem current dir and content store so that
// start_agent reads the correct front matter even when another test has
// left a stale entry for "368_story_test" in the global CRDT.
std::fs::write(current.join("368_story_test.md"), story_content).unwrap();
crate::db::ensure_content_store();
crate::db::write_item_with_content("368_story_test", "2_current", story_content);
let pool = AgentPool::new_test(3011);
// Preferred agent is busy — should NOT fall back to coder-sonnet.
+11
View File
@@ -24,6 +24,17 @@ impl AgentPool {
/// story is not in any active stage (`2_current/`, `3_qa/`, `4_merge/`).
pub(super) fn find_active_story_stage(project_root: &Path, story_id: &str) -> Option<&'static str> {
const STAGES: [&str; 3] = ["2_current", "3_qa", "4_merge"];
// Try CRDT first — primary source of truth.
if let Some(item) = crate::crdt_state::read_item(story_id) {
for stage in &STAGES {
if item.stage == *stage {
return Some(stage);
}
}
}
// Also check filesystem (backwards compat / tests).
for stage in &STAGES {
let path = project_root
.join(".huskies")