huskies: merge 484_story_story_dependencies_in_pipeline_auto_assign

This commit is contained in:
dave
2026-04-04 21:43:29 +00:00
parent 26de009259
commit 5413a26406
6 changed files with 665 additions and 3 deletions
@@ -14,7 +14,8 @@ use super::scan::{
is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
has_merge_failure, has_review_hold, is_story_blocked, read_story_front_matter_agent,
has_merge_failure, has_review_hold, has_unmet_dependencies, is_story_blocked,
read_story_front_matter_agent,
};
impl AgentPool {
@@ -52,6 +53,14 @@ impl AgentPool {
continue;
}
// Skip stories whose dependencies haven't landed yet.
if has_unmet_dependencies(project_root, stage_dir, story_id) {
slog!(
"[auto-assign] Story '{story_id}' has unmet dependencies; skipping until deps are done."
);
continue;
}
// Skip stories in 4_merge/ that already have a reported merge failure.
// These need human intervention — auto-assigning a new mergemaster
// would just waste tokens on the same broken merge.
@@ -420,6 +429,74 @@ mod tests {
);
}
/// Story 484: auto_assign must skip stories whose depends_on entries are not
/// yet in 5_done or 6_archived.
#[tokio::test]
async fn auto_assign_skips_stories_with_unmet_dependencies() {
    // Build a minimal .huskies workspace with a single coder agent configured.
    let dir = tempfile::tempdir().unwrap();
    let project_root = dir.path();
    let huskies = project_root.join(".huskies");
    let current_stage = huskies.join("work/2_current");
    std::fs::create_dir_all(&current_stage).unwrap();

    let config = "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n";
    std::fs::write(huskies.join("project.toml"), config).unwrap();

    // Story 10 depends on 999 which is not done.
    let front_matter = "---\nname: Waiting\ndepends_on: [999]\n---\n";
    std::fs::write(current_stage.join("10_story_waiting.md"), front_matter).unwrap();

    let pool = AgentPool::new_test(3001);
    pool.auto_assign_available_work(project_root).await;

    // Nothing should have been handed to any agent: the only candidate story
    // is gated on an unfinished dependency.
    let assigned = pool.agents.lock().unwrap();
    assert!(
        assigned.is_empty(),
        "story with unmet deps should not be auto-assigned"
    );
}
/// Story 484: auto_assign must pick up a story once its dependency lands in 5_done.
#[tokio::test]
async fn auto_assign_picks_up_story_after_dep_completes() {
    // Minimal .huskies workspace: one configured coder agent, plus the
    // 2_current and 5_done stage directories this scenario needs.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let sk = root.join(".huskies");
    let current = sk.join("work/2_current");
    let done = sk.join("work/5_done");
    std::fs::create_dir_all(&current).unwrap();
    std::fs::create_dir_all(&done).unwrap();
    std::fs::write(
        sk.join("project.toml"),
        "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
    )
    .unwrap();
    // Dep 999 is now done.
    std::fs::write(done.join("999_story_dep.md"), "---\nname: Dep\n---\n").unwrap();
    // Story 10 depends on 999 which is done.
    std::fs::write(
        current.join("10_story_unblocked.md"),
        "---\nname: Unblocked\ndepends_on: [999]\n---\n",
    )
    .unwrap();
    // Use a distinct test port: the sibling unmet-deps test uses 3001, and
    // #[tokio::test] functions run concurrently on separate threads, so a
    // shared port would race if new_test ever binds it — TODO confirm whether
    // new_test actually opens a listener.
    let pool = AgentPool::new_test(3002);
    pool.auto_assign_available_work(root).await;
    // At least one agent must now be working (or queued to work) the story.
    let agents = pool.agents.lock().unwrap();
    let has_pending = agents.values().any(|a| {
        matches!(
            a.status,
            crate::agents::AgentStatus::Pending | crate::agents::AgentStatus::Running
        )
    });
    assert!(
        has_pending,
        "story with all deps done should be auto-assigned"
    );
}
/// Two concurrent auto_assign_available_work calls must not assign the same
/// agent to two stories simultaneously. After both complete, at most one
/// Pending/Running entry must exist per agent name.