huskies: merge 827

This commit is contained in:
dave
2026-04-28 12:57:28 +00:00
parent 1bd01eb9d4
commit 36ca8d5e3b
4 changed files with 223 additions and 5 deletions
@@ -14,8 +14,9 @@ use super::scan::{
is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
check_archived_dependencies, has_merge_failure, has_review_hold, has_unmet_dependencies,
is_story_blocked, is_story_frozen, read_story_front_matter_agent,
check_archived_dependencies, has_content_conflict_failure, has_merge_failure,
has_mergemaster_attempted, has_review_hold, has_unmet_dependencies, is_story_blocked,
is_story_frozen, read_story_front_matter_agent,
};
impl AgentPool {
@@ -246,9 +247,66 @@ impl AgentPool {
// call invokes the LLM-driven recovery path.
let merge_items = scan_stage_items(project_root, "4_merge");
for story_id in &merge_items {
// Skip stories with an already-recorded merge failure — they need
// human intervention (operator can call start_agent mergemaster).
// Stories with a recorded merge failure may be eligible for
// automatic mergemaster dispatch when the failure is a content
// conflict — otherwise they need human intervention.
if has_merge_failure(project_root, "4_merge", story_id) {
// Auto-spawn mergemaster for content conflicts, but only once.
if has_content_conflict_failure(project_root, "4_merge", story_id)
&& !has_mergemaster_attempted(project_root, "4_merge", story_id)
&& !is_story_blocked(project_root, "4_merge", story_id)
{
// Find the mergemaster agent.
let mergemaster_agent = {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
slog_error!(
"[auto-assign] Failed to lock agents for mergemaster check: {e}"
);
continue;
}
};
if is_story_assigned_for_stage(
&config,
&agents,
story_id,
&PipelineStage::Mergemaster,
) {
// Already running — don't spawn again.
None
} else {
find_free_agent_for_stage(&config, &agents, &PipelineStage::Mergemaster)
.map(str::to_string)
}
};
if let Some(agent_name) = mergemaster_agent {
slog!(
"[auto-assign] Content conflict on '{story_id}'; \
auto-spawning mergemaster '{agent_name}'."
);
// Record mergemaster_attempted before spawning so a
// crash/restart doesn't re-trigger an infinite loop.
if let Some(contents) = crate::db::read_content(story_id) {
let updated =
crate::io::story_metadata::write_mergemaster_attempted_in_content(
&contents,
);
crate::db::write_content(story_id, &updated);
crate::db::write_item_with_content(story_id, "4_merge", &updated);
}
if let Err(e) = self
.start_agent(project_root, story_id, Some(&agent_name), None, None)
.await
{
slog!(
"[auto-assign] Failed to start mergemaster '{agent_name}' \
for '{story_id}': {e}"
);
}
}
}
continue;
}
@@ -782,6 +840,110 @@ mod tests {
);
}
// ── Story 827: auto-spawn mergemaster on content conflict ─────────────────
/// A story in 4_merge with a content-conflict merge_failure and no
/// mergemaster_attempted flag must trigger an auto-spawn of mergemaster.
#[tokio::test]
async fn auto_assign_spawns_mergemaster_for_content_conflict() {
    // Project skeleton: a temp dir whose .huskies/project.toml declares a
    // single mergemaster agent.
    let dir = tempfile::tempdir().unwrap();
    let config_dir = dir.path().join(".huskies");
    std::fs::create_dir_all(&config_dir).unwrap();
    std::fs::write(
        config_dir.join("project.toml"),
        "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
    )
    .unwrap();
    crate::db::ensure_content_store();
    // Seed a 4_merge story whose merge_failure records a content conflict.
    crate::db::write_item_with_content(
        "9860_story_conflict",
        "4_merge",
        "---\nname: Conflict\nmerge_failure: \"CONFLICT (content): server/src/lib.rs\"\n---\n",
    );
    let agent_pool = AgentPool::new_test(3001);
    agent_pool.auto_assign_available_work(dir.path()).await;
    // The pool must now hold a live mergemaster entry keyed to the story.
    let locked = agent_pool.agents.lock().unwrap();
    let spawned = locked.iter().any(|(key, agent)| {
        let live = matches!(agent.status, AgentStatus::Pending | AgentStatus::Running);
        live && agent.agent_name == "mergemaster" && key.contains("9860_story_conflict")
    });
    assert!(
        spawned,
        "mergemaster should be spawned for a content-conflict story"
    );
}
/// A story with merge_failure containing only "nothing to commit" must NOT
/// auto-spawn mergemaster.
#[tokio::test]
async fn auto_assign_does_not_spawn_mergemaster_for_non_conflict_failure() {
    // Same skeleton as the positive-path test: one configured mergemaster.
    let workdir = tempfile::tempdir().unwrap();
    let huskies = workdir.path().join(".huskies");
    std::fs::create_dir_all(&huskies).unwrap();
    std::fs::write(
        huskies.join("project.toml"),
        "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
    )
    .unwrap();
    crate::db::ensure_content_store();
    // The recorded failure text is benign — no "CONFLICT (content)" marker.
    crate::db::write_item_with_content(
        "9861_story_nothing",
        "4_merge",
        "---\nname: Nothing\nmerge_failure: \"nothing to commit, working tree clean\"\n---\n",
    );
    let pool = AgentPool::new_test(3001);
    pool.auto_assign_available_work(workdir.path()).await;
    // No live mergemaster entry for this story may exist after the pass.
    let guard = pool.agents.lock().unwrap();
    let spawned = guard.iter().any(|(key, agent)| {
        agent.agent_name == "mergemaster"
            && key.contains("9861_story_nothing")
            && matches!(agent.status, AgentStatus::Pending | AgentStatus::Running)
    });
    assert!(
        !spawned,
        "mergemaster must not be spawned for non-conflict failures"
    );
}
/// A story with mergemaster_attempted: true must NOT auto-spawn again, even
/// if the merge_failure still contains a content conflict.
#[tokio::test]
async fn auto_assign_does_not_respawn_mergemaster_when_already_attempted() {
    // Skeleton project with a single mergemaster agent declared.
    let root = tempfile::tempdir().unwrap();
    let cfg = root.path().join(".huskies");
    std::fs::create_dir_all(&cfg).unwrap();
    std::fs::write(
        cfg.join("project.toml"),
        "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
    )
    .unwrap();
    crate::db::ensure_content_store();
    // A conflict is still present, but the attempted flag must suppress any
    // further automatic dispatch.
    crate::db::write_item_with_content(
        "9862_story_attempted",
        "4_merge",
        "---\nname: Already tried\nmerge_failure: \"CONFLICT (content): foo.rs\"\nmergemaster_attempted: true\n---\n",
    );
    let pool = AgentPool::new_test(3001);
    pool.auto_assign_available_work(root.path()).await;
    let agents_guard = pool.agents.lock().unwrap();
    let respawned = agents_guard.iter().any(|(key, agent)| {
        let live = matches!(agent.status, AgentStatus::Pending | AgentStatus::Running);
        live && key.contains("9862_story_attempted") && agent.agent_name == "mergemaster"
    });
    assert!(
        !respawned,
        "mergemaster must not re-spawn when mergemaster_attempted is true"
    );
}
/// Two concurrent auto_assign_available_work calls must not assign the same
/// agent to two stories simultaneously. After both complete, at most one
/// Pending/Running entry must exist per agent name.