//! Auto-assign: scan pipeline stages and dispatch free agents to unassigned stories.

use std::path::Path;

use crate::config::ProjectConfig;
use crate::slog_warn;

use super::super::AgentPool;

impl AgentPool {
    /// Scan all active pipeline stages and start free agents for any unassigned work.
    ///
    /// Order of operations:
    /// 1. Promote backlog stories whose `depends_on` are all satisfied.
    /// 2. Assign coder and QA agents to stories in `2_current/` and `3_qa/`.
    /// 3. Trigger server-side merges (or auto-spawn mergemaster) for `4_merge/`.
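    ///
    /// A minimal invocation sketch (the setup around the call is illustrative,
    /// not prescribed by this module; only the call itself is real):
    ///
    /// ```ignore
    /// let pool = AgentPool::new_test(3001);
    /// pool.auto_assign_available_work(std::path::Path::new(".")).await;
    /// ```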
    pub async fn auto_assign_available_work(&self, project_root: &Path) {
        // Promote any backlog stories whose dependencies are all done.
        self.promote_ready_backlog_stories(project_root);

        let config = match ProjectConfig::load(project_root) {
            Ok(c) => c,
            Err(e) => {
                slog_warn!("[auto-assign] Failed to load project config: {e}");
                return;
            }
        };

        // Process the coder (2_current/) and QA (3_qa/) stages.
        self.assign_pipeline_stages(project_root, &config).await;

        // Process the merge (4_merge/) stage.
        self.assign_merge_stage(project_root, &config).await;
    }
}

// ── Tests ──────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::super::super::AgentPool;
    use crate::agents::AgentStatus;
    use crate::io::watcher::WatcherEvent;
    use tokio::sync::broadcast;
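
    // Test convention for the cases below: each test builds a throwaway
    // `.huskies` project under a tempdir, writes a minimal `project.toml`,
    // seeds stories (via the CRDT content store where noted), runs one
    // auto-assign pass, then inspects the pool's agent map. The content store
    // is process-global, so several tests key their assertions to unique
    // story ids to avoid cross-test interference.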

    /// Story 203: auto_assign_available_work must detect a story in 2_current/
    /// with no active agent and start an agent for it.
    #[tokio::test]
    async fn auto_assign_picks_up_story_queued_in_current() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Place the story in 2_current/ via CRDT (the only source of truth).
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "story-3",
            "2_current",
            "---\nname: Story 3\n---\n",
            crate::db::ItemMeta::named("Story 3"),
        );

        let pool = AgentPool::new_test(3001);
        // No agents are running — coder-1 is free.

        // auto_assign will try to call start_agent, which will attempt to create
        // a worktree (will fail without a git repo) — that is fine. We only need
        // to verify the agent is registered as Pending before the background
        // task eventually fails.
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let has_pending = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            has_pending,
"auto_assign should have started coder-1 for story-3, but pool is empty"
        );
    }

    /// Story 265: auto_assign_available_work must skip spikes in 3_qa/ that
    /// have review_hold: true set in their front matter.
    #[tokio::test]
    async fn auto_assign_skips_spikes_with_review_hold() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();

        // Create project.toml with a QA agent.
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
"[[agents]]\nname = \"qa\"\nrole = \"qa\"\nmodel = \"test\"\nprompt = \"test\"\n",
        )
        .unwrap();

        // Put a spike in 3_qa/ with review_hold: true.
        let qa_dir = root.join(".huskies/work/3_qa");
        std::fs::create_dir_all(&qa_dir).unwrap();
        std::fs::write(
            qa_dir.join("20_spike_test.md"),
            "---\nname: Test Spike\nreview_hold: true\n---\n# Spike\n",
        )
        .unwrap();

        let (watcher_tx, _) = broadcast::channel::<WatcherEvent>(4);
        let pool = AgentPool::new(3001, watcher_tx);

        pool.auto_assign_available_work(root).await;

        // No agent should have been started for the spike.
        let agents = pool.agents.lock().unwrap();
        assert!(
            agents.is_empty(),
            "No agents should be assigned to a spike with review_hold"
        );
    }

    // ── Story 279: auto-assign respects agent stage from front matter ──────────
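
    // Preference rule exercised by the tests below: a story's `agent:` front
    // matter is honored only when that agent's configured `stage` matches the
    // story's pipeline stage; otherwise auto-assign falls back to a free agent
    // of the correct stage, or assigns nobody when none exists.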

    /// When a story in 3_qa/ has `agent: coder-1` in its front matter but
    /// coder-1 is a coder-stage agent, auto-assign must NOT assign coder-1.
    /// Instead it should fall back to a free QA-stage agent.
    #[tokio::test]
    async fn auto_assign_ignores_coder_preference_when_story_is_in_qa_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"qa-1\"\nstage = \"qa\"\n",
        )
        .unwrap();
        // Story in 3_qa/ with a preferred coder-stage agent — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9930_story_qa1",
            "3_qa",
            "---\nname: QA Story\nagent: coder-1\n---\n",
            crate::db::ItemMeta {
                name: Some("QA Story".into()),
                agent: Some("coder-1".into()),
                ..Default::default()
            },
        );

        let pool = AgentPool::new_test(3001);

        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // coder-1 must NOT have been assigned to the QA story (wrong stage).
        let coder_assigned_to_qa = agents.iter().any(|(key, a)| {
            key.contains("9930_story_qa1")
                && a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder_assigned_to_qa,
            "coder-1 should not be assigned to a QA-stage story"
        );
        // qa-1 should have been assigned instead.
        let qa_assigned = agents.iter().any(|(key, a)| {
            key.contains("9930_story_qa1")
                && a.agent_name == "qa-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            qa_assigned,
            "qa-1 should be assigned as fallback for the QA-stage story"
        );
    }

    /// When a story in 2_current/ has `agent: coder-1` in its front matter and
    /// coder-1 is a coder-stage agent, auto-assign must respect the preference
    /// and assign coder-1 (not fall back to some other coder).
    #[tokio::test]
    async fn auto_assign_respects_coder_preference_when_story_is_in_current_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(sk.join("work/2_current")).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"coder-2\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Story in 2_current/ with a preferred coder-1 agent.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "story-pref",
            "2_current",
            "---\nname: Coder Story\nagent: coder-1\n---\n",
            crate::db::ItemMeta {
                name: Some("Coder Story".into()),
                agent: Some("coder-1".into()),
                ..Default::default()
            },
        );

        let pool = AgentPool::new_test(3001);

        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // coder-1 should have been picked (it matches the stage and is preferred).
        let coder1_assigned = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            coder1_assigned,
            "coder-1 should be assigned when it matches the stage and is preferred"
        );
        // coder-2 must NOT be assigned (not preferred).
        let coder2_assigned = agents.values().any(|a| {
            a.agent_name == "coder-2"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder2_assigned,
            "coder-2 should not be assigned when coder-1 is explicitly preferred"
        );
    }

    /// When the preferred agent's stage mismatches and no other agent of the
    /// correct stage is available, auto-assign must not start any agent for that
    /// story (no panic, no error).
    #[tokio::test]
    async fn auto_assign_stage_mismatch_with_no_fallback_starts_no_agent() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        // Only a coder agent is configured — no QA agent exists.
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Story in 3_qa/ requests coder-1 (wrong stage) and no QA agent exists — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9931_story_noqa",
            "3_qa",
            "---\nname: QA Story No Agent\nagent: coder-1\n---\n",
            crate::db::ItemMeta {
                name: Some("QA Story No Agent".into()),
                agent: Some("coder-1".into()),
                ..Default::default()
            },
        );

        let pool = AgentPool::new_test(3001);

        // Must not panic.
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // No agent should be assigned to the specific QA story (coder-1 may
        // be assigned to leaked 2_current items from the global CRDT store).
        let assigned_to_qa_story = agents.iter().any(|(key, a)| {
            key.contains("9931_story_noqa")
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !assigned_to_qa_story,
            "No agent should be started when no stage-appropriate agent is available"
        );
    }
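
    // Story 484 below: dependency gating at assignment time. A story already
    // sitting in 2_current is still skipped while any `depends_on` id is
    // unfinished, and is picked up once every dep lands in 5_done.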

    /// Story 484: auto_assign must skip stories whose depends_on entries are not
    /// yet in 5_done or 6_archived.
    #[tokio::test]
    async fn auto_assign_skips_stories_with_unmet_dependencies() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Story 9932 depends on 9999 which is not done — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9932_story_waiting",
            "2_current",
            "---\nname: Waiting\ndepends_on: [9999]\n---\n",
            crate::db::ItemMeta::from_yaml("---\nname: Waiting\ndepends_on: [9999]\n---\n"),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        let agents = pool.agents.lock().unwrap();
        // Filter to only agents assigned to our specific story to avoid
        // interference from other tests sharing the global CRDT store.
        let assigned_to_our_story = agents.iter().any(|(key, a)| {
            key.contains("9932_story_waiting")
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !assigned_to_our_story,
            "story with unmet deps should not be auto-assigned"
        );
    }

    /// Story 484: auto_assign must pick up a story once its dependency lands in 5_done.
    #[tokio::test]
    async fn auto_assign_picks_up_story_after_dep_completes() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Seed stories via CRDT (the only source of truth).
        crate::db::ensure_content_store();
        // Dep 999 is now done.
        crate::db::write_item_with_content(
            "999_story_dep",
            "5_done",
            "---\nname: Dep\n---\n",
            crate::db::ItemMeta::from_yaml("---\nname: Dep\n---\n"),
        );
        // Story 10 depends on 999 which is done.
        crate::db::write_item_with_content(
            "10_story_unblocked",
            "2_current",
            "---\nname: Unblocked\ndepends_on: [999]\n---\n",
            crate::db::ItemMeta::from_yaml("---\nname: Unblocked\ndepends_on: [999]\n---\n"),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        let agents = pool.agents.lock().unwrap();
        let has_pending = agents
            .values()
            .any(|a| matches!(a.status, AgentStatus::Pending | AgentStatus::Running));
        assert!(
            has_pending,
            "story with all deps done should be auto-assigned"
        );
    }

    // ── Bug 497: backlog dependency promotion ───────────────────────────────
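
    // Promotion rule exercised below (a sketch; the real logic lives in
    // `promote_ready_backlog_stories`): a backlog story moves to 2_current only
    // when it declares `depends_on` and every listed id sits in 5_done or
    // 6_archived. In rough pseudocode (`dep_is_satisfied` is an illustrative
    // name, not a real helper):
    //
    //     let ready = !depends_on.is_empty()
    //         && depends_on.iter().all(|dep| dep_is_satisfied(dep));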

    /// Stories in backlog with `depends_on` that are all in 5_done must be
    /// promoted to 2_current when auto_assign_available_work runs.
    #[tokio::test]
    async fn auto_assign_promotes_backlog_story_when_all_deps_done() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        let current = sk.join("work/2_current");
        let done = sk.join("work/5_done");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::create_dir_all(&current).unwrap();
        std::fs::create_dir_all(&done).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Dep 1 is done.
        crate::db::ensure_content_store();
        let dep_content = "---\nname: Dep\n---\n";
        std::fs::write(done.join("1_story_dep.md"), dep_content).unwrap();
        crate::db::write_content("1_story_dep", dep_content);
        // Story B depends on story 1.
        let story_b_content = "---\nname: B\ndepends_on: [1]\n---\n";
        std::fs::write(backlog.join("2_story_b.md"), story_b_content).unwrap();
        crate::db::write_content("2_story_b", story_b_content);

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        // The lifecycle function updates the content store (not the filesystem),
        // so verify the move via the DB.
        let content = crate::db::read_content("2_story_b")
            .expect("story B should be in content store after promotion");
        assert!(
            content.contains("name: B"),
            "story B content should be preserved after promotion"
        );
    }

    /// Stories in backlog with unmet dependencies must NOT be promoted.
    #[tokio::test]
    async fn auto_assign_does_not_promote_backlog_story_with_unmet_deps() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Dep 99 is NOT done.
        std::fs::write(
            backlog.join("5_story_c.md"),
            "---\nname: C\ndepends_on: [99]\n---\n",
        )
        .unwrap();

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        assert!(
            backlog.join("5_story_c.md").exists(),
            "story C should stay in 1_backlog/ when dep 99 is not done"
        );
    }

    // ── Bug 503: archived-dep promotion visibility ─────────────────────────────

    /// A backlog story whose dep is in 6_archived must still be promoted
    /// (archived = satisfied), and the promotion must not silently skip the
    /// warning path. This test verifies that the promotion itself fires; the
    /// warning is a slog_warn! side effect that we can't easily assert on in
    /// unit tests.
    #[tokio::test]
    async fn auto_assign_promotes_backlog_story_when_dep_is_archived() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        let current = sk.join("work/2_current");
        let archived = sk.join("work/6_archived");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::create_dir_all(&current).unwrap();
        std::fs::create_dir_all(&archived).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Dep 490 is in 6_archived (e.g. a CRDT spike that was archived/superseded).
        crate::db::ensure_content_store();
        let dep_content = "---\nname: CRDT Spike\n---\n";
        std::fs::write(archived.join("490_spike_crdt.md"), dep_content).unwrap();
        crate::db::write_content("490_spike_crdt", dep_content);
        // Story 478 depends on 490 (the archived spike).
        let story_content = "---\nname: Dependent\ndepends_on: [490]\n---\n";
        std::fs::write(backlog.join("478_story_dependent.md"), story_content).unwrap();
        crate::db::write_content("478_story_dependent", story_content);

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        // Story 478 must be promoted even though dep 490 is only in 6_archived
        // (not in 5_done), because archived = satisfied. The lifecycle function
        // updates the content store, so verify via the DB.
        let content = crate::db::read_content("478_story_dependent")
            .expect("story 478 should be in content store after promotion");
        assert!(
            content.contains("name: Dependent"),
            "story 478 content should be preserved after promotion"
        );
    }

    /// Stories in backlog with NO depends_on must NOT be auto-promoted.
    #[tokio::test]
    async fn auto_assign_does_not_promote_backlog_story_without_deps() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        std::fs::write(
            backlog.join("7_story_nodeps.md"),
            "---\nname: No deps\n---\n",
        )
        .unwrap();

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        assert!(
            backlog.join("7_story_nodeps.md").exists(),
            "story with no depends_on should stay in 1_backlog/ — human schedules it"
        );
    }

    // ── Story 827: auto-spawn mergemaster on content conflict ─────────────────
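
    // Spawn predicate exercised below (a sketch; the real checks live in
    // `assign_merge_stage`): mergemaster is auto-spawned only when the story's
    // `merge_failure` records a content conflict, the story is not `blocked`,
    // and `mergemaster_attempted` has not already been set. Roughly:
    //
    //     let spawn = merge_failure.contains("CONFLICT (content)")
    //         && !blocked
    //         && !mergemaster_attempted;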

    /// A story in 4_merge_failure with a content-conflict merge_failure and no
    /// mergemaster_attempted flag must trigger an auto-spawn of mergemaster.
    #[tokio::test]
    async fn auto_assign_spawns_mergemaster_for_content_conflict() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
        )
        .unwrap();
        crate::crdt_state::init_for_test();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9860_story_conflict",
            "4_merge_failure",
            "---\nname: Conflict\nmerge_failure: \"CONFLICT (content): server/src/lib.rs\"\n---\n",
            crate::db::ItemMeta::from_yaml(
                "---\nname: Conflict\nmerge_failure: \"CONFLICT (content): server/src/lib.rs\"\n---\n",
            ),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let mergemaster_spawned = agents.iter().any(|(key, a)| {
            key.contains("9860_story_conflict")
                && a.agent_name == "mergemaster"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            mergemaster_spawned,
            "mergemaster should be spawned for a content-conflict story"
        );
    }

    /// A story with merge_failure containing only "nothing to commit" must NOT
    /// auto-spawn mergemaster.
    #[tokio::test]
    async fn auto_assign_does_not_spawn_mergemaster_for_non_conflict_failure() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
        )
        .unwrap();
        crate::crdt_state::init_for_test();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9861_story_nothing",
            "4_merge_failure",
            "---\nname: Nothing\nmerge_failure: \"nothing to commit, working tree clean\"\n---\n",
            crate::db::ItemMeta::from_yaml(
                "---\nname: Nothing\nmerge_failure: \"nothing to commit, working tree clean\"\n---\n",
            ),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let mergemaster_spawned = agents.iter().any(|(key, a)| {
            key.contains("9861_story_nothing")
                && a.agent_name == "mergemaster"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !mergemaster_spawned,
            "mergemaster must not be spawned for non-conflict failures"
        );
    }

    /// A story in 4_merge with blocked: true must NOT auto-spawn mergemaster
    /// even when it has an unresolved content-conflict merge_failure and
    /// mergemaster_attempted is still false.
    #[tokio::test]
    async fn auto_assign_does_not_spawn_mergemaster_for_blocked_story() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
        )
        .unwrap();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9863_story_blocked_conflict",
            "4_merge",
            "---\nname: Blocked conflict\nmerge_failure: \"CONFLICT (content): foo.rs\"\nblocked: true\n---\n",
            crate::db::ItemMeta::from_yaml(
                "---\nname: Blocked conflict\nmerge_failure: \"CONFLICT (content): foo.rs\"\nblocked: true\n---\n",
            ),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let mergemaster_spawned = agents.iter().any(|(key, a)| {
            key.contains("9863_story_blocked_conflict")
                && a.agent_name == "mergemaster"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !mergemaster_spawned,
            "mergemaster must not be spawned for a blocked story"
        );
    }

    /// A story with mergemaster_attempted: true must NOT auto-spawn again, even
    /// if the merge_failure still contains a content conflict.
    #[tokio::test]
    async fn auto_assign_does_not_respawn_mergemaster_when_already_attempted() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"mergemaster\"\nstage = \"mergemaster\"\n",
        )
        .unwrap();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9862_story_attempted",
            "4_merge",
            "---\nname: Already tried\nmerge_failure: \"CONFLICT (content): foo.rs\"\nmergemaster_attempted: true\n---\n",
            crate::db::ItemMeta::from_yaml(
                "---\nname: Already tried\nmerge_failure: \"CONFLICT (content): foo.rs\"\nmergemaster_attempted: true\n---\n",
            ),
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let mergemaster_spawned = agents.iter().any(|(key, a)| {
            key.contains("9862_story_attempted")
                && a.agent_name == "mergemaster"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !mergemaster_spawned,
            "mergemaster must not re-spawn when mergemaster_attempted is true"
        );
    }
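
    // TOCTOU (time-of-check to time-of-use): both scans may observe coder-1 as
    // free before either records an assignment; the pool must serialize the
    // claim so the same agent is never handed two stories at once.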

    /// Two concurrent auto_assign_available_work calls must not assign the same
    /// agent to two stories simultaneously. After both complete, at most one
    /// Pending/Running entry must exist per agent name.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn toctou_concurrent_auto_assign_no_duplicate_agent_assignments() {
        use std::fs;
        use std::sync::Arc;

        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path().to_path_buf();

        let sk_dir = root.join(".huskies");
        // Two stories waiting in 2_current, one coder agent.
        fs::create_dir_all(sk_dir.join("work/2_current")).unwrap();
        fs::write(
            sk_dir.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/86_story_foo.md"),
            "---\nname: Foo\n---\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/130_story_bar.md"),
            "---\nname: Bar\n---\n",
        )
        .unwrap();

        let pool = Arc::new(AgentPool::new_test(3099));

        // Run two concurrent auto_assign calls.
        let pool1 = pool.clone();
        let root1 = root.clone();
        let t1 = tokio::spawn(async move { pool1.auto_assign_available_work(&root1).await });

        let pool2 = pool.clone();
        let root2 = root.clone();
        let t2 = tokio::spawn(async move { pool2.auto_assign_available_work(&root2).await });

        let _ = tokio::join!(t1, t2);

        // At most one Pending/Running entry should exist for coder-1.
        let agents = pool.agents.lock().unwrap();
        let active_coder_count = agents
            .values()
            .filter(|a| {
                a.agent_name == "coder-1"
                    && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
            })
            .count();

        assert!(
            active_coder_count <= 1,
            "coder-1 must not be assigned to more than one story simultaneously; \
             found {active_coder_count} active entries"
        );
    }
}