//! Auto-assign: scan pipeline stages and dispatch free agents to unassigned stories.

use crate::config::ProjectConfig;
use crate::slog;
use crate::slog_error;
use crate::slog_warn;
use crate::worktree;
use std::path::Path;

use super::super::super::PipelineStage;
use super::super::AgentPool;
use super::scan::{
    count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free,
    is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
    check_archived_dependencies, has_merge_failure, has_review_hold, has_unmet_dependencies,
    is_story_blocked, is_story_frozen, read_story_front_matter_agent,
};

impl AgentPool {
    /// Scan `1_backlog/` and promote any story whose `depends_on` are all met.
    ///
    /// A story is only promoted if it explicitly lists `depends_on` AND every
    /// listed dependency has reached `5_done` or `6_archived`. Stories with no
    /// `depends_on` are left in the backlog for human scheduling.
    ///
    /// **Archived dep semantics:** a dep in `6_archived` counts as satisfied (since
    /// stories auto-sweep from `5_done` to `6_archived` after 4 hours, and the
    /// dependent story would normally already be promoted by then). However, if a
    /// dep was already in `6_archived` when the dependent story was created (e.g. it
    /// was abandoned/superseded before the dependent existed), a prominent warning is
    /// logged so the user can see the promotion was triggered by an archived dep, not
    /// a clean completion.
    fn promote_ready_backlog_stories(&self, project_root: &Path) {
        use crate::io::story_metadata::parse_front_matter;

        let items = scan_stage_items(project_root, "1_backlog");
        for story_id in &items {
            // Only promote stories that explicitly declare dependencies.
            let contents = crate::db::read_content(story_id);
            let has_deps = contents
                .and_then(|c| parse_front_matter(&c).ok())
                .and_then(|m| m.depends_on)
                .map(|d| !d.is_empty())
                .unwrap_or(false);
            if !has_deps {
                continue;
            }

            // Check whether any dependencies are still unmet.
            if has_unmet_dependencies(project_root, "1_backlog", story_id) {
                continue;
            }

            // Warn if any deps were satisfied via archive rather than via clean done.
            let archived_deps = check_archived_dependencies(project_root, "1_backlog", story_id);
            if !archived_deps.is_empty() {
                slog_warn!(
                    "[auto-assign] Story '{story_id}' is being promoted because deps \
                     {archived_deps:?} are in 6_archived (not cleanly completed via 5_done). \
                     These deps may have been abandoned or superseded. If this promotion is \
                     unintentional, remove the depends_on or manually move the story back to \
                     1_backlog."
                );
            }

            // All deps met — promote from backlog to current.
            slog!("[auto-assign] Story '{story_id}' deps met; promoting from backlog to current.");
            if let Err(e) = crate::agents::lifecycle::move_story_to_current(project_root, story_id)
            {
                slog!("[auto-assign] Failed to promote '{story_id}' to current: {e}");
            }
        }
    }

    pub async fn auto_assign_available_work(&self, project_root: &Path) {
        // Promote any backlog stories whose dependencies are all done.
        self.promote_ready_backlog_stories(project_root);

        let config = match ProjectConfig::load(project_root) {
            Ok(c) => c,
            Err(e) => {
                slog_warn!("[auto-assign] Failed to load project config: {e}");
                return;
            }
        };

        // Process each active pipeline stage in order.
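        // Each entry pairs a work directory with the agent stage that services
        // it; stages are visited in pipeline order, so coder work is scanned
        // before QA, and QA before merges.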
        let stages: [(&str, PipelineStage); 3] = [
            ("2_current", PipelineStage::Coder),
            ("3_qa", PipelineStage::Qa),
            ("4_merge", PipelineStage::Mergemaster),
        ];
        for (stage_dir, stage) in &stages {
            let items = scan_stage_items(project_root, stage_dir);
            if items.is_empty() {
                continue;
            }

            for story_id in &items {
                // Items marked with review_hold (e.g. spikes after QA passes) stay
                // in their current stage for human review — don't auto-assign agents.
                if has_review_hold(project_root, stage_dir, story_id) {
                    continue;
                }

                // Skip frozen stories — pipeline advancement is suspended.
                if is_story_frozen(project_root, stage_dir, story_id) {
                    slog!("[auto-assign] Story '{story_id}' is frozen; skipping until unfrozen.");
                    continue;
                }

                // Skip blocked stories (retry limit exceeded).
                if is_story_blocked(project_root, stage_dir, story_id) {
                    continue;
                }

                // Skip stories whose dependencies haven't landed yet.
                if has_unmet_dependencies(project_root, stage_dir, story_id) {
                    slog!(
                        "[auto-assign] Story '{story_id}' has unmet dependencies; skipping until deps are done."
                    );
                    continue;
                }

                // Skip stories in 4_merge/ that already have a reported merge failure.
                // These need human intervention — auto-assigning a new mergemaster
                // would just waste tokens on the same broken merge.
                if *stage == PipelineStage::Mergemaster
                    && has_merge_failure(project_root, stage_dir, story_id)
                {
                    continue;
                }

                // AC6: Detect empty-diff stories in 4_merge/ before starting a
                // mergemaster. If the worktree has no commits on the feature branch,
                // write a merge_failure and block the story immediately.
                if *stage == PipelineStage::Mergemaster
                    && let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
                    && !crate::agents::gates::worktree_has_committed_work(&wt_path)
                {
                    slog_warn!(
                        "[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
                         on feature branch. Writing merge_failure and blocking."
                    );
                    let empty_diff_reason = "Feature branch has no code changes — the coder agent \
                                             did not produce any commits.";

                    // Write merge_failure and blocked to content store.
                    if let Some(contents) = crate::db::read_content(story_id) {
                        let updated = crate::io::story_metadata::write_merge_failure_in_content(
                            &contents,
                            empty_diff_reason,
                        );
                        let blocked =
                            crate::io::story_metadata::write_blocked_in_content(&updated);
                        crate::db::write_content(story_id, &blocked);
                        crate::db::write_item_with_content(story_id, stage_dir, &blocked);
                    } else {
                        // Fallback: filesystem.
                        let story_path = project_root
                            .join(".huskies/work")
                            .join(stage_dir)
                            .join(format!("{story_id}.md"));
                        let _ = crate::io::story_metadata::write_merge_failure(
                            &story_path,
                            empty_diff_reason,
                        );
                        let _ = crate::io::story_metadata::write_blocked(&story_path);
                    }
                    let _ = self
                        .watcher_tx
                        .send(crate::io::watcher::WatcherEvent::StoryBlocked {
                            story_id: story_id.to_string(),
                            reason: empty_diff_reason.to_string(),
                        });
                    continue;
                }

                // Note: the agents lock below is re-acquired for each story so
                // that state changes from start_agent calls earlier in the same
                // pass are visible.
                let preferred_agent =
                    read_story_front_matter_agent(project_root, stage_dir, story_id);

                // Check max_coders limit for the Coder stage before agent selection.
                // If the pool is full, all remaining items in this stage wait.
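                // Sketch of the knob this reads, assuming it is set in
                // project.toml (only the parsed `config.max_coders` field is
                // visible from this module; the exact TOML key is an
                // assumption):
                //
                //     max_coders = 2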
                if *stage == PipelineStage::Coder
                    && let Some(max) = config.max_coders
                {
                    let agents_lock = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let active = count_active_agents_for_stage(&config, &agents_lock, stage);
                    if active >= max {
                        slog!(
                            "[auto-assign] Coder pool full ({active}/{max}); remaining items in {stage_dir}/ will wait."
                        );
                        break;
                    }
                }

                // Outcome: (already_assigned, free_agent, preferred_busy, stage_mismatch)
                // preferred_busy=true means the story has a specific agent requested but it is
                // currently occupied — the story should wait rather than fall back.
                // stage_mismatch=true means the preferred agent's stage doesn't match the
                // pipeline stage, so we fell back to a generic stage agent.
                let (already_assigned, free_agent, preferred_busy, stage_mismatch) = {
                    let agents = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let assigned = is_story_assigned_for_stage(&config, &agents, story_id, stage);
                    if assigned {
                        (true, None, false, false)
                    } else if let Some(ref pref) = preferred_agent {
                        // Story has a front-matter agent preference.
                        // Verify the preferred agent's stage matches the current
                        // pipeline stage — a coder shouldn't be assigned to QA.
                        let pref_stage_matches = config
                            .find_agent(pref)
                            .map(|cfg| super::super::super::agent_config_stage(cfg) == *stage)
                            .unwrap_or(false);
                        if !pref_stage_matches {
                            // Stage mismatch — fall back to any free agent for this stage.
                            let free = find_free_agent_for_stage(&config, &agents, stage)
                                .map(|s| s.to_string());
                            (false, free, false, true)
                        } else if is_agent_free(&agents, pref) {
                            (false, Some(pref.clone()), false, false)
                        } else {
                            (false, None, true, false)
                        }
                    } else {
                        let free = find_free_agent_for_stage(&config, &agents, stage)
                            .map(|s| s.to_string());
                        (false, free, false, false)
                    }
                };

                if already_assigned {
                    // Story already has an active agent — skip silently.
                    continue;
                }
                if preferred_busy {
                    // The story requests a specific agent that is currently busy.
                    // Do not fall back to a different agent; let this story wait.
                    slog!(
                        "[auto-assign] Preferred agent '{}' busy for '{story_id}'; story will wait.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                    continue;
                }
                if stage_mismatch {
                    slog!(
                        "[auto-assign] Preferred agent '{}' stage mismatch for '{story_id}' in {stage_dir}/; falling back to stage-appropriate agent.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                }

                match free_agent {
                    Some(agent_name) => {
                        slog!(
                            "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
                        );
                        if let Err(e) = self
                            .start_agent(project_root, story_id, Some(&agent_name), None, None)
                            .await
                        {
                            slog!(
                                "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
                            );
                        }
                    }
                    None => {
                        // No free agents of this type — stop scanning this stage.
                        slog!(
                            "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
                            stage
                        );
                        break;
                    }
                }
            }
        }
    }
}

// ── Tests ──────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::super::super::AgentPool;
    use crate::agents::AgentStatus;
    use crate::io::watcher::WatcherEvent;
    use tokio::sync::broadcast;

    /// Story 203: auto_assign_available_work must detect a story in 2_current/
    /// with no active agent and start an agent for it.
    #[tokio::test]
    async fn auto_assign_picks_up_story_queued_in_current() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Place the story in 2_current/ via CRDT (the only source of truth).
        crate::db::ensure_content_store();
        crate::db::write_item_with_content("story-3", "2_current", "---\nname: Story 3\n---\n");

        let pool = AgentPool::new_test(3001);

        // No agents are running — coder-1 is free.
        // auto_assign will try to call start_agent, which will attempt to create
        // a worktree (will fail without a git repo) — that is fine. We only need
        // to verify the agent is registered as Pending before the background
        // task eventually fails.
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        let has_pending = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            has_pending,
            "auto_assign should have started coder-1 for story-3, but pool is empty"
        );
    }

    /// Story 265: auto_assign_available_work must skip spikes in 3_qa/ that
    /// have review_hold: true set in their front matter.
    #[tokio::test]
    async fn auto_assign_skips_spikes_with_review_hold() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();

        // Create project.toml with a QA agent.
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"qa\"\nstage = \"qa\"\n",
        )
        .unwrap();

        // Put a spike in 3_qa/ with review_hold: true.
        let qa_dir = root.join(".huskies/work/3_qa");
        std::fs::create_dir_all(&qa_dir).unwrap();
        std::fs::write(
            qa_dir.join("20_spike_test.md"),
            "---\nname: Test Spike\nreview_hold: true\n---\n# Spike\n",
        )
        .unwrap();

        let (watcher_tx, _) = broadcast::channel::<WatcherEvent>(4);
        let pool = AgentPool::new(3001, watcher_tx);
        pool.auto_assign_available_work(root).await;

        // No agent should have been started for the spike.
        let agents = pool.agents.lock().unwrap();
        assert!(
            agents.is_empty(),
            "No agents should be assigned to a spike with review_hold"
        );
    }

    // ── Story 279: auto-assign respects agent stage from front matter ──────

    /// When a story in 3_qa/ has `agent: coder-1` in its front matter but
    /// coder-1 is a coder-stage agent, auto-assign must NOT assign coder-1.
    /// Instead it should fall back to a free QA-stage agent.
    #[tokio::test]
    async fn auto_assign_ignores_coder_preference_when_story_is_in_qa_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"qa-1\"\nstage = \"qa\"\n",
        )
        .unwrap();

        // Story in 3_qa/ with a preferred coder-stage agent — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9930_story_qa1",
            "3_qa",
            "---\nname: QA Story\nagent: coder-1\n---\n",
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // coder-1 must NOT have been assigned to the QA story (wrong stage).
        let coder_assigned_to_qa = agents.iter().any(|(key, a)| {
            key.contains("9930_story_qa1")
                && a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder_assigned_to_qa,
            "coder-1 should not be assigned to a QA-stage story"
        );

        // qa-1 should have been assigned instead.
        let qa_assigned = agents.iter().any(|(key, a)| {
            key.contains("9930_story_qa1")
                && a.agent_name == "qa-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            qa_assigned,
            "qa-1 should be assigned as fallback for the QA-stage story"
        );
    }

    /// When a story in 2_current/ has `agent: coder-1` in its front matter and
    /// coder-1 is a coder-stage agent, auto-assign must respect the preference
    /// and assign coder-1 (not fall back to some other coder).
    #[tokio::test]
    async fn auto_assign_respects_coder_preference_when_story_is_in_current_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(sk.join("work/2_current")).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"coder-2\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Story in 2_current/ with a preferred coder-1 agent.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "story-pref",
            "2_current",
            "---\nname: Coder Story\nagent: coder-1\n---\n",
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // coder-1 should have been picked (it matches the stage and is preferred).
        let coder1_assigned = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            coder1_assigned,
            "coder-1 should be assigned when it matches the stage and is preferred"
        );

        // coder-2 must NOT be assigned (not preferred).
        let coder2_assigned = agents.values().any(|a| {
            a.agent_name == "coder-2"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder2_assigned,
            "coder-2 should not be assigned when coder-1 is explicitly preferred"
        );
    }

    /// When the preferred agent's stage mismatches and no other agent of the
    /// correct stage is available, auto-assign must not start any agent for that
    /// story (no panic, no error).
    #[tokio::test]
    async fn auto_assign_stage_mismatch_with_no_fallback_starts_no_agent() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        // Only a coder agent is configured — no QA agent exists.
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Story in 3_qa/ requests coder-1 (wrong stage) and no QA agent exists — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9931_story_noqa",
            "3_qa",
            "---\nname: QA Story No Agent\nagent: coder-1\n---\n",
        );

        let pool = AgentPool::new_test(3001);
        // Must not panic.
        pool.auto_assign_available_work(tmp.path()).await;

        let agents = pool.agents.lock().unwrap();
        // No agent should be assigned to the specific QA story (coder-1 may
        // be assigned to leaked 2_current items from the global CRDT store).
        let assigned_to_qa_story = agents.iter().any(|(key, a)| {
            key.contains("9931_story_noqa")
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !assigned_to_qa_story,
            "No agent should be started when no stage-appropriate agent is available"
        );
    }

    /// Story 484: auto_assign must skip stories whose depends_on entries are not
    /// yet in 5_done or 6_archived.
    #[tokio::test]
    async fn auto_assign_skips_stories_with_unmet_dependencies() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Story 9932 depends on 9999 which is not done — write via CRDT.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9932_story_waiting",
            "2_current",
            "---\nname: Waiting\ndepends_on: [9999]\n---\n",
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        let agents = pool.agents.lock().unwrap();
        // Filter to only agents assigned to our specific story to avoid
        // interference from other tests sharing the global CRDT store.
        let assigned_to_our_story = agents.iter().any(|(key, a)| {
            key.contains("9932_story_waiting")
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !assigned_to_our_story,
            "story with unmet deps should not be auto-assigned"
        );
    }

    /// Story 484: auto_assign must pick up a story once its dependency lands in 5_done.
    #[tokio::test]
    async fn auto_assign_picks_up_story_after_dep_completes() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Seed stories via CRDT (the only source of truth).
        crate::db::ensure_content_store();
        // Dep 999 is now done.
        crate::db::write_item_with_content("999_story_dep", "5_done", "---\nname: Dep\n---\n");
        // Story 10 depends on 999 which is done.
        crate::db::write_item_with_content(
            "10_story_unblocked",
            "2_current",
            "---\nname: Unblocked\ndepends_on: [999]\n---\n",
        );

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        let agents = pool.agents.lock().unwrap();
        let has_pending = agents.values().any(|a| {
            matches!(
                a.status,
                crate::agents::AgentStatus::Pending | crate::agents::AgentStatus::Running
            )
        });
        assert!(
            has_pending,
            "story with all deps done should be auto-assigned"
        );
    }

    // ── Bug 497: backlog dependency promotion ───────────────────────────────

    /// Stories in backlog with `depends_on` that are all in 5_done must be
    /// promoted to 2_current when auto_assign_available_work runs.
    #[tokio::test]
    async fn auto_assign_promotes_backlog_story_when_all_deps_done() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        let current = sk.join("work/2_current");
        let done = sk.join("work/5_done");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::create_dir_all(&current).unwrap();
        std::fs::create_dir_all(&done).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Dep 1 is done.
        crate::db::ensure_content_store();
        let dep_content = "---\nname: Dep\n---\n";
        std::fs::write(done.join("1_story_dep.md"), dep_content).unwrap();
        crate::db::write_content("1_story_dep", dep_content);

        // Story B depends on story 1.
        let story_b_content = "---\nname: B\ndepends_on: [1]\n---\n";
        std::fs::write(backlog.join("2_story_b.md"), story_b_content).unwrap();
        crate::db::write_content("2_story_b", story_b_content);

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        // The lifecycle function updates the content store (not the filesystem),
        // so verify the move via the DB.
        let content = crate::db::read_content("2_story_b")
            .expect("story B should be in content store after promotion");
        assert!(
            content.contains("name: B"),
            "story B content should be preserved after promotion"
        );
    }

    /// Stories in backlog with unmet dependencies must NOT be promoted.
    #[tokio::test]
    async fn auto_assign_does_not_promote_backlog_story_with_unmet_deps() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Dep 99 is NOT done.
        std::fs::write(
            backlog.join("5_story_c.md"),
            "---\nname: C\ndepends_on: [99]\n---\n",
        )
        .unwrap();

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        assert!(
            backlog.join("5_story_c.md").exists(),
            "story C should stay in 1_backlog/ when dep 99 is not done"
        );
    }

    // ── Bug 503: archived-dep promotion visibility ──────────────────────────

    /// A backlog story whose dep is in 6_archived must still be promoted
    /// (archived = satisfied), but the promotion must not silently skip the warning
    /// path. This test verifies the promotion itself fires; the warning is a
    /// slog_warn! side-effect that we can't easily assert on in unit tests.
    #[tokio::test]
    async fn auto_assign_promotes_backlog_story_when_dep_is_archived() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        let current = sk.join("work/2_current");
        let archived = sk.join("work/6_archived");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::create_dir_all(&current).unwrap();
        std::fs::create_dir_all(&archived).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        // Dep 490 is in 6_archived (e.g. a CRDT spike that was archived/superseded).
        crate::db::ensure_content_store();
        let dep_content = "---\nname: CRDT Spike\n---\n";
        std::fs::write(archived.join("490_spike_crdt.md"), dep_content).unwrap();
        crate::db::write_content("490_spike_crdt", dep_content);

        // Story 478 depends on 490 (the archived spike).
        let story_content = "---\nname: Dependent\ndepends_on: [490]\n---\n";
        std::fs::write(backlog.join("478_story_dependent.md"), story_content).unwrap();
        crate::db::write_content("478_story_dependent", story_content);

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        // Story 478 must be promoted even though dep 490 is only in 6_archived
        // (not in 5_done), because archived = satisfied. The lifecycle function
        // updates the content store, so verify via the DB.
        let content = crate::db::read_content("478_story_dependent")
            .expect("story 478 should be in content store after promotion");
        assert!(
            content.contains("name: Dependent"),
            "story 478 content should be preserved after promotion"
        );
    }

    /// Stories in backlog with NO depends_on must NOT be auto-promoted.
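    /// Promotion is opt-in: a story that declares no `depends_on` stays in
    /// 1_backlog/ until a human schedules it.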
    #[tokio::test]
    async fn auto_assign_does_not_promote_backlog_story_without_deps() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let sk = root.join(".huskies");
        let backlog = sk.join("work/1_backlog");
        std::fs::create_dir_all(&backlog).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();

        std::fs::write(
            backlog.join("7_story_nodeps.md"),
            "---\nname: No deps\n---\n",
        )
        .unwrap();

        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(root).await;

        assert!(
            backlog.join("7_story_nodeps.md").exists(),
            "story with no depends_on should stay in 1_backlog/ — human schedules it"
        );
    }

    /// Two concurrent auto_assign_available_work calls must not assign the same
    /// agent to two stories simultaneously. After both complete, at most one
    /// Pending/Running entry must exist per agent name.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn toctou_concurrent_auto_assign_no_duplicate_agent_assignments() {
        use std::fs;
        use std::sync::Arc;

        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path().to_path_buf();
        let sk_dir = root.join(".huskies");

        // Two stories waiting in 2_current, one coder agent.
        fs::create_dir_all(sk_dir.join("work/2_current")).unwrap();
        fs::write(
            sk_dir.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/86_story_foo.md"),
            "---\nname: Foo\n---\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/130_story_bar.md"),
            "---\nname: Bar\n---\n",
        )
        .unwrap();

        let pool = Arc::new(AgentPool::new_test(3099));

        // Run two concurrent auto_assign calls.
        let pool1 = pool.clone();
        let root1 = root.clone();
        let t1 = tokio::spawn(async move { pool1.auto_assign_available_work(&root1).await });
        let pool2 = pool.clone();
        let root2 = root.clone();
        let t2 = tokio::spawn(async move { pool2.auto_assign_available_work(&root2).await });
        let _ = tokio::join!(t1, t2);

        // At most one Pending/Running entry should exist for coder-1.
        let agents = pool.agents.lock().unwrap();
        let active_coder_count = agents
            .values()
            .filter(|a| {
                a.agent_name == "coder-1"
                    && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
            })
            .count();
        assert!(
            active_coder_count <= 1,
            "coder-1 must not be assigned to more than one story simultaneously; \
             found {active_coder_count} active entries"
        );
    }
}