huskies/server/src/agents/pool/auto_assign/auto_assign.rs

//! Auto-assign: scan pipeline stages and dispatch free agents to unassigned stories.
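//!
//! Stage directories map to agent stages in pipeline order: `2_current/` →
//! coder, `3_qa/` → QA, `4_merge/` → mergemaster. Agents are declared in
//! `.huskies/project.toml`, and a story may pin a specific agent in the front
//! matter of its `.huskies/work/<stage_dir>/<story_id>.md` file. A minimal
//! sketch of both, in the same shapes the tests at the bottom of this file
//! exercise:
//!
//! ```toml
//! [[agent]]
//! name = "coder-1"
//! stage = "coder"
//! ```
//!
//! ```text
//! ---
//! name: QA Story
//! agent: coder-1
//! ---
//! ```
//!
//! A pinned agent is honored only when its configured stage matches the
//! story's current stage; otherwise assignment falls back to any free agent
//! for that stage.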
use std::path::Path;

use crate::config::ProjectConfig;
use crate::slog;
use crate::slog_error;
use crate::slog_warn;
use crate::worktree;

use super::super::super::PipelineStage;
use super::super::AgentPool;
use super::scan::{
    count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free,
    is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
    has_merge_failure, has_review_hold, is_story_blocked, read_story_front_matter_agent,
};

impl AgentPool {
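    /// Scan each active stage directory and start a free agent for every
    /// unassigned story. Stories with a review hold, a blocked marker, or an
    /// already-reported merge failure are skipped; front-matter agent
    /// preferences and the `max_coders` limit are honored as documented
    /// inline below.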
    pub async fn auto_assign_available_work(&self, project_root: &Path) {
        let config = match ProjectConfig::load(project_root) {
            Ok(c) => c,
            Err(e) => {
                slog_warn!("[auto-assign] Failed to load project config: {e}");
                return;
            }
        };
        // Process each active pipeline stage in order.
        let stages: [(&str, PipelineStage); 3] = [
            ("2_current", PipelineStage::Coder),
            ("3_qa", PipelineStage::Qa),
            ("4_merge", PipelineStage::Mergemaster),
        ];
        for (stage_dir, stage) in &stages {
            let items = scan_stage_items(project_root, stage_dir);
            if items.is_empty() {
                continue;
            }
            for story_id in &items {
                // Items marked with review_hold (e.g. spikes after QA passes) stay
                // in their current stage for human review — don't auto-assign agents.
                if has_review_hold(project_root, stage_dir, story_id) {
                    continue;
                }
                // Skip blocked stories (retry limit exceeded).
                if is_story_blocked(project_root, stage_dir, story_id) {
                    continue;
                }
                // Skip stories in 4_merge/ that already have a reported merge failure.
                // These need human intervention — auto-assigning a new mergemaster
                // would just waste tokens on the same broken merge.
                if *stage == PipelineStage::Mergemaster
                    && has_merge_failure(project_root, stage_dir, story_id)
                {
                    continue;
                }
                // AC6: Detect empty-diff stories in 4_merge/ before starting a
                // mergemaster. If the worktree has no commits on the feature branch,
                // write a merge_failure and block the story immediately.
                if *stage == PipelineStage::Mergemaster
                    && let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
                    && !crate::agents::gates::worktree_has_committed_work(&wt_path)
                {
                    slog_warn!(
                        "[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
                         on feature branch. Writing merge_failure and blocking."
                    );
                    let story_path = project_root
                        .join(".huskies/work")
                        .join(stage_dir)
                        .join(format!("{story_id}.md"));
                    let empty_diff_reason = "Feature branch has no code changes — the coder agent \
                                             did not produce any commits.";
                    let _ = crate::io::story_metadata::write_merge_failure(
                        &story_path,
                        empty_diff_reason,
                    );
                    let _ = crate::io::story_metadata::write_blocked(&story_path);
                    let _ = self
                        .watcher_tx
                        .send(crate::io::watcher::WatcherEvent::StoryBlocked {
                            story_id: story_id.to_string(),
                            reason: empty_diff_reason.to_string(),
                        });
                    continue;
                }
                // Read any agent preference pinned in the story's front matter.
                let preferred_agent =
                    read_story_front_matter_agent(project_root, stage_dir, story_id);
                // Check the max_coders limit for the Coder stage before agent
                // selection; if the pool is full, all remaining items in this
                // stage wait. The agents lock is re-acquired for each story so
                // that this pass sees state changes from earlier start_agent calls.
                if *stage == PipelineStage::Coder
                    && let Some(max) = config.max_coders
                {
                    let agents_lock = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let active = count_active_agents_for_stage(&config, &agents_lock, stage);
                    if active >= max {
                        slog!(
                            "[auto-assign] Coder pool full ({active}/{max}); remaining items in {stage_dir}/ will wait."
                        );
                        break;
                    }
                }
                // Outcome: (already_assigned, chosen_agent, preferred_busy, stage_mismatch)
                // preferred_busy=true means the story has a specific agent requested but it is
                // currently occupied — the story should wait rather than fall back.
                // stage_mismatch=true means the preferred agent's stage doesn't match the
                // pipeline stage, so we fell back to a generic stage agent.
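                // In short:
                //   no preference         → first free agent for the stage, else wait
                //   pref + stage match    → the preferred agent if free, else wait (no fallback)
                //   pref + stage mismatch → first free agent for the stage (logged fallback)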
                let (already_assigned, free_agent, preferred_busy, stage_mismatch) = {
                    let agents = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let assigned = is_story_assigned_for_stage(&config, &agents, story_id, stage);
                    if assigned {
                        (true, None, false, false)
                    } else if let Some(ref pref) = preferred_agent {
                        // Story has a front-matter agent preference.
                        // Verify the preferred agent's stage matches the current
                        // pipeline stage — a coder shouldn't be assigned to QA.
                        let pref_stage_matches = config
                            .find_agent(pref)
                            .map(|cfg| super::super::super::agent_config_stage(cfg) == *stage)
                            .unwrap_or(false);
                        if !pref_stage_matches {
                            // Stage mismatch — fall back to any free agent for this stage.
                            let free = find_free_agent_for_stage(&config, &agents, stage)
                                .map(|s| s.to_string());
                            (false, free, false, true)
                        } else if is_agent_free(&agents, pref) {
                            (false, Some(pref.clone()), false, false)
                        } else {
                            (false, None, true, false)
                        }
                    } else {
                        let free = find_free_agent_for_stage(&config, &agents, stage)
                            .map(|s| s.to_string());
                        (false, free, false, false)
                    }
                };
                if already_assigned {
                    // Story already has an active agent — skip silently.
                    continue;
                }
                if preferred_busy {
                    // The story requests a specific agent that is currently busy.
                    // Do not fall back to a different agent; let this story wait.
                    slog!(
                        "[auto-assign] Preferred agent '{}' busy for '{story_id}'; story will wait.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                    continue;
                }
                if stage_mismatch {
                    slog!(
                        "[auto-assign] Preferred agent '{}' stage mismatch for '{story_id}' in {stage_dir}/; falling back to stage-appropriate agent.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                }
                match free_agent {
                    Some(agent_name) => {
                        slog!(
                            "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
                        );
                        if let Err(e) = self
                            .start_agent(project_root, story_id, Some(&agent_name), None)
                            .await
                        {
                            slog!(
                                "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
                            );
                        }
                    }
                    None => {
                        // No free agents of this type — stop scanning this stage.
                        slog!(
                            "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
                            stage
                        );
                        break;
                    }
                }
            }
        }
    }
}

// ── Tests ──────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::super::super::AgentPool;
    use crate::agents::AgentStatus;
    use crate::io::watcher::WatcherEvent;
    use tokio::sync::broadcast;

    /// Story 203: auto_assign_available_work must detect a story in 2_current/
    /// with no active agent and start an agent for it.
    #[tokio::test]
    async fn auto_assign_picks_up_story_queued_in_current() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        let current = sk.join("work/2_current");
        std::fs::create_dir_all(&current).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Place the story in 2_current/ (simulating the "queued" state).
        std::fs::write(current.join("story-3.md"), "---\nname: Story 3\n---\n").unwrap();
        let pool = AgentPool::new_test(3001);
        // No agents are running — coder-1 is free.
        // auto_assign will try to call start_agent, which will attempt to create
        // a worktree (will fail without a git repo) — that is fine. We only need
        // to verify the agent is registered as Pending before the background
        // task eventually fails.
        pool.auto_assign_available_work(tmp.path()).await;
        let agents = pool.agents.lock().unwrap();
        let has_pending = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            has_pending,
            "auto_assign should have started coder-1 for story-3, but pool is empty"
        );
    }

    /// Story 265: auto_assign_available_work must skip spikes in 3_qa/ that
    /// have review_hold: true set in their front matter.
    #[tokio::test]
    async fn auto_assign_skips_spikes_with_review_hold() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        // Create project.toml with a QA agent.
        let sk = root.join(".huskies");
        std::fs::create_dir_all(&sk).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"qa-1\"\nstage = \"qa\"\n",
        )
        .unwrap();
        // Put a spike in 3_qa/ with review_hold: true.
        let qa_dir = root.join(".huskies/work/3_qa");
        std::fs::create_dir_all(&qa_dir).unwrap();
        std::fs::write(
            qa_dir.join("20_spike_test.md"),
            "---\nname: Test Spike\nreview_hold: true\n---\n# Spike\n",
        )
        .unwrap();
        let (watcher_tx, _) = broadcast::channel::<WatcherEvent>(4);
        let pool = AgentPool::new(3001, watcher_tx);
        pool.auto_assign_available_work(root).await;
        // No agent should have been started for the spike.
        let agents = pool.agents.lock().unwrap();
        assert!(
            agents.is_empty(),
            "No agents should be assigned to a spike with review_hold"
        );
    }

    // ── Story 279: auto-assign respects agent stage from front matter ──────────

    /// When a story in 3_qa/ has `agent: coder-1` in its front matter but
    /// coder-1 is a coder-stage agent, auto-assign must NOT assign coder-1.
    /// Instead it should fall back to a free QA-stage agent.
    #[tokio::test]
    async fn auto_assign_ignores_coder_preference_when_story_is_in_qa_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        let qa_dir = sk.join("work/3_qa");
        std::fs::create_dir_all(&qa_dir).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"qa-1\"\nstage = \"qa\"\n",
        )
        .unwrap();
        // Story in 3_qa/ with a preferred coder-stage agent.
        std::fs::write(
            qa_dir.join("story-qa1.md"),
            "---\nname: QA Story\nagent: coder-1\n---\n",
        )
        .unwrap();
        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;
        let agents = pool.agents.lock().unwrap();
        // coder-1 must NOT have been assigned (wrong stage for 3_qa/).
        let coder_assigned = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder_assigned,
            "coder-1 should not be assigned to a QA-stage story"
        );
        // qa-1 should have been assigned instead.
        let qa_assigned = agents.values().any(|a| {
            a.agent_name == "qa-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            qa_assigned,
            "qa-1 should be assigned as fallback for the QA-stage story"
        );
    }

    /// When a story in 2_current/ has `agent: coder-1` in its front matter and
    /// coder-1 is a coder-stage agent, auto-assign must respect the preference
    /// and assign coder-1 (not fall back to some other coder).
    #[tokio::test]
    async fn auto_assign_respects_coder_preference_when_story_is_in_current_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        let current_dir = sk.join("work/2_current");
        std::fs::create_dir_all(&current_dir).unwrap();
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n\n\
             [[agent]]\nname = \"coder-2\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Story in 2_current/ with a preferred coder-1 agent.
        std::fs::write(
            current_dir.join("story-pref.md"),
            "---\nname: Coder Story\nagent: coder-1\n---\n",
        )
        .unwrap();
        let pool = AgentPool::new_test(3001);
        pool.auto_assign_available_work(tmp.path()).await;
        let agents = pool.agents.lock().unwrap();
        // coder-1 should have been picked (it matches the stage and is preferred).
        let coder1_assigned = agents.values().any(|a| {
            a.agent_name == "coder-1"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            coder1_assigned,
            "coder-1 should be assigned when it matches the stage and is preferred"
        );
        // coder-2 must NOT be assigned (not preferred).
        let coder2_assigned = agents.values().any(|a| {
            a.agent_name == "coder-2"
                && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
        });
        assert!(
            !coder2_assigned,
            "coder-2 should not be assigned when coder-1 is explicitly preferred"
        );
    }

    /// When the preferred agent's stage doesn't match and no other agent of the
    /// correct stage is available, auto-assign must not start any agent for that
    /// story (no panic, no error).
    #[tokio::test]
    async fn auto_assign_stage_mismatch_with_no_fallback_starts_no_agent() {
        let tmp = tempfile::tempdir().unwrap();
        let sk = tmp.path().join(".huskies");
        let qa_dir = sk.join("work/3_qa");
        std::fs::create_dir_all(&qa_dir).unwrap();
        // Only a coder agent is configured — no QA agent exists.
        std::fs::write(
            sk.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\nstage = \"coder\"\n",
        )
        .unwrap();
        // Story in 3_qa/ requests coder-1 (wrong stage) and no QA agent exists.
        std::fs::write(
            qa_dir.join("story-noqa.md"),
            "---\nname: QA Story No Agent\nagent: coder-1\n---\n",
        )
        .unwrap();
        let pool = AgentPool::new_test(3001);
        // Must not panic.
        pool.auto_assign_available_work(tmp.path()).await;
        let agents = pool.agents.lock().unwrap();
        assert!(
            agents.is_empty(),
            "No agent should be started when no stage-appropriate agent is available"
        );
    }

    /// Two concurrent auto_assign_available_work calls must not assign the same
    /// agent to two stories simultaneously. After both complete, at most one
    /// Pending/Running entry must exist per agent name.
    #[tokio::test(flavor = "multi_thread", worker_threads = 2)]
    async fn toctou_concurrent_auto_assign_no_duplicate_agent_assignments() {
        use std::fs;
        use std::sync::Arc;

        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path().to_path_buf();
        let sk_dir = root.join(".huskies");
        // Two stories waiting in 2_current, one coder agent.
        fs::create_dir_all(sk_dir.join("work/2_current")).unwrap();
        fs::write(
            sk_dir.join("project.toml"),
            "[[agent]]\nname = \"coder-1\"\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/86_story_foo.md"),
            "---\nname: Foo\n---\n",
        )
        .unwrap();
        fs::write(
            sk_dir.join("work/2_current/130_story_bar.md"),
            "---\nname: Bar\n---\n",
        )
        .unwrap();
        let pool = Arc::new(AgentPool::new_test(3099));
        // Run two concurrent auto_assign calls.
        let pool1 = pool.clone();
        let root1 = root.clone();
        let t1 = tokio::spawn(async move { pool1.auto_assign_available_work(&root1).await });
        let pool2 = pool.clone();
        let root2 = root.clone();
        let t2 = tokio::spawn(async move { pool2.auto_assign_available_work(&root2).await });
        let _ = tokio::join!(t1, t2);
        // At most one Pending/Running entry should exist for coder-1.
        let agents = pool.agents.lock().unwrap();
        let active_coder_count = agents
            .values()
            .filter(|a| {
                a.agent_name == "coder-1"
                    && matches!(a.status, AgentStatus::Pending | AgentStatus::Running)
            })
            .count();
        assert!(
            active_coder_count <= 1,
            "coder-1 must not be assigned to more than one story simultaneously; \
             found {active_coder_count} active entries"
        );
    }
}