huskies: merge 833

This commit is contained in:
dave
2026-04-29 09:49:45 +00:00
parent 4ed1fb5110
commit 0403dc9871
5 changed files with 445 additions and 375 deletions
+12 -375
View File
@@ -1,76 +1,19 @@
//! Auto-assign: scan pipeline stages and dispatch free agents to unassigned stories.
use crate::config::ProjectConfig;
use crate::slog;
use crate::slog_error;
use crate::slog_warn;
use crate::worktree;
use std::path::Path;
use super::super::super::PipelineStage;
use crate::config::ProjectConfig;
use crate::slog_warn;
use super::super::AgentPool;
use super::scan::{
count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free,
is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
check_archived_dependencies, has_content_conflict_failure, has_merge_failure,
has_mergemaster_attempted, has_review_hold, has_unmet_dependencies, is_story_blocked,
is_story_frozen, read_story_front_matter_agent,
};
impl AgentPool {
/// Scan `1_backlog/` and promote any story whose `depends_on` are all met.
/// Scan all active pipeline stages and start free agents for any unassigned work.
///
/// A story is only promoted if it explicitly lists `depends_on` AND every
/// listed dependency has reached `5_done` or `6_archived`. Stories with no
/// `depends_on` are left in the backlog for human scheduling.
///
/// **Archived dep semantics:** a dep in `6_archived` counts as satisfied (since
/// stories auto-sweep from `5_done` to `6_archived` after 4 hours, and the
/// dependent story would normally already be promoted by then). However, if a
/// dep was already in `6_archived` when the dependent story was created (e.g. it
/// was abandoned/superseded before the dependent existed), a prominent warning is
/// logged so the user can see the promotion was triggered by an archived dep, not
/// a clean completion.
fn promote_ready_backlog_stories(&self, project_root: &Path) {
    use crate::io::story_metadata::parse_front_matter;
    // All candidate stories currently sitting in the backlog stage.
    let items = scan_stage_items(project_root, "1_backlog");
    for story_id in &items {
        // Only promote stories that explicitly declare dependencies.
        // `has_deps` is true only when the content reads, the front matter
        // parses cleanly, AND `depends_on` is present and non-empty; any
        // failure along the chain leaves the story in the backlog.
        let contents = crate::db::read_content(story_id);
        let has_deps = contents
            .and_then(|c| parse_front_matter(&c).ok())
            .and_then(|m| m.depends_on)
            .map(|d| !d.is_empty())
            .unwrap_or(false);
        if !has_deps {
            continue;
        }
        // Check whether any dependencies are still unmet.
        if has_unmet_dependencies(project_root, "1_backlog", story_id) {
            continue;
        }
        // Warn if any deps were satisfied via archive rather than via clean done.
        let archived_deps = check_archived_dependencies(project_root, "1_backlog", story_id);
        if !archived_deps.is_empty() {
            slog_warn!(
                "[auto-assign] Story '{story_id}' is being promoted because deps \
                 {archived_deps:?} are in 6_archived (not cleanly completed via 5_done). \
                 These deps may have been abandoned or superseded. If this promotion is \
                 unintentional, remove the depends_on or manually move the story back to \
                 1_backlog."
            );
        }
        // All deps met — promote from backlog to current.
        slog!("[auto-assign] Story '{story_id}' deps met; promoting from backlog to current.");
        // NOTE(review): promotion failure is only logged, not retried here — the
        // story presumably stays in 1_backlog and is re-examined on the next
        // auto-assign pass; confirm against move_story_to_current semantics.
        if let Err(e) = crate::agents::lifecycle::move_story_to_current(story_id) {
            slog!("[auto-assign] Failed to promote '{story_id}' to current: {e}");
        }
    }
}
/// Order of operations:
/// 1. Promote backlog stories whose `depends_on` are all satisfied.
/// 2. Assign coder and QA agents to stories in `2_current/` and `3_qa/`.
/// 3. Trigger server-side merges (or auto-spawn mergemaster) for `4_merge/`.
pub async fn auto_assign_available_work(&self, project_root: &Path) {
// Promote any backlog stories whose dependencies are all done.
self.promote_ready_backlog_stories(project_root);
@@ -83,317 +26,11 @@ impl AgentPool {
}
};
// Process each active pipeline stage in order.
let stages: [(&str, PipelineStage); 2] = [
("2_current", PipelineStage::Coder),
("3_qa", PipelineStage::Qa),
];
// Process the coder (2_current/) and QA (3_qa/) stages.
self.assign_pipeline_stages(project_root, &config).await;
for (stage_dir, stage) in &stages {
let items = scan_stage_items(project_root, stage_dir);
if items.is_empty() {
continue;
}
for story_id in &items {
// Items marked with review_hold (e.g. spikes after QA passes) stay
// in their current stage for human review — don't auto-assign agents.
if has_review_hold(project_root, stage_dir, story_id) {
continue;
}
// Skip frozen stories — pipeline advancement is suspended.
if is_story_frozen(project_root, stage_dir, story_id) {
slog!("[auto-assign] Story '{story_id}' is frozen; skipping until unfrozen.");
continue;
}
// Skip blocked stories (retry limit exceeded).
if is_story_blocked(project_root, stage_dir, story_id) {
continue;
}
// Skip stories whose dependencies haven't landed yet.
if has_unmet_dependencies(project_root, stage_dir, story_id) {
slog!(
"[auto-assign] Story '{story_id}' has unmet dependencies; skipping until deps are done."
);
continue;
}
// Re-acquire the lock on each iteration to see state changes
// from previous start_agent calls in the same pass.
let preferred_agent =
read_story_front_matter_agent(project_root, stage_dir, story_id);
// Check max_coders limit for the Coder stage before agent selection.
// If the pool is full, all remaining items in this stage wait.
if *stage == PipelineStage::Coder
&& let Some(max) = config.max_coders
{
let agents_lock = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
slog_error!("[auto-assign] Failed to lock agents: {e}");
break;
}
};
let active = count_active_agents_for_stage(&config, &agents_lock, stage);
if active >= max {
slog!(
"[auto-assign] Coder pool full ({active}/{max}); remaining items in {stage_dir}/ will wait."
);
break;
}
}
// Outcome: (already_assigned, chosen_agent, preferred_busy, stage_mismatch)
// preferred_busy=true means the story has a specific agent requested but it is
// currently occupied — the story should wait rather than fall back.
// stage_mismatch=true means the preferred agent's stage doesn't match the
// pipeline stage, so we fell back to a generic stage agent.
let (already_assigned, free_agent, preferred_busy, stage_mismatch) = {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
slog_error!("[auto-assign] Failed to lock agents: {e}");
break;
}
};
let assigned = is_story_assigned_for_stage(&config, &agents, story_id, stage);
if assigned {
(true, None, false, false)
} else if let Some(ref pref) = preferred_agent {
// Story has a front-matter agent preference.
// Verify the preferred agent's stage matches the current
// pipeline stage — a coder shouldn't be assigned to QA.
let pref_stage_matches = config
.find_agent(pref)
.map(|cfg| super::super::super::agent_config_stage(cfg) == *stage)
.unwrap_or(false);
if !pref_stage_matches {
// Stage mismatch — fall back to any free agent for this stage.
let free = find_free_agent_for_stage(&config, &agents, stage)
.map(|s| s.to_string());
(false, free, false, true)
} else if is_agent_free(&agents, pref) {
(false, Some(pref.clone()), false, false)
} else {
(false, None, true, false)
}
} else {
let free = find_free_agent_for_stage(&config, &agents, stage)
.map(|s| s.to_string());
(false, free, false, false)
}
};
if already_assigned {
// Story already has an active agent — skip silently.
continue;
}
if preferred_busy {
// The story requests a specific agent that is currently busy.
// Do not fall back to a different agent; let this story wait.
slog!(
"[auto-assign] Preferred agent '{}' busy for '{story_id}'; story will wait.",
preferred_agent.as_deref().unwrap_or("?")
);
continue;
}
if stage_mismatch {
slog!(
"[auto-assign] Preferred agent '{}' stage mismatch for '{story_id}' in {stage_dir}/; falling back to stage-appropriate agent.",
preferred_agent.as_deref().unwrap_or("?")
);
}
match free_agent {
Some(agent_name) => {
slog!(
"[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
);
if let Err(e) = self
.start_agent(project_root, story_id, Some(&agent_name), None, None)
.await
{
slog!(
"[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
);
}
}
None => {
// No free agents of this type — stop scanning this stage.
slog!(
"[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
stage
);
break;
}
}
}
}
// ── 4_merge: deterministic server-side merge (no LLM agent) ──────────
//
// Stories in 4_merge/ are handled directly by the server rather than by
// a mergemaster agent. For each eligible story, trigger start_merge_agent_work
// which runs the squash-merge pipeline in a background task. On success
// the story advances to 5_done automatically. On failure merge_failure is
// written to the CRDT and a notification is emitted; the story stays in
// 4_merge/ until a human intervenes or an explicit `start_agent mergemaster`
// call invokes the LLM-driven recovery path.
let merge_items = scan_stage_items(project_root, "4_merge");
for story_id in &merge_items {
// Stories with a recorded merge failure may be eligible for
// automatic mergemaster dispatch when the failure is a content
// conflict — otherwise they need human intervention.
if has_merge_failure(project_root, "4_merge", story_id) {
// Auto-spawn mergemaster for content conflicts, but only once.
if has_content_conflict_failure(project_root, "4_merge", story_id)
&& !has_mergemaster_attempted(project_root, "4_merge", story_id)
&& !is_story_blocked(project_root, "4_merge", story_id)
{
// Find the mergemaster agent.
let mergemaster_agent = {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
slog_error!(
"[auto-assign] Failed to lock agents for mergemaster check: {e}"
);
continue;
}
};
if is_story_assigned_for_stage(
&config,
&agents,
story_id,
&PipelineStage::Mergemaster,
) {
// Already running — don't spawn again.
None
} else {
find_free_agent_for_stage(&config, &agents, &PipelineStage::Mergemaster)
.map(str::to_string)
}
};
if let Some(agent_name) = mergemaster_agent {
slog!(
"[auto-assign] Content conflict on '{story_id}'; \
auto-spawning mergemaster '{agent_name}'."
);
// Record mergemaster_attempted before spawning so a
// crash/restart doesn't re-trigger an infinite loop.
if let Some(contents) = crate::db::read_content(story_id) {
let updated =
crate::io::story_metadata::write_mergemaster_attempted_in_content(
&contents,
);
crate::db::write_content(story_id, &updated);
crate::db::write_item_with_content(story_id, "4_merge", &updated);
}
if let Err(e) = self
.start_agent(project_root, story_id, Some(&agent_name), None, None)
.await
{
slog!(
"[auto-assign] Failed to start mergemaster '{agent_name}' \
for '{story_id}': {e}"
);
}
}
}
continue;
}
if has_review_hold(project_root, "4_merge", story_id) {
continue;
}
if is_story_frozen(project_root, "4_merge", story_id) {
slog!("[auto-assign] Story '{story_id}' in 4_merge/ is frozen; skipping.");
continue;
}
if is_story_blocked(project_root, "4_merge", story_id) {
continue;
}
if has_unmet_dependencies(project_root, "4_merge", story_id) {
slog!("[auto-assign] Story '{story_id}' in 4_merge/ has unmet deps; skipping.");
continue;
}
// AC6: Detect empty-diff stories before starting the merge pipeline.
// If the worktree has no commits on the feature branch, write a
// merge_failure and block the story immediately — no merge job needed.
if let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
&& !crate::agents::gates::worktree_has_committed_work(&wt_path)
{
slog_warn!(
"[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
on feature branch. Writing merge_failure and blocking."
);
let empty_diff_reason = "Feature branch has no code changes — the coder agent \
did not produce any commits.";
if let Some(contents) = crate::db::read_content(story_id) {
let updated = crate::io::story_metadata::write_merge_failure_in_content(
&contents,
empty_diff_reason,
);
let blocked = crate::io::story_metadata::write_blocked_in_content(&updated);
crate::db::write_content(story_id, &blocked);
crate::db::write_item_with_content(story_id, "4_merge", &blocked);
} else {
let story_path = project_root
.join(".huskies/work/4_merge")
.join(format!("{story_id}.md"));
let _ = crate::io::story_metadata::write_merge_failure(
&story_path,
empty_diff_reason,
);
let _ = crate::io::story_metadata::write_blocked(&story_path);
}
let _ = self
.watcher_tx
.send(crate::io::watcher::WatcherEvent::StoryBlocked {
story_id: story_id.to_string(),
reason: empty_diff_reason.to_string(),
});
continue;
}
// Skip if a merge job is already running for this story (e.g. triggered
// by a previous auto-assign pass or by pipeline advancement).
let already_running = crate::crdt_state::read_merge_job(story_id.as_str())
.is_some_and(|job| job.status == "running");
if already_running {
continue;
}
// Skip if an explicit mergemaster LLM agent is already running
// (operator-driven failure recovery path).
let has_mergemaster = {
let agents = match self.agents.lock() {
Ok(a) => a,
Err(e) => {
slog_error!("[auto-assign] Failed to lock agents: {e}");
break;
}
};
is_story_assigned_for_stage(&config, &agents, story_id, &PipelineStage::Mergemaster)
};
if has_mergemaster {
continue;
}
slog!("[auto-assign] Triggering server-side merge for '{story_id}' in 4_merge/");
self.trigger_server_side_merge(project_root, story_id);
}
// Process the merge (4_merge/) stage.
self.assign_merge_stage(project_root, &config).await;
}
}
@@ -0,0 +1,63 @@
//! Backlog promotion: scan `1_backlog/` and promote stories whose `depends_on` are all met.
use std::path::Path;
use crate::slog;
use crate::slog_warn;
use super::super::AgentPool;
use super::scan::scan_stage_items;
use super::story_checks::{check_archived_dependencies, has_unmet_dependencies};
impl AgentPool {
    /// Promote backlog stories whose declared `depends_on` list is fully satisfied.
    ///
    /// Only stories that explicitly carry a non-empty `depends_on` in their front
    /// matter are considered; everything else is left in `1_backlog/` for human
    /// scheduling. A dependency counts as satisfied once it has reached `5_done`
    /// or `6_archived`.
    ///
    /// **Archived dep semantics:** because stories auto-sweep from `5_done` to
    /// `6_archived` after 4 hours, an archived dep normally just means "completed
    /// a while ago" and the dependent would usually have been promoted already.
    /// But if a dep was already archived when the dependent story was created
    /// (abandoned/superseded work), the promotion is suspicious, so a prominent
    /// warning naming the archived deps is logged for the operator to review.
    pub(super) fn promote_ready_backlog_stories(&self, project_root: &Path) {
        use crate::io::story_metadata::parse_front_matter;
        let backlog = scan_stage_items(project_root, "1_backlog");
        for story_id in &backlog {
            // A story qualifies only when its content reads, its front matter
            // parses, and `depends_on` is present and non-empty.
            let declares_deps = match crate::db::read_content(story_id)
                .and_then(|body| parse_front_matter(&body).ok())
                .and_then(|meta| meta.depends_on)
            {
                Some(deps) => !deps.is_empty(),
                None => false,
            };
            // Leave stories without declared deps (or with unmet deps) alone.
            if !declares_deps || has_unmet_dependencies(project_root, "1_backlog", story_id) {
                continue;
            }
            // Deps satisfied via 6_archived (rather than a clean 5_done) get a
            // loud warning so the operator can veto an unwanted promotion.
            let archived_deps = check_archived_dependencies(project_root, "1_backlog", story_id);
            if !archived_deps.is_empty() {
                slog_warn!(
                    "[auto-assign] Story '{story_id}' is being promoted because deps \
                     {archived_deps:?} are in 6_archived (not cleanly completed via 5_done). \
                     These deps may have been abandoned or superseded. If this promotion is \
                     unintentional, remove the depends_on or manually move the story back to \
                     1_backlog."
                );
            }
            // Every dependency landed — move the story into 2_current.
            slog!("[auto-assign] Story '{story_id}' deps met; promoting from backlog to current.");
            if let Err(e) = crate::agents::lifecycle::move_story_to_current(story_id) {
                slog!("[auto-assign] Failed to promote '{story_id}' to current: {e}");
            }
        }
    }
}
+187
View File
@@ -0,0 +1,187 @@
//! Merge stage dispatch: trigger server-side merges and auto-spawn mergemaster for content conflicts.
use std::path::Path;
use crate::config::ProjectConfig;
use crate::slog;
use crate::slog_error;
use crate::slog_warn;
use crate::worktree;
use super::super::super::PipelineStage;
use super::super::AgentPool;
use super::scan::{find_free_agent_for_stage, is_story_assigned_for_stage, scan_stage_items};
use super::story_checks::{
has_content_conflict_failure, has_merge_failure, has_mergemaster_attempted, has_review_hold,
has_unmet_dependencies, is_story_blocked, is_story_frozen,
};
impl AgentPool {
    /// Process stories in `4_merge/`: trigger server-side squash-merges and auto-spawn
    /// a mergemaster agent when a content-conflict failure is detected.
    ///
    /// Stories with a recorded merge failure may be eligible for automatic mergemaster
    /// dispatch when the failure is a content conflict — otherwise they need human
    /// intervention. Each eligible story without an active merge job triggers
    /// `trigger_server_side_merge`.
    pub(super) async fn assign_merge_stage(&self, project_root: &Path, config: &ProjectConfig) {
        // ── 4_merge: deterministic server-side merge (no LLM agent) ──────────
        //
        // Stories in 4_merge/ are handled directly by the server rather than by
        // a mergemaster agent. For each eligible story, trigger start_merge_agent_work
        // which runs the squash-merge pipeline in a background task. On success
        // the story advances to 5_done automatically. On failure merge_failure is
        // written to the CRDT and a notification is emitted; the story stays in
        // 4_merge/ until a human intervenes or an explicit `start_agent mergemaster`
        // call invokes the LLM-driven recovery path.
        let merge_items = scan_stage_items(project_root, "4_merge");
        for story_id in &merge_items {
            // Stories with a recorded merge failure may be eligible for
            // automatic mergemaster dispatch when the failure is a content
            // conflict — otherwise they need human intervention.
            if has_merge_failure(project_root, "4_merge", story_id) {
                // Auto-spawn mergemaster for content conflicts, but only once
                // (`has_mergemaster_attempted` is the once-only guard).
                if has_content_conflict_failure(project_root, "4_merge", story_id)
                    && !has_mergemaster_attempted(project_root, "4_merge", story_id)
                    && !is_story_blocked(project_root, "4_merge", story_id)
                {
                    // Find the mergemaster agent. The agents lock is scoped to
                    // this block so it is dropped before the `.await` below.
                    let mergemaster_agent = {
                        let agents = match self.agents.lock() {
                            Ok(a) => a,
                            Err(e) => {
                                // Poisoned lock: skip just this story and keep
                                // scanning the rest of 4_merge/.
                                slog_error!(
                                    "[auto-assign] Failed to lock agents for mergemaster check: {e}"
                                );
                                continue;
                            }
                        };
                        if is_story_assigned_for_stage(
                            config,
                            &agents,
                            story_id,
                            &PipelineStage::Mergemaster,
                        ) {
                            // Already running — don't spawn again.
                            None
                        } else {
                            find_free_agent_for_stage(config, &agents, &PipelineStage::Mergemaster)
                                .map(str::to_string)
                        }
                    };
                    if let Some(agent_name) = mergemaster_agent {
                        slog!(
                            "[auto-assign] Content conflict on '{story_id}'; \
                             auto-spawning mergemaster '{agent_name}'."
                        );
                        // Record mergemaster_attempted before spawning so a
                        // crash/restart doesn't re-trigger an infinite loop.
                        if let Some(contents) = crate::db::read_content(story_id) {
                            let updated =
                                crate::io::story_metadata::write_mergemaster_attempted_in_content(
                                    &contents,
                                );
                            // Both writes carry the same updated content —
                            // presumably one updates the content store and the
                            // other the per-stage item record; confirm they are
                            // meant to stay in sync.
                            crate::db::write_content(story_id, &updated);
                            crate::db::write_item_with_content(story_id, "4_merge", &updated);
                        }
                        if let Err(e) = self
                            .start_agent(project_root, story_id, Some(&agent_name), None, None)
                            .await
                        {
                            slog!(
                                "[auto-assign] Failed to start mergemaster '{agent_name}' \
                                 for '{story_id}': {e}"
                            );
                        }
                    }
                }
                // A merge-failure story never falls through to the server-side
                // merge path below.
                continue;
            }
            // Held for human review — never auto-advance.
            if has_review_hold(project_root, "4_merge", story_id) {
                continue;
            }
            // Frozen stories suspend all pipeline advancement.
            if is_story_frozen(project_root, "4_merge", story_id) {
                slog!("[auto-assign] Story '{story_id}' in 4_merge/ is frozen; skipping.");
                continue;
            }
            // Blocked stories need human intervention.
            if is_story_blocked(project_root, "4_merge", story_id) {
                continue;
            }
            if has_unmet_dependencies(project_root, "4_merge", story_id) {
                slog!("[auto-assign] Story '{story_id}' in 4_merge/ has unmet deps; skipping.");
                continue;
            }
            // AC6: Detect empty-diff stories before starting the merge pipeline.
            // If the worktree has no commits on the feature branch, write a
            // merge_failure and block the story immediately — no merge job needed.
            if let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
                && !crate::agents::gates::worktree_has_committed_work(&wt_path)
            {
                slog_warn!(
                    "[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
                     on feature branch. Writing merge_failure and blocking."
                );
                let empty_diff_reason = "Feature branch has no code changes — the coder agent \
                                         did not produce any commits.";
                if let Some(contents) = crate::db::read_content(story_id) {
                    // Content is in the DB: record failure + blocked there.
                    let updated = crate::io::story_metadata::write_merge_failure_in_content(
                        &contents,
                        empty_diff_reason,
                    );
                    let blocked = crate::io::story_metadata::write_blocked_in_content(&updated);
                    crate::db::write_content(story_id, &blocked);
                    crate::db::write_item_with_content(story_id, "4_merge", &blocked);
                } else {
                    // No DB content — fall back to the on-disk story file.
                    // Best-effort: write errors are deliberately ignored.
                    let story_path = project_root
                        .join(".huskies/work/4_merge")
                        .join(format!("{story_id}.md"));
                    let _ = crate::io::story_metadata::write_merge_failure(
                        &story_path,
                        empty_diff_reason,
                    );
                    let _ = crate::io::story_metadata::write_blocked(&story_path);
                }
                // Notify watchers; send errors (e.g. closed channel) are ignored.
                let _ = self
                    .watcher_tx
                    .send(crate::io::watcher::WatcherEvent::StoryBlocked {
                        story_id: story_id.to_string(),
                        reason: empty_diff_reason.to_string(),
                    });
                continue;
            }
            // Skip if a merge job is already running for this story (e.g. triggered
            // by a previous auto-assign pass or by pipeline advancement).
            let already_running = crate::crdt_state::read_merge_job(story_id.as_str())
                .is_some_and(|job| job.status == "running");
            if already_running {
                continue;
            }
            // Skip if an explicit mergemaster LLM agent is already running
            // (operator-driven failure recovery path).
            let has_mergemaster = {
                let agents = match self.agents.lock() {
                    Ok(a) => a,
                    Err(e) => {
                        slog_error!("[auto-assign] Failed to lock agents: {e}");
                        // NOTE(review): a poisoned lock here `break`s out of the
                        // whole 4_merge scan, while the mergemaster-spawn path
                        // above `continue`s past a single story — confirm this
                        // asymmetry is intentional.
                        break;
                    }
                };
                is_story_assigned_for_stage(config, &agents, story_id, &PipelineStage::Mergemaster)
            };
            if has_mergemaster {
                continue;
            }
            // Eligible: kick off the server-side merge (runs in the background;
            // this call does not block the scan).
            slog!("[auto-assign] Triggering server-side merge for '{story_id}' in 4_merge/");
            self.trigger_server_side_merge(project_root, story_id);
        }
    }
}
@@ -2,6 +2,9 @@
#[allow(clippy::module_inception)]
mod auto_assign;
mod backlog;
mod merge;
mod pipeline;
mod reconcile;
mod scan;
mod story_checks;
@@ -0,0 +1,180 @@
//! Pipeline stage dispatch: assign free coder and QA agents to stories in `2_current/` and `3_qa/`.
use std::path::Path;
use crate::config::ProjectConfig;
use crate::slog;
use crate::slog_error;
use super::super::super::PipelineStage;
use super::super::AgentPool;
use super::scan::{
count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free,
is_story_assigned_for_stage, scan_stage_items,
};
use super::story_checks::{
has_review_hold, has_unmet_dependencies, is_story_blocked, is_story_frozen,
read_story_front_matter_agent,
};
impl AgentPool {
    /// Assign free agents to stories in the coder (`2_current/`) and QA (`3_qa/`) stages.
    ///
    /// For each stage, iterates over pending stories and starts a free agent if one is
    /// available. Respects `max_coders`, `review_hold`, frozen, blocked, and unmet-dep
    /// guards. Agent front-matter preferences and stage-mismatch fallback are handled
    /// here as well.
    pub(super) async fn assign_pipeline_stages(&self, project_root: &Path, config: &ProjectConfig) {
        // Stages are visited in pipeline order: coder work, then QA.
        let stages: [(&str, PipelineStage); 2] = [
            ("2_current", PipelineStage::Coder),
            ("3_qa", PipelineStage::Qa),
        ];
        for (stage_dir, stage) in &stages {
            let items = scan_stage_items(project_root, stage_dir);
            if items.is_empty() {
                continue;
            }
            for story_id in &items {
                // Items marked with review_hold (e.g. spikes after QA passes) stay
                // in their current stage for human review — don't auto-assign agents.
                if has_review_hold(project_root, stage_dir, story_id) {
                    continue;
                }
                // Skip frozen stories — pipeline advancement is suspended.
                if is_story_frozen(project_root, stage_dir, story_id) {
                    slog!("[auto-assign] Story '{story_id}' is frozen; skipping until unfrozen.");
                    continue;
                }
                // Skip blocked stories (retry limit exceeded).
                if is_story_blocked(project_root, stage_dir, story_id) {
                    continue;
                }
                // Skip stories whose dependencies haven't landed yet.
                if has_unmet_dependencies(project_root, stage_dir, story_id) {
                    slog!(
                        "[auto-assign] Story '{story_id}' has unmet dependencies; skipping until deps are done."
                    );
                    continue;
                }
                // Optional agent preference from the story's front matter.
                let preferred_agent =
                    read_story_front_matter_agent(project_root, stage_dir, story_id);
                // Check max_coders limit for the Coder stage before agent selection.
                // If the pool is full, all remaining items in this stage wait.
                // The agents lock is taken fresh here (not held across the loop) so
                // the count reflects agents started earlier in this same pass.
                if *stage == PipelineStage::Coder
                    && let Some(max) = config.max_coders
                {
                    let agents_lock = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            // Poisoned lock: abandon this whole stage.
                            break;
                        }
                    };
                    let active = count_active_agents_for_stage(config, &agents_lock, stage);
                    if active >= max {
                        slog!(
                            "[auto-assign] Coder pool full ({active}/{max}); remaining items in {stage_dir}/ will wait."
                        );
                        break;
                    }
                }
                // Outcome: (already_assigned, chosen_agent, preferred_busy, stage_mismatch)
                // preferred_busy=true means the story has a specific agent requested but it is
                // currently occupied — the story should wait rather than fall back.
                // stage_mismatch=true means the preferred agent's stage doesn't match the
                // pipeline stage, so we fell back to a generic stage agent.
                // The lock is re-acquired per iteration (rather than held across the
                // loop) so state changes from earlier start_agent calls are visible;
                // the scoped block also drops it before the `.await` below.
                let (already_assigned, free_agent, preferred_busy, stage_mismatch) = {
                    let agents = match self.agents.lock() {
                        Ok(a) => a,
                        Err(e) => {
                            slog_error!("[auto-assign] Failed to lock agents: {e}");
                            break;
                        }
                    };
                    let assigned = is_story_assigned_for_stage(config, &agents, story_id, stage);
                    if assigned {
                        (true, None, false, false)
                    } else if let Some(ref pref) = preferred_agent {
                        // Story has a front-matter agent preference.
                        // Verify the preferred agent's stage matches the current
                        // pipeline stage — a coder shouldn't be assigned to QA.
                        // An unknown agent name also counts as a mismatch.
                        let pref_stage_matches = config
                            .find_agent(pref)
                            .map(|cfg| super::super::super::agent_config_stage(cfg) == *stage)
                            .unwrap_or(false);
                        if !pref_stage_matches {
                            // Stage mismatch — fall back to any free agent for this stage.
                            let free = find_free_agent_for_stage(config, &agents, stage)
                                .map(|s| s.to_string());
                            (false, free, false, true)
                        } else if is_agent_free(&agents, pref) {
                            (false, Some(pref.clone()), false, false)
                        } else {
                            // Preferred agent matches the stage but is busy.
                            (false, None, true, false)
                        }
                    } else {
                        // No preference: take any free agent for this stage.
                        let free = find_free_agent_for_stage(config, &agents, stage)
                            .map(|s| s.to_string());
                        (false, free, false, false)
                    }
                };
                if already_assigned {
                    // Story already has an active agent — skip silently.
                    continue;
                }
                if preferred_busy {
                    // The story requests a specific agent that is currently busy.
                    // Do not fall back to a different agent; let this story wait.
                    slog!(
                        "[auto-assign] Preferred agent '{}' busy for '{story_id}'; story will wait.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                    continue;
                }
                if stage_mismatch {
                    slog!(
                        "[auto-assign] Preferred agent '{}' stage mismatch for '{story_id}' in {stage_dir}/; falling back to stage-appropriate agent.",
                        preferred_agent.as_deref().unwrap_or("?")
                    );
                }
                match free_agent {
                    Some(agent_name) => {
                        slog!(
                            "[auto-assign] Assigning '{agent_name}' to '{story_id}' in {stage_dir}/"
                        );
                        if let Err(e) = self
                            .start_agent(project_root, story_id, Some(&agent_name), None, None)
                            .await
                        {
                            // Start failure is logged only; the story remains in
                            // the stage for the next auto-assign pass.
                            slog!(
                                "[auto-assign] Failed to start '{agent_name}' for '{story_id}': {e}"
                            );
                        }
                    }
                    None => {
                        // No free agents of this type — stop scanning this stage.
                        slog!(
                            "[auto-assign] All {:?} agents busy; remaining items in {stage_dir}/ will wait.",
                            stage
                        );
                        break;
                    }
                }
            }
        }
    }
}