story-kit: merge 311_story_server_enforced_retry_limits_for_failed_merge_and_empty_diff_stories

This commit is contained in:
Dave
2026-03-19 16:34:11 +00:00
parent 662e00f94a
commit 3b887e3085
9 changed files with 346 additions and 65 deletions

View File

@@ -9,6 +9,10 @@ default_coder_model = "sonnet"
# Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full. # Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full.
max_coders = 3 max_coders = 3
# Maximum retries per story per pipeline stage before marking as blocked.
# Set to 0 to disable retry limits.
max_retries = 2
[[component]] [[component]]
name = "frontend" name = "frontend"
path = "frontend" path = "frontend"

View File

@@ -131,9 +131,11 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(),
std::fs::rename(&source_path, &done_path) std::fs::rename(&source_path, &done_path)
.map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?; .map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?;
// Strip stale merge_failure from front matter now that the story is done. // Strip stale pipeline fields from front matter now that the story is done.
if let Err(e) = clear_front_matter_field(&done_path, "merge_failure") { for field in &["merge_failure", "retry_count", "blocked"] {
slog!("[lifecycle] Warning: could not clear merge_failure from '{story_id}': {e}"); if let Err(e) = clear_front_matter_field(&done_path, field) {
slog!("[lifecycle] Warning: could not clear {field} from '{story_id}': {e}");
}
} }
let from_dir = if source_path == current_path { let from_dir = if source_path == current_path {
@@ -183,6 +185,14 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St
} else { } else {
"work/3_qa/" "work/3_qa/"
}; };
// Reset retry count and blocked for the new stage.
if let Err(e) = clear_front_matter_field(&merge_path, "retry_count") {
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
}
if let Err(e) = clear_front_matter_field(&merge_path, "blocked") {
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
}
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
Ok(()) Ok(())
@@ -214,6 +224,14 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
std::fs::rename(&current_path, &qa_path) std::fs::rename(&current_path, &qa_path)
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
// Reset retry count for the new stage.
if let Err(e) = clear_front_matter_field(&qa_path, "retry_count") {
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
}
if let Err(e) = clear_front_matter_field(&qa_path, "blocked") {
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
}
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
Ok(()) Ok(())

View File

@@ -924,6 +924,13 @@ impl AgentPool {
} }
} }
} }
} else {
// Increment retry count and check if blocked.
let story_path = project_root
.join(".story_kit/work/2_current")
.join(format!("{story_id}.md"));
if should_block_story(&story_path, config.max_retries, story_id, "coder") {
// Story has exceeded retry limit — do not restart.
} else { } else {
slog!( slog!(
"[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting." "[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
@@ -944,6 +951,7 @@ impl AgentPool {
} }
} }
} }
}
PipelineStage::Qa => { PipelineStage::Qa => {
if completion.gates_passed { if completion.gates_passed {
// Run coverage gate in the QA worktree before advancing to merge. // Run coverage gate in the QA worktree before advancing to merge.
@@ -1016,6 +1024,12 @@ impl AgentPool {
); );
} }
} }
} else {
let story_path = project_root
.join(".story_kit/work/3_qa")
.join(format!("{story_id}.md"));
if should_block_story(&story_path, config.max_retries, story_id, "qa-coverage") {
// Story has exceeded retry limit — do not restart.
} else { } else {
slog!( slog!(
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA." "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
@@ -1033,6 +1047,13 @@ impl AgentPool {
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}"); slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
} }
} }
}
} else {
let story_path = project_root
.join(".story_kit/work/3_qa")
.join(format!("{story_id}.md"));
if should_block_story(&story_path, config.max_retries, story_id, "qa") {
// Story has exceeded retry limit — do not restart.
} else { } else {
slog!("[pipeline] QA failed gates for '{story_id}'. Restarting."); slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
let context = format!( let context = format!(
@@ -1049,6 +1070,7 @@ impl AgentPool {
} }
} }
} }
}
PipelineStage::Mergemaster => { PipelineStage::Mergemaster => {
// Block advancement if the mergemaster explicitly reported a failure. // Block advancement if the mergemaster explicitly reported a failure.
// The server-owned gate check runs in the feature-branch worktree (not // The server-owned gate check runs in the feature-branch worktree (not
@@ -1102,6 +1124,12 @@ impl AgentPool {
slog!( slog!(
"[pipeline] Story '{story_id}' done. Worktree preserved for inspection." "[pipeline] Story '{story_id}' done. Worktree preserved for inspection."
); );
} else {
let story_path = project_root
.join(".story_kit/work/4_merge")
.join(format!("{story_id}.md"));
if should_block_story(&story_path, config.max_retries, story_id, "mergemaster") {
// Story has exceeded retry limit — do not restart.
} else { } else {
slog!( slog!(
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster." "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
@@ -1129,6 +1157,7 @@ impl AgentPool {
} }
} }
} }
}
// Always scan for unassigned work after any agent completes, regardless // Always scan for unassigned work after any agent completes, regardless
// of the outcome (success, failure, restart). This ensures stories that // of the outcome (success, failure, restart). This ensures stories that
@@ -1563,6 +1592,44 @@ impl AgentPool {
continue; continue;
} }
// Skip blocked stories (retry limit exceeded).
if is_story_blocked(project_root, stage_dir, story_id) {
continue;
}
// Skip stories in 4_merge/ that already have a reported merge failure.
// These need human intervention — auto-assigning a new mergemaster
// would just waste tokens on the same broken merge.
if *stage == PipelineStage::Mergemaster
&& has_merge_failure(project_root, stage_dir, story_id)
{
continue;
}
// AC6: Detect empty-diff stories in 4_merge/ before starting a
// mergemaster. If the worktree has no commits on the feature branch,
// write a merge_failure and block the story immediately.
if *stage == PipelineStage::Mergemaster
&& let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
&& !super::gates::worktree_has_committed_work(&wt_path)
{
slog_warn!(
"[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
on feature branch. Writing merge_failure and blocking."
);
let story_path = project_root
.join(".story_kit/work")
.join(stage_dir)
.join(format!("{story_id}.md"));
let _ = crate::io::story_metadata::write_merge_failure(
&story_path,
"Feature branch has no code changes — the coder agent \
did not produce any commits.",
);
let _ = crate::io::story_metadata::write_blocked(&story_path);
continue;
}
// Re-acquire the lock on each iteration to see state changes // Re-acquire the lock on each iteration to see state changes
// from previous start_agent calls in the same pass. // from previous start_agent calls in the same pass.
let preferred_agent = let preferred_agent =
@@ -2195,6 +2262,80 @@ fn has_review_hold(project_root: &Path, stage_dir: &str, story_id: &str) -> bool
.unwrap_or(false) .unwrap_or(false)
} }
/// Increment `retry_count` in the story's front matter and decide whether the
/// story has exhausted its retries at the current pipeline stage.
///
/// Returns `true` when the story is now blocked (the caller must NOT restart
/// the agent) and `false` when another retry is allowed.
/// A `max_retries` of 0 disables retry limits entirely.
fn should_block_story(story_path: &Path, max_retries: u32, story_id: &str, stage_label: &str) -> bool {
    use crate::io::story_metadata::{increment_retry_count, write_blocked};
    // max_retries == 0 means "unlimited retries" — never block.
    if max_retries == 0 {
        return false;
    }
    let new_count = match increment_retry_count(story_path) {
        Ok(n) => n,
        Err(e) => {
            // Best effort: if the counter cannot be persisted, err on the
            // side of retrying rather than silently wedging the story.
            slog_error!("[pipeline] Failed to increment retry_count for '{story_id}': {e}");
            return false;
        }
    };
    if new_count < max_retries {
        slog!(
            "[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
        );
        return false;
    }
    slog_warn!(
        "[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
        at {stage_label} stage. Marking as blocked."
    );
    if let Err(e) = write_blocked(story_path) {
        slog_error!("[pipeline] Failed to write blocked flag for '{story_id}': {e}");
    }
    true
}
/// Return `true` if the story file has `blocked: true` in its front matter.
///
/// A missing file or unparseable front matter is treated as "not blocked" so
/// that a transient read problem never hides work from the scheduler.
fn is_story_blocked(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
    use crate::io::story_metadata::parse_front_matter;
    let path = project_root
        .join(".story_kit")
        .join("work")
        .join(stage_dir)
        .join(format!("{story_id}.md"));
    std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| parse_front_matter(&contents).ok())
        .and_then(|meta| meta.blocked)
        .unwrap_or(false)
}
/// Return `true` if the story file has a `merge_failure` field in its front matter.
///
/// A missing file or unparseable front matter counts as "no merge failure" so
/// a transient read error never permanently excludes a story from assignment.
fn has_merge_failure(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
    use crate::io::story_metadata::parse_front_matter;
    let path = project_root
        .join(".story_kit")
        .join("work")
        .join(stage_dir)
        .join(format!("{story_id}.md"));
    std::fs::read_to_string(path)
        .ok()
        .and_then(|contents| parse_front_matter(&contents).ok())
        .map(|meta| meta.merge_failure.is_some())
        .unwrap_or(false)
}
/// Return `true` if `agent_name` has no active (pending/running) entry in the pool. /// Return `true` if `agent_name` has no active (pending/running) entry in the pool.
fn is_agent_free(agents: &HashMap<String, StoryAgent>, agent_name: &str) -> bool { fn is_agent_free(agents: &HashMap<String, StoryAgent>, agent_name: &str) -> bool {
!agents.values().any(|a| { !agents.values().any(|a| {
@@ -2420,6 +2561,16 @@ async fn run_server_owned_completion(
let path = wt_path; let path = wt_path;
match tokio::task::spawn_blocking(move || { match tokio::task::spawn_blocking(move || {
super::gates::check_uncommitted_changes(&path)?; super::gates::check_uncommitted_changes(&path)?;
// AC5: Fail early if the coder finished with no commits on the feature branch.
// This prevents empty-diff stories from advancing through QA to merge.
if !super::gates::worktree_has_committed_work(&path) {
return Ok((
false,
"Agent exited with no commits on the feature branch. \
The agent did not produce any code changes."
.to_string(),
));
}
super::gates::run_acceptance_gates(&path) super::gates::run_acceptance_gates(&path)
}) })
.await .await

View File

@@ -26,6 +26,10 @@ pub struct ProjectConfig {
/// coder agents at once. Stories wait in `2_current/` until a slot frees up. /// coder agents at once. Stories wait in `2_current/` until a slot frees up.
#[serde(default)] #[serde(default)]
pub max_coders: Option<usize>, pub max_coders: Option<usize>,
/// Maximum number of retries per story per pipeline stage before marking as blocked.
/// Default: 2. Set to 0 to disable retry limits.
#[serde(default = "default_max_retries")]
pub max_retries: u32,
} }
/// Configuration for the filesystem watcher's sweep behaviour. /// Configuration for the filesystem watcher's sweep behaviour.
@@ -65,6 +69,10 @@ fn default_qa() -> String {
"server".to_string() "server".to_string()
} }
/// Serde default for `max_retries`: two retries per pipeline stage before
/// a story is marked as blocked.
fn default_max_retries() -> u32 {
    2
}
#[derive(Debug, Clone, Deserialize)] #[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)] #[allow(dead_code)]
pub struct ComponentConfig { pub struct ComponentConfig {
@@ -149,6 +157,8 @@ struct LegacyProjectConfig {
default_coder_model: Option<String>, default_coder_model: Option<String>,
#[serde(default)] #[serde(default)]
max_coders: Option<usize>, max_coders: Option<usize>,
#[serde(default = "default_max_retries")]
max_retries: u32,
} }
impl Default for ProjectConfig { impl Default for ProjectConfig {
@@ -173,6 +183,7 @@ impl Default for ProjectConfig {
default_qa: default_qa(), default_qa: default_qa(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: default_max_retries(),
} }
} }
} }
@@ -217,6 +228,7 @@ impl ProjectConfig {
default_qa: legacy.default_qa, default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model, default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders, max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}; };
validate_agents(&config.agent)?; validate_agents(&config.agent)?;
return Ok(config); return Ok(config);
@@ -240,6 +252,7 @@ impl ProjectConfig {
default_qa: legacy.default_qa, default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model, default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders, max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}; };
validate_agents(&config.agent)?; validate_agents(&config.agent)?;
Ok(config) Ok(config)
@@ -251,6 +264,7 @@ impl ProjectConfig {
default_qa: legacy.default_qa, default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model, default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders, max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}) })
} }
} }

View File

@@ -1156,7 +1156,7 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
items items
.iter() .iter()
.map(|s| { .map(|s| {
json!({ let mut item = json!({
"story_id": s.story_id, "story_id": s.story_id,
"name": s.name, "name": s.name,
"stage": stage, "stage": stage,
@@ -1165,7 +1165,19 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
"model": a.model, "model": a.model,
"status": a.status, "status": a.status,
})), })),
}) });
// Include blocked/retry_count when present so callers can
// identify stories stuck in the pipeline.
if let Some(true) = s.blocked {
item["blocked"] = json!(true);
}
if let Some(rc) = s.retry_count {
item["retry_count"] = json!(rc);
}
if let Some(ref mf) = s.merge_failure {
item["merge_failure"] = json!(mf);
}
item
}) })
.collect() .collect()
} }

View File

@@ -30,6 +30,12 @@ pub struct UpcomingStory {
/// QA mode for this item: "human", "server", or "agent". /// QA mode for this item: "human", "server", or "agent".
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub qa: Option<String>, pub qa: Option<String>,
/// Number of retries at the current pipeline stage.
#[serde(skip_serializing_if = "Option::is_none")]
pub retry_count: Option<u32>,
/// True when the story has exceeded its retry limit and will not be auto-assigned.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocked: Option<bool>,
} }
pub struct StoryValidationResult { pub struct StoryValidationResult {
@@ -123,12 +129,12 @@ fn load_stage_items(
.to_string(); .to_string();
let contents = fs::read_to_string(&path) let contents = fs::read_to_string(&path)
.map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?; .map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?;
let (name, error, merge_failure, review_hold, qa) = match parse_front_matter(&contents) { let (name, error, merge_failure, review_hold, qa, retry_count, blocked) = match parse_front_matter(&contents) {
Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string())), Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string()), meta.retry_count, meta.blocked),
Err(e) => (None, Some(e.to_string()), None, None, None), Err(e) => (None, Some(e.to_string()), None, None, None, None, None),
}; };
let agent = agent_map.get(&story_id).cloned(); let agent = agent_map.get(&story_id).cloned();
stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa }); stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa, retry_count, blocked });
} }
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id)); stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));

View File

@@ -739,6 +739,8 @@ mod tests {
agent: None, agent: None,
review_hold: None, review_hold: None,
qa: None, qa: None,
retry_count: None,
blocked: None,
}; };
let resp = WsResponse::PipelineState { let resp = WsResponse::PipelineState {
backlog: vec![story], backlog: vec![story],
@@ -878,6 +880,8 @@ mod tests {
agent: None, agent: None,
review_hold: None, review_hold: None,
qa: None, qa: None,
retry_count: None,
blocked: None,
}], }],
current: vec![UpcomingStory { current: vec![UpcomingStory {
story_id: "2_story_b".to_string(), story_id: "2_story_b".to_string(),
@@ -887,6 +891,8 @@ mod tests {
agent: None, agent: None,
review_hold: None, review_hold: None,
qa: None, qa: None,
retry_count: None,
blocked: None,
}], }],
qa: vec![], qa: vec![],
merge: vec![], merge: vec![],
@@ -898,6 +904,8 @@ mod tests {
agent: None, agent: None,
review_hold: None, review_hold: None,
qa: None, qa: None,
retry_count: None,
blocked: None,
}], }],
}; };
let resp: WsResponse = state.into(); let resp: WsResponse = state.into();
@@ -1056,6 +1064,8 @@ mod tests {
}), }),
review_hold: None, review_hold: None,
qa: None, qa: None,
retry_count: None,
blocked: None,
}], }],
qa: vec![], qa: vec![],
merge: vec![], merge: vec![],

View File

@@ -49,6 +49,10 @@ pub struct StoryMetadata {
pub agent: Option<String>, pub agent: Option<String>,
pub review_hold: Option<bool>, pub review_hold: Option<bool>,
pub qa: Option<QaMode>, pub qa: Option<QaMode>,
/// Number of times this story has been retried at its current pipeline stage.
pub retry_count: Option<u32>,
/// When `true`, auto-assign will skip this story (retry limit exceeded).
pub blocked: Option<bool>,
} }
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
@@ -77,6 +81,10 @@ struct FrontMatter {
qa: Option<String>, qa: Option<String>,
/// Legacy boolean field — mapped to `qa: human` (true) or ignored (false/absent). /// Legacy boolean field — mapped to `qa: human` (true) or ignored (false/absent).
manual_qa: Option<bool>, manual_qa: Option<bool>,
/// Number of times this story has been retried at its current pipeline stage.
retry_count: Option<u32>,
/// When `true`, auto-assign will skip this story (retry limit exceeded).
blocked: Option<bool>,
} }
pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> { pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> {
@@ -119,6 +127,8 @@ fn build_metadata(front: FrontMatter) -> StoryMetadata {
agent: front.agent, agent: front.agent,
review_hold: front.review_hold, review_hold: front.review_hold,
qa, qa,
retry_count: front.retry_count,
blocked: front.blocked,
} }
} }
@@ -245,6 +255,37 @@ pub fn set_front_matter_field(contents: &str, key: &str, value: &str) -> String
result result
} }
/// Increment the `retry_count` field in the story file's front matter.
///
/// Reads the current value (defaulting to 0 when the field is absent or the
/// front matter cannot be parsed), increments by 1, and writes the file back.
/// Returns the new retry count.
///
/// # Errors
/// Returns an error string when the story file cannot be read or written.
pub fn increment_retry_count(path: &Path) -> Result<u32, String> {
    let contents =
        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
    // Unparseable front matter is deliberately treated as "no retries yet"
    // rather than an error, so a corrupted counter never wedges the pipeline.
    let current = parse_front_matter(&contents)
        .ok()
        .and_then(|m| m.retry_count)
        .unwrap_or(0);
    // saturating_add avoids a debug-build overflow panic if the counter is
    // ever driven to u32::MAX (e.g. by a runaway external writer).
    let new_count = current.saturating_add(1);
    let updated = set_front_matter_field(&contents, "retry_count", &new_count.to_string());
    fs::write(path, &updated).map_err(|e| format!("Failed to write story file: {e}"))?;
    Ok(new_count)
}
/// Write `blocked: true` to the YAML front matter of a story file.
///
/// Marks stories that have exceeded the retry limit so that the auto-assign
/// scan skips them until a human intervenes.
pub fn write_blocked(path: &Path) -> Result<(), String> {
    let original =
        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
    let updated = set_front_matter_field(&original, "blocked", "true");
    fs::write(path, updated).map_err(|e| format!("Failed to write story file: {e}"))?;
    Ok(())
}
/// Append rejection notes to a story file body. /// Append rejection notes to a story file body.
/// ///
/// Adds a `## QA Rejection Notes` section at the end of the file so the coder /// Adds a `## QA Rejection Notes` section at the end of the file so the coder

View File

@@ -219,6 +219,19 @@ pub async fn remove_worktree_by_story_id(
} }
/// List all worktrees under `{project_root}/.story_kit/worktrees/`. /// List all worktrees under `{project_root}/.story_kit/worktrees/`.
/// Find the worktree path for a given story ID, if it exists.
///
/// Returns `Some` only when `.story_kit/worktrees/<story_id>` is an existing
/// directory under `project_root`; otherwise `None`.
pub fn find_worktree_path(project_root: &Path, story_id: &str) -> Option<PathBuf> {
    let candidate = project_root
        .join(".story_kit")
        .join("worktrees")
        .join(story_id);
    candidate.is_dir().then_some(candidate)
}
pub fn list_worktrees(project_root: &Path) -> Result<Vec<WorktreeListEntry>, String> { pub fn list_worktrees(project_root: &Path) -> Result<Vec<WorktreeListEntry>, String> {
let worktrees_dir = project_root.join(".story_kit").join("worktrees"); let worktrees_dir = project_root.join(".story_kit").join("worktrees");
if !worktrees_dir.exists() { if !worktrees_dir.exists() {
@@ -510,6 +523,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Should complete without panic // Should complete without panic
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -530,6 +544,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Should complete without panic // Should complete without panic
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -550,6 +565,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Setup command failures are non-fatal — should not panic or propagate // Setup command failures are non-fatal — should not panic or propagate
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -570,6 +586,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Teardown failures are best-effort — should not propagate // Teardown failures are best-effort — should not propagate
assert!(run_teardown_commands(tmp.path(), &config).await.is_ok()); assert!(run_teardown_commands(tmp.path(), &config).await.is_ok());
@@ -589,6 +606,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
let info = create_worktree(&project_root, "42_fresh_test", &config, 3001) let info = create_worktree(&project_root, "42_fresh_test", &config, 3001)
.await .await
@@ -615,6 +633,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// First creation // First creation
let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001) let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001)
@@ -657,6 +676,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await; let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await;
@@ -682,6 +702,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
create_worktree(&project_root, "88_remove_by_id", &config, 3001) create_worktree(&project_root, "88_remove_by_id", &config, 3001)
.await .await
@@ -738,6 +759,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Even though setup commands fail, create_worktree must succeed // Even though setup commands fail, create_worktree must succeed
// so the agent can start and fix the problem itself. // so the agent can start and fix the problem itself.
@@ -766,6 +788,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// First creation — no setup commands, should succeed // First creation — no setup commands, should succeed
create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001) create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001)
@@ -784,6 +807,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
// Second call — worktree exists, setup commands fail, must still succeed // Second call — worktree exists, setup commands fail, must still succeed
let result = let result =
@@ -809,6 +833,7 @@ mod tests {
default_qa: "server".to_string(), default_qa: "server".to_string(),
default_coder_model: None, default_coder_model: None,
max_coders: None, max_coders: None,
max_retries: 2,
}; };
let info = create_worktree(&project_root, "77_remove_async", &config, 3001) let info = create_worktree(&project_root, "77_remove_async", &config, 3001)
.await .await