story-kit: merge 311_story_server_enforced_retry_limits_for_failed_merge_and_empty_diff_stories
This commit is contained in:
@@ -131,9 +131,11 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(),
|
||||
std::fs::rename(&source_path, &done_path)
|
||||
.map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?;
|
||||
|
||||
// Strip stale merge_failure from front matter now that the story is done.
|
||||
if let Err(e) = clear_front_matter_field(&done_path, "merge_failure") {
|
||||
slog!("[lifecycle] Warning: could not clear merge_failure from '{story_id}': {e}");
|
||||
// Strip stale pipeline fields from front matter now that the story is done.
|
||||
for field in &["merge_failure", "retry_count", "blocked"] {
|
||||
if let Err(e) = clear_front_matter_field(&done_path, field) {
|
||||
slog!("[lifecycle] Warning: could not clear {field} from '{story_id}': {e}");
|
||||
}
|
||||
}
|
||||
|
||||
let from_dir = if source_path == current_path {
|
||||
@@ -183,6 +185,14 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St
|
||||
} else {
|
||||
"work/3_qa/"
|
||||
};
|
||||
// Reset retry count and blocked for the new stage.
|
||||
if let Err(e) = clear_front_matter_field(&merge_path, "retry_count") {
|
||||
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
|
||||
}
|
||||
if let Err(e) = clear_front_matter_field(&merge_path, "blocked") {
|
||||
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
|
||||
}
|
||||
|
||||
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
|
||||
|
||||
Ok(())
|
||||
@@ -214,6 +224,14 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
|
||||
std::fs::rename(¤t_path, &qa_path)
|
||||
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
|
||||
|
||||
// Reset retry count for the new stage.
|
||||
if let Err(e) = clear_front_matter_field(&qa_path, "retry_count") {
|
||||
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
|
||||
}
|
||||
if let Err(e) = clear_front_matter_field(&qa_path, "blocked") {
|
||||
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
|
||||
}
|
||||
|
||||
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
|
||||
|
||||
Ok(())
|
||||
|
||||
@@ -925,22 +925,30 @@ impl AgentPool {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
|
||||
);
|
||||
let context = format!(
|
||||
"\n\n---\n## Previous Attempt Failed\n\
|
||||
The acceptance gates failed with the following output:\n{}\n\n\
|
||||
Please review the failures above, fix the issues, and try again.",
|
||||
completion.gate_output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(&project_root, story_id, Some(agent_name), Some(&context))
|
||||
.await
|
||||
{
|
||||
slog_error!(
|
||||
"[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}"
|
||||
// Increment retry count and check if blocked.
|
||||
let story_path = project_root
|
||||
.join(".story_kit/work/2_current")
|
||||
.join(format!("{story_id}.md"));
|
||||
if should_block_story(&story_path, config.max_retries, story_id, "coder") {
|
||||
// Story has exceeded retry limit — do not restart.
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] Coder '{agent_name}' failed gates for '{story_id}'. Restarting."
|
||||
);
|
||||
let context = format!(
|
||||
"\n\n---\n## Previous Attempt Failed\n\
|
||||
The acceptance gates failed with the following output:\n{}\n\n\
|
||||
Please review the failures above, fix the issues, and try again.",
|
||||
completion.gate_output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(&project_root, story_id, Some(agent_name), Some(&context))
|
||||
.await
|
||||
{
|
||||
slog_error!(
|
||||
"[pipeline] Failed to restart coder '{agent_name}' for '{story_id}': {e}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1017,14 +1025,42 @@ impl AgentPool {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
|
||||
);
|
||||
let story_path = project_root
|
||||
.join(".story_kit/work/3_qa")
|
||||
.join(format!("{story_id}.md"));
|
||||
if should_block_story(&story_path, config.max_retries, story_id, "qa-coverage") {
|
||||
// Story has exceeded retry limit — do not restart.
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
|
||||
);
|
||||
let context = format!(
|
||||
"\n\n---\n## Coverage Gate Failed\n\
|
||||
The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
|
||||
Please improve test coverage until the coverage gate passes.",
|
||||
coverage_output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
|
||||
.await
|
||||
{
|
||||
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let story_path = project_root
|
||||
.join(".story_kit/work/3_qa")
|
||||
.join(format!("{story_id}.md"));
|
||||
if should_block_story(&story_path, config.max_retries, story_id, "qa") {
|
||||
// Story has exceeded retry limit — do not restart.
|
||||
} else {
|
||||
slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
|
||||
let context = format!(
|
||||
"\n\n---\n## Coverage Gate Failed\n\
|
||||
The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
|
||||
Please improve test coverage until the coverage gate passes.",
|
||||
coverage_output
|
||||
"\n\n---\n## Previous QA Attempt Failed\n\
|
||||
The acceptance gates failed with the following output:\n{}\n\n\
|
||||
Please re-run and fix the issues.",
|
||||
completion.gate_output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
|
||||
@@ -1033,20 +1069,6 @@ impl AgentPool {
|
||||
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
|
||||
let context = format!(
|
||||
"\n\n---\n## Previous QA Attempt Failed\n\
|
||||
The acceptance gates failed with the following output:\n{}\n\n\
|
||||
Please re-run and fix the issues.",
|
||||
completion.gate_output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(&project_root, story_id, Some("qa"), Some(&context))
|
||||
.await
|
||||
{
|
||||
slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
PipelineStage::Mergemaster => {
|
||||
@@ -1103,27 +1125,34 @@ impl AgentPool {
|
||||
"[pipeline] Story '{story_id}' done. Worktree preserved for inspection."
|
||||
);
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
|
||||
);
|
||||
let context = format!(
|
||||
"\n\n---\n## Post-Merge Test Failed\n\
|
||||
The tests on master failed with the following output:\n{}\n\n\
|
||||
Please investigate and resolve the failures, then call merge_agent_work again.",
|
||||
output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(
|
||||
&project_root,
|
||||
story_id,
|
||||
Some("mergemaster"),
|
||||
Some(&context),
|
||||
)
|
||||
.await
|
||||
{
|
||||
slog_error!(
|
||||
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
|
||||
let story_path = project_root
|
||||
.join(".story_kit/work/4_merge")
|
||||
.join(format!("{story_id}.md"));
|
||||
if should_block_story(&story_path, config.max_retries, story_id, "mergemaster") {
|
||||
// Story has exceeded retry limit — do not restart.
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
|
||||
);
|
||||
let context = format!(
|
||||
"\n\n---\n## Post-Merge Test Failed\n\
|
||||
The tests on master failed with the following output:\n{}\n\n\
|
||||
Please investigate and resolve the failures, then call merge_agent_work again.",
|
||||
output
|
||||
);
|
||||
if let Err(e) = self
|
||||
.start_agent(
|
||||
&project_root,
|
||||
story_id,
|
||||
Some("mergemaster"),
|
||||
Some(&context),
|
||||
)
|
||||
.await
|
||||
{
|
||||
slog_error!(
|
||||
"[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1563,6 +1592,44 @@ impl AgentPool {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip blocked stories (retry limit exceeded).
|
||||
if is_story_blocked(project_root, stage_dir, story_id) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Skip stories in 4_merge/ that already have a reported merge failure.
|
||||
// These need human intervention — auto-assigning a new mergemaster
|
||||
// would just waste tokens on the same broken merge.
|
||||
if *stage == PipelineStage::Mergemaster
|
||||
&& has_merge_failure(project_root, stage_dir, story_id)
|
||||
{
|
||||
continue;
|
||||
}
|
||||
|
||||
// AC6: Detect empty-diff stories in 4_merge/ before starting a
|
||||
// mergemaster. If the worktree has no commits on the feature branch,
|
||||
// write a merge_failure and block the story immediately.
|
||||
if *stage == PipelineStage::Mergemaster
|
||||
&& let Some(wt_path) = worktree::find_worktree_path(project_root, story_id)
|
||||
&& !super::gates::worktree_has_committed_work(&wt_path)
|
||||
{
|
||||
slog_warn!(
|
||||
"[auto-assign] Story '{story_id}' in 4_merge/ has no commits \
|
||||
on feature branch. Writing merge_failure and blocking."
|
||||
);
|
||||
let story_path = project_root
|
||||
.join(".story_kit/work")
|
||||
.join(stage_dir)
|
||||
.join(format!("{story_id}.md"));
|
||||
let _ = crate::io::story_metadata::write_merge_failure(
|
||||
&story_path,
|
||||
"Feature branch has no code changes — the coder agent \
|
||||
did not produce any commits.",
|
||||
);
|
||||
let _ = crate::io::story_metadata::write_blocked(&story_path);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Re-acquire the lock on each iteration to see state changes
|
||||
// from previous start_agent calls in the same pass.
|
||||
let preferred_agent =
|
||||
@@ -2195,6 +2262,80 @@ fn has_review_hold(project_root: &Path, stage_dir: &str, story_id: &str) -> bool
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Increment retry_count and block the story if it exceeds `max_retries`.
|
||||
///
|
||||
/// Returns `true` if the story is now blocked (caller should NOT restart the agent).
|
||||
/// Returns `false` if the story may be retried.
|
||||
/// When `max_retries` is 0, retry limits are disabled.
|
||||
fn should_block_story(story_path: &Path, max_retries: u32, story_id: &str, stage_label: &str) -> bool {
|
||||
use crate::io::story_metadata::{increment_retry_count, write_blocked};
|
||||
|
||||
if max_retries == 0 {
|
||||
// Retry limits disabled.
|
||||
return false;
|
||||
}
|
||||
|
||||
match increment_retry_count(story_path) {
|
||||
Ok(new_count) => {
|
||||
if new_count >= max_retries {
|
||||
slog_warn!(
|
||||
"[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
|
||||
at {stage_label} stage. Marking as blocked."
|
||||
);
|
||||
if let Err(e) = write_blocked(story_path) {
|
||||
slog_error!("[pipeline] Failed to write blocked flag for '{story_id}': {e}");
|
||||
}
|
||||
true
|
||||
} else {
|
||||
slog!(
|
||||
"[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
|
||||
);
|
||||
false
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
slog_error!("[pipeline] Failed to increment retry_count for '{story_id}': {e}");
|
||||
false // Don't block on error — allow retry.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Return `true` if the story file has `blocked: true` in its front matter.
|
||||
fn is_story_blocked(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
|
||||
use crate::io::story_metadata::parse_front_matter;
|
||||
let path = project_root
|
||||
.join(".story_kit")
|
||||
.join("work")
|
||||
.join(stage_dir)
|
||||
.join(format!("{story_id}.md"));
|
||||
let contents = match std::fs::read_to_string(path) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return false,
|
||||
};
|
||||
parse_front_matter(&contents)
|
||||
.ok()
|
||||
.and_then(|m| m.blocked)
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Return `true` if the story file has a `merge_failure` field in its front matter.
|
||||
fn has_merge_failure(project_root: &Path, stage_dir: &str, story_id: &str) -> bool {
|
||||
use crate::io::story_metadata::parse_front_matter;
|
||||
let path = project_root
|
||||
.join(".story_kit")
|
||||
.join("work")
|
||||
.join(stage_dir)
|
||||
.join(format!("{story_id}.md"));
|
||||
let contents = match std::fs::read_to_string(path) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return false,
|
||||
};
|
||||
parse_front_matter(&contents)
|
||||
.ok()
|
||||
.and_then(|m| m.merge_failure)
|
||||
.is_some()
|
||||
}
|
||||
|
||||
/// Return `true` if `agent_name` has no active (pending/running) entry in the pool.
|
||||
fn is_agent_free(agents: &HashMap<String, StoryAgent>, agent_name: &str) -> bool {
|
||||
!agents.values().any(|a| {
|
||||
@@ -2420,6 +2561,16 @@ async fn run_server_owned_completion(
|
||||
let path = wt_path;
|
||||
match tokio::task::spawn_blocking(move || {
|
||||
super::gates::check_uncommitted_changes(&path)?;
|
||||
// AC5: Fail early if the coder finished with no commits on the feature branch.
|
||||
// This prevents empty-diff stories from advancing through QA to merge.
|
||||
if !super::gates::worktree_has_committed_work(&path) {
|
||||
return Ok((
|
||||
false,
|
||||
"Agent exited with no commits on the feature branch. \
|
||||
The agent did not produce any code changes."
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
super::gates::run_acceptance_gates(&path)
|
||||
})
|
||||
.await
|
||||
|
||||
@@ -26,6 +26,10 @@ pub struct ProjectConfig {
|
||||
/// coder agents at once. Stories wait in `2_current/` until a slot frees up.
|
||||
#[serde(default)]
|
||||
pub max_coders: Option<usize>,
|
||||
/// Maximum number of retries per story per pipeline stage before marking as blocked.
|
||||
/// Default: 2. Set to 0 to disable retry limits.
|
||||
#[serde(default = "default_max_retries")]
|
||||
pub max_retries: u32,
|
||||
}
|
||||
|
||||
/// Configuration for the filesystem watcher's sweep behaviour.
|
||||
@@ -65,6 +69,10 @@ fn default_qa() -> String {
|
||||
"server".to_string()
|
||||
}
|
||||
|
||||
/// Serde default for `ProjectConfig::max_retries`: two retries per stage.
fn default_max_retries() -> u32 {
    2
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[allow(dead_code)]
|
||||
pub struct ComponentConfig {
|
||||
@@ -149,6 +157,8 @@ struct LegacyProjectConfig {
|
||||
default_coder_model: Option<String>,
|
||||
#[serde(default)]
|
||||
max_coders: Option<usize>,
|
||||
#[serde(default = "default_max_retries")]
|
||||
max_retries: u32,
|
||||
}
|
||||
|
||||
impl Default for ProjectConfig {
|
||||
@@ -173,6 +183,7 @@ impl Default for ProjectConfig {
|
||||
default_qa: default_qa(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: default_max_retries(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -217,6 +228,7 @@ impl ProjectConfig {
|
||||
default_qa: legacy.default_qa,
|
||||
default_coder_model: legacy.default_coder_model,
|
||||
max_coders: legacy.max_coders,
|
||||
max_retries: legacy.max_retries,
|
||||
};
|
||||
validate_agents(&config.agent)?;
|
||||
return Ok(config);
|
||||
@@ -240,6 +252,7 @@ impl ProjectConfig {
|
||||
default_qa: legacy.default_qa,
|
||||
default_coder_model: legacy.default_coder_model,
|
||||
max_coders: legacy.max_coders,
|
||||
max_retries: legacy.max_retries,
|
||||
};
|
||||
validate_agents(&config.agent)?;
|
||||
Ok(config)
|
||||
@@ -251,6 +264,7 @@ impl ProjectConfig {
|
||||
default_qa: legacy.default_qa,
|
||||
default_coder_model: legacy.default_coder_model,
|
||||
max_coders: legacy.max_coders,
|
||||
max_retries: legacy.max_retries,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1156,7 +1156,7 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
|
||||
items
|
||||
.iter()
|
||||
.map(|s| {
|
||||
json!({
|
||||
let mut item = json!({
|
||||
"story_id": s.story_id,
|
||||
"name": s.name,
|
||||
"stage": stage,
|
||||
@@ -1165,7 +1165,19 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
|
||||
"model": a.model,
|
||||
"status": a.status,
|
||||
})),
|
||||
})
|
||||
});
|
||||
// Include blocked/retry_count when present so callers can
|
||||
// identify stories stuck in the pipeline.
|
||||
if let Some(true) = s.blocked {
|
||||
item["blocked"] = json!(true);
|
||||
}
|
||||
if let Some(rc) = s.retry_count {
|
||||
item["retry_count"] = json!(rc);
|
||||
}
|
||||
if let Some(ref mf) = s.merge_failure {
|
||||
item["merge_failure"] = json!(mf);
|
||||
}
|
||||
item
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
@@ -30,6 +30,12 @@ pub struct UpcomingStory {
|
||||
/// QA mode for this item: "human", "server", or "agent".
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub qa: Option<String>,
|
||||
/// Number of retries at the current pipeline stage.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub retry_count: Option<u32>,
|
||||
/// True when the story has exceeded its retry limit and will not be auto-assigned.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub blocked: Option<bool>,
|
||||
}
|
||||
|
||||
pub struct StoryValidationResult {
|
||||
@@ -123,12 +129,12 @@ fn load_stage_items(
|
||||
.to_string();
|
||||
let contents = fs::read_to_string(&path)
|
||||
.map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?;
|
||||
let (name, error, merge_failure, review_hold, qa) = match parse_front_matter(&contents) {
|
||||
Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string())),
|
||||
Err(e) => (None, Some(e.to_string()), None, None, None),
|
||||
let (name, error, merge_failure, review_hold, qa, retry_count, blocked) = match parse_front_matter(&contents) {
|
||||
Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string()), meta.retry_count, meta.blocked),
|
||||
Err(e) => (None, Some(e.to_string()), None, None, None, None, None),
|
||||
};
|
||||
let agent = agent_map.get(&story_id).cloned();
|
||||
stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa });
|
||||
stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa, retry_count, blocked });
|
||||
}
|
||||
|
||||
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));
|
||||
|
||||
@@ -739,6 +739,8 @@ mod tests {
|
||||
agent: None,
|
||||
review_hold: None,
|
||||
qa: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
};
|
||||
let resp = WsResponse::PipelineState {
|
||||
backlog: vec![story],
|
||||
@@ -878,6 +880,8 @@ mod tests {
|
||||
agent: None,
|
||||
review_hold: None,
|
||||
qa: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
}],
|
||||
current: vec![UpcomingStory {
|
||||
story_id: "2_story_b".to_string(),
|
||||
@@ -887,6 +891,8 @@ mod tests {
|
||||
agent: None,
|
||||
review_hold: None,
|
||||
qa: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
}],
|
||||
qa: vec![],
|
||||
merge: vec![],
|
||||
@@ -898,6 +904,8 @@ mod tests {
|
||||
agent: None,
|
||||
review_hold: None,
|
||||
qa: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
}],
|
||||
};
|
||||
let resp: WsResponse = state.into();
|
||||
@@ -1056,6 +1064,8 @@ mod tests {
|
||||
}),
|
||||
review_hold: None,
|
||||
qa: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
}],
|
||||
qa: vec![],
|
||||
merge: vec![],
|
||||
|
||||
@@ -49,6 +49,10 @@ pub struct StoryMetadata {
|
||||
pub agent: Option<String>,
|
||||
pub review_hold: Option<bool>,
|
||||
pub qa: Option<QaMode>,
|
||||
/// Number of times this story has been retried at its current pipeline stage.
|
||||
pub retry_count: Option<u32>,
|
||||
/// When `true`, auto-assign will skip this story (retry limit exceeded).
|
||||
pub blocked: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
@@ -77,6 +81,10 @@ struct FrontMatter {
|
||||
qa: Option<String>,
|
||||
/// Legacy boolean field — mapped to `qa: human` (true) or ignored (false/absent).
|
||||
manual_qa: Option<bool>,
|
||||
/// Number of times this story has been retried at its current pipeline stage.
|
||||
retry_count: Option<u32>,
|
||||
/// When `true`, auto-assign will skip this story (retry limit exceeded).
|
||||
blocked: Option<bool>,
|
||||
}
|
||||
|
||||
pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> {
|
||||
@@ -119,6 +127,8 @@ fn build_metadata(front: FrontMatter) -> StoryMetadata {
|
||||
agent: front.agent,
|
||||
review_hold: front.review_hold,
|
||||
qa,
|
||||
retry_count: front.retry_count,
|
||||
blocked: front.blocked,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -245,6 +255,37 @@ pub fn set_front_matter_field(contents: &str, key: &str, value: &str) -> String
|
||||
result
|
||||
}
|
||||
|
||||
/// Increment the `retry_count` field in the story file's front matter.
|
||||
///
|
||||
/// Reads the current value (defaulting to 0), increments by 1, and writes back.
|
||||
/// Returns the new retry count.
|
||||
pub fn increment_retry_count(path: &Path) -> Result<u32, String> {
|
||||
let contents =
|
||||
fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
|
||||
|
||||
let current = parse_front_matter(&contents)
|
||||
.ok()
|
||||
.and_then(|m| m.retry_count)
|
||||
.unwrap_or(0);
|
||||
let new_count = current + 1;
|
||||
|
||||
let updated = set_front_matter_field(&contents, "retry_count", &new_count.to_string());
|
||||
fs::write(path, &updated).map_err(|e| format!("Failed to write story file: {e}"))?;
|
||||
Ok(new_count)
|
||||
}
|
||||
|
||||
/// Write `blocked: true` to the YAML front matter of a story file.
|
||||
///
|
||||
/// Used to mark stories that have exceeded the retry limit and should not
|
||||
/// be auto-assigned again.
|
||||
pub fn write_blocked(path: &Path) -> Result<(), String> {
|
||||
let contents =
|
||||
fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
|
||||
let updated = set_front_matter_field(&contents, "blocked", "true");
|
||||
fs::write(path, &updated).map_err(|e| format!("Failed to write story file: {e}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Append rejection notes to a story file body.
|
||||
///
|
||||
/// Adds a `## QA Rejection Notes` section at the end of the file so the coder
|
||||
|
||||
@@ -219,6 +219,19 @@ pub async fn remove_worktree_by_story_id(
|
||||
}
|
||||
|
||||
/// Find the worktree path for a given story ID, if it exists.
///
/// Returns `Some({project_root}/.story_kit/worktrees/{story_id})` only when
/// that path exists and is a directory; otherwise `None`.
// NOTE: the previous "List all worktrees under …" doc line was a leftover
// from `list_worktrees` (this function was inserted between that comment
// and its item), so it documented the wrong function and has been removed.
pub fn find_worktree_path(project_root: &Path, story_id: &str) -> Option<PathBuf> {
    let wt_path = project_root
        .join(".story_kit")
        .join("worktrees")
        .join(story_id);
    wt_path.is_dir().then_some(wt_path)
}
|
||||
|
||||
pub fn list_worktrees(project_root: &Path) -> Result<Vec<WorktreeListEntry>, String> {
|
||||
let worktrees_dir = project_root.join(".story_kit").join("worktrees");
|
||||
if !worktrees_dir.exists() {
|
||||
@@ -510,6 +523,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Should complete without panic
|
||||
run_setup_commands(tmp.path(), &config).await;
|
||||
@@ -530,6 +544,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Should complete without panic
|
||||
run_setup_commands(tmp.path(), &config).await;
|
||||
@@ -550,6 +565,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Setup command failures are non-fatal — should not panic or propagate
|
||||
run_setup_commands(tmp.path(), &config).await;
|
||||
@@ -570,6 +586,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Teardown failures are best-effort — should not propagate
|
||||
assert!(run_teardown_commands(tmp.path(), &config).await.is_ok());
|
||||
@@ -589,6 +606,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
let info = create_worktree(&project_root, "42_fresh_test", &config, 3001)
|
||||
.await
|
||||
@@ -615,6 +633,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// First creation
|
||||
let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001)
|
||||
@@ -657,6 +676,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
|
||||
let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await;
|
||||
@@ -682,6 +702,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
create_worktree(&project_root, "88_remove_by_id", &config, 3001)
|
||||
.await
|
||||
@@ -738,6 +759,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Even though setup commands fail, create_worktree must succeed
|
||||
// so the agent can start and fix the problem itself.
|
||||
@@ -766,6 +788,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// First creation — no setup commands, should succeed
|
||||
create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001)
|
||||
@@ -784,6 +807,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
// Second call — worktree exists, setup commands fail, must still succeed
|
||||
let result =
|
||||
@@ -809,6 +833,7 @@ mod tests {
|
||||
default_qa: "server".to_string(),
|
||||
default_coder_model: None,
|
||||
max_coders: None,
|
||||
max_retries: 2,
|
||||
};
|
||||
let info = create_worktree(&project_root, "77_remove_async", &config, 3001)
|
||||
.await
|
||||
|
||||
Reference in New Issue
Block a user