huskies: merge 492_story_remove_filesystem_pipeline_state_and_store_story_content_in_database
@@ -53,11 +53,7 @@ impl AgentPool {
                 crate::io::story_metadata::QaMode::Human
             } else {
                 let default_qa = config.default_qa_mode();
-                // Story is in 2_current/ when a coder completes.
-                let story_path = project_root
-                    .join(".huskies/work/2_current")
-                    .join(format!("{story_id}.md"));
-                crate::io::story_metadata::resolve_qa_mode(&story_path, default_qa)
+                resolve_qa_mode_from_store(&project_root, story_id, default_qa)
             }
         };
 
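
Note: this hunk swaps the path-based crate::io::story_metadata::resolve_qa_mode for the new resolve_qa_mode_from_store helper defined near the end of this diff. The content-side resolver it delegates to, resolve_qa_mode_from_content, is not shown in this commit. A minimal sketch of what such a resolver could look like, assuming the mode lives in a `qa:` YAML frontmatter key and that QaMode has Human, Agent, and Server variants (only Human and the "agent"/"server" frontmatter values are visible in this diff):

    // Hypothetical sketch, not the implementation from this commit.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum QaMode {
        Human,
        Agent,
        Server,
    }

    /// Resolve the QA mode from a story's markdown contents, falling back
    /// to `default` when no `qa:` key is present in the frontmatter.
    pub fn resolve_qa_mode_from_content(contents: &str, default: QaMode) -> QaMode {
        // Frontmatter is delimited by `---` lines at the top of the file.
        let Some(rest) = contents.strip_prefix("---\n") else {
            return default;
        };
        let Some((frontmatter, _body)) = rest.split_once("\n---") else {
            return default;
        };
        for line in frontmatter.lines() {
            if let Some(value) = line.strip_prefix("qa:") {
                return match value.trim() {
                    "human" => QaMode::Human,
                    "agent" => QaMode::Agent,
                    "server" => QaMode::Server,
                    _ => default,
                };
            }
        }
        default
    }

The test fixtures at the bottom of this diff ("---\nname: Test\nqa: agent\n---\ntest") are consistent with this frontmatter shape.
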
@@ -104,24 +100,13 @@ impl AgentPool {
                     if let Err(e) = crate::agents::lifecycle::move_story_to_qa(&project_root, story_id) {
                         slog_error!("[pipeline] Failed to move '{story_id}' to 3_qa/: {e}");
                     } else {
-                        let qa_dir = project_root.join(".huskies/work/3_qa");
-                        let story_path = qa_dir.join(format!("{story_id}.md"));
-                        if let Err(e) =
-                            crate::io::story_metadata::write_review_hold(&story_path)
-                        {
-                            slog_error!(
-                                "[pipeline] Failed to set review_hold on '{story_id}': {e}"
-                            );
-                        }
+                        write_review_hold_to_store(story_id);
                    }
                 }
             }
         } else {
             // Increment retry count and check if blocked.
-            let story_path = project_root
-                .join(".huskies/work/2_current")
-                .join(format!("{story_id}.md"));
-            if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "coder") {
+            if let Some(reason) = should_block_story(story_id, config.max_retries, "coder") {
                 // Story has exceeded retry limit — do not restart.
                 let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
                     story_id: story_id.to_string(),
@@ -174,11 +159,9 @@ impl AgentPool {
             if item_type == "spike" {
                 true // Spikes always need human review.
             } else {
-                let qa_dir = project_root.join(".huskies/work/3_qa");
-                let story_path = qa_dir.join(format!("{story_id}.md"));
                 let default_qa = config.default_qa_mode();
                 matches!(
-                    crate::io::story_metadata::resolve_qa_mode(&story_path, default_qa),
+                    resolve_qa_mode_from_store(&project_root, story_id, default_qa),
                     crate::io::story_metadata::QaMode::Human
                 )
             }
@@ -186,15 +169,7 @@ impl AgentPool {
 
             if needs_human_review {
                 // Hold in 3_qa/ for human review.
-                let qa_dir = project_root.join(".huskies/work/3_qa");
-                let story_path = qa_dir.join(format!("{story_id}.md"));
-                if let Err(e) =
-                    crate::io::story_metadata::write_review_hold(&story_path)
-                {
-                    slog_error!(
-                        "[pipeline] Failed to set review_hold on '{story_id}': {e}"
-                    );
-                }
+                write_review_hold_to_store(story_id);
                 slog!(
                     "[pipeline] QA passed for '{story_id}'. \
                     Holding for human review. \
@@ -220,51 +195,21 @@ impl AgentPool {
                         );
                     }
                 }
-            } else {
-                let story_path = project_root
-                    .join(".huskies/work/3_qa")
-                    .join(format!("{story_id}.md"));
-                if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "qa-coverage") {
-                    // Story has exceeded retry limit — do not restart.
-                    let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
-                        story_id: story_id.to_string(),
-                        reason,
-                    });
-                } else {
-                    slog!(
-                        "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
-                    );
-                    let context = format!(
-                        "\n\n---\n## Coverage Gate Failed\n\
-                        The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
-                        Please improve test coverage until the coverage gate passes.",
-                        coverage_output
-                    );
-                    if let Err(e) = self
-                        .start_agent(&project_root, story_id, Some("qa"), Some(&context))
-                        .await
-                    {
-                        slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
-                    }
-                }
-            }
-        } else {
-            let story_path = project_root
-                .join(".huskies/work/3_qa")
-                .join(format!("{story_id}.md"));
-            if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "qa") {
+            } else if let Some(reason) = should_block_story(story_id, config.max_retries, "qa-coverage") {
                 // Story has exceeded retry limit — do not restart.
                 let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
                     story_id: story_id.to_string(),
                     reason,
                 });
             } else {
-                slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
+                slog!(
+                    "[pipeline] QA coverage gate failed for '{story_id}'. Restarting QA."
+                );
                 let context = format!(
-                    "\n\n---\n## Previous QA Attempt Failed\n\
-                    The acceptance gates failed with the following output:\n{}\n\n\
-                    Please re-run and fix the issues.",
-                    completion.gate_output
+                    "\n\n---\n## Coverage Gate Failed\n\
+                    The coverage gate (script/test_coverage) failed with the following output:\n{}\n\n\
+                    Please improve test coverage until the coverage gate passes.",
+                    coverage_output
                 );
                 if let Err(e) = self
                     .start_agent(&project_root, story_id, Some("qa"), Some(&context))
@@ -273,6 +218,26 @@ impl AgentPool {
                     slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
                 }
             }
+        } else if let Some(reason) = should_block_story(story_id, config.max_retries, "qa") {
+            // Story has exceeded retry limit — do not restart.
+            let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
+                story_id: story_id.to_string(),
+                reason,
+            });
+        } else {
+            slog!("[pipeline] QA failed gates for '{story_id}'. Restarting.");
+            let context = format!(
+                "\n\n---\n## Previous QA Attempt Failed\n\
+                The acceptance gates failed with the following output:\n{}\n\n\
+                Please re-run and fix the issues.",
+                completion.gate_output
+            );
+            if let Err(e) = self
+                .start_agent(&project_root, story_id, Some("qa"), Some(&context))
+                .await
+            {
+                slog_error!("[pipeline] Failed to restart qa for '{story_id}': {e}");
+            }
         }
     }
     PipelineStage::Mergemaster => {
@@ -328,39 +293,34 @@ impl AgentPool {
             slog!(
                 "[pipeline] Story '{story_id}' done. Worktree preserved for inspection."
             );
-        } else {
-            let story_path = project_root
-                .join(".huskies/work/4_merge")
-                .join(format!("{story_id}.md"));
-            if let Some(reason) = should_block_story(&story_path, config.max_retries, story_id, "mergemaster") {
-                // Story has exceeded retry limit — do not restart.
-                let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
-                    story_id: story_id.to_string(),
-                    reason,
-                });
-            } else {
-                slog!(
-                    "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
-                );
-                let context = format!(
-                    "\n\n---\n## Post-Merge Test Failed\n\
-                    The tests on master failed with the following output:\n{}\n\n\
-                    Please investigate and resolve the failures, then call merge_agent_work again.",
-                    output
-                );
-                if let Err(e) = self
-                    .start_agent(
-                        &project_root,
-                        story_id,
-                        Some("mergemaster"),
-                        Some(&context),
-                    )
-                    .await
-                {
-                    slog_error!(
-                        "[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
-                    );
-                }
-            }
+        } else if let Some(reason) = should_block_story(story_id, config.max_retries, "mergemaster") {
+            // Story has exceeded retry limit — do not restart.
+            let _ = self.watcher_tx.send(WatcherEvent::StoryBlocked {
+                story_id: story_id.to_string(),
+                reason,
+            });
+        } else {
+            slog!(
+                "[pipeline] Post-merge tests failed for '{story_id}'. Restarting mergemaster."
+            );
+            let context = format!(
+                "\n\n---\n## Post-Merge Test Failed\n\
+                The tests on master failed with the following output:\n{}\n\n\
+                Please investigate and resolve the failures, then call merge_agent_work again.",
+                output
+            );
+            if let Err(e) = self
+                .start_agent(
+                    &project_root,
+                    story_id,
+                    Some("mergemaster"),
+                    Some(&context),
+                )
+                .await
+            {
+                slog_error!(
+                    "[pipeline] Failed to restart mergemaster for '{story_id}': {e}"
+                );
+            }
         }
     }
@@ -413,43 +373,77 @@ pub(super) fn spawn_pipeline_advance(
     });
 }
 
+/// Resolve QA mode from the content store (or filesystem fallback).
+fn resolve_qa_mode_from_store(
+    project_root: &Path,
+    story_id: &str,
+    default: crate::io::story_metadata::QaMode,
+) -> crate::io::story_metadata::QaMode {
+    if let Some(contents) = crate::db::read_content(story_id) {
+        return crate::io::story_metadata::resolve_qa_mode_from_content(&contents, default);
+    }
+    // Fallback: try filesystem.
+    if let Ok(path) = crate::http::workflow::find_story_file_on_disk(project_root, story_id) {
+        return crate::io::story_metadata::resolve_qa_mode(&path, default);
+    }
+    default
+}
+
+/// Write review_hold to the content store.
+fn write_review_hold_to_store(story_id: &str) {
+    if let Some(contents) = crate::db::read_content(story_id) {
+        let updated = crate::io::story_metadata::write_review_hold_in_content(&contents);
+        crate::db::write_content(story_id, &updated);
+        // Also persist to SQLite via shadow write.
+        let stage = crate::crdt_state::read_item(story_id)
+            .map(|i| i.stage)
+            .unwrap_or_else(|| "3_qa".to_string());
+        crate::db::write_item_with_content(story_id, &stage, &updated);
+    } else {
+        slog_error!("[pipeline] Cannot write review_hold for '{story_id}': no content in store");
+    }
+}
+
 /// Increment retry_count and block the story if it exceeds `max_retries`.
 ///
 /// Returns `Some(reason)` if the story is now blocked (caller should NOT restart the agent).
 /// Returns `None` if the story may be retried.
 /// When `max_retries` is 0, retry limits are disabled.
-fn should_block_story(story_path: &Path, max_retries: u32, story_id: &str, stage_label: &str) -> Option<String> {
-    use crate::io::story_metadata::{increment_retry_count, write_blocked};
+fn should_block_story(story_id: &str, max_retries: u32, stage_label: &str) -> Option<String> {
+    use crate::io::story_metadata::{increment_retry_count_in_content, write_blocked_in_content};
 
     if max_retries == 0 {
         // Retry limits disabled.
         return None;
     }
 
-    match increment_retry_count(story_path) {
-        Ok(new_count) => {
-            if new_count >= max_retries {
-                slog_warn!(
-                    "[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
-                    at {stage_label} stage. Marking as blocked."
-                );
-                if let Err(e) = write_blocked(story_path) {
-                    slog_error!("[pipeline] Failed to write blocked flag for '{story_id}': {e}");
-                }
-                Some(format!(
-                    "Retry limit exceeded ({new_count}/{max_retries}) at {stage_label} stage"
-                ))
-            } else {
-                slog!(
-                    "[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
-                );
-                None
-            }
-        }
-        Err(e) => {
-            slog_error!("[pipeline] Failed to increment retry_count for '{story_id}': {e}");
-            None // Don't block on error — allow retry.
-        }
-    }
-}
+    if let Some(contents) = crate::db::read_content(story_id) {
+        let (updated, new_count) = increment_retry_count_in_content(&contents);
+        crate::db::write_content(story_id, &updated);
+        let stage = crate::crdt_state::read_item(story_id)
+            .map(|i| i.stage)
+            .unwrap_or_else(|| "2_current".to_string());
+        crate::db::write_item_with_content(story_id, &stage, &updated);
+
+        if new_count >= max_retries {
+            slog_warn!(
+                "[pipeline] Story '{story_id}' reached retry limit ({new_count}/{max_retries}) \
+                at {stage_label} stage. Marking as blocked."
+            );
+            let blocked = write_blocked_in_content(&updated);
+            crate::db::write_content(story_id, &blocked);
+            crate::db::write_item_with_content(story_id, &stage, &blocked);
+            Some(format!(
+                "Retry limit exceeded ({new_count}/{max_retries}) at {stage_label} stage"
+            ))
+        } else {
+            slog!(
+                "[pipeline] Story '{story_id}' retry {new_count}/{max_retries} at {stage_label} stage."
+            );
+            None
+        }
+    } else {
+        slog_error!("[pipeline] Failed to read content for '{story_id}' to increment retry_count");
+        None
+    }
}
 
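
Note: the crate::db functions these helpers call (read_content, write_content, write_item_with_content) are defined elsewhere in this change set; only their call sites are visible here. Judging from those call sites and the "shadow write" comment, the surface is roughly the following sketch (signatures inferred, not confirmed by this diff):

    // Hypothetical signatures inferred from the call sites above.
    pub mod db {
        /// Read a story's full markdown contents from the content store.
        pub fn read_content(story_id: &str) -> Option<String> {
            todo!("look up {story_id} in the content store")
        }

        /// Replace a story's contents in the content store.
        pub fn write_content(story_id: &str, contents: &str) {
            todo!("store {contents:?} under {story_id}")
        }

        /// Shadow-write the item row (stage plus contents) to SQLite, so the
        /// database rather than the filesystem carries pipeline state.
        pub fn write_item_with_content(story_id: &str, stage: &str, contents: &str) {
            todo!("persist {story_id} at {stage} with {contents:?}")
        }
    }
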
@@ -468,14 +462,15 @@ mod tests {
         let tmp = tempfile::tempdir().unwrap();
         let root = tmp.path();
 
-        // Set up story in 2_current/ (no qa frontmatter → uses project default "server")
+        // Set up story in 2_current/ (no qa frontmatter → uses project default "server").
+        // Use a unique high-numbered ID to avoid collision with the agent_qa test.
         let current = root.join(".huskies/work/2_current");
         fs::create_dir_all(&current).unwrap();
-        fs::write(current.join("50_story_test.md"), "test").unwrap();
+        fs::write(current.join("9908_story_server_qa.md"), "test").unwrap();
 
         let pool = AgentPool::new_test(3001);
         pool.run_pipeline_advance(
-            "50_story_test",
+            "9908_story_server_qa",
             "coder-1",
             CompletionReport {
                 summary: "done".to_string(),
@@ -490,12 +485,12 @@ mod tests {
 
         // With default qa: server, story skips QA and goes straight to 4_merge/
         assert!(
-            root.join(".huskies/work/4_merge/50_story_test.md")
+            root.join(".huskies/work/4_merge/9908_story_server_qa.md")
                 .exists(),
             "story should be in 4_merge/"
         );
         assert!(
-            !current.join("50_story_test.md").exists(),
+            !current.join("9908_story_server_qa.md").exists(),
             "story should not still be in 2_current/"
         );
     }
@@ -506,18 +501,19 @@ mod tests {
         let tmp = tempfile::tempdir().unwrap();
         let root = tmp.path();
 
-        // Set up story in 2_current/ with qa: agent frontmatter
+        // Set up story in 2_current/ with qa: agent frontmatter.
+        // Use a unique high-numbered ID to avoid collision with the server_qa test.
         let current = root.join(".huskies/work/2_current");
         fs::create_dir_all(&current).unwrap();
         fs::write(
-            current.join("50_story_test.md"),
+            current.join("9909_story_agent_qa.md"),
             "---\nname: Test\nqa: agent\n---\ntest",
         )
         .unwrap();
 
         let pool = AgentPool::new_test(3001);
         pool.run_pipeline_advance(
-            "50_story_test",
+            "9909_story_agent_qa",
             "coder-1",
             CompletionReport {
                 summary: "done".to_string(),
@@ -532,11 +528,11 @@ mod tests {
 
         // With qa: agent, story should move to 3_qa/
         assert!(
-            root.join(".huskies/work/3_qa/50_story_test.md").exists(),
+            root.join(".huskies/work/3_qa/9909_story_agent_qa.md").exists(),
             "story should be in 3_qa/"
        );
         assert!(
-            !current.join("50_story_test.md").exists(),
+            !current.join("9909_story_agent_qa.md").exists(),
             "story should not still be in 2_current/"
         );
     }
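
Note: the pure helpers this commit switches to (increment_retry_count_in_content, write_blocked_in_content, write_review_hold_in_content) replace the old file-editing variants with string-to-string transforms over story contents, which is what lets the pipeline drop its filesystem state. Their bodies are not part of this diff; a minimal sketch of the retry-count transform, assuming a `retry_count:` YAML frontmatter key:

    // Hypothetical sketch; the real helper lives in crate::io::story_metadata.
    /// Bump `retry_count` in the frontmatter, inserting the key (as 1) when
    /// it is missing. Returns the updated contents and the new count.
    pub fn increment_retry_count_in_content(contents: &str) -> (String, u32) {
        let mut new_count = 1;
        let mut found = false;
        let mut lines: Vec<String> = Vec::new();
        for line in contents.lines() {
            if let Some(value) = line.strip_prefix("retry_count:") {
                new_count = value.trim().parse::<u32>().unwrap_or(0) + 1;
                lines.push(format!("retry_count: {new_count}"));
                found = true;
            } else {
                lines.push(line.to_string());
            }
        }
        if !found {
            // Insert just after the opening `---` of the frontmatter, if any.
            if let Some(pos) = lines.iter().position(|l| l == "---") {
                lines.insert(pos + 1, format!("retry_count: {new_count}"));
            }
        }
        (lines.join("\n"), new_count)
    }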