huskies: merge 961

This commit is contained in:
dave
2026-05-13 11:22:57 +00:00
parent 78b1ecdc3c
commit 8b53e20ca9
38 changed files with 327 additions and 146 deletions
@@ -384,18 +384,18 @@ mod tests {
crate::db::ensure_content_store();
let dep_content = "---\nname: Dep\n---\n";
std::fs::write(done.join("1_story_dep.md"), dep_content).unwrap();
crate::db::write_content("1_story_dep", dep_content);
crate::db::write_content(crate::db::ContentKey::Story("1_story_dep"), dep_content);
// Story B depends on story 1.
let story_b_content = "---\nname: B\ndepends_on: [1]\n---\n";
std::fs::write(backlog.join("2_story_b.md"), story_b_content).unwrap();
crate::db::write_content("2_story_b", story_b_content);
crate::db::write_content(crate::db::ContentKey::Story("2_story_b"), story_b_content);
let pool = AgentPool::new_test(3001);
pool.auto_assign_available_work(root).await;
// The lifecycle function updates the content store (not the filesystem),
// so verify the move via the DB.
let content = crate::db::read_content("2_story_b")
let content = crate::db::read_content(crate::db::ContentKey::Story("2_story_b"))
.expect("story B should be in content store after promotion");
assert!(
content.contains("name: B"),
@@ -458,11 +458,14 @@ mod tests {
crate::db::ensure_content_store();
let dep_content = "---\nname: CRDT Spike\n---\n";
std::fs::write(archived.join("490_spike_crdt.md"), dep_content).unwrap();
crate::db::write_content("490_spike_crdt", dep_content);
crate::db::write_content(crate::db::ContentKey::Story("490_spike_crdt"), dep_content);
// Story 478 depends on 490 (the archived spike).
let story_content = "---\nname: Dependent\ndepends_on: [490]\n---\n";
std::fs::write(backlog.join("478_story_dependent.md"), story_content).unwrap();
crate::db::write_content("478_story_dependent", story_content);
crate::db::write_content(
crate::db::ContentKey::Story("478_story_dependent"),
story_content,
);
let pool = AgentPool::new_test(3001);
pool.auto_assign_available_work(root).await;
@@ -470,7 +473,7 @@ mod tests {
// Story 478 must be promoted even though dep 490 is only in 6_archived
// (not in 5_done), because archived = satisfied. The lifecycle function
// updates the content store, so verify via the DB.
let content = crate::db::read_content("478_story_dependent")
let content = crate::db::read_content(crate::db::ContentKey::Story("478_story_dependent"))
.expect("story 478 should be in content store after promotion");
assert!(
content.contains("name: Dependent"),
@@ -531,7 +534,7 @@ mod tests {
// After master c228ae16, has_content_conflict_failure reads from
// {story_id}:gate_output (not the story description), so seed it there.
crate::db::write_content(
"9860_story_conflict:gate_output",
crate::db::ContentKey::GateOutput("9860_story_conflict"),
"CONFLICT (content): server/src/lib.rs",
);
@@ -690,11 +693,14 @@ mod tests {
// After master c228ae16, has_content_conflict_failure reads from
// {story_id}:gate_output (not the story description), so seed it there.
crate::db::write_content(
"920_story_transient:gate_output",
crate::db::ContentKey::GateOutput("920_story_transient"),
"CONFLICT (content): foo.rs",
);
// Simulate two previous transient exits (below cap of 3) recorded in DB.
crate::db::write_content("920_story_transient:mergemaster_spawn_count", "2");
crate::db::write_content(
crate::db::ContentKey::MergeMasterSpawnCount("920_story_transient"),
"2",
);
// mergemaster_attempted must still be false (transient exits don't set it).
let pool = AgentPool::new_test(3001);
@@ -74,9 +74,8 @@ pub(super) fn has_content_conflict_failure(
}
// The projection does not carry the reason string; read the gate output
// (where the merge runner persists the failure message) and scan for
// conflict markers. NB: the key is `{story_id}:gate_output`, not `{story_id}`
// — the latter is the story's *description* text and would never match.
crate::db::read_content(&format!("{story_id}:gate_output"))
// conflict markers.
crate::db::read_content(crate::db::ContentKey::GateOutput(story_id))
.map(|content| {
content.contains("Merge conflict") || content.contains("CONFLICT (content):")
})
@@ -242,7 +242,7 @@ max_turns = 10
// watchdog's retry/block path has something to read+update.
let story_id = "42_story_runaway";
let initial = "---\nname: Runaway Story\n---\n# Runaway Story\n";
crate::db::write_content(story_id, initial);
crate::db::write_content(crate::db::ContentKey::Story(story_id), initial);
crate::crdt_state::write_item_str(
story_id,
"2_current",
@@ -369,7 +369,10 @@ max_turns = 10
);
let story_id = "story_e_per_session";
crate::db::write_content(story_id, "---\nname: Per-Session Test\n---\n");
crate::db::write_content(
crate::db::ContentKey::Story(story_id),
"---\nname: Per-Session Test\n---\n",
);
crate::crdt_state::write_item_str(
story_id,
"2_current",
@@ -448,7 +451,7 @@ max_turns = 10
let story_id = "88_story_retry_watchdog";
let initial = "---\nname: Retry Test\n---\n";
crate::crdt_state::init_for_test();
crate::db::write_content(story_id, initial);
crate::db::write_content(crate::db::ContentKey::Story(story_id), initial);
crate::crdt_state::write_item_str(
story_id,
"2_current",
+23 -10
View File
@@ -78,10 +78,15 @@ impl AgentPool {
// The coder exited with uncommitted content but no commits.
// Check if this is already a second recovery attempt (the
// first recovery respawn also produced no commits).
let recovery_key = format!("{story_id}:commit_recovery_pending");
if crate::db::read_content(&recovery_key).is_some() {
if crate::db::read_content(crate::db::ContentKey::CommitRecoveryPending(
story_id,
))
.is_some()
{
// Second attempt still produced no commits → block.
crate::db::delete_content(&recovery_key);
crate::db::delete_content(crate::db::ContentKey::CommitRecoveryPending(
story_id,
));
slog!(
"[pipeline] Coder '{agent_name}' (commit-recovery respawn) \
still produced no commits for '{story_id}'. Blocking story."
@@ -99,7 +104,10 @@ impl AgentPool {
} else {
// First occurrence: issue a commit-only recovery respawn.
// This does NOT consume a retry_count slot.
crate::db::write_content(&recovery_key, "1");
crate::db::write_content(
crate::db::ContentKey::CommitRecoveryPending(story_id),
"1",
);
slog!(
"[pipeline] Coder '{agent_name}' exited with uncommitted work \
for '{story_id}'. Issuing commit-only recovery respawn."
@@ -125,7 +133,9 @@ impl AgentPool {
}
} else if completion.gates_passed {
// Clear any stale recovery key when the coder succeeds normally.
crate::db::delete_content(&format!("{story_id}:commit_recovery_pending"));
crate::db::delete_content(crate::db::ContentKey::CommitRecoveryPending(
story_id,
));
// Determine effective QA mode for this story.
let qa_mode = {
let item_type = crate::agents::lifecycle::item_type_from_id(story_id);
@@ -183,7 +193,9 @@ impl AgentPool {
} else {
// Clear any stale recovery key when gates fail normally (agent committed
// but the build is broken — treat as a standard retry, not a recovery).
crate::db::delete_content(&format!("{story_id}:commit_recovery_pending"));
crate::db::delete_content(crate::db::ContentKey::CommitRecoveryPending(
story_id,
));
// Bug 645 / 668: Before retry/block, check if the agent left committed
// work AND the agent had a passing run_tests result captured during its
// session. An agent may crash mid-output (e.g. Claude Code CLI PTY write
@@ -195,8 +207,9 @@ impl AgentPool {
// whenever script/test exits 0 inside a story worktree. Consume the
// evidence here so it does not persist to the next agent session.
let has_test_evidence =
crate::db::read_content(&format!("{story_id}:run_tests_ok")).is_some();
crate::db::delete_content(&format!("{story_id}:run_tests_ok"));
crate::db::read_content(crate::db::ContentKey::RunTestsOk(story_id))
.is_some();
crate::db::delete_content(crate::db::ContentKey::RunTestsOk(story_id));
let work_survived = has_test_evidence
&& worktree_path.as_ref().is_some_and(|wt_path| {
crate::agents::gates::worktree_has_committed_work(wt_path)
@@ -258,7 +271,7 @@ impl AgentPool {
// Persist gate_output so the retry spawn can inject it into
// --append-system-prompt (story 881).
crate::db::write_content(
&format!("{story_id}:gate_output"),
crate::db::ContentKey::GateOutput(story_id),
&completion.gate_output,
);
// Increment retry count and check if blocked.
@@ -384,7 +397,7 @@ impl AgentPool {
// Persist gate_output so the retry spawn can inject it into
// --append-system-prompt (story 881).
crate::db::write_content(
&format!("{story_id}:gate_output"),
crate::db::ContentKey::GateOutput(story_id),
&completion.gate_output,
);
if let Some(reason) = should_block_story(story_id, config.max_retries, "qa") {
@@ -17,7 +17,7 @@ async fn pipeline_advance_coder_gates_pass_server_qa_moves_to_merge() {
fs::create_dir_all(&current).unwrap();
fs::write(current.join("9908_story_server_qa.md"), "test").unwrap();
crate::db::ensure_content_store();
crate::db::write_content("9908_story_server_qa", "test");
crate::db::write_content(crate::db::ContentKey::Story("9908_story_server_qa"), "test");
let pool = AgentPool::new_test(3001);
pool.run_pipeline_advance(
@@ -39,7 +39,7 @@ async fn pipeline_advance_coder_gates_pass_server_qa_moves_to_merge() {
// With default qa: server, story skips QA and goes straight to 4_merge/
// Lifecycle moves now update the content store, not the filesystem.
assert!(
crate::db::read_content("9908_story_server_qa").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("9908_story_server_qa")).is_some(),
"story should still exist in content store after move to merge"
);
}
@@ -61,7 +61,7 @@ async fn pipeline_advance_coder_gates_pass_agent_qa_moves_to_qa() {
.unwrap();
crate::db::ensure_content_store();
crate::db::write_content(
"9909_story_agent_qa",
crate::db::ContentKey::Story("9909_story_agent_qa"),
"---\nname: Test\nqa: agent\n---\ntest",
);
@@ -85,7 +85,7 @@ async fn pipeline_advance_coder_gates_pass_agent_qa_moves_to_qa() {
// With qa: agent, story should move to 3_qa/
// Lifecycle moves now update the content store, not the filesystem.
assert!(
crate::db::read_content("9909_story_agent_qa").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("9909_story_agent_qa")).is_some(),
"story should still exist in content store after move to qa"
);
}
@@ -106,7 +106,10 @@ async fn pipeline_advance_qa_gates_pass_moves_story_to_merge() {
)
.unwrap();
crate::db::ensure_content_store();
crate::db::write_content("51_story_test", "---\nname: Test\nqa: server\n---\ntest");
crate::db::write_content(
crate::db::ContentKey::Story("51_story_test"),
"---\nname: Test\nqa: server\n---\ntest",
);
let pool = AgentPool::new_test(3001);
pool.run_pipeline_advance(
@@ -128,7 +131,7 @@ async fn pipeline_advance_qa_gates_pass_moves_story_to_merge() {
// Story should have moved to 4_merge/
// Lifecycle moves now update the content store, not the filesystem.
assert!(
crate::db::read_content("51_story_test").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("51_story_test")).is_some(),
"story should still exist in content store after move to merge"
);
}
@@ -76,7 +76,7 @@ async fn mergemaster_blocks_and_sends_story_blocked_when_no_commits_ahead() {
// Story should still exist in the content store after moving to merge.
assert!(
crate::db::read_content("9919_story_no_commits").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("9919_story_no_commits")).is_some(),
"story should remain in content store — not removed"
);
@@ -249,7 +249,7 @@ async fn stale_mergemaster_advance_for_done_story_is_noop() {
// Seed the story in 5_done via the DB, which also writes to the CRDT.
let story_id = "9929_story_zombie_merge";
let content = "---\nname: Zombie Merge Test\n---\n";
crate::db::write_content(story_id, content);
crate::db::write_content(crate::db::ContentKey::Story(story_id), content);
crate::db::write_item_with_content(
story_id,
"5_done",
@@ -387,7 +387,10 @@ async fn work_survived_advances_to_qa_instead_of_blocking() {
// Set up the story in the content store.
crate::db::ensure_content_store();
crate::db::write_content("9945_story_survived", "---\nname: Survived Test\n---\n");
crate::db::write_content(
crate::db::ContentKey::Story("9945_story_survived"),
"---\nname: Survived Test\n---\n",
);
crate::db::write_item_with_content(
"9945_story_survived",
"2_current",
@@ -397,7 +400,10 @@ async fn work_survived_advances_to_qa_instead_of_blocking() {
// Simulate a passing run_tests call during the agent's session (bug 668):
// the agent ran script/test, it passed, and the server captured the evidence.
crate::db::write_content("9945_story_survived:run_tests_ok", "1");
crate::db::write_content(
crate::db::ContentKey::RunTestsOk("9945_story_survived"),
"1",
);
let pool = AgentPool::new_test(3001);
@@ -421,7 +427,7 @@ async fn work_survived_advances_to_qa_instead_of_blocking() {
// Story should have advanced — content store should reflect the move.
// The work-survived check should have moved it to QA (or merge for
// server qa mode), NOT incremented retry_count.
let content = crate::db::read_content("9945_story_survived")
let content = crate::db::read_content(crate::db::ContentKey::Story("9945_story_survived"))
.expect("story should exist in content store");
assert!(
!content.contains("blocked"),
@@ -482,7 +488,10 @@ async fn no_committed_work_still_retries_and_blocks() {
// Set up the story with max_retries=1 so it blocks immediately.
crate::crdt_state::init_for_test();
crate::db::ensure_content_store();
crate::db::write_content("9946_story_nowork", "---\nname: No Work Test\n---\n");
crate::db::write_content(
crate::db::ContentKey::Story("9946_story_nowork"),
"---\nname: No Work Test\n---\n",
);
crate::db::write_item_with_content(
"9946_story_nowork",
"2_current",
@@ -609,7 +618,7 @@ async fn gates_failed_no_test_evidence_does_not_advance() {
// Set up the story with max_retries=1 so we can observe the retry/block.
crate::db::ensure_content_store();
crate::db::write_content(
"9947_story_no_evidence",
crate::db::ContentKey::Story("9947_story_no_evidence"),
"---\nname: No Evidence Test\n---\n",
);
crate::db::write_item_with_content(
@@ -620,7 +629,7 @@ async fn gates_failed_no_test_evidence_does_not_advance() {
);
// Explicitly ensure no test evidence exists for this story.
crate::db::delete_content("9947_story_no_evidence:run_tests_ok");
crate::db::delete_content(crate::db::ContentKey::RunTestsOk("9947_story_no_evidence"));
fs::create_dir_all(root.join(".huskies")).unwrap();
fs::write(
@@ -740,7 +749,7 @@ async fn gates_failed_with_test_evidence_and_committed_work_advances() {
crate::db::ensure_content_store();
crate::db::write_content(
"9948_story_with_evidence",
crate::db::ContentKey::Story("9948_story_with_evidence"),
"---\nname: With Evidence Test\n---\n",
);
crate::db::write_item_with_content(
@@ -752,7 +761,10 @@ async fn gates_failed_with_test_evidence_and_committed_work_advances() {
// Write the run_tests evidence — simulates the agent having called run_tests
// MCP and getting a passing result before it crashed.
crate::db::write_content("9948_story_with_evidence:run_tests_ok", "1");
crate::db::write_content(
crate::db::ContentKey::RunTestsOk("9948_story_with_evidence"),
"1",
);
let pool = AgentPool::new_test(3001);
@@ -774,7 +786,7 @@ async fn gates_failed_with_test_evidence_and_committed_work_advances() {
.await;
// Story should advance (not blocked, no retry_count).
let content = crate::db::read_content("9948_story_with_evidence")
let content = crate::db::read_content(crate::db::ContentKey::Story("9948_story_with_evidence"))
.expect("story must exist in content store");
assert!(
!content.contains("blocked"),
@@ -786,7 +798,10 @@ async fn gates_failed_with_test_evidence_and_committed_work_advances() {
);
// Evidence must be consumed (cleared) after use.
assert!(
crate::db::read_content("9948_story_with_evidence:run_tests_ok").is_none(),
crate::db::read_content(crate::db::ContentKey::RunTestsOk(
"9948_story_with_evidence"
))
.is_none(),
"run_tests evidence must be cleared after pipeline advance consumes it"
);
}
@@ -919,7 +934,9 @@ stage = "coder"
crate::db::ItemMeta::named("Recovery Test"),
);
// Ensure no stale recovery key exists.
crate::db::delete_content("9954_story_recovery:commit_recovery_pending");
crate::db::delete_content(crate::db::ContentKey::CommitRecoveryPending(
"9954_story_recovery",
));
let pool = AgentPool::new_test(3001);
@@ -966,7 +983,10 @@ stage = "coder"
// The recovery key must be set so a second failure triggers a block.
assert!(
crate::db::read_content("9954_story_recovery:commit_recovery_pending").is_some(),
crate::db::read_content(crate::db::ContentKey::CommitRecoveryPending(
"9954_story_recovery"
))
.is_some(),
"commit_recovery_pending key must be set after issuing recovery respawn"
);
}
@@ -1008,7 +1028,10 @@ stage = "coder"
// Simulate the recovery key already being set (first recovery respawn was
// issued previously).
crate::db::write_content("9955_story_recovery2:commit_recovery_pending", "1");
crate::db::write_content(
crate::db::ContentKey::CommitRecoveryPending("9955_story_recovery2"),
"1",
);
let pool = AgentPool::new_test(3001);
let mut rx = pool.watcher_tx.subscribe();
@@ -1052,7 +1075,10 @@ stage = "coder"
// The recovery key must be cleared after blocking.
assert!(
crate::db::read_content("9955_story_recovery2:commit_recovery_pending").is_none(),
crate::db::read_content(crate::db::ContentKey::CommitRecoveryPending(
"9955_story_recovery2"
))
.is_none(),
"commit_recovery_pending key must be cleared after blocking the story"
);
@@ -1128,7 +1154,10 @@ async fn coder_completion_with_test_evidence_and_zero_commits_does_not_advance()
// Simulate the agent having called run_tests with a passing result (bug-645
// evidence) — but the feature branch still has zero commits ahead of master.
crate::db::write_content("9953_story_zero_commits:run_tests_ok", "1");
crate::db::write_content(
crate::db::ContentKey::RunTestsOk("9953_story_zero_commits"),
"1",
);
// Write a project.toml with max_retries=1 so the story blocks immediately,
// giving us a clean assertion target (StoryBlocked event).
@@ -1193,7 +1222,8 @@ async fn coder_completion_with_test_evidence_and_zero_commits_does_not_advance()
// Test evidence must have been consumed (cleared) by the advance handler.
assert!(
crate::db::read_content("9953_story_zero_commits:run_tests_ok").is_none(),
crate::db::read_content(crate::db::ContentKey::RunTestsOk("9953_story_zero_commits"))
.is_none(),
"run_tests evidence must be cleared after pipeline advance consumes it"
);
}
@@ -703,7 +703,7 @@ async fn server_side_merge_happy_path_advances_to_done() {
if report.success {
// story_archived may or may not be true depending on gate env,
// but merge_failure must NOT be in the content store.
let content = crate::db::read_content("757a_happy");
let content = crate::db::read_content(crate::db::ContentKey::Story("757a_happy"));
if let Some(c) = content {
assert!(
!c.contains("merge_failure"),
@@ -713,7 +713,7 @@ async fn server_side_merge_happy_path_advances_to_done() {
} else {
// Gate failure (no script/test) is acceptable in test env —
// but merge_failure should be written.
let content = crate::db::read_content("757a_happy");
let content = crate::db::read_content(crate::db::ContentKey::Story("757a_happy"));
if let Some(c) = content {
// merge_failure should be written for gate failures
assert!(
+35 -20
View File
@@ -72,7 +72,8 @@ pub(super) fn maybe_inject_gate_failure(args: &mut Vec<String>, story_id: &str)
.map(|item| item.retry_count())
.unwrap_or(0);
if retry_count > 0
&& let Some(gate_output) = crate::db::read_content(&format!("{story_id}:gate_output"))
&& let Some(gate_output) =
crate::db::read_content(crate::db::ContentKey::GateOutput(story_id))
{
inject_gate_failure_section(args, &gate_output);
}
@@ -230,7 +231,10 @@ pub(super) async fn run_agent_spawn(
// Story 933: epic linkage is now a typed CRDT register on PipelineItemCrdt.
if let Some(view) = crate::crdt_state::read_item(&sid)
&& let Some(epic_id) = view.epic()
&& let Some(epic_content) = crate::db::read_content(&epic_id.to_string())
&& let Some(epic_content) = {
let epic_id_str = epic_id.to_string();
crate::db::read_content(crate::db::ContentKey::Story(&epic_id_str))
}
{
let block = format!(
"# Epic Context\n\nThis work item belongs to epic `{epic_id}`.\
@@ -434,12 +438,14 @@ pub(super) async fn run_agent_spawn(
// infinite loops; after the cap, block the story with a clear reason.
if result.aborted_signal && stage != PipelineStage::Mergemaster {
const ABORT_RESPAWN_CAP: u32 = 5;
let db_key = format!("{sid}:abort_respawn_count");
let count = crate::db::read_content(&db_key)
let count = crate::db::read_content(crate::db::ContentKey::AbortRespawnCount(&sid))
.and_then(|s| s.trim().parse::<u32>().ok())
.unwrap_or(0)
+ 1;
crate::db::write_content(&db_key, &count.to_string());
crate::db::write_content(
crate::db::ContentKey::AbortRespawnCount(&sid),
&count.to_string(),
);
// Remove the agent entry from the pool and emit Done so that
// any caller blocked on wait_for_agent is unblocked.
@@ -523,7 +529,7 @@ pub(super) async fn run_agent_spawn(
// Reset the abort-respawn counter on any non-aborted exit so that
// a single successful run clears the consecutive-crash history.
crate::db::delete_content(&format!("{sid}:abort_respawn_count"));
crate::db::delete_content(crate::db::ContentKey::AbortRespawnCount(&sid));
if stage == PipelineStage::Mergemaster {
let (tx_done, done_session_id, merge_failure_reported) = {
@@ -555,7 +561,6 @@ pub(super) async fn run_agent_spawn(
// Only mark mergemaster_attempted on a genuine give-up so that
// transient exits can be re-spawned up to the cap (story 920).
const MERGEMASTER_RESPAWN_CAP: u32 = 3;
let spawn_count_key = format!("{sid}:mergemaster_spawn_count");
let is_genuine = if merge_failure_reported {
slog!(
"[agents] Mergemaster '{aname}' for '{sid}' gave up genuinely \
@@ -563,11 +568,15 @@ pub(super) async fn run_agent_spawn(
);
true
} else {
let count = crate::db::read_content(&spawn_count_key)
.and_then(|s| s.trim().parse::<u32>().ok())
.unwrap_or(0)
+ 1;
crate::db::write_content(&spawn_count_key, &count.to_string());
let count =
crate::db::read_content(crate::db::ContentKey::MergeMasterSpawnCount(&sid))
.and_then(|s| s.trim().parse::<u32>().ok())
.unwrap_or(0)
+ 1;
crate::db::write_content(
crate::db::ContentKey::MergeMasterSpawnCount(&sid),
&count.to_string(),
);
if count >= MERGEMASTER_RESPAWN_CAP {
slog!(
"[agents] Mergemaster '{aname}' for '{sid}' exhausted \
@@ -667,7 +676,7 @@ mod tests {
let gate_output =
"error[E0308]: mismatched types\n --> src/lib.rs:5:10\n = expected i32, found &str";
crate::db::write_content(&format!("{story_id}:gate_output"), gate_output);
crate::db::write_content(crate::db::ContentKey::GateOutput(story_id), gate_output);
let mut args: Vec<String> = vec!["--verbose".to_string()];
maybe_inject_gate_failure(&mut args, story_id);
@@ -703,7 +712,10 @@ mod tests {
);
// retry_count is 0 (default — never bumped).
crate::db::write_content(&format!("{story_id}:gate_output"), "some previous output");
crate::db::write_content(
crate::db::ContentKey::GateOutput(story_id),
"some previous output",
);
let mut args: Vec<String> = vec!["--verbose".to_string()];
maybe_inject_gate_failure(&mut args, story_id);
@@ -767,17 +779,19 @@ mod tests {
crate::db::ItemMeta::named("Test"),
);
let db_key = format!("{story_id}:abort_respawn_count");
const CAP: u32 = 5;
// Simulate CAP consecutive abort-before-session exits.
for expected_count in 1u32..=CAP {
// This is exactly the counter logic in run_agent_spawn's abort path.
let count = crate::db::read_content(&db_key)
let count = crate::db::read_content(crate::db::ContentKey::AbortRespawnCount(story_id))
.and_then(|s| s.trim().parse::<u32>().ok())
.unwrap_or(0)
+ 1;
crate::db::write_content(&db_key, &count.to_string());
crate::db::write_content(
crate::db::ContentKey::AbortRespawnCount(story_id),
&count.to_string(),
);
assert_eq!(
count, expected_count,
"abort counter must increment by 1 each time"
@@ -795,9 +809,10 @@ mod tests {
}
// After CAP cycles the counter equals the cap — the story would be blocked.
let final_count: u32 = crate::db::read_content(&db_key)
.and_then(|s| s.trim().parse().ok())
.unwrap_or(0);
let final_count: u32 =
crate::db::read_content(crate::db::ContentKey::AbortRespawnCount(story_id))
.and_then(|s| s.trim().parse().ok())
.unwrap_or(0);
assert_eq!(
final_count, CAP,
"counter must equal {CAP} after {CAP} abort cycles"
@@ -102,7 +102,7 @@ stage = "coder"
let story_content = "---\nname: Story 3\n---\n";
std::fs::write(backlog.join("story-3.md"), story_content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content("story-3", story_content);
crate::db::write_content(crate::db::ContentKey::Story("story-3"), story_content);
let pool = AgentPool::new_test(3001);
pool.inject_test_agent("story-1", "coder-1", AgentStatus::Running);
@@ -124,7 +124,7 @@ stage = "coder"
// The lifecycle function updates the content store (not the filesystem),
// so verify the move via the DB.
let content = crate::db::read_content("story-3")
let content = crate::db::read_content(crate::db::ContentKey::Story("story-3"))
.expect("story-3 should be in content store after move to current");
assert!(
content.contains("name: Story 3"),
@@ -280,7 +280,10 @@ stage = "coder"
// left a stale entry for "368_story_test" in the global CRDT.
std::fs::write(current.join("368_story_test.md"), story_content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content("368_story_test", story_content);
crate::db::write_content(
crate::db::ContentKey::Story("368_story_test"),
story_content,
);
// Story 929: agent pin comes from the CRDT register, not YAML. Seed it.
crate::crdt_state::init_for_test();
crate::crdt_state::write_item_str(
+1 -1
View File
@@ -170,7 +170,7 @@ mod tests {
// The lifecycle function updates the content store (not the filesystem),
// so verify the move via the DB.
let content = crate::db::read_content("60_story_cleanup")
let content = crate::db::read_content(crate::db::ContentKey::Story("60_story_cleanup"))
.expect("60_story_cleanup should be in content store after move to done");
assert_eq!(content, "test", "content should be preserved after move");
}
+6 -5
View File
@@ -198,8 +198,8 @@ mod tests {
"CRDT register should hold [477, 478]: {view:?}"
);
// Content store YAML must NOT be mutated with depends_on.
let contents =
crate::db::read_content("9910_story_foo").expect("content store should have story");
let contents = crate::db::read_content(crate::db::ContentKey::Story("9910_story_foo"))
.expect("content store should have story");
assert!(
!contents.contains("depends_on"),
"content store YAML must not contain depends_on after chat command: {contents}"
@@ -230,8 +230,8 @@ mod tests {
"CRDT register should be empty after clearing: {view:?}"
);
// Content store YAML must not be mutated.
let contents =
crate::db::read_content("9911_story_bar").expect("content store should have story");
let contents = crate::db::read_content(crate::db::ContentKey::Story("9911_story_bar"))
.expect("content store should have story");
assert!(
!contents.contains("depends_on"),
"content store YAML must not contain depends_on after clear: {contents}"
@@ -268,7 +268,8 @@ mod tests {
"CRDT must hold [500, 501]: {view:?}"
);
let content = crate::db::read_content("8790_story_chat_dep").unwrap();
let content =
crate::db::read_content(crate::db::ContentKey::Story("8790_story_chat_dep")).unwrap();
assert!(
!content.contains("depends_on"),
"chat must not write depends_on to YAML: {content}"
+2 -1
View File
@@ -189,7 +189,8 @@ mod tests {
// Verify the story is still accessible in the content store after the move.
assert!(
crate::db::read_content("42_story_some_feature").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("42_story_some_feature"))
.is_some(),
"story should be in the content store after move"
);
}
+1 -1
View File
@@ -64,7 +64,7 @@ fn build_triage_dump(
item: &crate::pipeline_state::PipelineItem,
num_str: &str,
) -> String {
let contents = match crate::db::read_content(story_id) {
let contents = match crate::db::read_content(crate::db::ContentKey::Story(story_id)) {
Some(c) => c,
None => return format!("Story {num_str}: content not found in content store."),
};
+3 -2
View File
@@ -39,7 +39,8 @@ pub(crate) fn find_story_by_number(
.join("work")
.join(&stage_dir)
.join(format!("{}.md", item.story_id()));
let content = crate::db::read_content(item.story_id());
let content =
crate::db::read_content(crate::db::ContentKey::Story(item.story_id()));
return Some((item.story_id().to_string(), stage_dir, path, content));
}
}
@@ -61,7 +62,7 @@ pub(crate) fn find_story_by_number(
.join("work")
.join(&stage_dir)
.join(format!("{id}.md"));
let content = crate::db::read_content(&id);
let content = crate::db::read_content(crate::db::ContentKey::Story(&id));
return Some((id, stage_dir, path, content));
}
+2 -1
View File
@@ -303,7 +303,8 @@ mod tests {
"unexpected response: {response}"
);
assert!(
crate::db::read_content("9975_story_some_feature").is_none(),
crate::db::read_content(crate::db::ContentKey::Story("9975_story_some_feature"))
.is_none(),
"content store should no longer contain the deleted story"
);
}
+1 -1
View File
@@ -283,7 +283,7 @@ pub fn evict_item(story_id: &str) -> Result<(), String> {
// Drop the content-store entry so the cached body doesn't outlive the
// CRDT entry. (Bug 521 follow-up: when CONTENT_STORE becomes a true
// lazy cache, this explicit eviction can go away.)
crate::db::delete_content(story_id);
crate::db::delete_content(crate::db::ContentKey::Story(story_id));
Ok(())
}
+96 -11
View File
@@ -7,6 +7,44 @@
use std::collections::HashMap;
use std::sync::{Mutex, OnceLock};
/// Typed key for the in-memory content store.
///
/// Each variant maps to a distinct raw key namespace so that content written
/// under one variant is never visible under another — no raw `format!()`
/// key construction is needed at call sites outside `db/`.
///
/// Every variant holds only a `&str`, so the type derives `Copy`: keys can be
/// passed by value at call sites without moves or `.clone()` noise. `Hash` is
/// derived so keys may also be used directly in hashed collections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ContentKey<'a> {
    /// Main markdown body of a work item (story, bug, spike, refactor, epic).
    Story(&'a str),
    /// Gate failure output from the last failed agent run.
    GateOutput(&'a str),
    /// Consecutive abort-respawn counter.
    AbortRespawnCount(&'a str),
    /// Mergemaster re-spawn counter.
    MergeMasterSpawnCount(&'a str),
    /// Evidence that `run_tests` passed during an agent session.
    RunTestsOk(&'a str),
    /// Flag indicating a commit-recovery respawn is in progress.
    CommitRecoveryPending(&'a str),
}
impl<'a> ContentKey<'a> {
    /// Lower this typed key to the underlying storage string used by the
    /// CRDT content store (`{story_id}` for the base story, `{story_id}:<kind>`
    /// for per-purpose sub-keys). Internal — callers should use the typed
    /// `read_content` / `write_content` wrappers instead of touching strings.
    pub(super) fn as_raw_key(&self) -> String {
        // Every sub-key shares the `{id}:{suffix}` shape; only the base
        // story key is the bare id, so that arm returns early.
        let (id, suffix) = match self {
            Self::Story(id) => return (*id).to_string(),
            Self::GateOutput(id) => (id, "gate_output"),
            Self::AbortRespawnCount(id) => (id, "abort_respawn_count"),
            Self::MergeMasterSpawnCount(id) => (id, "mergemaster_spawn_count"),
            Self::RunTestsOk(id) => (id, "run_tests_ok"),
            Self::CommitRecoveryPending(id) => (id, "commit_recovery_pending"),
        };
        format!("{id}:{suffix}")
    }
}
static CONTENT_STORE: OnceLock<Mutex<HashMap<String, String>>> = OnceLock::new();
#[cfg(test)]
@@ -41,30 +79,28 @@ pub(super) fn get_content_store() -> Option<&'static Mutex<HashMap<String, Strin
}
}
/// Read the full markdown content of a story from the in-memory store.
pub fn read_content(story_id: &str) -> Option<String> {
/// Read content from the in-memory store by typed key.
pub fn read_content(key: ContentKey<'_>) -> Option<String> {
let store = get_content_store()?;
let map = store.lock().ok()?;
map.get(story_id).cloned()
map.get(&key.as_raw_key()).cloned()
}
/// Write (or overwrite) the full markdown content of a story.
///
/// Updates the in-memory store immediately.
pub fn write_content(story_id: &str, content: &str) {
/// Write (or overwrite) content in the in-memory store by typed key.
pub fn write_content(key: ContentKey<'_>, content: &str) {
if let Some(store) = get_content_store()
&& let Ok(mut map) = store.lock()
{
map.insert(story_id.to_string(), content.to_string());
map.insert(key.as_raw_key(), content.to_string());
}
}
/// Remove a story's content from the in-memory store.
pub fn delete_content(story_id: &str) {
/// Remove an entry from the in-memory store by typed key.
pub fn delete_content(key: ContentKey<'_>) {
if let Some(store) = get_content_store()
&& let Ok(mut map) = store.lock()
{
map.remove(story_id);
map.remove(&key.as_raw_key());
}
}
@@ -103,3 +139,52 @@ pub fn all_content_ids() -> Vec<String> {
/// Seed the global content store with a pre-built map.
///
/// A second call is a deliberate no-op: `OnceLock::set` returns `Err` once
/// the cell is initialised, and that error is discarded.
pub(super) fn init_content_store(map: HashMap<String, String>) {
    CONTENT_STORE.set(Mutex::new(map)).ok();
}
#[cfg(test)]
mod tests {
    use super::*;

    /// AC 2 regression: writing under `ContentKey::Story` is not visible under
    /// `ContentKey::GateOutput` (and vice versa). The typed key namespace, not
    /// runtime substring matching, enforces the separation.
    #[test]
    fn wrong_key_variant_is_isolated() {
        ensure_content_store();
        let base = "9961_regression_key_isolation";

        write_content(ContentKey::Story(base), "story body");

        // Sibling variants over the same base id must come back empty.
        assert!(
            read_content(ContentKey::GateOutput(base)).is_none(),
            "GateOutput key must not read Story content"
        );
        assert!(
            read_content(ContentKey::RunTestsOk(base)).is_none(),
            "RunTestsOk key must not read Story content"
        );

        // The Story variant itself still yields the body.
        let stored = read_content(ContentKey::Story(base));
        assert_eq!(stored.as_deref(), Some("story body"));

        // Writing a second variant must not leak into the Story namespace.
        write_content(ContentKey::GateOutput(base), "gate failure text");
        assert_eq!(
            read_content(ContentKey::Story(base)).as_deref(),
            Some("story body"),
            "Story key must not be polluted by GateOutput write"
        );
        assert_eq!(
            read_content(ContentKey::GateOutput(base)).as_deref(),
            Some("gate failure text")
        );

        // Cleanup so later tests sharing the global store see no residue.
        delete_content(ContentKey::Story(base));
        delete_content(ContentKey::GateOutput(base));
    }
}
+19 -10
View File
@@ -20,7 +20,7 @@ pub mod ops;
/// Background shadow-write task — persists pipeline items to SQLite asynchronously.
pub mod shadow_write;
pub use content_store::{all_content_ids, delete_content, read_content, write_content};
pub use content_store::{ContentKey, all_content_ids, delete_content, read_content, write_content};
pub use ops::{ItemMeta, delete_item, move_item_stage, next_item_number, write_item_with_content};
pub use shadow_write::init;
@@ -217,17 +217,23 @@ mod tests {
let markdown = "---\nname: Content Test\n---\n# Story\n";
// Write.
write_content(story_id, markdown);
assert_eq!(read_content(story_id).as_deref(), Some(markdown));
write_content(ContentKey::Story(story_id), markdown);
assert_eq!(
read_content(ContentKey::Story(story_id)).as_deref(),
Some(markdown)
);
// Overwrite.
let updated = "---\nname: Updated\n---\n# Updated Story\n";
write_content(story_id, updated);
assert_eq!(read_content(story_id).as_deref(), Some(updated));
write_content(ContentKey::Story(story_id), updated);
assert_eq!(
read_content(ContentKey::Story(story_id)).as_deref(),
Some(updated)
);
// Delete.
delete_content(story_id);
assert!(read_content(story_id).is_none());
delete_content(ContentKey::Story(story_id));
assert!(read_content(ContentKey::Story(story_id)).is_none());
}
#[test]
@@ -327,7 +333,10 @@ mod tests {
assert_eq!(view.depends_on(), &[100, 200]);
// Content is stored verbatim (no parsing, no rewrite).
assert_eq!(read_content(story_id).as_deref(), Some(content));
assert_eq!(
read_content(ContentKey::Story(story_id)).as_deref(),
Some(content)
);
}
/// Story 864: passing `ItemMeta::default()` against a content blob that
@@ -371,7 +380,7 @@ mod tests {
},
);
let read_back = read_content(story_id).expect("content present");
let read_back = read_content(ContentKey::Story(story_id)).expect("content present");
assert_eq!(read_back, body, "plain content must be readable as-is");
assert!(
!read_back.trim_start().starts_with("---"),
@@ -402,7 +411,7 @@ mod tests {
None,
);
write_content(
story_id,
ContentKey::Story(story_id),
"---\nname: Retry reset test\nretry_count: 3\n---\n",
);
let typed = crate::pipeline_state::read_typed(story_id)
+5 -5
View File
@@ -4,7 +4,7 @@
//! content store, the CRDT (source of truth for metadata), and the SQLite
//! shadow table (via the background channel).
use super::content_store::{
all_content_ids, delete_content, ensure_content_store, read_content, write_content,
ContentKey, all_content_ids, delete_content, ensure_content_store, read_content, write_content,
};
use super::shadow_write::{PIPELINE_DB, PipelineWriteMsg};
@@ -74,7 +74,7 @@ pub fn write_item_with_content(story_id: &str, stage: &str, content: &str, meta:
// Update in-memory content store.
ensure_content_store();
write_content(story_id, content);
write_content(ContentKey::Story(story_id), content);
// Primary: CRDT ops.
let stage = normalise_stage_str(stage);
@@ -123,12 +123,12 @@ pub fn move_item_stage(
new_stage: &str,
content_transform: Option<&dyn Fn(&str) -> String>,
) {
let current_content = read_content(story_id);
let current_content = read_content(ContentKey::Story(story_id));
let content = match (&current_content, content_transform) {
(Some(c), Some(transform)) => {
let new_content = transform(c);
write_content(story_id, &new_content);
write_content(ContentKey::Story(story_id), &new_content);
Some(new_content)
}
(Some(c), None) => Some(c.clone()),
@@ -192,7 +192,7 @@ pub fn move_item_stage(
/// Delete a story from the shadow table (fire-and-forget).
pub fn delete_item(story_id: &str) {
delete_content(story_id);
delete_content(ContentKey::Story(story_id));
if let Some(db) = PIPELINE_DB.get() {
// Reuse the channel with a special "deleted" stage marker.
@@ -463,7 +463,7 @@ mod tests {
)
.unwrap();
assert!(crate::db::read_content("5_story_test").is_some());
assert!(crate::db::read_content(crate::db::ContentKey::Story("5_story_test")).is_some());
let parsed: Value = serde_json::from_str(&result).unwrap();
assert_eq!(parsed["story_id"], "5_story_test");
assert_eq!(parsed["from_stage"], "backlog");
@@ -495,7 +495,7 @@ mod tests {
)
.unwrap();
assert!(crate::db::read_content("6_story_back").is_some());
assert!(crate::db::read_content(crate::db::ContentKey::Story("6_story_back")).is_some());
let parsed: Value = serde_json::from_str(&result).unwrap();
// from_stage may be inaccurate when using the content-store fallback
// (it lacks stage tracking), but the move itself must succeed.
@@ -527,7 +527,7 @@ mod tests {
)
.unwrap();
assert!(crate::db::read_content("9907_story_idem").is_some());
assert!(crate::db::read_content(crate::db::ContentKey::Story("9907_story_idem")).is_some());
let parsed: Value = serde_json::from_str(&result).unwrap();
// When CRDT is uninitialised the content-store fallback handles the
// move, so idempotency detection may not fire. Verify the to_stage
+2 -2
View File
@@ -349,7 +349,7 @@ mod tests {
let story_file = current_dir.join("24_story_test.md");
std::fs::write(&story_file, content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content("24_story_test", content);
crate::db::write_content(crate::db::ContentKey::Story("24_story_test"), content);
std::process::Command::new("git")
.args(["add", "."])
.current_dir(tmp.path())
@@ -366,7 +366,7 @@ mod tests {
let result = tool_move_story_to_merge(&json!({"story_id": "24_story_test"}), &ctx).await;
// Content store should still have the item after the move
assert!(
crate::db::read_content("24_story_test").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("24_story_test")).is_some(),
"content store should have the story after move"
);
// Result is either Ok (agent started) or Err (agent failed - acceptable in tests)
+1 -1
View File
@@ -219,7 +219,7 @@ pub(crate) async fn tool_run_tests(args: &Value, ctx: &AppContext) -> Result<Str
// ran passing tests before it died. Only written when running
// in a story worktree (worktree_path arg provided).
if passed && args.get("worktree_path").is_some() {
crate::db::write_content(&format!("{sid}:run_tests_ok"), "1");
crate::db::write_content(crate::db::ContentKey::RunTestsOk(&sid), "1");
}
return serde_json::to_string_pretty(&json!({
"passed": passed,
+1 -1
View File
@@ -171,7 +171,7 @@ pub(super) async fn tool_status(args: &Value, ctx: &AppContext) -> Result<String
));
}
let contents = crate::db::read_content(story_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(story_id))
.ok_or_else(|| format!("Story '{story_id}' has no content in the content store."))?;
// --- Metadata (story 929: CRDT-first, yaml_residue marks gaps) ---
+2 -2
View File
@@ -284,7 +284,7 @@ mod tests {
let bug_id = result.trim_start_matches("Created bug: ").trim();
// Bug content should exist in the CRDT content store.
assert!(
crate::db::read_content(bug_id).is_some(),
crate::db::read_content(crate::db::ContentKey::Story(bug_id)).is_some(),
"expected bug content in CRDT for {bug_id}"
);
}
@@ -482,7 +482,7 @@ mod tests {
let result = tool_close_bug(&json!({"bug_id": "9901_bug_crash"}), &ctx).unwrap();
assert!(result.contains("9901_bug_crash"));
assert!(
crate::db::read_content("9901_bug_crash").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("9901_bug_crash")).is_some(),
"content store should have the bug after close"
);
}
+2 -2
View File
@@ -528,7 +528,7 @@ mod tests {
)
.unwrap();
let contents = crate::db::read_content("9906_story_persist")
let contents = crate::db::read_content(crate::db::ContentKey::Story("9906_story_persist"))
.expect("story content should exist in CRDT");
assert!(
contents.contains("## Test Results"),
@@ -649,7 +649,7 @@ mod tests {
);
// Criterion must still be unchecked in the CRDT.
let contents = crate::db::read_content("9997_empty_branch")
let contents = crate::db::read_content(crate::db::ContentKey::Story("9997_empty_branch"))
.expect("story content should still be in CRDT");
assert!(
contents.contains("- [ ] Implement the feature"),
+1 -1
View File
@@ -105,7 +105,7 @@ pub(crate) fn tool_show_epic(args: &Value, _ctx: &AppContext) -> Result<String,
.and_then(|v| v.as_str())
.ok_or("Missing required argument: epic_id")?;
let content = crate::db::read_content(epic_id)
let content = crate::db::read_content(crate::db::ContentKey::Story(epic_id))
.ok_or_else(|| format!("Epic '{epic_id}' not found in content store"))?;
let epic_view = crate::crdt_state::read_item(epic_id)
+4 -2
View File
@@ -140,7 +140,8 @@ mod tests {
// Extract the actual spike ID from the result message (format: "Created spike: <id>").
let spike_id = result.trim_start_matches("Created spike: ").trim();
// Spike content should exist in the CRDT content store.
let contents = crate::db::read_content(spike_id).expect("expected spike content in CRDT");
let contents = crate::db::read_content(crate::db::ContentKey::Story(spike_id))
.expect("expected spike content in CRDT");
assert!(contents.starts_with("---\ntype: spike\nname: \"Compare Encoders\"\n---"));
assert!(contents.contains("Which encoder is fastest?"));
}
@@ -163,7 +164,8 @@ mod tests {
let spike_id = result.trim_start_matches("Created spike: ").trim();
// Spike content should exist in the CRDT content store.
let contents = crate::db::read_content(spike_id).expect("expected spike content in CRDT");
let contents = crate::db::read_content(crate::db::ContentKey::Story(spike_id))
.expect("expected spike content in CRDT");
assert!(contents.starts_with("---\ntype: spike\nname: \"My Spike\"\n---"));
assert!(contents.contains("## Question\n\n- TBD\n"));
}
@@ -239,7 +239,8 @@ mod tests {
.trim_start_matches("Created story: ")
.trim()
.to_string();
let content = crate::db::read_content(&story_id).expect("story content should exist");
let content = crate::db::read_content(crate::db::ContentKey::Story(&story_id))
.expect("story content should exist");
assert!(
content.contains("## Description"),
"Description section missing from story: {content}"
+4 -1
View File
@@ -115,7 +115,10 @@ pub fn list_bug_files(_root: &Path) -> Result<Vec<(String, String)>, String> {
} else {
Some(item.name)
}
.or_else(|| crate::db::read_content(&sid).and_then(|c| extract_bug_name_from_content(&c)))
.or_else(|| {
crate::db::read_content(crate::db::ContentKey::Story(&sid))
.and_then(|c| extract_bug_name_from_content(&c))
})
.unwrap_or_else(|| sid.clone());
bugs.push((sid, name));
}
+7 -6
View File
@@ -187,7 +187,7 @@ fn create_bug_file_writes_correct_content() {
);
// Check content exists (either in DB or filesystem).
let contents = crate::db::read_content(&bug_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(&bug_id))
.or_else(|| {
let filepath = tmp
.path()
@@ -273,7 +273,8 @@ fn create_spike_file_writes_correct_content() {
"spike ID must be numeric-only, got: {spike_id}"
);
let contents = crate::db::read_content(&spike_id).expect("spike content should exist");
let contents = crate::db::read_content(crate::db::ContentKey::Story(&spike_id))
.expect("spike content should exist");
assert!(
contents.starts_with("---\ntype: spike\nname: \"Filesystem Watcher Architecture\"\n---"),
@@ -305,7 +306,7 @@ fn create_spike_file_uses_description_when_provided() {
)
.unwrap();
let contents = crate::db::read_content(&spike_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(&spike_id))
.or_else(|| {
let filepath = tmp
.path()
@@ -328,7 +329,7 @@ fn create_spike_file_uses_placeholder_when_no_description() {
)
.unwrap();
let contents = crate::db::read_content(&spike_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(&spike_id))
.or_else(|| {
let filepath = tmp
.path()
@@ -437,7 +438,7 @@ fn create_bug_file_without_depends_on_omits_field() {
)
.unwrap();
let contents = crate::db::read_content(&bug_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(&bug_id))
.or_else(|| {
let filepath = tmp
.path()
@@ -485,7 +486,7 @@ fn create_refactor_file_without_depends_on_omits_field() {
)
.unwrap();
let contents = crate::db::read_content(&refactor_id)
let contents = crate::db::read_content(crate::db::ContentKey::Story(&refactor_id))
.or_else(|| {
let filepath = tmp
.path()
+1 -1
View File
@@ -119,7 +119,7 @@ mod tests {
// Also write to the global content store so read_story_content picks up this
// content even when a previous test has left a stale entry for the same ID.
crate::db::ensure_content_store();
crate::db::write_content(story_id, content);
crate::db::write_content(crate::db::ContentKey::Story(story_id), content);
}
// --- create_story integration tests ---
@@ -280,7 +280,7 @@ mod tests {
// Also write to the global content store so read_story_content picks up this
// content even when a previous test has left a stale entry for the same ID.
crate::db::ensure_content_store();
crate::db::write_content(story_id, content);
crate::db::write_content(crate::db::ContentKey::Story(story_id), content);
}
// --- create_story integration tests ---
+1 -1
View File
@@ -67,7 +67,7 @@ mod tests {
fs::create_dir_all(&current).unwrap();
fs::write(current.join(format!("{story_id}.md")), content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content(story_id, content);
crate::db::write_content(crate::db::ContentKey::Story(story_id), content);
}
#[test]
+5 -2
View File
@@ -6,7 +6,7 @@ use std::path::Path;
///
/// Returns the story content or an error if not found.
pub(crate) fn read_story_content(_project_root: &Path, story_id: &str) -> Result<String, String> {
crate::db::read_content(story_id)
crate::db::read_content(crate::db::ContentKey::Story(story_id))
.ok_or_else(|| format!("Story '{story_id}' not found in any pipeline stage."))
}
@@ -341,7 +341,10 @@ mod tests {
fn read_story_content_from_content_store() {
crate::db::ensure_content_store();
let content = "---\nname: Test\n---\n# Story\n";
crate::db::write_content("9878_story_read_test", content);
crate::db::write_content(
crate::db::ContentKey::Story("9878_story_read_test"),
content,
);
let tmp = tempfile::tempdir().unwrap();
let result = read_story_content(tmp.path(), "9878_story_read_test").unwrap();
+1 -1
View File
@@ -179,7 +179,7 @@ pub fn get_work_item_content(
}
// CRDT-only fallback
if let Some(content) = crate::db::read_content(story_id) {
if let Some(content) = crate::db::read_content(crate::db::ContentKey::Story(story_id)) {
let item = crate::pipeline_state::read_typed(story_id)
.map_err(|e| Error::Io(format!("Pipeline read error: {e}")))?;
let stage = match item.as_ref() {
+2 -2
View File
@@ -663,7 +663,7 @@ mod tests {
let content = "---\nname: Foo\n---\n";
fs::write(backlog.join("9905_story_foo.md"), content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content("9905_story_foo", content);
crate::db::write_content(crate::db::ContentKey::Story("9905_story_foo"), content);
let store = Arc::new(TimerStore::load(root.join("timers.json")));
let past = Utc::now() - Duration::seconds(5);
@@ -682,7 +682,7 @@ mod tests {
);
// Story should still be accessible in the content store after the move.
assert!(
crate::db::read_content("9905_story_foo").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("9905_story_foo")).is_some(),
"story should be in the content store after tick fires"
);
}
+2 -2
View File
@@ -437,7 +437,7 @@ mod tests {
let content = "---\nname: Foo\n---\n";
fs::write(backlog.join("421_story_foo.md"), content).unwrap();
crate::db::ensure_content_store();
crate::db::write_content("421_story_foo", content);
crate::db::write_content(crate::db::ContentKey::Story("421_story_foo"), content);
// Add a past timer so take_due returns it immediately.
let store = TimerStore::load(root.join("timers.json"));
@@ -456,7 +456,7 @@ mod tests {
// Story must still be accessible in the content store after the move.
assert!(
crate::db::read_content("421_story_foo").is_some(),
crate::db::read_content(crate::db::ContentKey::Story("421_story_foo")).is_some(),
"story should be in the content store after timer fires"
);
// Timer was consumed.
+3 -2
View File
@@ -55,7 +55,8 @@ pub async fn delete_work_item(
// Pre-flight: check whether the item exists in any store before doing
// destructive work. We probe the content store, CRDT, and filesystem
// shadow dirs so we can return a clear "not found" when nothing matches.
let found_in_content = crate::db::read_content(story_id).is_some();
let found_in_content =
crate::db::read_content(crate::db::ContentKey::Story(story_id)).is_some();
let found_in_crdt = crate::pipeline_state::read_typed(story_id)
.ok()
.flatten()
@@ -232,7 +233,7 @@ mod tests {
// Content store must be empty.
assert!(
crate::db::read_content(story_id).is_none(),
crate::db::read_content(crate::db::ContentKey::Story(story_id)).is_none(),
"content store must not contain the story after delete"
);