fix: read_all_items must use deduplicated index, not raw CRDT entries

read_all_items was iterating over all CRDT entries, including stale
duplicates left behind by earlier stage writes. A story written at
several stages (backlog → current → done) appeared once per stage in
the output, producing ghost entries in the pipeline status and
backlog views.

read_all_items now iterates only the index (the story_id →
visible_index map), which holds the latest-wins, deduplicated view of
each story.
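
A minimal sketch of the shape of the fix, using hypothetical Store
and Item types (the real field and method names differ):

    use std::collections::HashMap;

    // Hypothetical shapes; the real types differ.
    struct Item {
        story_id: String,
        stage: String,
    }

    struct Store {
        // Raw CRDT log: one entry per write, so a story moved across
        // stages appears several times.
        entries: Vec<Item>,
        // story_id -> visible_index: latest-wins view, one slot per story.
        index: HashMap<String, usize>,
    }

    impl Store {
        fn read_all_items(&self) -> Vec<&Item> {
            // Old behaviour: self.entries.iter().collect(), ghosts included.
            // New behaviour: resolve each story through the index, so each
            // story yields exactly its latest visible entry.
            self.index.values().map(|&i| &self.entries[i]).collect()
        }
    }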

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Author: dave
Date:   2026-04-10 19:32:55 +00:00
parent 2e0ed98d42
commit ea36160667
9 changed files with 88 additions and 526 deletions
@@ -529,7 +529,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Should complete without panic
run_setup_commands(tmp.path(), &config).await;
@@ -555,7 +554,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Should complete without panic
run_setup_commands(tmp.path(), &config).await;
@@ -581,7 +579,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Setup command failures are non-fatal — should not panic or propagate
run_setup_commands(tmp.path(), &config).await;
@@ -607,7 +604,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Teardown failures are best-effort — should not propagate
assert!(run_teardown_commands(tmp.path(), &config).await.is_ok());
@@ -632,7 +628,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
let info = create_worktree(&project_root, "42_fresh_test", &config, 3001)
.await
@@ -664,7 +659,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// First creation
let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001)
@@ -737,7 +731,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await;
@@ -768,7 +761,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
create_worktree(&project_root, "88_remove_by_id", &config, 3001)
.await
@@ -846,7 +838,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Even though setup commands fail, create_worktree must succeed
// so the agent can start and fix the problem itself.
@@ -880,7 +871,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// First creation — no setup commands, should succeed
create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001)
@@ -904,7 +894,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
// Second call — worktree exists, setup commands fail, must still succeed
let result = create_worktree(&project_root, "173_reuse_fail", &failing_config, 3002).await;
@@ -934,7 +923,6 @@ mod tests {
rate_limit_notifications: true,
timezone: None,
rendezvous: None,
- trusted_keys: Vec::new(),
};
let info = create_worktree(&project_root, "77_remove_async", &config, 3001)
.await