huskies: merge 1087 story Pipeline+Status split — Step D: migrate CRDT storage to (Pipeline, Status) and remove the Stage enum

This commit is contained in:
dave
2026-05-15 08:42:35 +00:00
parent e82602db77
commit df32a1542b
5 changed files with 349 additions and 0 deletions
+214
View File
@@ -588,4 +588,218 @@ mod tests {
"retry_count must reset to 0 on stage transition"
);
}
/// Story 1087, AC2: the split-stage migration projects every supported
/// wire-form `stage` string into the canonical `(pipeline, status)` pair.
/// The fixture covers each Stage variant (and the legacy numeric-prefix
/// directory names retained for back-compat).
#[tokio::test]
async fn split_stage_migration_backfills_pipeline_and_status_for_every_variant() {
let tmp = tempfile::tempdir().unwrap();
let db_path = tmp.path().join("pipeline.db");
let opts = sqlx::sqlite::SqliteConnectOptions::new()
.filename(&db_path)
.create_if_missing(true);
let pool = sqlx::SqlitePool::connect_with(opts).await.unwrap();
sqlx::migrate!("./migrations").run(&pool).await.unwrap();
// (stage written by older code, expected pipeline, expected status)
let fixture: &[(&str, &str, &str)] = &[
("upcoming", "backlog", "active"),
("backlog", "backlog", "active"),
("coding", "coding", "active"),
("blocked", "coding", "blocked"),
("qa", "qa", "active"),
("review_hold", "qa", "review-hold"),
("merge", "merge", "active"),
("merge_failure", "merge", "merge-failure"),
("merge_failure_final", "merge", "merge-failure-final"),
("done", "done", "done"),
("abandoned", "closed", "abandoned"),
("superseded", "closed", "superseded"),
("rejected", "closed", "rejected"),
("archived", "archived", "active"),
("frozen", "coding", "frozen"),
// Legacy numeric-prefix directory names.
("1_backlog", "backlog", "active"),
("2_current", "coding", "active"),
("3_qa", "qa", "active"),
("4_merge", "merge", "active"),
("5_done", "done", "done"),
("6_archived", "archived", "active"),
];
let now = chrono::Utc::now().to_rfc3339();
for (idx, (stage, _, _)) in fixture.iter().enumerate() {
let id = format!("1087_fixture_{idx}");
sqlx::query(
"INSERT INTO pipeline_items \
(id, name, stage, agent, retry_count, depends_on, content, created_at, updated_at) \
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8)",
)
.bind(&id)
.bind("fixture")
.bind(*stage)
.bind(Option::<String>::None)
.bind(Option::<i64>::None)
.bind(Option::<String>::None)
.bind("---\nname: fixture\n---\n")
.bind(&now)
.execute(&pool)
.await
.unwrap();
}
// Force the split-stage backfill to run against the rows we just
// inserted. In production this is `sqlx::migrate!`'s job, but the
// sqlx migrator only runs migrations once per DB and they were already
// applied at the top of the test before any rows existed. Reissuing
// the backfill statements is the migration logic under test.
sqlx::query(
"UPDATE pipeline_items SET pipeline = CASE stage \
WHEN 'upcoming' THEN 'backlog' \
WHEN 'backlog' THEN 'backlog' \
WHEN '1_backlog' THEN 'backlog' \
WHEN 'coding' THEN 'coding' \
WHEN 'blocked' THEN 'coding' \
WHEN '2_current' THEN 'coding' \
WHEN 'qa' THEN 'qa' \
WHEN 'review_hold' THEN 'qa' \
WHEN '3_qa' THEN 'qa' \
WHEN 'merge' THEN 'merge' \
WHEN 'merge_failure' THEN 'merge' \
WHEN 'merge_failure_final' THEN 'merge' \
WHEN '4_merge' THEN 'merge' \
WHEN 'done' THEN 'done' \
WHEN '5_done' THEN 'done' \
WHEN 'abandoned' THEN 'closed' \
WHEN 'superseded' THEN 'closed' \
WHEN 'rejected' THEN 'closed' \
WHEN 'archived' THEN 'archived' \
WHEN '6_archived' THEN 'archived' \
WHEN 'frozen' THEN 'coding' \
ELSE '' END",
)
.execute(&pool)
.await
.unwrap();
sqlx::query(
"UPDATE pipeline_items SET status = CASE stage \
WHEN 'frozen' THEN 'frozen' \
WHEN 'review_hold' THEN 'review-hold' \
WHEN 'blocked' THEN 'blocked' \
WHEN 'merge_failure' THEN 'merge-failure' \
WHEN 'merge_failure_final' THEN 'merge-failure-final' \
WHEN 'abandoned' THEN 'abandoned' \
WHEN 'superseded' THEN 'superseded' \
WHEN 'rejected' THEN 'rejected' \
WHEN 'done' THEN 'done' \
WHEN '5_done' THEN 'done' \
ELSE 'active' END",
)
.execute(&pool)
.await
.unwrap();
for (idx, (stage_input, expect_pipeline, expect_status)) in fixture.iter().enumerate() {
let id = format!("1087_fixture_{idx}");
let row: (String, String) =
sqlx::query_as("SELECT pipeline, status FROM pipeline_items WHERE id = ?1")
.bind(&id)
.fetch_one(&pool)
.await
.unwrap();
assert_eq!(
row.0, *expect_pipeline,
"stage {stage_input:?} should backfill pipeline to {expect_pipeline:?}, got {:?}",
row.0
);
assert_eq!(
row.1, *expect_status,
"stage {stage_input:?} should backfill status to {expect_status:?}, got {:?}",
row.1
);
}
}
/// Story 1087, AC1: `shadow_write::init` writes a timestamped backup of
/// pipeline.db before the split-stage migration applies, and skips the
/// backup on subsequent restarts (after the migration is recorded).
#[tokio::test]
async fn pre_pipeline_status_backup_only_runs_once() {
let tmp = tempfile::tempdir().unwrap();
let db_path = tmp.path().join("pipeline.db");
// Seed a "pre-1087" DB: open without applying the split-stage migration.
// We do this by opening with `create_if_missing` and running only the
// legacy migrations — but the simplest way to simulate that here is to
// hand-craft a DB containing an `_sqlx_migrations` table that lists
// every migration EXCEPT the split-stage one.
let opts = sqlx::sqlite::SqliteConnectOptions::new()
.filename(&db_path)
.create_if_missing(true);
let pool = sqlx::SqlitePool::connect_with(opts).await.unwrap();
// Apply migrations the normal way, then delete the split-stage row so
// the backup branch fires on the next `init`.
sqlx::migrate!("./migrations").run(&pool).await.unwrap();
sqlx::query("DELETE FROM _sqlx_migrations WHERE version = 20260515000000")
.execute(&pool)
.await
.unwrap();
pool.close().await;
// First call: backup branch fires, side-car file appears.
super::shadow_write::backup_pre_pipeline_status(&db_path).await;
let backups: Vec<_> = std::fs::read_dir(tmp.path())
.unwrap()
.filter_map(Result::ok)
.filter(|e| {
e.file_name()
.to_string_lossy()
.contains(".pre-pipeline-status.")
})
.collect();
assert_eq!(
backups.len(),
1,
"expected exactly one .pre-pipeline-status backup, got {}",
backups.len()
);
// Re-apply the migration so the marker row is back, simulating a
// post-migration server restart.
let opts = sqlx::sqlite::SqliteConnectOptions::new()
.filename(&db_path)
.create_if_missing(false);
let pool = sqlx::SqlitePool::connect_with(opts).await.unwrap();
let fake_checksum: Vec<u8> = vec![0u8; 20];
sqlx::query(
"INSERT INTO _sqlx_migrations \
(version, description, installed_on, success, checksum, execution_time) \
VALUES (20260515000000, 'split_stage_into_pipeline_status', '2026-05-15T00:00:00Z', 1, ?1, 0)",
)
.bind(&fake_checksum)
.execute(&pool)
.await
.unwrap();
pool.close().await;
// Second call: no new backup written.
super::shadow_write::backup_pre_pipeline_status(&db_path).await;
let backups_after: Vec<_> = std::fs::read_dir(tmp.path())
.unwrap()
.filter_map(Result::ok)
.filter(|e| {
e.file_name()
.to_string_lossy()
.contains(".pre-pipeline-status.")
})
.collect();
assert_eq!(
backups_after.len(),
1,
"post-migration init must not create another backup; got {} backups",
backups_after.len()
);
}
}