//! Name, story-ID, stage-string, claim, and MergeJob migration helpers for
//! pipeline items.
//!
//! Contains one-time startup migrations that backfill the `name` field from
//! story ID slugs, rewrite slug-form story IDs to numeric-only form, rewrite
//! legacy directory-style stage strings to the clean wire vocabulary, clear
//! legacy node-hex claims, and upgrade four-bool MergeJob CRDT entries to the
//! typed [`MergeResult`] enum.

use bft_json_crdt::json_crdt::{CrdtNode, JsonValue};

use super::super::state::{apply_and_persist, get_crdt, rebuild_index};
use crate::slog;

/// Derive a human-readable name from a story ID's slug component.
///
/// Strips the numeric prefix and item-type prefix (story/bug/spike/refactor),
/// replaces underscores with spaces, and capitalises the first letter.
///
/// Examples:
/// - `"729_story_store_story_name"` → `"Store story name"`
/// - `"4_bug_login_crash"` → `"Login crash"`
/// - `"10_spike_arch_review"` → `"Arch review"`
pub fn name_from_story_id(story_id: &str) -> String {
    // Strip the leading digits then the first underscore: "729_story_..." → "story_..."
    let after_num = story_id.trim_start_matches(|c: char| c.is_ascii_digit());
    let after_num = after_num.strip_prefix('_').unwrap_or(after_num);

    // Strip the item-type prefix.
    let slug = after_num
        .strip_prefix("story_")
        .or_else(|| after_num.strip_prefix("bug_"))
        .or_else(|| after_num.strip_prefix("spike_"))
        .or_else(|| after_num.strip_prefix("refactor_"))
        .unwrap_or(after_num);

    // Replace underscores with spaces.
    let spaced = slug.replace('_', " ");

    // Capitalise the first character.
    let mut chars = spaced.chars();
    match chars.next() {
        None => String::new(),
        Some(first) => {
            let mut name = first.to_uppercase().to_string();
            name.push_str(chars.as_str());
            name
        }
    }
}

/// Extract the numeric-only ID from a slug-form story ID, if applicable.
///
/// Returns `Some("664")` for `"664_story_my_feature"`, and `None` for IDs
/// that are already numeric-only (`"664"`) or have no valid numeric prefix.
#[allow(clippy::string_slice)] // idx comes from find('_') → always a char boundary
pub(super) fn numeric_id_from_slug(story_id: &str) -> Option<String> {
    // Already numeric-only — no migration needed.
    if story_id.chars().all(|c: char| c.is_ascii_digit()) {
        return None;
    }
    // Must have a non-empty numeric segment before the first underscore.
    let idx = story_id.find('_')?;
    let prefix = &story_id[..idx];
    if prefix.is_empty() || !prefix.chars().all(|c| c.is_ascii_digit()) {
        return None;
    }
    Some(prefix.to_string())
}
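
// Sanity checks for the two pure slug helpers above, mirroring the documented
// examples; these need no CRDT state.
#[cfg(test)]
mod slug_helper_tests {
    use super::*;

    #[test]
    fn name_from_story_id_matches_doc_examples() {
        assert_eq!(name_from_story_id("729_story_store_story_name"), "Store story name");
        assert_eq!(name_from_story_id("4_bug_login_crash"), "Login crash");
        assert_eq!(name_from_story_id("10_spike_arch_review"), "Arch review");
        // A bare numeric ID has no slug component to derive a name from.
        assert_eq!(name_from_story_id("729"), "");
    }

    #[test]
    fn numeric_id_from_slug_only_matches_slug_form() {
        assert_eq!(numeric_id_from_slug("664_story_my_feature").as_deref(), Some("664"));
        // Already numeric-only, empty prefix, or non-numeric prefix: no migration.
        assert_eq!(numeric_id_from_slug("664"), None);
        assert_eq!(numeric_id_from_slug("_story_x"), None);
        assert_eq!(numeric_id_from_slug("story_x"), None);
    }
}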

/// Migrate existing story IDs from slug form (`664_story_my_feature`) to
/// numeric-only form (`664`) in the in-memory CRDT, persisting a signed op
/// for each updated register so the change survives restarts.
///
/// Returns the list of `(old_id, new_id)` pairs that were actually migrated.
/// Callers should use this list to rename downstream filesystem artifacts
/// (worktree directories, git branches, log directories).
///
/// Items whose `story_id` is already numeric-only are left untouched.
/// Items where the target numeric ID is already in use are skipped to avoid
/// conflicts. Running this migration repeatedly is safe — subsequent calls
/// on already-migrated state are no-ops.
pub fn migrate_story_ids_to_numeric() -> Vec<(String, String)> {
    let Some(state_mutex) = get_crdt() else {
        return Vec::new();
    };

    // First pass: collect (index, old_id, new_id) while holding the lock.
    let migrations: Vec<(usize, String, String)> = {
        let Ok(state) = state_mutex.lock() else {
            return Vec::new();
        };

        let existing_ids: std::collections::HashSet<String> =
            state.index.keys().cloned().collect();

        state
            .index
            .iter()
            .filter_map(|(story_id, &idx)| {
                let numeric = numeric_id_from_slug(story_id)?;
                // Skip if the target numeric ID is already occupied.
                if existing_ids.contains(&numeric) {
                    return None;
                }
                Some((idx, story_id.clone(), numeric))
            })
            .collect()
    };

    if migrations.is_empty() {
        return Vec::new();
    }

    // Second pass: apply story_id register updates.
    let Ok(mut state) = state_mutex.lock() else {
        return Vec::new();
    };

    let mut result = Vec::new();
    for (idx, old_id, new_id) in migrations {
        apply_and_persist(&mut state, |s| {
            s.crdt.doc.items[idx].story_id.set(new_id.clone())
        });
        result.push((old_id, new_id));
    }

    // Rebuild the index so all downstream reads use the new numeric IDs.
    state.index = rebuild_index(&state.crdt);

    let count = result.len();
    slog!("[crdt] Migrated {count} story IDs from slug form to numeric");
    result
}
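
// Sketch of the expected caller wiring (the helper names below are
// illustrative, not functions defined in this crate): startup code renames
// the filesystem artifacts for each `(old_id, new_id)` pair returned above.
//
//     for (old_id, new_id) in migrate_story_ids_to_numeric() {
//         rename_worktree_dir(&old_id, &new_id); // hypothetical helper
//         rename_git_branch(&old_id, &new_id);   // hypothetical helper
//         rename_log_dir(&old_id, &new_id);      // hypothetical helper
//     }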

/// Backfill the `name` CRDT field for pipeline items that have an empty name.
///
/// Iterates over all items in the in-memory CRDT. For each item whose `name`
/// register is empty, derives a human-readable name from the story ID slug
/// (see [`name_from_story_id`]) and writes it via a signed CRDT op.
///
/// This is a one-time startup migration: items created before the `name` field
/// was consistently populated will gain a name on the next server start.
/// Items that already have a non-empty name are left untouched.
pub fn migrate_names_from_slugs() {
    let Some(state_mutex) = get_crdt() else {
        return;
    };

    // First pass: collect (index, derived_name) pairs for items missing a name.
    let migrations: Vec<(usize, String)> = {
        let Ok(state) = state_mutex.lock() else {
            return;
        };
        state
            .index
            .iter()
            .filter_map(|(story_id, &idx)| {
                let item = &state.crdt.doc.items[idx];
                // Skip items that already have a name.
                let already_named =
                    matches!(item.name.view(), JsonValue::String(ref s) if !s.is_empty());
                if already_named {
                    return None;
                }
                let name = name_from_story_id(story_id);
                if name.is_empty() {
                    return None;
                }
                Some((idx, name))
            })
            .collect()
    };

    if migrations.is_empty() {
        return;
    }

    // Second pass: apply all name writes while holding the lock.
    let Ok(mut state) = state_mutex.lock() else {
        return;
    };
    let count = migrations.len();
    for (idx, name) in migrations {
        apply_and_persist(&mut state, |s| s.crdt.doc.items[idx].name.set(name.clone()));
    }
    slog!("[crdt] Migrated names for {count} items from story ID slugs");
}

/// Map a pre-934 legacy directory-style stage string to its clean wire form.
///
/// Returns `None` if `s` is already in clean wire form (or is genuinely
/// unknown), so the migration can quickly skip already-clean items.
fn legacy_stage_to_clean(s: &str) -> Option<&'static str> {
    match s {
        "0_upcoming" => Some("upcoming"),
        "1_backlog" => Some("backlog"),
        "2_current" => Some("coding"),
        "2_blocked" => Some("blocked"),
        "3_qa" => Some("qa"),
        "4_merge" => Some("merge"),
        "4_merge_failure" => Some("merge_failure"),
        "5_done" => Some("done"),
        "6_archived" => Some("archived"),
        // `7_frozen` maps to `backlog` only as a safe "not progressing"
        // fallback: the original resume_to payload was lost when story 934
        // stage 4 dropped the `Stage::Frozen` variant. Since story 945
        // reinstated `Stage::Frozen { resume_to }`, the migration caller
        // overrides this to `"frozen"` and writes `resume_to = "backlog"`
        // separately.
        "7_frozen" => Some("backlog"),
        _ => None,
    }
}

/// Rewrite every pipeline item whose `stage` register still carries a pre-934
/// directory-style string (`"2_current"`, `"4_merge"`, etc.) to the clean wire
/// vocabulary (`"coding"`, `"merge"`, etc.).
///
/// Items that were at `"7_frozen"` are promoted to the typed frozen form:
/// story 945 reinstated the `Frozen` variant as `Stage::Frozen { resume_to }`,
/// replacing the orthogonal CRDT register that briefly existed in 934 stage 4,
/// so the migration writes `stage = "frozen"` plus `resume_to = "backlog"`.
///
/// One-time startup migration: items that have transitioned at least once
/// since story 934 stage 1 (which made writes emit clean form) are no-ops.
pub fn migrate_legacy_stage_strings() {
    let Some(state_mutex) = get_crdt() else {
        return;
    };

    // First pass: collect (index, clean_stage, was_frozen) for items that
    // still carry legacy stage strings. `was_frozen` triggers writing
    // `resume_to = "backlog"` so the post-945 typed projection reads back as
    // `Stage::Frozen { resume_to: Stage::Backlog }`.
    let migrations: Vec<(usize, &'static str, bool)> = {
        let Ok(state) = state_mutex.lock() else {
            return;
        };
        state
            .index
            .iter()
            .filter_map(|(_story_id, &idx)| {
                let item = &state.crdt.doc.items[idx];
                let current = match item.stage.view() {
                    JsonValue::String(s) => s,
                    _ => return None,
                };
                let clean = legacy_stage_to_clean(&current)?;
                let was_frozen = current == "7_frozen";
                // For legacy frozen items, store the post-945 stage as
                // "frozen" rather than "backlog" so the typed projection
                // produces `Stage::Frozen` again.
                let stage_out = if was_frozen { "frozen" } else { clean };
                Some((idx, stage_out, was_frozen))
            })
            .collect()
    };

    if migrations.is_empty() {
        return;
    }

    let Ok(mut state) = state_mutex.lock() else {
        return;
    };
    let count = migrations.len();
    let frozen_count = migrations.iter().filter(|(_, _, f)| *f).count();
    for (idx, clean, was_frozen) in migrations {
        apply_and_persist(&mut state, |s| {
            s.crdt.doc.items[idx].stage.set(clean.to_string())
        });
        if was_frozen {
            apply_and_persist(&mut state, |s| {
                s.crdt.doc.items[idx].resume_to.set("backlog".to_string())
            });
        }
    }
    slog!(
        "[crdt] Migrated {count} legacy stage strings to clean wire form \
         ({frozen_count} of which were '7_frozen' → frozen + resume_to=backlog)"
    );
}

/// Clear legacy node-hex claims from `claim_agent` and `claim_ts` registers.
///
/// Pre-1009 nodes wrote the Ed25519 hex pubkey as `claimed_by`. That value
/// cannot be converted to an `AgentName`, so the safe migration is to wipe
/// any existing claim rather than carry over a semantically invalid string.
///
/// Only clears entries where `claim_agent` looks like a legacy node hex value
/// (64 hex chars). Entries that are already empty or contain an agent-name
/// string (shorter, mixed case) are left untouched.
pub fn migrate_node_claims_to_agent_claims() {
    let Some(state_mutex) = get_crdt() else {
        return;
    };

    let stale_indices: Vec<usize> = {
        let Ok(state) = state_mutex.lock() else {
            return;
        };
        state
            .index
            .values()
            .copied()
            .filter(|&idx| {
                let item = &state.crdt.doc.items[idx];
                match item.claim_agent.view() {
                    JsonValue::String(s) => {
                        s.len() == 64 && s.chars().all(|c| c.is_ascii_hexdigit())
                    }
                    _ => false,
                }
            })
            .collect()
    };

    if stale_indices.is_empty() {
        return;
    }

    let Ok(mut state) = state_mutex.lock() else {
        return;
    };
    let count = stale_indices.len();
    for idx in stale_indices {
        apply_and_persist(&mut state, |s| {
            s.crdt.doc.items[idx].claim_agent.set(String::new())
        });
        apply_and_persist(&mut state, |s| s.crdt.doc.items[idx].claim_ts.set(0.0));
    }
    slog!("[crdt] Cleared {count} legacy node-hex claim(s) from claim_agent/claim_ts");
}

#[cfg(test)]
mod stage_migration_tests {
    use super::super::super::state::init_for_test;
    use super::super::item::write_item;
    use super::*;
    use crate::crdt_state::read_item;
    use crate::pipeline_state::{BranchName, PlanState, Stage};
    use std::num::NonZeroU32;

    /// Seed a pipeline item with a raw, possibly-legacy stage register value,
    /// bypassing the boundary normalisers that production write APIs apply.
    /// Inserts via the typed API first so the item is indexed, then directly
    /// rewrites the `stage` register to the legacy string.
    fn seed_with_raw_stage(story_id: &str, raw_stage: &str) {
        // Insert via the typed API so the item exists in state.index.
        write_item(
            story_id,
            &Stage::Backlog,
            Some("Migration Test"),
            None,
            None,
            None,
        );
        // Then overwrite the stage register with the raw legacy string,
        // bypassing `db::normalise_stage_str` / `write_item_str`'s mapping.
        let state_mutex = get_crdt().expect("CRDT initialised in test");
        let mut state = state_mutex.lock().unwrap();
        let idx = *state.index.get(story_id).expect("item indexed");
        let raw = raw_stage.to_string();
        apply_and_persist(&mut state, |s| s.crdt.doc.items[idx].stage.set(raw.clone()));
    }

    #[test]
    fn migrate_rewrites_legacy_directory_strings_to_clean_wire() {
        init_for_test();
        // High-numbered IDs to avoid colliding with other tests' globals.
        let cases: &[(&str, &str, Stage)] = &[
            ("9501_legacy_upcoming", "0_upcoming", Stage::Upcoming),
            ("9502_legacy_backlog", "1_backlog", Stage::Backlog),
            (
                "9503_legacy_coding",
                "2_current",
                Stage::Coding {
                    claim: None,
                    plan: PlanState::Missing,
                    retries: 0,
                },
            ),
            (
                "9504_legacy_blocked",
                "2_blocked",
                Stage::Blocked {
                    reason: String::new(),
                },
            ),
            ("9505_legacy_qa", "3_qa", Stage::Qa),
            (
                "9506_legacy_merge",
                "4_merge",
                Stage::Merge {
                    feature_branch: BranchName(String::new()),
                    commits_ahead: NonZeroU32::new(1).unwrap(),
                    claim: None,
                    retries: 0,
                },
            ),
            (
                "9507_legacy_merge_failure",
                "4_merge_failure",
                Stage::MergeFailure {
                    kind: crate::pipeline_state::MergeFailureKind::Other(String::new()),
                    feature_branch: crate::pipeline_state::BranchName(String::new()),
                    commits_ahead: NonZeroU32::new(1).unwrap(),
                },
            ),
        ];
        for (id, raw, _) in cases {
            seed_with_raw_stage(id, raw);
        }

        migrate_legacy_stage_strings();

        for (id, _, expected_variant) in cases {
            let view = read_item(id).expect("item must still exist after migration");
            let projected: &Stage = view.stage();
            assert_eq!(
                std::mem::discriminant(projected),
                std::mem::discriminant(expected_variant),
                "stage for {id} should project to {expected_variant:?} after migration, got {projected:?}",
            );
        }
    }

    #[test]
    fn migrate_promotes_7_frozen_to_typed_frozen_variant() {
        init_for_test();
        let story_id = "9510_legacy_frozen";
        seed_with_raw_stage(story_id, "7_frozen");

        // Sanity: before migration, the projection's legacy fallback maps
        // raw `"7_frozen"` → `Stage::Backlog` (frozen state is lost without the
        // migration's resume_to write).
        let before = read_item(story_id).expect("legacy 7_frozen should still project");
        assert!(
            matches!(before.stage(), crate::pipeline_state::Stage::Backlog),
            "raw 7_frozen should fall back to Backlog before migration; got {:?}",
            before.stage()
        );

        migrate_legacy_stage_strings();

        let after = read_item(story_id).expect("item must still exist after migration");
        match after.stage() {
            crate::pipeline_state::Stage::Frozen { resume_to } => {
                assert!(
                    matches!(**resume_to, crate::pipeline_state::Stage::Backlog),
                    "resume_to should default to Backlog for migrated 7_frozen items"
                );
            }
            other => panic!("7_frozen should migrate to Stage::Frozen; got {other:?}"),
        }
    }

    #[test]
    fn migrate_leaves_clean_wire_items_untouched() {
        init_for_test();
        // Seed two items: one already in clean form, one in legacy form.
        write_item(
            "9520_already_clean",
            &Stage::Coding {
                claim: None,
                plan: PlanState::Missing,
                retries: 0,
            },
            Some("Already Clean"),
            None,
            None,
            None,
        );
        seed_with_raw_stage("9521_needs_migration", "2_current");

        migrate_legacy_stage_strings();

        // Clean item is unchanged; legacy item is now clean too.
        let clean = read_item("9520_already_clean").unwrap();
        let migrated = read_item("9521_needs_migration").unwrap();
        assert!(matches!(
            clean.stage(),
            crate::pipeline_state::Stage::Coding { .. }
        ));
        assert!(matches!(
            migrated.stage(),
            crate::pipeline_state::Stage::Coding { .. }
        ));
    }

    #[test]
    fn migrate_is_idempotent() {
        init_for_test();
        seed_with_raw_stage("9530_idempotent", "4_merge");

        migrate_legacy_stage_strings();
        let after_first = read_item("9530_idempotent").unwrap();
        assert!(matches!(
            after_first.stage(),
            crate::pipeline_state::Stage::Merge { .. }
        ));

        // Second call must be a no-op — the filter pass returns empty.
        migrate_legacy_stage_strings();
        let after_second = read_item("9530_idempotent").unwrap();
        assert!(matches!(
            after_second.stage(),
            crate::pipeline_state::Stage::Merge { .. }
        ));
    }

    #[test]
    fn migrate_is_noop_when_crdt_not_initialised() {
        // Calling before init_for_test should not panic.
        migrate_legacy_stage_strings();
    }
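
    // Direct checks on the pure `legacy_stage_to_clean` mapping table,
    // complementing the end-to-end migration tests above.
    #[test]
    fn legacy_stage_map_translates_directory_strings() {
        assert_eq!(legacy_stage_to_clean("2_current"), Some("coding"));
        assert_eq!(legacy_stage_to_clean("4_merge_failure"), Some("merge_failure"));
        // `7_frozen` maps to the safe fallback; the migration caller overrides
        // the stage to "frozen" and writes `resume_to` separately.
        assert_eq!(legacy_stage_to_clean("7_frozen"), Some("backlog"));
        // Already-clean wire strings are not remapped.
        assert_eq!(legacy_stage_to_clean("coding"), None);
        assert_eq!(legacy_stage_to_clean("backlog"), None);
    }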
}

// ── MergeJob migration ─────────────────────────────────────────────────────

/// Detect whether a JSON string uses the old four-bool MergeReport format
/// (pre-story-987) and convert it to the new typed [`MergeResult`] format.
///
/// Returns `None` when the input is already in the new format or cannot be
/// parsed.
fn upgrade_merge_report_json(json: &str) -> Option<String> {
    let v: serde_json::Value = serde_json::from_str(json).ok()?;
    // New format has a "kind" field inside the "result" object.
    // Old format has top-level bool fields.
    if v.get("result").and_then(|r| r.get("kind")).is_some() {
        return None; // Already new format.
    }
    // Must have at least one of the old bool fields to be recognised as old format.
    if v.get("success").is_none() && v.get("had_conflicts").is_none() {
        return None;
    }

    let story_id = v["story_id"].as_str().unwrap_or("").to_string();
    let success = v["success"].as_bool().unwrap_or(false);
    let had_conflicts = v["had_conflicts"].as_bool().unwrap_or(false);
    let conflicts_resolved = v["conflicts_resolved"].as_bool().unwrap_or(false);
    let conflict_details: Option<String> = v["conflict_details"].as_str().map(|s| s.to_string());
    let gates_passed = v["gates_passed"].as_bool().unwrap_or(false);
    let gate_output = v["gate_output"].as_str().unwrap_or("").to_string();
    let no_commits = v["no_commits"].as_bool().unwrap_or(false);
    let worktree_cleaned_up = v["worktree_cleaned_up"].as_bool().unwrap_or(false);
    let story_archived = v["story_archived"].as_bool().unwrap_or(false);

    // Reconstruct the typed MergeResult from the old bools.
    let result = if no_commits {
        serde_json::json!({ "kind": "NoCommits", "output": gate_output })
    } else if had_conflicts && !conflicts_resolved {
        serde_json::json!({
            "kind": "Conflict",
            "details": conflict_details,
            "output": gate_output,
        })
    } else if success && gates_passed {
        serde_json::json!({
            "kind": "Success",
            "conflicts_resolved": conflicts_resolved,
            "conflict_details": conflict_details,
            "gate_output": gate_output,
        })
    } else if !gates_passed {
        serde_json::json!({
            "kind": "GateFailure",
            "output": gate_output,
        })
    } else {
        serde_json::json!({
            "kind": "Other",
            "output": gate_output,
            "conflict_details": conflict_details,
        })
    };

    let new_report = serde_json::json!({
        "story_id": story_id,
        "result": result,
        "worktree_cleaned_up": worktree_cleaned_up,
        "story_archived": story_archived,
    });
    serde_json::to_string(&new_report).ok()
}

/// Migrate existing completed MergeJob CRDT entries from the old four-bool
/// `MergeReport` JSON to the new typed [`MergeResult`] enum format.
///
/// Before rewriting any entries, snapshots the current CRDT database file to
/// `.huskies/backups/pre_merge_result_migration_<ts>.db` so a botched
/// migration can be undone without manual SQLite surgery.
///
/// Running this migration repeatedly is safe — subsequent calls on
/// already-migrated state are no-ops.
pub fn migrate_merge_job(db_path: &std::path::Path) {
    // First pass: collect (story_id, new_json, started_at, finished_at) using
    // public read APIs so we don't need to hold the lock across the snapshot.
    let jobs = match crate::crdt_state::read_all_merge_jobs() {
        Some(j) => j,
        None => return,
    };

    let to_migrate: Vec<(String, String, f64, Option<f64>)> = jobs
        .into_iter()
        .filter_map(|view| {
            if view.status != "completed" {
                return None;
            }
            let error_json = view.error?;
            let new_json = upgrade_merge_report_json(&error_json)?;
            Some((view.story_id, new_json, view.started_at, view.finished_at))
        })
        .collect();

    if to_migrate.is_empty() {
        return;
    }

    // Snapshot the database before making any changes (AC 6).
    if let Some(parent) = db_path.parent() {
        let backups_dir = parent.join("backups");
        if std::fs::create_dir_all(&backups_dir).is_ok() {
            let ts = std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .map(|d| d.as_secs())
                .unwrap_or(0);
            let backup = backups_dir.join(format!("pre_merge_result_migration_{ts}.db"));
            if let Err(e) = std::fs::copy(db_path, &backup) {
                slog!(
                    "[crdt] Warning: could not snapshot pipeline.db before \
                     MergeJob migration: {e}"
                );
            } else {
                slog!(
                    "[crdt] Snapshotted pipeline.db to {} before MergeJob migration",
                    backup.display()
                );
            }
        }
    }

    // Second pass: rewrite each entry's error field via the public write API.
    let count = to_migrate.len();
    for (story_id, new_json, started_at, finished_at) in to_migrate {
        crate::crdt_state::write_merge_job(
            &story_id,
            "completed",
            started_at,
            finished_at,
            Some(&new_json),
        );
    }
    slog!("[crdt] Migrated {count} MergeJob entries to typed MergeResult format");
}

#[cfg(test)]
mod merge_job_migration_tests {
    use super::super::super::state::init_for_test;
    use super::*;
    use crate::crdt_state::write_merge_job;

    struct OldReport<'a> {
        success: bool,
        had_conflicts: bool,
        conflicts_resolved: bool,
        gates_passed: bool,
        no_commits: bool,
        gate_output: &'a str,
        conflict_details: Option<&'a str>,
    }

    fn seed_old_format(story_id: &str, r: OldReport<'_>) {
        let old_json = serde_json::to_string(&serde_json::json!({
            "story_id": story_id,
            "success": r.success,
            "had_conflicts": r.had_conflicts,
            "conflicts_resolved": r.conflicts_resolved,
            "conflict_details": r.conflict_details,
            "gates_passed": r.gates_passed,
            "gate_output": r.gate_output,
            "gate_failure_kind": null,
            "no_commits": r.no_commits,
            "worktree_cleaned_up": false,
            "story_archived": false,
        }))
        .unwrap();
        write_merge_job(story_id, "completed", 1.0, Some(2.0), Some(&old_json));
    }

    fn read_result_kind(story_id: &str) -> Option<String> {
        let view = crate::crdt_state::read_merge_job(story_id)?;
        let json_str = view.error?;
        let v: serde_json::Value = serde_json::from_str(&json_str).ok()?;
        v["result"]["kind"].as_str().map(|s| s.to_string())
    }
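
    // Unit-level checks on `upgrade_merge_report_json` itself, independent of
    // the CRDT round trip exercised by the tests below.
    #[test]
    fn upgrade_recognises_only_old_format_json() {
        // Already-new-format JSON (has `result.kind`) is left alone.
        let new_format = r#"{"story_id":"1","result":{"kind":"Success"}}"#;
        assert!(upgrade_merge_report_json(new_format).is_none());
        // Unparseable input is ignored rather than migrated.
        assert!(upgrade_merge_report_json("not json").is_none());
        // Old-format JSON gains a typed `result.kind`.
        let old = r#"{"story_id":"1","success":true,"gates_passed":true,"had_conflicts":false,"gate_output":"ok"}"#;
        let upgraded = upgrade_merge_report_json(old).expect("old format should upgrade");
        let v: serde_json::Value = serde_json::from_str(&upgraded).unwrap();
        assert_eq!(v["result"]["kind"], "Success");
    }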

    #[test]
    fn migrates_success_variant() {
        init_for_test();
        seed_old_format(
            "9600_success",
            OldReport {
                success: true,
                had_conflicts: false,
                conflicts_resolved: false,
                gates_passed: true,
                no_commits: false,
                gate_output: "gates ok",
                conflict_details: None,
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        assert_eq!(read_result_kind("9600_success").as_deref(), Some("Success"));
    }

    #[test]
    fn migrates_conflict_variant() {
        init_for_test();
        seed_old_format(
            "9601_conflict",
            OldReport {
                success: false,
                had_conflicts: true,
                conflicts_resolved: false,
                gates_passed: false,
                no_commits: false,
                gate_output: "conflicts",
                conflict_details: Some("conflict details"),
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        assert_eq!(
            read_result_kind("9601_conflict").as_deref(),
            Some("Conflict")
        );
    }

    #[test]
    fn migrates_gate_failure_variant() {
        init_for_test();
        seed_old_format(
            "9602_gates",
            OldReport {
                success: false,
                had_conflicts: false,
                conflicts_resolved: false,
                gates_passed: false,
                no_commits: false,
                gate_output: "tests failed",
                conflict_details: None,
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        assert_eq!(
            read_result_kind("9602_gates").as_deref(),
            Some("GateFailure")
        );
    }

    #[test]
    fn migrates_no_commits_variant() {
        init_for_test();
        seed_old_format(
            "9603_nocommits",
            OldReport {
                success: false,
                had_conflicts: false,
                conflicts_resolved: false,
                gates_passed: false,
                no_commits: true,
                gate_output: "no commits to merge",
                conflict_details: None,
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        assert_eq!(
            read_result_kind("9603_nocommits").as_deref(),
            Some("NoCommits")
        );
    }

    #[test]
    fn migrates_other_variant() {
        init_for_test();
        seed_old_format(
            "9604_other",
            OldReport {
                success: false,
                had_conflicts: false,
                conflicts_resolved: false,
                gates_passed: false,
                no_commits: false,
                gate_output: "cherry-pick failed",
                conflict_details: None,
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        // With success=false and gates_passed=false, this report lands in the
        // GateFailure branch before reaching Other, so accept either kind.
        let kind = read_result_kind("9604_other");
        assert!(
            kind.as_deref() == Some("GateFailure") || kind.as_deref() == Some("Other"),
            "unexpected kind: {kind:?}"
        );
    }

    #[test]
    fn skips_non_completed_jobs() {
        init_for_test();
        write_merge_job("9605_running", "running", 1.0, None, Some("{\"ts\":1.0}"));
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        // Running job must not be touched — its error field is still the server-time encoding.
        let view = crate::crdt_state::read_merge_job("9605_running").unwrap();
        assert_eq!(view.status, "running");
    }

    #[test]
    fn is_idempotent() {
        init_for_test();
        seed_old_format(
            "9606_idem",
            OldReport {
                success: true,
                had_conflicts: false,
                conflicts_resolved: false,
                gates_passed: true,
                no_commits: false,
                gate_output: "ok",
                conflict_details: None,
            },
        );
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        let kind_first = read_result_kind("9606_idem");
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
        let kind_second = read_result_kind("9606_idem");
        assert_eq!(kind_first, kind_second, "second migration must be a no-op");
    }

    #[test]
    fn is_noop_when_crdt_not_initialised() {
        migrate_merge_job(std::path::Path::new("/nonexistent/pipeline.db"));
    }
}