Revert "refactor: split top-5 largest files into mod.rs + tests.rs"
This reverts commit 65a3767a7a.
@@ -1260,4 +1260,863 @@ pub(crate) mod hex {
 // ── Tests ────────────────────────────────────────────────────────────
 
 #[cfg(test)]
-mod tests;
+mod tests {
+    use super::*;
+    use bft_json_crdt::json_crdt::OpState;
+
+    #[test]
+    fn crdt_doc_insert_and_view() {
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        let item_json: JsonValue = json!({
+            "story_id": "10_story_test",
+            "stage": "2_current",
+            "name": "Test Story",
+            "agent": "coder-opus",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
+        assert_eq!(crdt.apply(op), OpState::Ok);
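+        // Anything other than OpState::Ok would mean the op was rejected
+        // (e.g. the ErrPathMismatch case discussed in the bug-511 notes below).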
+
+        let view = crdt.doc.items.view();
+        assert_eq!(view.len(), 1);
+
+        let item = &crdt.doc.items[0];
+        assert_eq!(
+            item.story_id.view(),
+            JsonValue::String("10_story_test".to_string())
+        );
+        assert_eq!(
+            item.stage.view(),
+            JsonValue::String("2_current".to_string())
+        );
+    }
+
+    #[test]
+    fn crdt_doc_update_stage() {
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        let item_json: JsonValue = json!({
+            "story_id": "20_story_move",
+            "stage": "1_backlog",
+            "name": "Move Me",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let insert_op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
+        crdt.apply(insert_op);
+
+        // Update stage
+        let stage_op = crdt.doc.items[0]
+            .stage
+            .set("2_current".to_string())
+            .sign(&kp);
+        crdt.apply(stage_op);
+
+        assert_eq!(
+            crdt.doc.items[0].stage.view(),
+            JsonValue::String("2_current".to_string())
+        );
+    }
+
+    #[test]
+    fn crdt_ops_replay_reconstructs_state() {
+        let kp = make_keypair();
+        let mut crdt1 = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        // Build state with a series of ops.
+        let item_json: JsonValue = json!({
+            "story_id": "30_story_replay",
+            "stage": "1_backlog",
+            "name": "Replay Test",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let op1 = crdt1.doc.items.insert(ROOT_ID, item_json).sign(&kp);
+        crdt1.apply(op1.clone());
+
+        let op2 = crdt1.doc.items[0]
+            .stage
+            .set("2_current".to_string())
+            .sign(&kp);
+        crdt1.apply(op2.clone());
+
+        let op3 = crdt1.doc.items[0]
+            .name
+            .set("Updated Name".to_string())
+            .sign(&kp);
+        crdt1.apply(op3.clone());
+
+        // Replay ops on a fresh CRDT.
+        let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&kp);
+        crdt2.apply(op1);
+        crdt2.apply(op2);
+        crdt2.apply(op3);
+
+        assert_eq!(
+            crdt1.doc.items[0].stage.view(),
+            crdt2.doc.items[0].stage.view()
+        );
+        assert_eq!(
+            crdt1.doc.items[0].name.view(),
+            crdt2.doc.items[0].name.view()
+        );
+    }
+
+    #[test]
+    fn extract_item_view_parses_crdt_item() {
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        let item_json: JsonValue = json!({
+            "story_id": "40_story_view",
+            "stage": "3_qa",
+            "name": "View Test",
+            "agent": "coder-1",
+            "retry_count": 2.0,
+            "blocked": true,
+            "depends_on": "[10,20]",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
+        crdt.apply(op);
+
+        let view = extract_item_view(&crdt.doc.items[0]).unwrap();
+        assert_eq!(view.story_id, "40_story_view");
+        assert_eq!(view.stage, "3_qa");
+        assert_eq!(view.name.as_deref(), Some("View Test"));
+        assert_eq!(view.agent.as_deref(), Some("coder-1"));
+        assert_eq!(view.retry_count, Some(2));
+        assert_eq!(view.blocked, Some(true));
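+        // "depends_on" goes in as the JSON-encoded string "[10,20]" and comes
+        // back out of extract_item_view as a parsed numeric Vec.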
+        assert_eq!(view.depends_on, Some(vec![10, 20]));
+    }
+
+    #[test]
+    fn rebuild_index_maps_story_ids() {
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        for (sid, stage) in &[("10_story_a", "1_backlog"), ("20_story_b", "2_current")] {
+            let item: JsonValue = json!({
+                "story_id": sid,
+                "stage": stage,
+                "name": "",
+                "agent": "",
+                "retry_count": 0.0,
+                "blocked": false,
+                "depends_on": "",
+                "claimed_by": "",
+                "claimed_at": 0.0,
+            })
+            .into();
+            let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
+            crdt.apply(op);
+        }
+
+        let index = rebuild_index(&crdt);
+        assert_eq!(index.len(), 2);
+        assert!(index.contains_key("10_story_a"));
+        assert!(index.contains_key("20_story_b"));
+    }
+
+    #[tokio::test]
+    async fn init_and_write_read_roundtrip() {
+        let tmp = tempfile::tempdir().unwrap();
+        let db_path = tmp.path().join("crdt_test.db");
+
+        // Init directly (not via the global singleton, for test isolation).
+        let options = SqliteConnectOptions::new()
+            .filename(&db_path)
+            .create_if_missing(true);
+        let pool = SqlitePool::connect_with(options).await.unwrap();
+        sqlx::migrate!("./migrations").run(&pool).await.unwrap();
+
+        let keypair = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&keypair);
+
+        // Insert and update like write_item does.
+        let item_json: JsonValue = json!({
+            "story_id": "50_story_roundtrip",
+            "stage": "1_backlog",
+            "name": "Roundtrip",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let insert_op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&keypair);
+        crdt.apply(insert_op.clone());
+
+        // Persist the op.
+        let op_json = serde_json::to_string(&insert_op).unwrap();
+        let op_id = hex::encode(&insert_op.id());
+        let now = chrono::Utc::now().to_rfc3339();
+        sqlx::query(
+            "INSERT INTO crdt_ops (op_id, seq, op_json, created_at) VALUES (?1, ?2, ?3, ?4)",
+        )
+        .bind(&op_id)
+        .bind(insert_op.inner.seq as i64)
+        .bind(&op_json)
+        .bind(&now)
+        .execute(&pool)
+        .await
+        .unwrap();
+
+        // Reconstruct from DB.
+        let rows: Vec<(String,)> =
+            sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY rowid ASC")
+                .fetch_all(&pool)
+                .await
+                .unwrap();
+
+        let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&keypair);
+        for (json_str,) in &rows {
+            let op: SignedOp = serde_json::from_str(json_str).unwrap();
+            crdt2.apply(op);
+        }
+
+        let view = extract_item_view(&crdt2.doc.items[0]).unwrap();
+        assert_eq!(view.story_id, "50_story_roundtrip");
+        assert_eq!(view.stage, "1_backlog");
+        assert_eq!(view.name.as_deref(), Some("Roundtrip"));
+    }
+
+    #[test]
+    fn signed_op_serialization_roundtrip() {
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        let item: JsonValue = json!({
+            "story_id": "60_story_serde",
+            "stage": "1_backlog",
+            "name": "Serde Test",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
+        let json_str = serde_json::to_string(&op).unwrap();
+        let deserialized: SignedOp = serde_json::from_str(&json_str).unwrap();
+
+        assert_eq!(op.id(), deserialized.id());
+        assert_eq!(op.inner.seq, deserialized.inner.seq);
+    }
+
+    // ── CrdtEvent tests ─────────────────────────────────────────────────
+
+    #[test]
+    fn crdt_event_has_expected_fields() {
+        let evt = CrdtEvent {
+            story_id: "42_story_foo".to_string(),
+            from_stage: Some("1_backlog".to_string()),
+            to_stage: "2_current".to_string(),
+            name: Some("Foo Feature".to_string()),
+        };
+        assert_eq!(evt.story_id, "42_story_foo");
+        assert_eq!(evt.from_stage.as_deref(), Some("1_backlog"));
+        assert_eq!(evt.to_stage, "2_current");
+        assert_eq!(evt.name.as_deref(), Some("Foo Feature"));
+    }
+
+    #[test]
+    fn crdt_event_clone_preserves_data() {
+        let evt = CrdtEvent {
+            story_id: "10_story_bar".to_string(),
+            from_stage: None,
+            to_stage: "1_backlog".to_string(),
+            name: None,
+        };
+        let cloned = evt.clone();
+        assert_eq!(cloned.story_id, "10_story_bar");
+        assert!(cloned.from_stage.is_none());
+        assert!(cloned.name.is_none());
+    }
+
+    #[test]
+    fn emit_event_is_noop_when_channel_not_initialised() {
+        // Before CRDT_EVENT_TX is set, emit_event should not panic.
+        // This test verifies the guard clause works. In test binaries the
+        // OnceLock may already be set by another test, so we just verify
+        // the function doesn't panic regardless.
+        emit_event(CrdtEvent {
+            story_id: "99_story_noop".to_string(),
+            from_stage: None,
+            to_stage: "1_backlog".to_string(),
+            name: None,
+        });
+    }
+
+    #[test]
+    fn crdt_event_broadcast_channel_round_trip() {
+        let (tx, mut rx) = broadcast::channel::<CrdtEvent>(16);
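+        // Capacity 16 is arbitrary here; a single send/try_recv pair never fills it.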
+        let evt = CrdtEvent {
+            story_id: "70_story_broadcast".to_string(),
+            from_stage: Some("1_backlog".to_string()),
+            to_stage: "2_current".to_string(),
+            name: Some("Broadcast Test".to_string()),
+        };
+        tx.send(evt).unwrap();
+
+        let received = rx.try_recv().unwrap();
+        assert_eq!(received.story_id, "70_story_broadcast");
+        assert_eq!(received.from_stage.as_deref(), Some("1_backlog"));
+        assert_eq!(received.to_stage, "2_current");
+        assert_eq!(received.name.as_deref(), Some("Broadcast Test"));
+    }
+
+    #[test]
+    fn dep_is_done_crdt_returns_false_when_no_crdt_state() {
+        // When the global CRDT state is not initialised (or in a test environment),
+        // dep_is_done_crdt should return false rather than panicking.
+        // Note: in the test binary the global may or may not be initialised,
+        // but the function should never panic either way.
+        let _ = dep_is_done_crdt(9999);
+    }
+
+    #[test]
+    fn check_unmet_deps_crdt_returns_empty_when_item_not_found() {
+        // Non-existent story should return empty deps.
+        let result = check_unmet_deps_crdt("nonexistent_story");
+        assert!(result.is_empty());
+    }
+
+    // ── Bug 503: archived-dep visibility ─────────────────────────────────────
+
+    #[test]
+    fn dep_is_archived_crdt_returns_false_when_no_crdt_state() {
+        // When the global CRDT state is not initialised, must not panic.
+        let _ = dep_is_archived_crdt(9998);
+    }
+
+    #[test]
+    fn check_archived_deps_crdt_returns_empty_when_item_not_found() {
+        // Non-existent story should return empty archived deps.
+        let result = check_archived_deps_crdt("nonexistent_story_archived");
+        assert!(result.is_empty());
+    }
+
+    // ── 478: WebSocket CRDT sync layer tests ────────────────────────────────
+
+    #[test]
+    fn apply_remote_op_returns_false_when_not_initialised() {
+        // Without the global CRDT state, apply_remote_op should return false.
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+        let item: JsonValue = serde_json::json!({
+            "story_id": "80_story_remote",
+            "stage": "1_backlog",
+            "name": "Remote",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+        let op = crdt
+            .doc
+            .items
+            .insert(bft_json_crdt::op::ROOT_ID, item)
+            .sign(&kp);
+        // This uses the global state which may not be initialised in tests.
+        let _ = apply_remote_op(op);
+    }
+
+    #[test]
+    fn signed_op_survives_sync_serialization_roundtrip() {
+        // Verify that a SignedOp serialised to JSON and back produces
+        // the same op (critical for the sync wire protocol).
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+        let item: JsonValue = serde_json::json!({
+            "story_id": "90_story_wire",
+            "stage": "2_current",
+            "name": "Wire Test",
+            "agent": "coder",
+            "retry_count": 1.0,
+            "blocked": false,
+            "depends_on": "[10]",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+        let op = crdt
+            .doc
+            .items
+            .insert(bft_json_crdt::op::ROOT_ID, item)
+            .sign(&kp);
+
+        let json1 = serde_json::to_string(&op).unwrap();
+        let roundtripped: SignedOp = serde_json::from_str(&json1).unwrap();
+        let json2 = serde_json::to_string(&roundtripped).unwrap();
+
+        assert_eq!(json1, json2);
+        assert_eq!(op.id(), roundtripped.id());
+        assert_eq!(op.inner.seq, roundtripped.inner.seq);
+        assert_eq!(op.author(), roundtripped.author());
+    }
+
+    #[test]
+    fn sync_broadcast_channel_round_trip() {
+        let (tx, mut rx) = broadcast::channel::<SignedOp>(16);
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+        let item: JsonValue = serde_json::json!({
+            "story_id": "95_story_sync_bcast",
+            "stage": "1_backlog",
+            "name": "",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+        let op = crdt
+            .doc
+            .items
+            .insert(bft_json_crdt::op::ROOT_ID, item)
+            .sign(&kp);
+        tx.send(op.clone()).unwrap();
+
+        let received = rx.try_recv().unwrap();
+        assert_eq!(received.id(), op.id());
+    }
+
+    // ── Bug 511: CRDT Lamport clock resets on restart ────────────────────────
+    //
+    // Root cause: Op::sign() always produces a SignedOp with depends_on = vec![],
+    // so the causal dependency queue never engages during replay. Field-update
+    // ops (seq=1,2,3 from each field's LwwRegisterCrdt counter) are replayed
+    // before list-insert ops (seq=N from the items ListCrdt counter) when
+    // ordered by `seq ASC`. They fail ErrPathMismatch silently, their our_seq
+    // is never updated, and the next field write re-uses seq=1.
+    //
+    // Fix: replay by `rowid ASC` (SQLite insertion order) instead of `seq ASC`.
+    // Rowid preserves the causal order the ops were originally applied in, so
+    // field updates always come after the item insert they reference.
+    #[tokio::test]
+    async fn bug_511_rowid_replay_preserves_field_update_after_list_insert() {
+        let tmp = tempfile::tempdir().unwrap();
+        let db_path = tmp.path().join("bug511.db");
+
+        let options = SqliteConnectOptions::new()
+            .filename(&db_path)
+            .create_if_missing(true);
+        let pool = SqlitePool::connect_with(options).await.unwrap();
+        sqlx::migrate!("./migrations").run(&pool).await.unwrap();
+
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+
+        // Insert 5 dummy items to advance items.our_seq to 5.
+        for i in 0..5u32 {
+            let sid = format!("{}_story_warmup", i);
+            let item: JsonValue = json!({
+                "story_id": sid,
+                "stage": "1_backlog",
+                "name": "",
+                "agent": "",
+                "retry_count": 0.0,
+                "blocked": false,
+                "depends_on": "",
+                "claimed_by": "",
+                "claimed_at": 0.0,
+            })
+            .into();
+            let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
+            crdt.apply(op.clone());
+            // We don't persist these to the DB — they are pre-history.
+        }
+
+        // Now insert the real item. items.our_seq was 5, so this op gets seq=6.
+        let target_item: JsonValue = json!({
+            "story_id": "511_story_target",
+            "stage": "1_backlog",
+            "name": "Bug 511 target",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+        let insert_op = crdt.doc.items.insert(ROOT_ID, target_item).sign(&kp);
+        crdt.apply(insert_op.clone());
+        // insert_op.inner.seq == 6
+
+        // Now update the stage. The stage LwwRegisterCrdt for this item starts
+        // at our_seq=0, so this field op gets seq=1. Crucially: seq=1 < seq=6.
+        let idx = rebuild_index(&crdt)["511_story_target"];
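+        // (Looked up by story_id; the five warm-up items occupy the earlier indices.)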
+        let stage_op = crdt.doc.items[idx]
+            .stage
+            .set("2_current".to_string())
+            .sign(&kp);
+        crdt.apply(stage_op.clone());
+        // stage_op.inner.seq == 1
+
+        // Persist BOTH ops in causal order (insert first, update second).
+        // This means insert_op gets rowid < stage_op rowid.
+        let now = chrono::Utc::now().to_rfc3339();
+        for op in [&insert_op, &stage_op] {
+            let op_json = serde_json::to_string(op).unwrap();
+            let op_id = hex::encode(&op.id());
+            sqlx::query(
+                "INSERT INTO crdt_ops (op_id, seq, op_json, created_at) VALUES (?1, ?2, ?3, ?4)",
+            )
+            .bind(&op_id)
+            .bind(op.inner.seq as i64)
+            .bind(&op_json)
+            .bind(&now)
+            .execute(&pool)
+            .await
+            .unwrap();
+        }
+
+        // Replay by rowid ASC (the fix). The insert must come before the field
+        // update regardless of their field-level seq values.
+        let rows: Vec<(String,)> =
+            sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY rowid ASC")
+                .fetch_all(&pool)
+                .await
+                .unwrap();
+
+        let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&kp);
+        for (json_str,) in &rows {
+            let op: SignedOp = serde_json::from_str(json_str).unwrap();
+            crdt2.apply(op);
+        }
+
+        // The item must be in the CRDT and must reflect the stage update.
+        let index2 = rebuild_index(&crdt2);
+        assert!(
+            index2.contains_key("511_story_target"),
+            "item not found after rowid-order replay"
+        );
+        let idx2 = index2["511_story_target"];
+        let view = extract_item_view(&crdt2.doc.items[idx2]).unwrap();
+        assert_eq!(
+            view.stage, "2_current",
+            "stage field update lost during replay (bug 511 regression)"
+        );
+
+        // Confirm the bug is reproducible by replaying seq ASC instead.
+        // With seq ASC the stage_op (seq=1) arrives before insert_op (seq=6),
+        // fails ErrPathMismatch, and the item ends up at "1_backlog".
+        let rows_wrong_order: Vec<(String,)> =
+            sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY seq ASC")
+                .fetch_all(&pool)
+                .await
+                .unwrap();
+
+        let mut crdt3 = BaseCrdt::<PipelineDoc>::new(&kp);
+        for (json_str,) in &rows_wrong_order {
+            let op: SignedOp = serde_json::from_str(json_str).unwrap();
+            crdt3.apply(op);
+        }
+
+        let index3 = rebuild_index(&crdt3);
+        // With seq ASC replay, the item is created (insert_op eventually runs)
+        // but the stage update is lost (it ran before the item existed).
+        if let Some(idx3) = index3.get("511_story_target") {
+            let view3 = extract_item_view(&crdt3.doc.items[*idx3]).unwrap();
+            // The bug: stage is still "1_backlog" because the update was dropped.
+            assert_eq!(
+                view3.stage, "1_backlog",
+                "expected seq-ASC replay to exhibit the bug (update lost)"
+            );
+        }
+    }
+
+    // ── Story 518: persist_tx send failure logging ───────────────────────────
+
+    #[test]
+    fn persist_tx_send_failure_logs_error() {
+        let kp = make_keypair();
+        let crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+        let (persist_tx, persist_rx) = mpsc::unbounded_channel::<SignedOp>();
+
+        let mut state = CrdtState {
+            crdt,
+            keypair: kp,
+            index: HashMap::new(),
+            node_index: HashMap::new(),
+            persist_tx,
+        };
+
+        // Drop the receiver so that the next send fails immediately.
+        drop(persist_rx);
+
+        let item_json: JsonValue = json!({
+            "story_id": "518_story_persist_fail",
+            "stage": "1_backlog",
+            "name": "Persist Fail Test",
+            "agent": "",
+            "retry_count": 0.0,
+            "blocked": false,
+            "depends_on": "",
+            "claimed_by": "",
+            "claimed_at": 0.0,
+        })
+        .into();
+
+        let before_errors = crate::log_buffer::global()
+            .get_recent_entries(1000, None, Some(&crate::log_buffer::LogLevel::Error))
+            .len();
+
+        apply_and_persist(&mut state, |s| s.crdt.doc.items.insert(ROOT_ID, item_json));
+
+        let error_entries = crate::log_buffer::global().get_recent_entries(
+            1000,
+            None,
+            Some(&crate::log_buffer::LogLevel::Error),
+        );
+
+        assert!(
+            error_entries.len() > before_errors,
+            "expected an ERROR log entry when persist_tx send fails, but none was added"
+        );
+
+        let last_error = &error_entries[error_entries.len() - 1];
+        assert!(
+            last_error.message.contains("persist"),
+            "error message should mention persist: {}",
+            last_error.message
+        );
+        assert!(
+            last_error.message.contains("ahead") || last_error.message.contains("diverged"),
+            "error message should note in-memory/persisted divergence: {}",
+            last_error.message
+        );
+    }
+
+    // ── Story 631: vector clock delta sync tests ────────────────────────
+
+    /// Helper: create N signed insert ops on a CRDT and return them with their JSON.
+    fn make_ops(
+        kp: &Ed25519KeyPair,
+        crdt: &mut BaseCrdt<PipelineDoc>,
+        count: usize,
+        prefix: &str,
+    ) -> Vec<(SignedOp, String)> {
+        let mut ops = Vec::new();
+        for i in 0..count {
+            let item: JsonValue = json!({
+                "story_id": format!("{prefix}_{i}"),
+                "stage": "1_backlog",
+                "name": format!("Item {i}"),
+                "agent": "",
+                "retry_count": 0.0,
+                "blocked": false,
+                "depends_on": "",
+                "claimed_by": "",
+                "claimed_at": 0.0,
+            })
+            .into();
+            let op = crdt.doc.items.insert(ROOT_ID, item).sign(kp);
+            crdt.apply(op.clone());
+            let json = serde_json::to_string(&op).unwrap();
+            ops.push((op, json));
+        }
+        ops
+    }
+
+    /// Build a vector clock from a list of (SignedOp, json) pairs.
+    fn build_clock(ops: &[(SignedOp, String)]) -> VectorClock {
+        let mut clock = VectorClock::new();
+        for (op, _) in ops {
+            let author = hex::encode(&op.author());
+            *clock.entry(author).or_insert(0) += 1;
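+            // One journal entry per op, so the running count doubles as the
+            // author's clock value.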
+        }
+        clock
+    }
+
+    /// Compute ops_since against a local journal and peer clock.
+    ///
+    /// Mirrors the production `ops_since` logic but operates on a local Vec
+    /// instead of the global `ALL_OPS` static.
+    fn local_ops_since(all_ops: &[(SignedOp, String)], peer_clock: &VectorClock) -> Vec<String> {
+        let mut author_counts: HashMap<String, u64> = HashMap::new();
+        let mut result = Vec::new();
+        for (op, json) in all_ops {
+            let author = hex::encode(&op.author());
+            let count = author_counts.entry(author.clone()).or_insert(0);
+            *count += 1;
+            let peer_has = peer_clock.get(&author).copied().unwrap_or(0);
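+            // Authors missing from the peer clock default to 0, so a brand-new
+            // peer receives every op.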
+            if *count > peer_has {
+                result.push(json.clone());
+            }
+        }
+        result
+    }
+
+    /// Integration test (low-bandwidth sync): two nodes, A applies 100 ops,
+    /// B reconnects with a current clock — B receives 0 ops on the bulk phase.
+    #[test]
+    fn delta_sync_low_bandwidth_fully_caught_up() {
+        let kp_a = make_keypair();
+        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
+
+        let ops_a = make_ops(&kp_a, &mut crdt_a, 100, "631_low");
+
+        // B has already seen all 100 ops (its clock matches A's journal).
+        let clock_b = build_clock(&ops_a);
+
+        // Delta should be empty.
+        let delta = local_ops_since(&ops_a, &clock_b);
+        assert_eq!(
+            delta.len(),
+            0,
+            "caught-up peer should receive 0 ops, got {}",
+            delta.len()
+        );
+    }
+
+    /// Integration test (mid-stream): A applies 100 ops, B disconnects,
+    /// A applies 50 more ops, B reconnects — B receives exactly the 50 missed ops.
+    #[test]
+    fn delta_sync_mid_stream_partial_catch_up() {
+        let kp_a = make_keypair();
+        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
+
+        // Phase 1: 100 ops that B has seen.
+        let ops_phase1 = make_ops(&kp_a, &mut crdt_a, 100, "631_mid1");
+        let clock_b = build_clock(&ops_phase1);
+
+        // Phase 2: 50 more ops that B missed.
+        let ops_phase2 = make_ops(&kp_a, &mut crdt_a, 50, "631_mid2");
+
+        // A's full journal is phase1 + phase2.
+        let mut all_ops_a: Vec<(SignedOp, String)> = ops_phase1;
+        all_ops_a.extend(ops_phase2);
+
+        let delta = local_ops_since(&all_ops_a, &clock_b);
+        assert_eq!(
+            delta.len(),
+            50,
+            "peer should receive exactly 50 missed ops, got {}",
+            delta.len()
+        );
+    }
+
+    /// Integration test (new node): C connects with empty clock,
+    /// receives all 150 ops — verifies fallback behaviour.
+    #[test]
+    fn delta_sync_new_node_receives_all_ops() {
+        let kp_a = make_keypair();
+        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
+
+        let ops_phase1 = make_ops(&kp_a, &mut crdt_a, 100, "631_new1");
+        let ops_phase2 = make_ops(&kp_a, &mut crdt_a, 50, "631_new2");
+
+        let mut all_ops_a: Vec<(SignedOp, String)> = ops_phase1;
+        all_ops_a.extend(ops_phase2);
+
+        // Empty clock = new node.
+        let empty_clock = VectorClock::new();
+        let delta = local_ops_since(&all_ops_a, &empty_clock);
+        assert_eq!(
+            delta.len(),
+            150,
+            "new node should receive all 150 ops, got {}",
+            delta.len()
+        );
+    }
+
+    /// Multi-author delta sync: ops from two different nodes, peer has seen
+    /// all of one author but none of the other.
+    #[test]
+    fn delta_sync_multi_author() {
+        use fastcrypto::traits::KeyPair;
+
+        let kp_a = make_keypair();
+        let kp_b = make_keypair();
+        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
+        let mut crdt_b = BaseCrdt::<PipelineDoc>::new(&kp_b);
+
+        let ops_a = make_ops(&kp_a, &mut crdt_a, 30, "631_ma_a");
+        let ops_b = make_ops(&kp_b, &mut crdt_b, 20, "631_ma_b");
+
+        // Combined journal on a hypothetical server.
+        let mut all_ops: Vec<(SignedOp, String)> = ops_a.clone();
+        all_ops.extend(ops_b);
+
+        // Peer has seen all of A's ops but none of B's.
+        let mut peer_clock = VectorClock::new();
+        let author_a_hex = hex::encode(&kp_a.public().0.to_bytes());
+        peer_clock.insert(author_a_hex, 30);
+
+        let delta = local_ops_since(&all_ops, &peer_clock);
+        assert_eq!(
+            delta.len(),
+            20,
+            "peer should receive 20 ops from author B, got {}",
+            delta.len()
+        );
+    }
+
+    /// Vector clock construction from ops.
+    #[test]
+    fn build_vector_clock_from_ops() {
+        use fastcrypto::traits::KeyPair;
+
+        let kp = make_keypair();
+        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
+        let ops = make_ops(&kp, &mut crdt, 10, "631_vc");
+
+        let clock = build_clock(&ops);
+        let author_hex = hex::encode(&kp.public().0.to_bytes());
+
+        assert_eq!(clock.len(), 1, "single author should produce 1 clock entry");
+        assert_eq!(clock[&author_hex], 10, "clock should show 10 ops");
+    }
+
+    /// Wire format: clock message serialization round-trip.
+    #[test]
+    fn clock_message_serialization_roundtrip() {
+        let mut clock = VectorClock::new();
+        clock.insert("aabbcc".to_string(), 42);
+        clock.insert("ddeeff".to_string(), 7);
+
+        let json = serde_json::to_value(&clock).unwrap();
+        assert!(json.is_object());
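+        // A VectorClock serialises as a plain JSON object keyed by author hex.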
+        let deserialized: VectorClock = serde_json::from_value(json).unwrap();
+        assert_eq!(deserialized["aabbcc"], 42);
+        assert_eq!(deserialized["ddeeff"], 7);
+    }
+}
@@ -1,854 +0,0 @@
|
|||||||
use super::*;
|
|
||||||
use bft_json_crdt::json_crdt::OpState;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_doc_insert_and_view() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
let item_json: JsonValue = json!({
|
|
||||||
"story_id": "10_story_test",
|
|
||||||
"stage": "2_current",
|
|
||||||
"name": "Test Story",
|
|
||||||
"agent": "coder-opus",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
|
|
||||||
assert_eq!(crdt.apply(op), OpState::Ok);
|
|
||||||
|
|
||||||
let view = crdt.doc.items.view();
|
|
||||||
assert_eq!(view.len(), 1);
|
|
||||||
|
|
||||||
let item = &crdt.doc.items[0];
|
|
||||||
assert_eq!(
|
|
||||||
item.story_id.view(),
|
|
||||||
JsonValue::String("10_story_test".to_string())
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
item.stage.view(),
|
|
||||||
JsonValue::String("2_current".to_string())
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_doc_update_stage() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
let item_json: JsonValue = json!({
|
|
||||||
"story_id": "20_story_move",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Move Me",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let insert_op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
|
|
||||||
crdt.apply(insert_op);
|
|
||||||
|
|
||||||
// Update stage
|
|
||||||
let stage_op = crdt.doc.items[0]
|
|
||||||
.stage
|
|
||||||
.set("2_current".to_string())
|
|
||||||
.sign(&kp);
|
|
||||||
crdt.apply(stage_op);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
crdt.doc.items[0].stage.view(),
|
|
||||||
JsonValue::String("2_current".to_string())
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_ops_replay_reconstructs_state() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt1 = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
// Build state with a series of ops.
|
|
||||||
let item_json: JsonValue = json!({
|
|
||||||
"story_id": "30_story_replay",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Replay Test",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let op1 = crdt1.doc.items.insert(ROOT_ID, item_json).sign(&kp);
|
|
||||||
crdt1.apply(op1.clone());
|
|
||||||
|
|
||||||
let op2 = crdt1.doc.items[0]
|
|
||||||
.stage
|
|
||||||
.set("2_current".to_string())
|
|
||||||
.sign(&kp);
|
|
||||||
crdt1.apply(op2.clone());
|
|
||||||
|
|
||||||
let op3 = crdt1.doc.items[0]
|
|
||||||
.name
|
|
||||||
.set("Updated Name".to_string())
|
|
||||||
.sign(&kp);
|
|
||||||
crdt1.apply(op3.clone());
|
|
||||||
|
|
||||||
// Replay ops on a fresh CRDT.
|
|
||||||
let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
crdt2.apply(op1);
|
|
||||||
crdt2.apply(op2);
|
|
||||||
crdt2.apply(op3);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
crdt1.doc.items[0].stage.view(),
|
|
||||||
crdt2.doc.items[0].stage.view()
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
crdt1.doc.items[0].name.view(),
|
|
||||||
crdt2.doc.items[0].name.view()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn extract_item_view_parses_crdt_item() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
let item_json: JsonValue = json!({
|
|
||||||
"story_id": "40_story_view",
|
|
||||||
"stage": "3_qa",
|
|
||||||
"name": "View Test",
|
|
||||||
"agent": "coder-1",
|
|
||||||
"retry_count": 2.0,
|
|
||||||
"blocked": true,
|
|
||||||
"depends_on": "[10,20]",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&kp);
|
|
||||||
crdt.apply(op);
|
|
||||||
|
|
||||||
let view = extract_item_view(&crdt.doc.items[0]).unwrap();
|
|
||||||
assert_eq!(view.story_id, "40_story_view");
|
|
||||||
assert_eq!(view.stage, "3_qa");
|
|
||||||
assert_eq!(view.name.as_deref(), Some("View Test"));
|
|
||||||
assert_eq!(view.agent.as_deref(), Some("coder-1"));
|
|
||||||
assert_eq!(view.retry_count, Some(2));
|
|
||||||
assert_eq!(view.blocked, Some(true));
|
|
||||||
assert_eq!(view.depends_on, Some(vec![10, 20]));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn rebuild_index_maps_story_ids() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
for (sid, stage) in &[("10_story_a", "1_backlog"), ("20_story_b", "2_current")] {
|
|
||||||
let item: JsonValue = json!({
|
|
||||||
"story_id": sid,
|
|
||||||
"stage": stage,
|
|
||||||
"name": "",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
|
|
||||||
crdt.apply(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
let index = rebuild_index(&crdt);
|
|
||||||
assert_eq!(index.len(), 2);
|
|
||||||
assert!(index.contains_key("10_story_a"));
|
|
||||||
assert!(index.contains_key("20_story_b"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
async fn init_and_write_read_roundtrip() {
|
|
||||||
let tmp = tempfile::tempdir().unwrap();
|
|
||||||
let db_path = tmp.path().join("crdt_test.db");
|
|
||||||
|
|
||||||
// Init directly (not via the global singleton, for test isolation).
|
|
||||||
let options = SqliteConnectOptions::new()
|
|
||||||
.filename(&db_path)
|
|
||||||
.create_if_missing(true);
|
|
||||||
let pool = SqlitePool::connect_with(options).await.unwrap();
|
|
||||||
sqlx::migrate!("./migrations").run(&pool).await.unwrap();
|
|
||||||
|
|
||||||
let keypair = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&keypair);
|
|
||||||
|
|
||||||
// Insert and update like write_item does.
|
|
||||||
let item_json: JsonValue = json!({
|
|
||||||
"story_id": "50_story_roundtrip",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Roundtrip",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let insert_op = crdt.doc.items.insert(ROOT_ID, item_json).sign(&keypair);
|
|
||||||
crdt.apply(insert_op.clone());
|
|
||||||
|
|
||||||
// Persist the op.
|
|
||||||
let op_json = serde_json::to_string(&insert_op).unwrap();
|
|
||||||
let op_id = hex::encode(&insert_op.id());
|
|
||||||
let now = chrono::Utc::now().to_rfc3339();
|
|
||||||
sqlx::query("INSERT INTO crdt_ops (op_id, seq, op_json, created_at) VALUES (?1, ?2, ?3, ?4)")
|
|
||||||
.bind(&op_id)
|
|
||||||
.bind(insert_op.inner.seq as i64)
|
|
||||||
.bind(&op_json)
|
|
||||||
.bind(&now)
|
|
||||||
.execute(&pool)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
// Reconstruct from DB.
|
|
||||||
let rows: Vec<(String,)> = sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY rowid ASC")
|
|
||||||
.fetch_all(&pool)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&keypair);
|
|
||||||
for (json_str,) in &rows {
|
|
||||||
let op: SignedOp = serde_json::from_str(json_str).unwrap();
|
|
||||||
crdt2.apply(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
let view = extract_item_view(&crdt2.doc.items[0]).unwrap();
|
|
||||||
assert_eq!(view.story_id, "50_story_roundtrip");
|
|
||||||
assert_eq!(view.stage, "1_backlog");
|
|
||||||
assert_eq!(view.name.as_deref(), Some("Roundtrip"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn signed_op_serialization_roundtrip() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
let item: JsonValue = json!({
|
|
||||||
"story_id": "60_story_serde",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Serde Test",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
|
|
||||||
let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
|
|
||||||
let json_str = serde_json::to_string(&op).unwrap();
|
|
||||||
let deserialized: SignedOp = serde_json::from_str(&json_str).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(op.id(), deserialized.id());
|
|
||||||
assert_eq!(op.inner.seq, deserialized.inner.seq);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── CrdtEvent tests ─────────────────────────────────────────────────
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_event_has_expected_fields() {
|
|
||||||
let evt = CrdtEvent {
|
|
||||||
story_id: "42_story_foo".to_string(),
|
|
||||||
from_stage: Some("1_backlog".to_string()),
|
|
||||||
to_stage: "2_current".to_string(),
|
|
||||||
name: Some("Foo Feature".to_string()),
|
|
||||||
};
|
|
||||||
assert_eq!(evt.story_id, "42_story_foo");
|
|
||||||
assert_eq!(evt.from_stage.as_deref(), Some("1_backlog"));
|
|
||||||
assert_eq!(evt.to_stage, "2_current");
|
|
||||||
assert_eq!(evt.name.as_deref(), Some("Foo Feature"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_event_clone_preserves_data() {
|
|
||||||
let evt = CrdtEvent {
|
|
||||||
story_id: "10_story_bar".to_string(),
|
|
||||||
from_stage: None,
|
|
||||||
to_stage: "1_backlog".to_string(),
|
|
||||||
name: None,
|
|
||||||
};
|
|
||||||
let cloned = evt.clone();
|
|
||||||
assert_eq!(cloned.story_id, "10_story_bar");
|
|
||||||
assert!(cloned.from_stage.is_none());
|
|
||||||
assert!(cloned.name.is_none());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn emit_event_is_noop_when_channel_not_initialised() {
|
|
||||||
// Before CRDT_EVENT_TX is set, emit_event should not panic.
|
|
||||||
// This test verifies the guard clause works. In test binaries the
|
|
||||||
// OnceLock may already be set by another test, so we just verify
|
|
||||||
// the function doesn't panic regardless.
|
|
||||||
emit_event(CrdtEvent {
|
|
||||||
story_id: "99_story_noop".to_string(),
|
|
||||||
from_stage: None,
|
|
||||||
to_stage: "1_backlog".to_string(),
|
|
||||||
name: None,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn crdt_event_broadcast_channel_round_trip() {
|
|
||||||
let (tx, mut rx) = broadcast::channel::<CrdtEvent>(16);
|
|
||||||
let evt = CrdtEvent {
|
|
||||||
story_id: "70_story_broadcast".to_string(),
|
|
||||||
from_stage: Some("1_backlog".to_string()),
|
|
||||||
to_stage: "2_current".to_string(),
|
|
||||||
name: Some("Broadcast Test".to_string()),
|
|
||||||
};
|
|
||||||
tx.send(evt).unwrap();
|
|
||||||
|
|
||||||
let received = rx.try_recv().unwrap();
|
|
||||||
assert_eq!(received.story_id, "70_story_broadcast");
|
|
||||||
assert_eq!(received.from_stage.as_deref(), Some("1_backlog"));
|
|
||||||
assert_eq!(received.to_stage, "2_current");
|
|
||||||
assert_eq!(received.name.as_deref(), Some("Broadcast Test"));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn dep_is_done_crdt_returns_false_when_no_crdt_state() {
|
|
||||||
// When the global CRDT state is not initialised (or in a test environment),
|
|
||||||
// dep_is_done_crdt should return false rather than panicking.
|
|
||||||
// Note: in the test binary the global may or may not be initialised,
|
|
||||||
// but the function should never panic either way.
|
|
||||||
let _ = dep_is_done_crdt(9999);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn check_unmet_deps_crdt_returns_empty_when_item_not_found() {
|
|
||||||
// Non-existent story should return empty deps.
|
|
||||||
let result = check_unmet_deps_crdt("nonexistent_story");
|
|
||||||
assert!(result.is_empty());
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Bug 503: archived-dep visibility ─────────────────────────────────────
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn dep_is_archived_crdt_returns_false_when_no_crdt_state() {
|
|
||||||
// When the global CRDT state is not initialised, must not panic.
|
|
||||||
let _ = dep_is_archived_crdt(9998);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn check_archived_deps_crdt_returns_empty_when_item_not_found() {
|
|
||||||
// Non-existent story should return empty archived deps.
|
|
||||||
let result = check_archived_deps_crdt("nonexistent_story_archived");
|
|
||||||
assert!(result.is_empty());
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── 478: WebSocket CRDT sync layer tests ────────────────────────────────
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn apply_remote_op_returns_false_when_not_initialised() {
|
|
||||||
// Without the global CRDT state, apply_remote_op should return false.
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
let item: JsonValue = serde_json::json!({
|
|
||||||
"story_id": "80_story_remote",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Remote",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let op = crdt
|
|
||||||
.doc
|
|
||||||
.items
|
|
||||||
.insert(bft_json_crdt::op::ROOT_ID, item)
|
|
||||||
.sign(&kp);
|
|
||||||
// This uses the global state which may not be initialised in tests.
|
|
||||||
let _ = apply_remote_op(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn signed_op_survives_sync_serialization_roundtrip() {
|
|
||||||
// Verify that a SignedOp serialised to JSON and back produces
|
|
||||||
// the same op (critical for the sync wire protocol).
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
let item: JsonValue = serde_json::json!({
|
|
||||||
"story_id": "90_story_wire",
|
|
||||||
"stage": "2_current",
|
|
||||||
"name": "Wire Test",
|
|
||||||
"agent": "coder",
|
|
||||||
"retry_count": 1.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "[10]",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let op = crdt
|
|
||||||
.doc
|
|
||||||
.items
|
|
||||||
.insert(bft_json_crdt::op::ROOT_ID, item)
|
|
||||||
.sign(&kp);
|
|
||||||
|
|
||||||
let json1 = serde_json::to_string(&op).unwrap();
|
|
||||||
let roundtripped: SignedOp = serde_json::from_str(&json1).unwrap();
|
|
||||||
let json2 = serde_json::to_string(&roundtripped).unwrap();
|
|
||||||
|
|
||||||
assert_eq!(json1, json2);
|
|
||||||
assert_eq!(op.id(), roundtripped.id());
|
|
||||||
assert_eq!(op.inner.seq, roundtripped.inner.seq);
|
|
||||||
assert_eq!(op.author(), roundtripped.author());
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn sync_broadcast_channel_round_trip() {
|
|
||||||
let (tx, mut rx) = broadcast::channel::<SignedOp>(16);
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
let item: JsonValue = serde_json::json!({
|
|
||||||
"story_id": "95_story_sync_bcast",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let op = crdt
|
|
||||||
.doc
|
|
||||||
.items
|
|
||||||
.insert(bft_json_crdt::op::ROOT_ID, item)
|
|
||||||
.sign(&kp);
|
|
||||||
tx.send(op.clone()).unwrap();
|
|
||||||
|
|
||||||
let received = rx.try_recv().unwrap();
|
|
||||||
assert_eq!(received.id(), op.id());
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Bug 511: CRDT lamport clock resets on restart ────────────────────────
|
|
||||||
//
|
|
||||||
// Root cause: Op::sign() always produces SignedOp with depends_on = vec![],
|
|
||||||
// so the causal dependency queue never engages during replay. Field update
|
|
||||||
// ops (seq=1,2,3 from each field's LwwRegisterCrdt counter) are replayed
|
|
||||||
// before list insert ops (seq=N from the items ListCrdt counter) when
|
|
||||||
// ordered by `seq ASC`. They fail ErrPathMismatch silently, their our_seq
|
|
||||||
// is never updated, and the next field write re-uses seq=1.
|
|
||||||
//
|
|
||||||
// Fix: replay by `rowid ASC` (SQLite insertion order) instead of `seq ASC`.
|
|
||||||
// Rowid preserves the causal order ops were originally applied in, so field
|
|
||||||
// updates always come after the item insert they reference.
|
|
||||||
#[tokio::test]
|
|
||||||
async fn bug_511_rowid_replay_preserves_field_update_after_list_insert() {
|
|
||||||
let tmp = tempfile::tempdir().unwrap();
|
|
||||||
let db_path = tmp.path().join("bug511.db");
|
|
||||||
|
|
||||||
let options = SqliteConnectOptions::new()
|
|
||||||
.filename(&db_path)
|
|
||||||
.create_if_missing(true);
|
|
||||||
let pool = SqlitePool::connect_with(options).await.unwrap();
|
|
||||||
sqlx::migrate!("./migrations").run(&pool).await.unwrap();
|
|
||||||
|
|
||||||
let kp = make_keypair();
|
|
||||||
let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
|
|
||||||
// Insert 5 dummy items to advance items.our_seq to 5.
|
|
||||||
for i in 0..5u32 {
|
|
||||||
let sid = format!("{}_story_warmup", i);
|
|
||||||
let item: JsonValue = json!({
|
|
||||||
"story_id": sid,
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let op = crdt.doc.items.insert(ROOT_ID, item).sign(&kp);
|
|
||||||
crdt.apply(op.clone());
|
|
||||||
// We don't persist these to the DB — they are pre-history.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now insert the real item. items.our_seq was 5, so this op gets seq=6.
|
|
||||||
let target_item: JsonValue = json!({
|
|
||||||
"story_id": "511_story_target",
|
|
||||||
"stage": "1_backlog",
|
|
||||||
"name": "Bug 511 target",
|
|
||||||
"agent": "",
|
|
||||||
"retry_count": 0.0,
|
|
||||||
"blocked": false,
|
|
||||||
"depends_on": "",
|
|
||||||
"claimed_by": "",
|
|
||||||
"claimed_at": 0.0,
|
|
||||||
})
|
|
||||||
.into();
|
|
||||||
let insert_op = crdt.doc.items.insert(ROOT_ID, target_item).sign(&kp);
|
|
||||||
crdt.apply(insert_op.clone());
|
|
||||||
// insert_op.inner.seq == 6
|
|
||||||
|
|
||||||
// Now update the stage. The stage LwwRegisterCrdt for this item starts
|
|
||||||
// at our_seq=0, so this field op gets seq=1. Crucially: seq=1 < seq=6.
|
|
||||||
let idx = rebuild_index(&crdt)["511_story_target"];
|
|
||||||
let stage_op = crdt.doc.items[idx]
|
|
||||||
.stage
|
|
||||||
.set("2_current".to_string())
|
|
||||||
.sign(&kp);
|
|
||||||
crdt.apply(stage_op.clone());
|
|
||||||
// stage_op.inner.seq == 1
|
|
||||||
|
|
||||||
// Persist BOTH ops in causal order (insert first, update second).
|
|
||||||
// This means insert_op gets rowid < stage_op rowid.
|
|
||||||
let now = chrono::Utc::now().to_rfc3339();
|
|
||||||
for op in [&insert_op, &stage_op] {
|
|
||||||
let op_json = serde_json::to_string(op).unwrap();
|
|
||||||
let op_id = hex::encode(&op.id());
|
|
||||||
sqlx::query(
|
|
||||||
"INSERT INTO crdt_ops (op_id, seq, op_json, created_at) VALUES (?1, ?2, ?3, ?4)",
|
|
||||||
)
|
|
||||||
.bind(&op_id)
|
|
||||||
.bind(op.inner.seq as i64)
|
|
||||||
.bind(&op_json)
|
|
||||||
.bind(&now)
|
|
||||||
.execute(&pool)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Replay by rowid ASC (the fix). The insert must come before the field
|
|
||||||
// update regardless of their field-level seq values.
|
|
||||||
let rows: Vec<(String,)> = sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY rowid ASC")
|
|
||||||
.fetch_all(&pool)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let mut crdt2 = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
for (json_str,) in &rows {
|
|
||||||
let op: SignedOp = serde_json::from_str(json_str).unwrap();
|
|
||||||
crdt2.apply(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
// The item must be in the CRDT and must reflect the stage update.
|
|
||||||
let index2 = rebuild_index(&crdt2);
|
|
||||||
assert!(
|
|
||||||
index2.contains_key("511_story_target"),
|
|
||||||
"item not found after rowid-order replay"
|
|
||||||
);
|
|
||||||
let idx2 = index2["511_story_target"];
|
|
||||||
let view = extract_item_view(&crdt2.doc.items[idx2]).unwrap();
|
|
||||||
assert_eq!(
|
|
||||||
view.stage, "2_current",
|
|
||||||
"stage field update lost during replay (bug 511 regression)"
|
|
||||||
);
|
|
||||||
|
|
||||||
// Confirm the bug is reproducible by replaying seq ASC instead.
|
|
||||||
// With seq ASC the stage_op (seq=1) arrives before insert_op (seq=6),
|
|
||||||
// fails ErrPathMismatch, and the item ends up at "1_backlog".
|
|
||||||
let rows_wrong_order: Vec<(String,)> =
|
|
||||||
sqlx::query_as("SELECT op_json FROM crdt_ops ORDER BY seq ASC")
|
|
||||||
.fetch_all(&pool)
|
|
||||||
.await
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
let mut crdt3 = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
for (json_str,) in &rows_wrong_order {
|
|
||||||
let op: SignedOp = serde_json::from_str(json_str).unwrap();
|
|
||||||
crdt3.apply(op);
|
|
||||||
}
|
|
||||||
|
|
||||||
let index3 = rebuild_index(&crdt3);
|
|
||||||
// With seq ASC replay, the item is created (insert_op eventually runs)
|
|
||||||
// but the stage update is lost (it ran before the item existed).
|
|
||||||
if let Some(idx3) = index3.get("511_story_target") {
|
|
||||||
let view3 = extract_item_view(&crdt3.doc.items[*idx3]).unwrap();
|
|
||||||
// The bug: stage is still "1_backlog" because the update was dropped.
|
|
||||||
assert_eq!(
|
|
||||||
view3.stage, "1_backlog",
|
|
||||||
"expected seq-ASC replay to exhibit the bug (update lost)"
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ── Story 518: persist_tx send failure logging ───────────────────────────
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn persist_tx_send_failure_logs_error() {
|
|
||||||
let kp = make_keypair();
|
|
||||||
let crdt = BaseCrdt::<PipelineDoc>::new(&kp);
|
|
||||||
let (persist_tx, persist_rx) = mpsc::unbounded_channel::<SignedOp>();
|
|
||||||
|
|
||||||
let mut state = CrdtState {
|
|
||||||
crdt,
|
|
||||||
keypair: kp,
|
|
||||||
index: HashMap::new(),
|
|
||||||
node_index: HashMap::new(),
|
|
||||||
persist_tx,
|
|
||||||
};
|
|
||||||
|
|
||||||
// Drop the receiver so that the next send fails immediately.
|
|
||||||
drop(persist_rx);

        let item_json: JsonValue = json!({
            "story_id": "518_story_persist_fail",
            "stage": "1_backlog",
            "name": "Persist Fail Test",
            "agent": "",
            "retry_count": 0.0,
            "blocked": false,
            "depends_on": "",
            "claimed_by": "",
            "claimed_at": 0.0,
        })
        .into();

        let before_errors = crate::log_buffer::global()
            .get_recent_entries(1000, None, Some(&crate::log_buffer::LogLevel::Error))
            .len();

        apply_and_persist(&mut state, |s| s.crdt.doc.items.insert(ROOT_ID, item_json));

        let error_entries = crate::log_buffer::global().get_recent_entries(
            1000,
            None,
            Some(&crate::log_buffer::LogLevel::Error),
        );

        assert!(
            error_entries.len() > before_errors,
            "expected an ERROR log entry when persist_tx send fails, but none was added"
        );

        let last_error = &error_entries[error_entries.len() - 1];
        assert!(
            last_error.message.contains("persist"),
            "error message should mention persist: {}",
            last_error.message
        );
        assert!(
            last_error.message.contains("ahead") || last_error.message.contains("diverged"),
            "error message should note in-memory/persisted divergence: {}",
            last_error.message
        );
    }

    // ── Story 631: vector clock delta sync tests ────────────────────────

    /// Helper: create N signed insert ops on a CRDT and return them with their JSON.
    fn make_ops(
        kp: &Ed25519KeyPair,
        crdt: &mut BaseCrdt<PipelineDoc>,
        count: usize,
        prefix: &str,
    ) -> Vec<(SignedOp, String)> {
        let mut ops = Vec::new();
        for i in 0..count {
            let item: JsonValue = json!({
                "story_id": format!("{prefix}_{i}"),
                "stage": "1_backlog",
                "name": format!("Item {i}"),
                "agent": "",
                "retry_count": 0.0,
                "blocked": false,
                "depends_on": "",
                "claimed_by": "",
                "claimed_at": 0.0,
            })
            .into();
            let op = crdt.doc.items.insert(ROOT_ID, item).sign(kp);
            crdt.apply(op.clone());
            let json = serde_json::to_string(&op).unwrap();
            ops.push((op, json));
        }
        ops
    }

    /// Build a vector clock from a list of (SignedOp, json) pairs.
    fn build_clock(ops: &[(SignedOp, String)]) -> VectorClock {
        let mut clock = VectorClock::new();
        for (op, _) in ops {
            let author = hex::encode(&op.author());
            *clock.entry(author).or_insert(0) += 1;
        }
        clock
    }

    /// Compute ops_since against a local journal and peer clock.
    ///
    /// Mirrors the production `ops_since` logic but operates on a local Vec
    /// instead of the global `ALL_OPS` static.
    fn local_ops_since(all_ops: &[(SignedOp, String)], peer_clock: &VectorClock) -> Vec<String> {
        let mut author_counts: HashMap<String, u64> = HashMap::new();
        let mut result = Vec::new();
        for (op, json) in all_ops {
            let author = hex::encode(&op.author());
            let count = author_counts.entry(author.clone()).or_insert(0);
            *count += 1;
            let peer_has = peer_clock.get(&author).copied().unwrap_or(0);
            if *count > peer_has {
                result.push(json.clone());
            }
        }
        result
    }
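
    // Worked example of the counting rule above (illustrative values, not
    // from the production code): with journal [a1, a2, b1, a3] and
    // peer_clock { A: 2, B: 0 }, the scan counts a1→1 and a2→2 (both ≤ 2,
    // skipped), b1→1 (> 0, sent), a3→3 (> 2, sent), so the delta is [b1, a3].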

    /// Integration test (low-bandwidth sync): two nodes, A applies 100 ops,
    /// B reconnects with a current clock — B receives 0 ops on the bulk phase.
    #[test]
    fn delta_sync_low_bandwidth_fully_caught_up() {
        let kp_a = make_keypair();
        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);

        let ops_a = make_ops(&kp_a, &mut crdt_a, 100, "631_low");

        // B has already seen all 100 ops (its clock matches A's journal).
        let clock_b = build_clock(&ops_a);

        // Delta should be empty.
        let delta = local_ops_since(&ops_a, &clock_b);
        assert_eq!(
            delta.len(),
            0,
            "caught-up peer should receive 0 ops, got {}",
            delta.len()
        );
    }

    /// Integration test (mid-stream): A applies 100 ops, B disconnects,
    /// A applies 50 more ops, B reconnects — B receives exactly the 50 missed ops.
    #[test]
    fn delta_sync_mid_stream_partial_catch_up() {
        let kp_a = make_keypair();
        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);

        // Phase 1: 100 ops that B has seen.
        let ops_phase1 = make_ops(&kp_a, &mut crdt_a, 100, "631_mid1");
        let clock_b = build_clock(&ops_phase1);

        // Phase 2: 50 more ops that B missed.
        let ops_phase2 = make_ops(&kp_a, &mut crdt_a, 50, "631_mid2");

        // A's full journal is phase1 + phase2.
        let mut all_ops_a: Vec<(SignedOp, String)> = ops_phase1;
        all_ops_a.extend(ops_phase2);

        let delta = local_ops_since(&all_ops_a, &clock_b);
        assert_eq!(
            delta.len(),
            50,
            "peer should receive exactly 50 missed ops, got {}",
            delta.len()
        );
    }

    /// Integration test (new node): C connects with empty clock,
    /// receives all 150 ops — verifies fallback behaviour.
    #[test]
    fn delta_sync_new_node_receives_all_ops() {
        let kp_a = make_keypair();
        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);

        let ops_phase1 = make_ops(&kp_a, &mut crdt_a, 100, "631_new1");
        let ops_phase2 = make_ops(&kp_a, &mut crdt_a, 50, "631_new2");

        let mut all_ops_a: Vec<(SignedOp, String)> = ops_phase1;
        all_ops_a.extend(ops_phase2);

        // Empty clock = new node.
        let empty_clock = VectorClock::new();
        let delta = local_ops_since(&all_ops_a, &empty_clock);
        assert_eq!(
            delta.len(),
            150,
            "new node should receive all 150 ops, got {}",
            delta.len()
        );
    }

    /// Multi-author delta sync: ops from two different nodes, peer has seen
    /// all of one author but none of the other.
    #[test]
    fn delta_sync_multi_author() {
        use fastcrypto::traits::KeyPair;

        let kp_a = make_keypair();
        let kp_b = make_keypair();
        let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
        let mut crdt_b = BaseCrdt::<PipelineDoc>::new(&kp_b);

        let ops_a = make_ops(&kp_a, &mut crdt_a, 30, "631_ma_a");
        let ops_b = make_ops(&kp_b, &mut crdt_b, 20, "631_ma_b");

        // Combined journal on a hypothetical server.
        let mut all_ops: Vec<(SignedOp, String)> = ops_a.clone();
        all_ops.extend(ops_b);

        // Peer has seen all of A's ops but none of B's.
        let mut peer_clock = VectorClock::new();
        let author_a_hex = hex::encode(&kp_a.public().0.to_bytes());
        peer_clock.insert(author_a_hex, 30);

        let delta = local_ops_since(&all_ops, &peer_clock);
        assert_eq!(
            delta.len(),
            20,
            "peer should receive 20 ops from author B, got {}",
            delta.len()
        );
    }

    /// Vector clock construction from ops.
    #[test]
    fn build_vector_clock_from_ops() {
        use fastcrypto::traits::KeyPair;

        let kp = make_keypair();
        let mut crdt = BaseCrdt::<PipelineDoc>::new(&kp);
        let ops = make_ops(&kp, &mut crdt, 10, "631_vc");

        let clock = build_clock(&ops);
        let author_hex = hex::encode(&kp.public().0.to_bytes());

        assert_eq!(clock.len(), 1, "single author should produce 1 clock entry");
        assert_eq!(clock[&author_hex], 10, "clock should show 10 ops");
    }

    /// Wire format: clock message serialization round-trip.
    #[test]
    fn clock_message_serialization_roundtrip() {
        let mut clock = VectorClock::new();
        clock.insert("aabbcc".to_string(), 42);
        clock.insert("ddeeff".to_string(), 7);

        let json = serde_json::to_value(&clock).unwrap();
        assert!(json.is_object());
        let deserialized: VectorClock = serde_json::from_value(json).unwrap();
        assert_eq!(deserialized["aabbcc"], 42);
        assert_eq!(deserialized["ddeeff"], 7);
    }
}
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large

+477 -1
@@ -1403,4 +1403,480 @@ async fn handle_tools_call(id: Option<Value>, params: &Value, ctx: &AppContext)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::http::test_helpers::test_ctx;

    #[test]
    fn json_rpc_response_serializes_success() {
        let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
        let s = serde_json::to_string(&resp).unwrap();
        assert!(s.contains("\"result\""));
        assert!(!s.contains("\"error\""));
    }

    #[test]
    fn json_rpc_response_serializes_error() {
        let resp = JsonRpcResponse::error(Some(json!(1)), -32600, "bad".into());
        let s = serde_json::to_string(&resp).unwrap();
        assert!(s.contains("\"error\""));
        assert!(!s.contains("\"result\""));
    }

    #[test]
    fn initialize_returns_capabilities() {
        let resp = handle_initialize(
            Some(json!(1)),
            &json!({"protocolVersion": "2025-03-26", "capabilities": {}, "clientInfo": {"name": "test", "version": "1.0"}}),
        );
        let result = resp.result.unwrap();
        assert_eq!(result["protocolVersion"], "2025-03-26");
        assert!(result["capabilities"]["tools"].is_object());
        assert_eq!(result["serverInfo"]["name"], "huskies");
    }

    #[test]
    fn tools_list_returns_all_tools() {
        let resp = handle_tools_list(Some(json!(2)));
        let result = resp.result.unwrap();
        let tools = result["tools"].as_array().unwrap();
        let names: Vec<&str> = tools.iter().map(|t| t["name"].as_str().unwrap()).collect();
        assert!(names.contains(&"create_story"));
        assert!(names.contains(&"validate_stories"));
        assert!(names.contains(&"list_upcoming"));
        assert!(names.contains(&"get_story_todos"));
        assert!(names.contains(&"record_tests"));
        assert!(names.contains(&"ensure_acceptance"));
        assert!(names.contains(&"start_agent"));
        assert!(names.contains(&"stop_agent"));
        assert!(names.contains(&"list_agents"));
        assert!(names.contains(&"get_agent_config"));
        assert!(names.contains(&"reload_agent_config"));
        assert!(names.contains(&"get_agent_output"));
        assert!(names.contains(&"wait_for_agent"));
        assert!(names.contains(&"get_agent_remaining_turns_and_budget"));
        assert!(names.contains(&"create_worktree"));
        assert!(names.contains(&"list_worktrees"));
        assert!(names.contains(&"remove_worktree"));
        assert!(names.contains(&"get_editor_command"));
        assert!(!names.contains(&"report_completion"));
        assert!(names.contains(&"accept_story"));
        assert!(names.contains(&"check_criterion"));
        assert!(names.contains(&"add_criterion"));
        assert!(names.contains(&"update_story"));
        assert!(names.contains(&"create_spike"));
        assert!(names.contains(&"create_bug"));
        assert!(names.contains(&"list_bugs"));
        assert!(names.contains(&"close_bug"));
        assert!(names.contains(&"create_refactor"));
        assert!(names.contains(&"list_refactors"));
        assert!(names.contains(&"merge_agent_work"));
        assert!(names.contains(&"get_merge_status"));
        assert!(names.contains(&"move_story_to_merge"));
        assert!(names.contains(&"report_merge_failure"));
        assert!(names.contains(&"request_qa"));
        assert!(names.contains(&"approve_qa"));
        assert!(names.contains(&"reject_qa"));
        assert!(names.contains(&"launch_qa_app"));
        assert!(names.contains(&"get_server_logs"));
        assert!(names.contains(&"prompt_permission"));
        assert!(names.contains(&"get_pipeline_status"));
        assert!(names.contains(&"rebuild_and_restart"));
        assert!(names.contains(&"get_token_usage"));
        assert!(names.contains(&"move_story"));
        assert!(names.contains(&"unblock_story"));
        assert!(names.contains(&"delete_story"));
        assert!(names.contains(&"run_command"));
        assert!(names.contains(&"run_tests"));
        assert!(names.contains(&"get_test_result"));
        assert!(names.contains(&"run_build"));
        assert!(names.contains(&"run_lint"));
        assert!(names.contains(&"git_status"));
        assert!(names.contains(&"git_diff"));
        assert!(names.contains(&"git_add"));
        assert!(names.contains(&"git_commit"));
        assert!(names.contains(&"git_log"));
        assert!(names.contains(&"status"));
        assert!(names.contains(&"loc_file"));
        assert!(names.contains(&"dump_crdt"));
        assert!(names.contains(&"get_version"));
        assert!(names.contains(&"remove_criterion"));
        assert_eq!(tools.len(), 66);
    }

    #[test]
    fn tools_list_schemas_have_required_fields() {
        let resp = handle_tools_list(Some(json!(1)));
        let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
        for tool in &tools {
            assert!(tool["name"].is_string(), "tool missing name");
            assert!(tool["description"].is_string(), "tool missing description");
            assert!(tool["inputSchema"].is_object(), "tool missing inputSchema");
            assert_eq!(tool["inputSchema"]["type"], "object");
        }
    }

    #[test]
    fn handle_tools_call_unknown_tool() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());
        let rt = tokio::runtime::Runtime::new().unwrap();
        let resp = rt.block_on(handle_tools_call(
            Some(json!(1)),
            &json!({"name": "bogus_tool", "arguments": {}}),
            &ctx,
        ));
        let result = resp.result.unwrap();
        assert_eq!(result["isError"], true);
        assert!(
            result["content"][0]["text"]
                .as_str()
                .unwrap()
                .contains("Unknown tool")
        );
    }

    #[test]
    fn to_sse_response_wraps_in_data_prefix() {
        let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
        let http_resp = to_sse_response(resp);
        assert_eq!(
            http_resp.headers().get("content-type").unwrap(),
            "text/event-stream"
        );
    }
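
    // For reference, a sketch of the wrapped wire shape (the "data: {…}\n\n"
    // framing is documented in the SSE tests further below; the payload values
    // here are illustrative):
    //   data: {"jsonrpc":"2.0","id":1,"result":{"ok":true}}\n\n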

    #[test]
    fn wants_sse_detects_accept_header() {
        // Can't easily construct a Request in tests without TestClient,
        // so test the logic indirectly via to_sse_response format
        let resp = JsonRpcResponse::success(Some(json!(1)), json!("ok"));
        let json_resp = to_json_response(resp);
        assert_eq!(
            json_resp.headers().get("content-type").unwrap(),
            "application/json"
        );
    }

    #[test]
    fn json_rpc_error_response_builds_json_response() {
        let resp = json_rpc_error_response(Some(json!(42)), -32600, "test error".into());
        assert_eq!(resp.status(), poem::http::StatusCode::OK);
        assert_eq!(
            resp.headers().get("content-type").unwrap(),
            "application/json"
        );
    }

    // ── HTTP handler tests (TestClient) ───────────────────────────

    fn test_mcp_app(ctx: std::sync::Arc<AppContext>) -> impl poem::Endpoint {
        use poem::EndpointExt;
        poem::Route::new()
            .at("/mcp", poem::post(mcp_post_handler).get(mcp_get_handler))
            .data(ctx)
    }

    async fn read_body_json(resp: poem::test::TestResponse) -> Value {
        let body = resp.0.into_body().into_string().await.unwrap();
        serde_json::from_str(&body).unwrap()
    }

    async fn post_json_mcp<E: poem::Endpoint>(
        cli: &poem::test::TestClient<E>,
        payload: &str,
    ) -> Value {
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .body(payload.to_string())
            .send()
            .await;
        read_body_json(resp).await
    }

    #[tokio::test]
    async fn mcp_get_handler_returns_405() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli.get("/mcp").send().await;
        assert_eq!(resp.0.status(), poem::http::StatusCode::METHOD_NOT_ALLOWED);
    }

    #[tokio::test]
    async fn mcp_post_invalid_content_type_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "text/plain")
            .body("{}")
            .send()
            .await;
        let body = read_body_json(resp).await;
        assert!(body.get("error").is_some(), "expected error field: {body}");
    }

    #[tokio::test]
    async fn mcp_post_invalid_json_returns_parse_error() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .body("not-valid-json")
            .send()
            .await;
        let body = read_body_json(resp).await;
        assert!(body.get("error").is_some(), "expected error field: {body}");
    }

    #[tokio::test]
    async fn mcp_post_wrong_jsonrpc_version_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let body = post_json_mcp(
            &cli,
            r#"{"jsonrpc":"1.0","id":1,"method":"initialize","params":{}}"#,
        )
        .await;
        assert!(
            body["error"]["message"]
                .as_str()
                .unwrap_or("")
                .contains("version"),
            "expected version error: {body}"
        );
    }

    #[tokio::test]
    async fn mcp_post_notification_with_null_id_returns_accepted() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .body(r#"{"jsonrpc":"2.0","method":"notifications/initialized","params":{}}"#)
            .send()
            .await;
        assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
    }

    #[tokio::test]
    async fn mcp_post_notification_with_explicit_null_id_returns_accepted() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .body(r#"{"jsonrpc":"2.0","id":null,"method":"notifications/initialized","params":{}}"#)
            .send()
            .await;
        assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
    }

    #[tokio::test]
    async fn mcp_post_missing_id_non_notification_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let body = post_json_mcp(
            &cli,
            r#"{"jsonrpc":"2.0","method":"initialize","params":{}}"#,
        )
        .await;
        assert!(body.get("error").is_some(), "expected error: {body}");
    }

    #[tokio::test]
    async fn mcp_post_unknown_method_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let body = post_json_mcp(
            &cli,
            r#"{"jsonrpc":"2.0","id":1,"method":"bogus/method","params":{}}"#,
        )
        .await;
        assert!(
            body["error"]["message"]
                .as_str()
                .unwrap_or("")
                .contains("Unknown method"),
            "expected unknown method error: {body}"
        );
    }

    #[tokio::test]
    async fn mcp_post_initialize_returns_capabilities() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let body = post_json_mcp(
            &cli,
            r#"{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"test","version":"1.0"}}}"#,
        )
        .await;
        assert_eq!(body["result"]["protocolVersion"], "2025-03-26");
        assert_eq!(body["result"]["serverInfo"]["name"], "huskies");
    }

    #[tokio::test]
    async fn mcp_post_tools_list_returns_tools() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let body = post_json_mcp(
            &cli,
            r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#,
        )
        .await;
        assert!(body["result"]["tools"].is_array());
    }

    #[tokio::test]
    async fn mcp_post_sse_returns_event_stream_content_type() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .header("accept", "text/event-stream")
            .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#)
            .send()
            .await;
        assert_eq!(
            resp.0.headers().get("content-type").unwrap(),
            "text/event-stream"
        );
    }

    #[tokio::test]
    async fn mcp_post_sse_get_agent_output_missing_story_id() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .header("accept", "text/event-stream")
            .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{}}}"#)
            .send()
            .await;
        assert_eq!(
            resp.0.headers().get("content-type").unwrap(),
            "text/event-stream",
            "expected SSE content-type"
        );
    }

    #[tokio::test]
    async fn mcp_post_sse_get_agent_output_without_agent_name_returns_disk_content() {
        // Without agent_name the SSE live-streaming intercept is skipped and
        // the disk-based handler runs. The transport still wraps the result in
        // SSE format (data: …\n\n) because the client sent Accept: text/event-stream,
        // but the content should be a valid JSON-RPC result, not a subscribe error.
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .header("accept", "text/event-stream")
            .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"1_test"}}}"#)
            .send()
            .await;
        let body = resp.0.into_body().into_string().await.unwrap();
        // Body is SSE-wrapped: "data: {…}\n\n" — strip the prefix and verify it's
        // a valid JSON-RPC result (not an error about missing agent_name).
        let json_part = body
            .trim_start_matches("data: ")
            .trim_end_matches("\n\n")
            .trim();
        let parsed: serde_json::Value = serde_json::from_str(json_part)
            .unwrap_or_else(|_| panic!("expected JSON-RPC in SSE body, got: {body}"));
        assert!(
            parsed.get("result").is_some(),
            "expected JSON-RPC result (disk-based handler ran): {parsed}"
        );
        // Must NOT be an error about missing agent_name (agent_name is now optional).
        assert!(
            parsed.get("error").is_none(),
            "unexpected error when agent_name omitted: {parsed}"
        );
    }

    #[tokio::test]
    async fn mcp_post_sse_get_agent_output_no_agent_no_logs_returns_not_found() {
        // Agent not in pool and no log files → SSE success with "No log files found" message.
        let tmp = tempfile::tempdir().unwrap();
        let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .header("accept", "text/event-stream")
            .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"99_nope","agent_name":"bot"}}}"#)
            .send()
            .await;
        assert_eq!(
            resp.0.headers().get("content-type").unwrap(),
            "text/event-stream"
        );
        let body = resp.0.into_body().into_string().await.unwrap();
        assert!(body.contains("data:"), "expected SSE data prefix: {body}");
        // Must NOT return isError — should be a success result with "No log files found".
        assert!(
            !body.contains("isError"),
            "expected no isError for missing agent: {body}"
        );
        assert!(
            body.contains("No log files found"),
            "expected not-found message: {body}"
        );
    }

    #[tokio::test]
    async fn mcp_post_sse_get_agent_output_exited_agent_reads_disk_logs() {
        use crate::agent_log::AgentLogWriter;
        use crate::agents::AgentEvent;
        // Agent has exited (not in pool) but wrote logs to disk.
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let mut writer = AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-sse").unwrap();
        writer
            .write_event(&AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "disk output".to_string(),
            })
            .unwrap();
        drop(writer);

        let ctx = std::sync::Arc::new(test_ctx(root));
        let cli = poem::test::TestClient::new(test_mcp_app(ctx));
        let resp = cli
            .post("/mcp")
            .header("content-type", "application/json")
            .header("accept", "text/event-stream")
            .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"42_story_foo","agent_name":"coder-1"}}}"#)
            .send()
            .await;
        let body = resp.0.into_body().into_string().await.unwrap();
        assert!(
            body.contains("disk output"),
            "expected disk log content in SSE response: {body}"
        );
        assert!(
            !body.contains("isError"),
            "expected no error for exited agent with logs: {body}"
        );
    }
}
File diff suppressed because it is too large
@@ -1,725 +0,0 @@
//! MCP story tools — create, update, move, and manage stories, bugs, and refactors via MCP.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::story` and `crate::http::workflow` for business logic,
//! and serialises responses.
use crate::agents::{
    close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_done,
};
use crate::http::context::AppContext;
use crate::http::workflow::{
    add_criterion_to_file, check_criterion_in_file, create_bug_file, create_refactor_file,
    create_spike_file, create_story_file, edit_criterion_in_file, list_bug_files,
    list_refactor_files, load_pipeline_state, load_upcoming_stories, remove_criterion_from_file,
    update_story_in_file, validate_story_dirs,
};
use crate::io::story_metadata::{
    check_archived_deps, check_archived_deps_from_list, parse_front_matter, parse_unchecked_todos,
};
use crate::service::story::parse_test_cases;
use crate::slog_warn;
#[allow(unused_imports)]
use crate::workflow::{TestCaseResult, TestStatus, evaluate_acceptance_with_coverage};
use serde_json::{Value, json};
use std::collections::HashMap;
use std::fs;

pub(super) fn tool_create_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let name = args
        .get("name")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: name")?;
    let user_story = args.get("user_story").and_then(|v| v.as_str());
    let description = args.get("description").and_then(|v| v.as_str());
    let acceptance_criteria: Option<Vec<String>> = args
        .get("acceptance_criteria")
        .and_then(|v| serde_json::from_value(v.clone()).ok());
    let depends_on: Option<Vec<u32>> = args
        .get("depends_on")
        .and_then(|v| serde_json::from_value(v.clone()).ok());
    // Spike 61: write the file only — the filesystem watcher detects the new
    // .md file in work/1_backlog/ and auto-commits with a deterministic message.
    let commit = false;

    let root = ctx.state.get_project_root()?;
    let story_id = create_story_file(
        &root,
        name,
        user_story,
        description,
        acceptance_criteria.as_deref(),
        depends_on.as_deref(),
        commit,
    )?;

    // Bug 503: warn at creation time if any depends_on points at an already-archived story.
    // Archived = satisfied semantics: the dep will resolve immediately on the next promotion
    // tick, which is surprising if the archived story was abandoned rather than cleanly done.
    let archived_deps = depends_on
        .as_deref()
        .map(|deps| check_archived_deps_from_list(&root, deps))
        .unwrap_or_default();
    if !archived_deps.is_empty() {
        slog_warn!(
            "[create-story] Story '{story_id}' depends_on {archived_deps:?} which \
             are already in 6_archived. The dep will be treated as satisfied on the \
             next promotion tick. If these deps were abandoned (not cleanly completed), \
             consider removing the depends_on or keeping the story in backlog manually."
        );
        return Ok(format!(
            "Created story: {story_id}\n\n\
             WARNING: depends_on {archived_deps:?} point at stories already in \
             6_archived. These deps are treated as satisfied (archived = satisfied \
             semantics), so this story may be auto-promoted from backlog immediately. \
             If the archived deps were abandoned rather than completed, remove the \
             depends_on or move the story back to backlog manually after promotion."
        ));
    }

    Ok(format!("Created story: {story_id}"))
}
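
// For orientation, a sketch of the tools/call payload this adapter receives
// (the envelope shape mirrors the MCP handler tests above; the argument
// values here are illustrative, not from the original source):
//   {"jsonrpc":"2.0","id":1,"method":"tools/call","params":{
//       "name":"create_story",
//       "arguments":{"name":"My Story","depends_on":[12,14]}}}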

/// Purge a story from the in-memory CRDT by writing a tombstone op (story 521).
///
/// This is the eviction primitive for the four-state-machine drift problem
/// we hit on 2026-04-09 — when a story gets stuck in the running server's
/// in-memory CRDT and can't be cleared by sqlite deletes alone (because the
/// in-memory state outlives any pipeline_items / crdt_ops manipulation),
/// this tool writes a proper CRDT delete op via `crdt_state::evict_item`.
///
/// The tombstone op:
/// - Marks the in-memory CRDT item as `is_deleted = true` immediately
///   (so subsequent `read_all_items` / `read_item` calls skip it)
/// - Is persisted to `crdt_ops` so the eviction survives a server restart
/// - Drops the in-memory `CONTENT_STORE` entry for the story
///
/// This tool does NOT touch: running agents, worktrees, the `pipeline_items`
/// shadow table, `timers.json`, or filesystem shadows. Compose with
/// `stop_agent`, `remove_worktree`, etc. as needed for a full purge — or
/// see story 514 (delete_story full cleanup) for a future "do it all" tool.
pub(super) fn tool_purge_story(args: &Value, _ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    crate::crdt_state::evict_item(story_id)?;

    Ok(format!(
        "Evicted '{story_id}' from in-memory CRDT (tombstone op persisted to crdt_ops; CONTENT_STORE entry dropped)."
    ))
}
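
// A minimal usage sketch for the eviction primitive above (assumes a test
// AppContext such as the `test_ctx` helper used elsewhere in this codebase;
// the expected message fragment is taken from the Ok(...) string above):
//
//     let args = json!({ "story_id": "42_story_foo" });
//     let msg = tool_purge_story(&args, &ctx)?;
//     assert!(msg.contains("Evicted '42_story_foo'"));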

pub(super) fn tool_validate_stories(ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    let results = validate_story_dirs(&root)?;
    serde_json::to_string_pretty(&json!(
        results
            .iter()
            .map(|r| json!({
                "story_id": r.story_id,
                "valid": r.valid,
                "error": r.error,
            }))
            .collect::<Vec<_>>()
    ))
    .map_err(|e| format!("Serialization error: {e}"))
}

pub(super) fn tool_list_upcoming(ctx: &AppContext) -> Result<String, String> {
    let stories = load_upcoming_stories(ctx)?;
    serde_json::to_string_pretty(&json!(
        stories
            .iter()
            .map(|s| json!({
                "story_id": s.story_id,
                "name": s.name,
                "error": s.error,
            }))
            .collect::<Vec<_>>()
    ))
    .map_err(|e| format!("Serialization error: {e}"))
}

pub(super) fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
    let state = load_pipeline_state(ctx)?;

    fn map_items(items: &[crate::http::workflow::UpcomingStory], stage: &str) -> Vec<Value> {
        items
            .iter()
            .map(|s| {
                let mut item = json!({
                    "story_id": s.story_id,
                    "name": s.name,
                    "stage": stage,
                    "agent": s.agent.as_ref().map(|a| json!({
                        "agent_name": a.agent_name,
                        "model": a.model,
                        "status": a.status,
                    })),
                });
                // Include blocked/retry_count when present so callers can
                // identify stories stuck in the pipeline.
                if let Some(true) = s.blocked {
                    item["blocked"] = json!(true);
                }
                if let Some(rc) = s.retry_count {
                    item["retry_count"] = json!(rc);
                }
                if let Some(ref mf) = s.merge_failure {
                    item["merge_failure"] = json!(mf);
                }
                item
            })
            .collect()
    }

    let mut active: Vec<Value> = Vec::new();
    active.extend(map_items(&state.current, "current"));
    active.extend(map_items(&state.qa, "qa"));
    active.extend(map_items(&state.merge, "merge"));
    active.extend(map_items(&state.done, "done"));

    let backlog: Vec<Value> = state
        .backlog
        .iter()
        .map(|s| json!({ "story_id": s.story_id, "name": s.name }))
        .collect();

    serde_json::to_string_pretty(&json!({
        "active": active,
        "backlog": backlog,
        "backlog_count": backlog.len(),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}

pub(super) fn tool_get_story_todos(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    let root = ctx.state.get_project_root()?;

    // Read from DB content store, falling back to filesystem.
    let contents = crate::http::workflow::read_story_content(&root, story_id)
        .map_err(|_| format!("Story file not found: {story_id}.md"))?;

    let story_name = parse_front_matter(&contents).ok().and_then(|m| m.name);
    let todos = parse_unchecked_todos(&contents);

    serde_json::to_string_pretty(&json!({
        "story_id": story_id,
        "story_name": story_name,
        "todos": todos,
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}

pub(super) fn tool_record_tests(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    let unit = parse_test_cases(args.get("unit"))?;
    let integration = parse_test_cases(args.get("integration"))?;

    let mut workflow = ctx
        .workflow
        .lock()
        .map_err(|e| format!("Lock error: {e}"))?;

    workflow.record_test_results_validated(story_id.to_string(), unit, integration)?;

    // Persist to story file (best-effort — file write errors are warnings, not failures).
    if let Ok(project_root) = ctx.state.get_project_root()
        && let Some(results) = workflow.results.get(story_id)
        && let Err(e) = crate::http::workflow::write_test_results_to_story_file(
            &project_root,
            story_id,
            results,
        )
    {
        slog_warn!("[record_tests] Could not persist results to story file: {e}");
    }

    Ok("Test results recorded.".to_string())
}

pub(super) fn tool_ensure_acceptance(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    let workflow = ctx
        .workflow
        .lock()
        .map_err(|e| format!("Lock error: {e}"))?;

    // Use in-memory results if present; otherwise fall back to file-persisted results.
    let file_results;
    let results = if let Some(r) = workflow.results.get(story_id) {
        r
    } else {
        let project_root = ctx.state.get_project_root().ok();
        file_results = project_root.as_deref().and_then(|root| {
            crate::http::workflow::read_test_results_from_story_file(root, story_id)
        });
        file_results.as_ref().map_or_else(
            || {
                // No results anywhere — use empty default for the acceptance check
                // (it will fail with "No test results recorded").
                static EMPTY: std::sync::OnceLock<crate::workflow::StoryTestResults> =
                    std::sync::OnceLock::new();
                EMPTY.get_or_init(Default::default)
            },
            |r| r,
        )
    };

    let coverage = workflow.coverage.get(story_id);
    let decision = evaluate_acceptance_with_coverage(results, coverage);

    if decision.can_accept {
        Ok("Story can be accepted. All gates pass.".to_string())
    } else {
        let mut parts = decision.reasons;
        if let Some(w) = decision.warning {
            parts.push(w);
        }
        Err(format!("Acceptance blocked: {}", parts.join("; ")))
    }
}

pub(super) fn tool_accept_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    let project_root = ctx.services.agents.get_project_root(&ctx.state)?;

    // Bug 226: Refuse to accept if the feature branch has unmerged code.
    // The code must be squash-merged via merge_agent_work first.
    if feature_branch_has_unmerged_changes(&project_root, story_id) {
        return Err(format!(
            "Cannot accept story '{story_id}': feature branch 'feature/story-{story_id}' \
             has unmerged changes. Use merge_agent_work to squash-merge the code into \
             master first."
        ));
    }

    move_story_to_done(&project_root, story_id)?;
    ctx.services.agents.remove_agents_for_story(story_id);

    Ok(format!(
        "Story '{story_id}' accepted, moved to done/, and committed to master."
    ))
}

pub(super) fn tool_check_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let criterion_index = args
        .get("criterion_index")
        .and_then(|v| v.as_u64())
        .ok_or("Missing required argument: criterion_index")? as usize;

    let root = ctx.state.get_project_root()?;
    check_criterion_in_file(&root, story_id, criterion_index)?;

    Ok(format!(
        "Criterion {criterion_index} checked for story '{story_id}'. Committed to master."
    ))
}

pub(super) fn tool_edit_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let criterion_index = args
        .get("criterion_index")
        .and_then(|v| v.as_u64())
        .ok_or("Missing required argument: criterion_index")? as usize;
    let new_text = args
        .get("new_text")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: new_text")?;

    let root = ctx.state.get_project_root()?;
    edit_criterion_in_file(&root, story_id, criterion_index, new_text)?;

    Ok(format!(
        "Criterion {criterion_index} updated for story '{story_id}'."
    ))
}

pub(super) fn tool_add_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let criterion = args
        .get("criterion")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: criterion")?;

    let root = ctx.state.get_project_root()?;
    add_criterion_to_file(&root, story_id, criterion)?;

    Ok(format!(
        "Added criterion to story '{story_id}': - [ ] {criterion}"
    ))
}

pub(super) fn tool_remove_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let criterion_index = args
        .get("criterion_index")
        .and_then(|v| v.as_u64())
        .ok_or("Missing required argument: criterion_index")? as usize;

    let root = ctx.state.get_project_root()?;
    remove_criterion_from_file(&root, story_id, criterion_index)?;

    Ok(format!(
        "Removed criterion {criterion_index} from story '{story_id}'."
    ))
}

pub(super) fn tool_update_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let user_story = args.get("user_story").and_then(|v| v.as_str());
    let description = args.get("description").and_then(|v| v.as_str());

    // Collect front matter fields: explicit `agent` param + arbitrary `front_matter` object.
    // Values are passed as serde_json::Value so native booleans, numbers, and arrays are
    // preserved and encoded correctly as unquoted YAML by update_story_in_file.
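    // Illustrative input → output for the merge below (values hypothetical):
    //   args = {"agent": "coder-opus", "front_matter": {"blocked": true, "retry_count": 2}}
    //   → front_matter = {"agent": String("coder-opus"), "blocked": Bool(true),
    //                     "retry_count": Number(2)}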
|
|
||||||
let mut front_matter: HashMap<String, Value> = HashMap::new();
|
|
||||||
if let Some(agent) = args.get("agent").and_then(|v| v.as_str()) {
|
|
||||||
front_matter.insert("agent".to_string(), Value::String(agent.to_string()));
|
|
||||||
}
|
|
||||||
if let Some(obj) = args.get("front_matter").and_then(|v| v.as_object()) {
|
|
||||||
for (k, v) in obj {
|
|
||||||
front_matter.insert(k.clone(), v.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
let front_matter_opt = if front_matter.is_empty() {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
Some(&front_matter)
|
|
||||||
};
|
|
||||||
|
|
||||||
let root = ctx.state.get_project_root()?;
|
|
||||||
update_story_in_file(&root, story_id, user_story, description, front_matter_opt)?;
|
|
||||||
|
|
||||||
// Bug 503: warn if any depends_on in the (now updated) story points at an archived story.
|
|
||||||
let stage = crate::pipeline_state::read_typed(story_id)
|
|
||||||
.ok()
|
|
||||||
.flatten()
|
|
||||||
.map(|i| i.stage.dir_name().to_string())
|
|
||||||
.unwrap_or_else(|| "1_backlog".to_string());
|
|
||||||
let archived_deps = check_archived_deps(&root, &stage, story_id);
|
|
||||||
if !archived_deps.is_empty() {
|
|
||||||
slog_warn!(
|
|
||||||
"[update-story] Story '{story_id}' depends_on {archived_deps:?} which \
|
|
||||||
are already in 6_archived. The dep will be treated as satisfied on the \
|
|
||||||
next promotion tick. If these deps were abandoned (not cleanly completed), \
|
|
||||||
consider removing the depends_on or keeping the story in backlog manually."
|
|
||||||
);
|
|
||||||
return Ok(format!(
|
|
||||||
"Updated story '{story_id}'.\n\n\
|
|
||||||
WARNING: depends_on {archived_deps:?} point at stories already in \
|
|
||||||
6_archived. These deps are treated as satisfied (archived = satisfied \
|
|
||||||
semantics), so this story may be auto-promoted from backlog immediately. \
|
|
||||||
If the archived deps were abandoned rather than completed, remove the \
|
|
||||||
depends_on or move the story back to backlog manually after promotion."
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(format!("Updated story '{story_id}'."))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn tool_create_spike(args: &Value, ctx: &AppContext) -> Result<String, String> {
|
|
||||||
let name = args
|
|
||||||
.get("name")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: name")?;
|
|
||||||
let description = args.get("description").and_then(|v| v.as_str());
|
|
||||||
|
|
||||||
let root = ctx.state.get_project_root()?;
|
|
||||||
let spike_id = create_spike_file(&root, name, description)?;
|
|
||||||
|
|
||||||
Ok(format!("Created spike: {spike_id}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn tool_create_bug(args: &Value, ctx: &AppContext) -> Result<String, String> {
|
|
||||||
let name = args
|
|
||||||
.get("name")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: name")?;
|
|
||||||
let description = args
|
|
||||||
.get("description")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: description")?;
|
|
||||||
let steps_to_reproduce = args
|
|
||||||
.get("steps_to_reproduce")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: steps_to_reproduce")?;
|
|
||||||
let actual_result = args
|
|
||||||
.get("actual_result")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: actual_result")?;
|
|
||||||
let expected_result = args
|
|
||||||
.get("expected_result")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: expected_result")?;
|
|
||||||
let acceptance_criteria: Option<Vec<String>> = args
|
|
||||||
.get("acceptance_criteria")
|
|
||||||
.and_then(|v| serde_json::from_value(v.clone()).ok());
|
|
||||||
let depends_on: Option<Vec<u32>> = args
|
|
||||||
.get("depends_on")
|
|
||||||
.and_then(|v| serde_json::from_value(v.clone()).ok());
|
|
||||||
|
|
||||||
let root = ctx.state.get_project_root()?;
|
|
||||||
let bug_id = create_bug_file(
|
|
||||||
&root,
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
steps_to_reproduce,
|
|
||||||
actual_result,
|
|
||||||
expected_result,
|
|
||||||
acceptance_criteria.as_deref(),
|
|
||||||
depends_on.as_deref(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
Ok(format!("Created bug: {bug_id}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn tool_list_bugs(ctx: &AppContext) -> Result<String, String> {
|
|
||||||
let root = ctx.state.get_project_root()?;
|
|
||||||
let bugs = list_bug_files(&root)?;
|
|
||||||
serde_json::to_string_pretty(&json!(
|
|
||||||
bugs.iter()
|
|
||||||
.map(|(id, name)| json!({ "bug_id": id, "name": name }))
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
))
|
|
||||||
.map_err(|e| format!("Serialization error: {e}"))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn tool_close_bug(args: &Value, ctx: &AppContext) -> Result<String, String> {
|
|
||||||
let bug_id = args
|
|
||||||
.get("bug_id")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: bug_id")?;
|
|
||||||
|
|
||||||
let root = ctx.services.agents.get_project_root(&ctx.state)?;
|
|
||||||
close_bug_to_archive(&root, bug_id)?;
|
|
||||||
ctx.services.agents.remove_agents_for_story(bug_id);
|
|
||||||
|
|
||||||
Ok(format!(
|
|
||||||
"Bug '{bug_id}' closed, moved to bugs/archive/, and committed to master."
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) fn tool_unblock_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
|
|
||||||
let story_id = args
|
|
||||||
.get("story_id")
|
|
||||||
.and_then(|v| v.as_str())
|
|
||||||
.ok_or("Missing required argument: story_id")?;
|
|
||||||
|
|
||||||
let root = ctx.state.get_project_root()?;
|
|
||||||
|
|
||||||
// Extract the numeric prefix (e.g. "42" from "42_story_foo")
|
|
||||||
let story_number = story_id
|
|
||||||
.split('_')
|
|
||||||
.next()
|
|
||||||
.filter(|s| !s.is_empty() && s.chars().all(|c| c.is_ascii_digit()))
|
|
||||||
.ok_or_else(|| format!("Invalid story_id format: '{story_id}'. Expected a numeric prefix (e.g. '42_story_foo')."))?;
|
|
||||||
|
|
||||||
Ok(crate::chat::commands::unblock::unblock_by_number(
|
|
||||||
&root,
|
|
||||||
story_number,
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(super) async fn tool_delete_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;

    let project_root = ctx.services.agents.get_project_root(&ctx.state)?;
    let mut failed_steps: Vec<String> = Vec::new();

    // 0. Cancel any pending rate-limit retry timers for this story (bug 514).
    // Must happen before stopping agents so the tick loop cannot re-spawn
    // an agent after we tear everything else down.
    let timer_removed = ctx.timer_store.remove(story_id);
    if timer_removed {
        slog_warn!("[delete_story] Cancelled pending timer for '{story_id}'");
    } else {
        slog_warn!("[delete_story] No pending timer found for '{story_id}'");
    }

    // 1. Stop any running agents for this story (best-effort).
    if let Ok(agents) = ctx.services.agents.list_agents() {
        for agent in agents.iter().filter(|a| a.story_id == story_id) {
            match ctx
                .services
                .agents
                .stop_agent(&project_root, story_id, &agent.agent_name)
                .await
            {
                Ok(()) => {
                    slog_warn!(
                        "[delete_story] Stopped agent '{}' for '{story_id}'",
                        agent.agent_name
                    );
                }
                Err(e) => {
                    slog_warn!(
                        "[delete_story] Failed to stop agent '{}' for '{story_id}': {e}",
                        agent.agent_name
                    );
                    failed_steps.push(format!("stop_agent({}): {e}", agent.agent_name));
                }
            }
        }
    }

    // 2. Remove agent pool entries.
    let removed_count = ctx.services.agents.remove_agents_for_story(story_id);
    slog_warn!("[delete_story] Removed {removed_count} agent pool entries for '{story_id}'");

    // 3. Remove worktree (best-effort).
    if let Ok(config) = crate::config::ProjectConfig::load(&project_root) {
        match crate::worktree::remove_worktree_by_story_id(&project_root, story_id, &config).await {
            Ok(()) => slog_warn!("[delete_story] Removed worktree for '{story_id}'"),
            Err(e) => slog_warn!("[delete_story] Worktree removal for '{story_id}': {e}"),
        }
    }

    // 4. Write a CRDT tombstone op so the story is evicted from the in-memory
    // state machine and the deletion is persisted to crdt_ops (survives
    // restart). Best-effort: legacy filesystem-only stories may not have a
    // CRDT entry, so a "not found" error is expected and non-fatal.
    match crate::crdt_state::evict_item(story_id) {
        Ok(()) => {
            slog_warn!(
                "[delete_story] Evicted '{story_id}' from CRDT (tombstone persisted to crdt_ops)"
            );
        }
        Err(e) => {
            slog_warn!("[delete_story] CRDT eviction for '{story_id}': {e}");
        }
    }

    // 5. Delete from database content store and shadow table.
    let found_in_db = crate::db::read_content(story_id).is_some()
        || crate::pipeline_state::read_typed(story_id)
            .ok()
            .flatten()
            .is_some();
    crate::db::delete_item(story_id);
    slog_warn!("[delete_story] Deleted '{story_id}' from content store / shadow table");

    // 6. Remove the filesystem shadow file from work/N_stage/.
    let sk = project_root.join(".huskies").join("work");
    let stage_dirs = [
        "1_backlog",
        "2_current",
        "3_qa",
        "4_merge",
        "5_done",
        "6_archived",
    ];
    let mut deleted_from_fs = false;
    for stage in &stage_dirs {
        let path = sk.join(stage).join(format!("{story_id}.md"));
        if path.exists() {
            match fs::remove_file(&path) {
                Ok(()) => {
                    slog_warn!(
                        "[delete_story] Deleted filesystem shadow '{story_id}' from work/{stage}/"
                    );
                    deleted_from_fs = true;
                }
                Err(e) => {
                    slog_warn!(
                        "[delete_story] Failed to delete filesystem shadow '{story_id}' from work/{stage}/: {e}"
                    );
                    failed_steps.push(format!("delete_filesystem({stage}): {e}"));
                }
            }
            break;
        }
    }

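    // Nothing was found anywhere (no pending timer, no DB row, no filesystem
    // shadow): report not-found instead of claiming a successful delete.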
    if !found_in_db && !deleted_from_fs && !timer_removed {
        return Err(format!(
            "Story '{story_id}' not found in any pipeline stage."
        ));
    }

    if !failed_steps.is_empty() {
        return Err(format!(
            "Story '{story_id}' partially deleted. Failed steps: {}.",
            failed_steps.join("; ")
        ));
    }

    Ok(format!("Story '{story_id}' deleted from pipeline."))
}

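/// Create a refactor story from chat tool arguments. `name` is required;
/// `description`, `acceptance_criteria`, and `depends_on` are optional.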
pub(super) fn tool_create_refactor(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let name = args
        .get("name")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: name")?;
    let description = args.get("description").and_then(|v| v.as_str());
    let acceptance_criteria: Option<Vec<String>> = args
        .get("acceptance_criteria")
        .and_then(|v| serde_json::from_value(v.clone()).ok());
    let depends_on: Option<Vec<u32>> = args
        .get("depends_on")
        .and_then(|v| serde_json::from_value(v.clone()).ok());

    let root = ctx.state.get_project_root()?;
    let refactor_id = create_refactor_file(
        &root,
        name,
        description,
        acceptance_criteria.as_deref(),
        depends_on.as_deref(),
    )?;

    Ok(format!("Created refactor: {refactor_id}"))
}

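/// List all refactor files as a pretty-printed JSON array of
/// `{"refactor_id", "name"}` objects.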
pub(super) fn tool_list_refactors(ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    let refactors = list_refactor_files(&root)?;
    serde_json::to_string_pretty(&json!(
        refactors
            .iter()
            .map(|(id, name)| json!({ "refactor_id": id, "name": name }))
            .collect::<Vec<_>>()
    ))
    .map_err(|e| format!("Serialization error: {e}"))
}

#[cfg(test)]
mod tests;
File diff suppressed because it is too large
@@ -1,472 +0,0 @@
use super::*;
use crate::http::test_helpers::test_ctx;

#[test]
fn json_rpc_response_serializes_success() {
    let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
    let s = serde_json::to_string(&resp).unwrap();
    assert!(s.contains("\"result\""));
    assert!(!s.contains("\"error\""));
}

#[test]
fn json_rpc_response_serializes_error() {
    let resp = JsonRpcResponse::error(Some(json!(1)), -32600, "bad".into());
    let s = serde_json::to_string(&resp).unwrap();
    assert!(s.contains("\"error\""));
    assert!(!s.contains("\"result\""));
}

#[test]
fn initialize_returns_capabilities() {
    let resp = handle_initialize(
        Some(json!(1)),
        &json!({"protocolVersion": "2025-03-26", "capabilities": {}, "clientInfo": {"name": "test", "version": "1.0"}}),
    );
    let result = resp.result.unwrap();
    assert_eq!(result["protocolVersion"], "2025-03-26");
    assert!(result["capabilities"]["tools"].is_object());
    assert_eq!(result["serverInfo"]["name"], "huskies");
}

#[test]
fn tools_list_returns_all_tools() {
    let resp = handle_tools_list(Some(json!(2)));
    let result = resp.result.unwrap();
    let tools = result["tools"].as_array().unwrap();
    let names: Vec<&str> = tools.iter().map(|t| t["name"].as_str().unwrap()).collect();
    assert!(names.contains(&"create_story"));
    assert!(names.contains(&"validate_stories"));
    assert!(names.contains(&"list_upcoming"));
    assert!(names.contains(&"get_story_todos"));
    assert!(names.contains(&"record_tests"));
    assert!(names.contains(&"ensure_acceptance"));
    assert!(names.contains(&"start_agent"));
    assert!(names.contains(&"stop_agent"));
    assert!(names.contains(&"list_agents"));
    assert!(names.contains(&"get_agent_config"));
    assert!(names.contains(&"reload_agent_config"));
    assert!(names.contains(&"get_agent_output"));
    assert!(names.contains(&"wait_for_agent"));
    assert!(names.contains(&"get_agent_remaining_turns_and_budget"));
    assert!(names.contains(&"create_worktree"));
    assert!(names.contains(&"list_worktrees"));
    assert!(names.contains(&"remove_worktree"));
    assert!(names.contains(&"get_editor_command"));
    assert!(!names.contains(&"report_completion"));
    assert!(names.contains(&"accept_story"));
    assert!(names.contains(&"check_criterion"));
    assert!(names.contains(&"add_criterion"));
    assert!(names.contains(&"update_story"));
    assert!(names.contains(&"create_spike"));
    assert!(names.contains(&"create_bug"));
    assert!(names.contains(&"list_bugs"));
    assert!(names.contains(&"close_bug"));
    assert!(names.contains(&"create_refactor"));
    assert!(names.contains(&"list_refactors"));
    assert!(names.contains(&"merge_agent_work"));
    assert!(names.contains(&"get_merge_status"));
    assert!(names.contains(&"move_story_to_merge"));
    assert!(names.contains(&"report_merge_failure"));
    assert!(names.contains(&"request_qa"));
    assert!(names.contains(&"approve_qa"));
    assert!(names.contains(&"reject_qa"));
    assert!(names.contains(&"launch_qa_app"));
    assert!(names.contains(&"get_server_logs"));
    assert!(names.contains(&"prompt_permission"));
    assert!(names.contains(&"get_pipeline_status"));
    assert!(names.contains(&"rebuild_and_restart"));
    assert!(names.contains(&"get_token_usage"));
    assert!(names.contains(&"move_story"));
    assert!(names.contains(&"unblock_story"));
    assert!(names.contains(&"delete_story"));
    assert!(names.contains(&"run_command"));
    assert!(names.contains(&"run_tests"));
    assert!(names.contains(&"get_test_result"));
    assert!(names.contains(&"run_build"));
    assert!(names.contains(&"run_lint"));
    assert!(names.contains(&"git_status"));
    assert!(names.contains(&"git_diff"));
    assert!(names.contains(&"git_add"));
    assert!(names.contains(&"git_commit"));
    assert!(names.contains(&"git_log"));
    assert!(names.contains(&"status"));
    assert!(names.contains(&"loc_file"));
    assert!(names.contains(&"dump_crdt"));
    assert!(names.contains(&"get_version"));
    assert!(names.contains(&"remove_criterion"));
    assert_eq!(tools.len(), 66);
}

#[test]
fn tools_list_schemas_have_required_fields() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    for tool in &tools {
        assert!(tool["name"].is_string(), "tool missing name");
        assert!(tool["description"].is_string(), "tool missing description");
        assert!(tool["inputSchema"].is_object(), "tool missing inputSchema");
        assert_eq!(tool["inputSchema"]["type"], "object");
    }
}

#[test]
fn handle_tools_call_unknown_tool() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let rt = tokio::runtime::Runtime::new().unwrap();
    let resp = rt.block_on(handle_tools_call(
        Some(json!(1)),
        &json!({"name": "bogus_tool", "arguments": {}}),
        &ctx,
    ));
    let result = resp.result.unwrap();
    assert_eq!(result["isError"], true);
    assert!(
        result["content"][0]["text"]
            .as_str()
            .unwrap()
            .contains("Unknown tool")
    );
}

#[test]
fn to_sse_response_wraps_in_data_prefix() {
    let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
    let http_resp = to_sse_response(resp);
    assert_eq!(
        http_resp.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
}

#[test]
fn wants_sse_detects_accept_header() {
    // Can't easily construct a Request in tests without TestClient,
    // so test the logic indirectly via to_sse_response format
    let resp = JsonRpcResponse::success(Some(json!(1)), json!("ok"));
    let json_resp = to_json_response(resp);
    assert_eq!(
        json_resp.headers().get("content-type").unwrap(),
        "application/json"
    );
}

#[test]
fn json_rpc_error_response_builds_json_response() {
    let resp = json_rpc_error_response(Some(json!(42)), -32600, "test error".into());
    assert_eq!(resp.status(), poem::http::StatusCode::OK);
    assert_eq!(
        resp.headers().get("content-type").unwrap(),
        "application/json"
    );
}

// ── HTTP handler tests (TestClient) ───────────────────────────

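// Build a minimal Poem app exposing only the /mcp route, mirroring how the
// real server mounts the MCP handlers, so TestClient can drive it directly.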
fn test_mcp_app(ctx: std::sync::Arc<AppContext>) -> impl poem::Endpoint {
    use poem::EndpointExt;
    poem::Route::new()
        .at("/mcp", poem::post(mcp_post_handler).get(mcp_get_handler))
        .data(ctx)
}

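// Shared helpers for the HTTP tests below: decode a TestResponse body as
// JSON, and POST a raw JSON-RPC payload to /mcp.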
async fn read_body_json(resp: poem::test::TestResponse) -> Value {
    let body = resp.0.into_body().into_string().await.unwrap();
    serde_json::from_str(&body).unwrap()
}

async fn post_json_mcp<E: poem::Endpoint>(cli: &poem::test::TestClient<E>, payload: &str) -> Value {
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .body(payload.to_string())
        .send()
        .await;
    read_body_json(resp).await
}

#[tokio::test]
async fn mcp_get_handler_returns_405() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli.get("/mcp").send().await;
    assert_eq!(resp.0.status(), poem::http::StatusCode::METHOD_NOT_ALLOWED);
}

#[tokio::test]
async fn mcp_post_invalid_content_type_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "text/plain")
        .body("{}")
        .send()
        .await;
    let body = read_body_json(resp).await;
    assert!(body.get("error").is_some(), "expected error field: {body}");
}

#[tokio::test]
async fn mcp_post_invalid_json_returns_parse_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .body("not-valid-json")
        .send()
        .await;
    let body = read_body_json(resp).await;
    assert!(body.get("error").is_some(), "expected error field: {body}");
}

#[tokio::test]
async fn mcp_post_wrong_jsonrpc_version_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let body = post_json_mcp(
        &cli,
        r#"{"jsonrpc":"1.0","id":1,"method":"initialize","params":{}}"#,
    )
    .await;
    assert!(
        body["error"]["message"]
            .as_str()
            .unwrap_or("")
            .contains("version"),
        "expected version error: {body}"
    );
}

#[tokio::test]
async fn mcp_post_notification_with_null_id_returns_accepted() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .body(r#"{"jsonrpc":"2.0","method":"notifications/initialized","params":{}}"#)
        .send()
        .await;
    assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
}

#[tokio::test]
async fn mcp_post_notification_with_explicit_null_id_returns_accepted() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .body(r#"{"jsonrpc":"2.0","id":null,"method":"notifications/initialized","params":{}}"#)
        .send()
        .await;
    assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
}

#[tokio::test]
async fn mcp_post_missing_id_non_notification_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let body = post_json_mcp(
        &cli,
        r#"{"jsonrpc":"2.0","method":"initialize","params":{}}"#,
    )
    .await;
    assert!(body.get("error").is_some(), "expected error: {body}");
}

#[tokio::test]
async fn mcp_post_unknown_method_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let body = post_json_mcp(
        &cli,
        r#"{"jsonrpc":"2.0","id":1,"method":"bogus/method","params":{}}"#,
    )
    .await;
    assert!(
        body["error"]["message"]
            .as_str()
            .unwrap_or("")
            .contains("Unknown method"),
        "expected unknown method error: {body}"
    );
}

#[tokio::test]
async fn mcp_post_initialize_returns_capabilities() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let body = post_json_mcp(
        &cli,
        r#"{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"test","version":"1.0"}}}"#,
    )
    .await;
    assert_eq!(body["result"]["protocolVersion"], "2025-03-26");
    assert_eq!(body["result"]["serverInfo"]["name"], "huskies");
}

#[tokio::test]
async fn mcp_post_tools_list_returns_tools() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let body = post_json_mcp(
        &cli,
        r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#,
    )
    .await;
    assert!(body["result"]["tools"].is_array());
}

#[tokio::test]
async fn mcp_post_sse_returns_event_stream_content_type() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#)
        .send()
        .await;
    assert_eq!(
        resp.0.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
}

#[tokio::test]
async fn mcp_post_sse_get_agent_output_missing_story_id() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{}}}"#)
        .send()
        .await;
    assert_eq!(
        resp.0.headers().get("content-type").unwrap(),
        "text/event-stream",
        "expected SSE content-type"
    );
}

#[tokio::test]
async fn mcp_post_sse_get_agent_output_without_agent_name_returns_disk_content() {
    // Without agent_name the SSE live-streaming intercept is skipped and
    // the disk-based handler runs. The transport still wraps the result in
    // SSE format (data: …\n\n) because the client sent Accept: text/event-stream,
    // but the content should be a valid JSON-RPC result, not a subscribe error.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"1_test"}}}"#)
        .send()
        .await;
    let body = resp.0.into_body().into_string().await.unwrap();
    // Body is SSE-wrapped: "data: {…}\n\n" — strip the prefix and verify it's
    // a valid JSON-RPC result (not an error about missing agent_name).
    let json_part = body
        .trim_start_matches("data: ")
        .trim_end_matches("\n\n")
        .trim();
    let parsed: serde_json::Value = serde_json::from_str(json_part)
        .unwrap_or_else(|_| panic!("expected JSON-RPC in SSE body, got: {body}"));
    assert!(
        parsed.get("result").is_some(),
        "expected JSON-RPC result (disk-based handler ran): {parsed}"
    );
    // Must NOT be an error about missing agent_name (agent_name is now optional)
    assert!(
        parsed.get("error").is_none(),
        "unexpected error when agent_name omitted: {parsed}"
    );
}

#[tokio::test]
async fn mcp_post_sse_get_agent_output_no_agent_no_logs_returns_not_found() {
    // Agent not in pool and no log files → SSE success with "No log files found" message.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"99_nope","agent_name":"bot"}}}"#)
        .send()
        .await;
    assert_eq!(
        resp.0.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
    let body = resp.0.into_body().into_string().await.unwrap();
    assert!(body.contains("data:"), "expected SSE data prefix: {body}");
    // Must NOT return isError — should be a success result with "No log files found"
    assert!(
        !body.contains("isError"),
        "expected no isError for missing agent: {body}"
    );
    assert!(
        body.contains("No log files found"),
        "expected not-found message: {body}"
    );
}

#[tokio::test]
async fn mcp_post_sse_get_agent_output_exited_agent_reads_disk_logs() {
    use crate::agent_log::AgentLogWriter;
    use crate::agents::AgentEvent;
    // Agent has exited (not in pool) but wrote logs to disk.
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let mut writer = AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-sse").unwrap();
    writer
        .write_event(&AgentEvent::Output {
            story_id: "42_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            text: "disk output".to_string(),
        })
        .unwrap();
    drop(writer);

    let ctx = std::sync::Arc::new(test_ctx(root));
    let cli = poem::test::TestClient::new(test_mcp_app(ctx));
    let resp = cli
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"42_story_foo","agent_name":"coder-1"}}}"#)
        .send()
        .await;
    let body = resp.0.into_body().into_string().await.unwrap();
    assert!(
        body.contains("disk output"),
        "expected disk log content in SSE response: {body}"
    );
    assert!(
        !body.contains("isError"),
        "expected no error for exited agent with logs: {body}"
    );
}
File diff suppressed because it is too large
@@ -1,702 +0,0 @@
//! Project scaffolding — creates the `.huskies/` directory structure and default files.
use std::fs;
use std::path::Path;

const STORY_KIT_README: &str = include_str!("../../../../../.huskies/README.md");

const BOT_TOML_MATRIX_EXAMPLE: &str =
    include_str!("../../../../../.huskies/bot.toml.matrix.example");
const BOT_TOML_WHATSAPP_META_EXAMPLE: &str =
    include_str!("../../../../../.huskies/bot.toml.whatsapp-meta.example");
const BOT_TOML_WHATSAPP_TWILIO_EXAMPLE: &str =
    include_str!("../../../../../.huskies/bot.toml.whatsapp-twilio.example");
const BOT_TOML_SLACK_EXAMPLE: &str = include_str!("../../../../../.huskies/bot.toml.slack.example");

const STORY_KIT_CONTEXT: &str = "<!-- huskies:scaffold-template -->\n\
    # Project Context\n\
    \n\
    ## High-Level Goal\n\
    \n\
    TODO: Describe the high-level goal of this project.\n\
    \n\
    ## Core Features\n\
    \n\
    TODO: List the core features of this project.\n\
    \n\
    ## Domain Definition\n\
    \n\
    TODO: Define the key domain concepts and entities.\n\
    \n\
    ## Glossary\n\
    \n\
    TODO: Define abbreviations and technical terms.\n";

const STORY_KIT_STACK: &str = "<!-- huskies:scaffold-template -->\n\
    # Tech Stack & Constraints\n\
    \n\
    ## Core Stack\n\
    \n\
    TODO: Describe the language, frameworks, and runtimes.\n\
    \n\
    ## Coding Standards\n\
    \n\
    TODO: Describe code style, linting rules, and error handling conventions.\n\
    \n\
    ## Quality Gates\n\
    \n\
    TODO: List the commands that must pass before merging (e.g., cargo test, npm run build).\n\
    \n\
    ## Libraries\n\
    \n\
    TODO: List approved libraries and their purpose.\n";

const STORY_KIT_SCRIPT_TEST: &str = "#!/usr/bin/env bash\nset -euo pipefail\n\n# Add your project's test commands here.\n# Story Kit agents invoke this script as the canonical test runner.\n# Exit 0 on success, non-zero on failure.\necho \"No tests configured\"\n";

const STORY_KIT_CLAUDE_MD: &str = "<!-- huskies:scaffold-template -->\n\
    Never chain shell commands with `&&`, `||`, or `;` in a single Bash call. \
    The permission system validates the entire command string, and chained commands \
    won't match allow rules like `Bash(git *)`. Use separate Bash calls instead — \
    parallel calls work fine.\n\
    \n\
    Read .huskies/README.md to see our dev process.\n\
    \n\
    IMPORTANT: On your first conversation, call `wizard_status` to check if \
    project setup is complete. If not, read .huskies/README.md for the full \
    setup wizard instructions and guide the user through it conversationally.\n";

const STORY_KIT_CLAUDE_SETTINGS: &str = r#"{
  "permissions": {
    "allow": [
      "Bash(cargo build:*)",
      "Bash(cargo check:*)",
      "Bash(git *)",
      "Bash(ls *)",
      "Bash(mkdir *)",
      "Bash(mv *)",
      "Bash(rm *)",
      "Bash(touch *)",
      "Bash(echo:*)",
      "Bash(pwd *)",
      "Bash(grep:*)",
      "Bash(find *)",
      "Bash(head *)",
      "Bash(tail *)",
      "Bash(wc *)",
      "Bash(cat *)",
      "Read",
      "Edit",
      "Write",
      "Glob",
      "Grep",
      "mcp__huskies__*"
    ]
  },
  "enabledMcpjsonServers": [
    "huskies"
  ]
}
"#;

const DEFAULT_PROJECT_SETTINGS_TOML: &str = r#"# Project-wide default QA mode: "server", "agent", or "human".
# Per-story `qa` front matter overrides this setting.
default_qa = "server"

# Maximum number of retries per story per pipeline stage before marking as blocked.
# Set to 0 to disable retry limits.
max_retries = 2

# Default model for coder-stage agents (e.g. "sonnet", "opus").
# When set, only coder agents whose model matches this value are considered for
# auto-assignment, so opus agents are only used when explicitly requested via
# story front matter `agent:` field.
# default_coder_model = "sonnet"

# Maximum number of concurrent coder-stage agents.
# Stories wait in 2_current/ until a slot frees up.
# max_coders = 3

# Override the base branch for worktree creation and merge operations.
# When not set, the system auto-detects the base branch from the current HEAD.
# base_branch = "main"

# Suppress soft rate-limit warning notifications in chat.
# Hard blocks and story-blocked notifications are always sent.
# rate_limit_notifications = true

# IANA timezone for timer scheduling (e.g. "Europe/London", "America/New_York").
# Timer HH:MM inputs are interpreted in this timezone.
# timezone = "America/New_York"
"#;

const DEFAULT_AGENTS_TOML: &str = r#"[[agent]]
name = "coder-1"
stage = "coder"
role = "Full-stack engineer. Implements features across all components."
model = "sonnet"
max_turns = 50
max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md to understand the dev process. Follow the workflow through implementation and verification. The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop.\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates when your process exits.\n\nIf `script/test` still contains the generic 'No tests configured' stub, update it to run the project's actual test suite before starting implementation."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Commit all your work before finishing. Do not accept stories, move them to archived, or merge to master."

[[agent]]
name = "qa"
stage = "qa"
role = "Reviews coder work: runs quality gates, generates testing plans, and reports findings."
model = "sonnet"
max_turns = 40
max_budget_usd = 4.00
prompt = "You are the QA agent for story {{story_id}}. Review the coder's work and produce a structured QA report. Run quality gates (linting, tests), attempt a build, and generate a manual testing plan. Do NOT modify any code."
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, and produce a structured QA report. Do not modify code."

[[agent]]
name = "mergemaster"
stage = "mergemaster"
role = "Merges completed work into master, runs quality gates, and archives stories."
model = "sonnet"
max_turns = 30
max_budget_usd = 5.00
prompt = "You are the mergemaster agent for story {{story_id}}. Call merge_agent_work(story_id='{{story_id}}') to start the merge pipeline. Then poll get_merge_status(story_id='{{story_id}}') every 15 seconds until the status is 'completed' or 'failed'. Report the final result. If the merge fails, call report_merge_failure."
system_prompt = "You are the mergemaster agent. Call merge_agent_work to start the merge, then poll get_merge_status every 15 seconds until done. Never manually move story files. Call report_merge_failure when merges fail."
"#;

/// Detect the tech stack from the project root and return TOML `[[component]]` entries.
///
/// Inspects well-known marker files at the project root to identify which
/// tech stacks are present, then emits one `[[component]]` entry per detected
/// stack with sensible default `setup` commands. If no markers are found, a
/// single fallback `app` component with an empty `setup` list is returned so
/// that the pipeline never breaks on an unknown stack.
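/// For example, a Cargo-only project yields exactly the literal emitted in
/// the function body below:
///
/// ```toml
/// [[component]]
/// name = "server"
/// path = "."
/// setup = ["cargo check"]
/// ```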
pub fn detect_components_toml(root: &Path) -> String {
    let mut sections = Vec::new();

    if root.join("Cargo.toml").exists() {
        sections.push(
            "[[component]]\nname = \"server\"\npath = \".\"\nsetup = [\"cargo check\"]\n"
                .to_string(),
        );
    }

    if root.join("package.json").exists() {
        let setup_cmd = if root.join("pnpm-lock.yaml").exists() {
            "pnpm install"
        } else {
            "npm install"
        };
        sections.push(format!(
            "[[component]]\nname = \"frontend\"\npath = \".\"\nsetup = [\"{setup_cmd}\"]\n"
        ));
    }

    if root.join("pyproject.toml").exists() || root.join("requirements.txt").exists() {
        sections.push(
            "[[component]]\nname = \"python\"\npath = \".\"\nsetup = [\"pip install -r requirements.txt\"]\n"
                .to_string(),
        );
    }

    if root.join("go.mod").exists() {
        sections.push(
            "[[component]]\nname = \"go\"\npath = \".\"\nsetup = [\"go build ./...\"]\n"
                .to_string(),
        );
    }

    if root.join("Gemfile").exists() {
        sections.push(
            "[[component]]\nname = \"ruby\"\npath = \".\"\nsetup = [\"bundle install\"]\n"
                .to_string(),
        );
    }

    if sections.is_empty() {
        // No tech stack markers detected — emit a single generic component
        // with an empty setup list. The ONBOARDING_PROMPT instructs the chat
        // agent to inspect the project and replace this with real definitions.
        sections.push("[[component]]\nname = \"app\"\npath = \".\"\nsetup = []\n".to_string());
    }

    sections.join("\n")
}

/// Detect the appropriate Node.js test command for a directory containing `package.json`.
///
/// Reads the `package.json` content to identify known test runners (vitest, jest).
/// Falls back to `npm test` or `pnpm test` based on which lock file is present.
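/// For instance, a `package.json` that lists `"vitest"` next to a
/// `pnpm-lock.yaml` resolves to `pnpm vitest run`.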
fn detect_node_test_cmd(pkg_dir: &Path) -> String {
    let has_pnpm = pkg_dir.join("pnpm-lock.yaml").exists();
    let content = std::fs::read_to_string(pkg_dir.join("package.json")).unwrap_or_default();

    if content.contains("\"vitest\"") {
        let pm = if has_pnpm { "pnpm" } else { "npx" };
        return format!("{} vitest run", pm);
    }
    if content.contains("\"jest\"") {
        let pm = if has_pnpm { "pnpm" } else { "npx" };
        return format!("{} jest", pm);
    }

    if has_pnpm {
        "pnpm test".to_string()
    } else {
        "npm test".to_string()
    }
}

/// Detect the appropriate Node.js build command for a directory containing `package.json`.
fn detect_node_build_cmd(pkg_dir: &Path) -> String {
    if pkg_dir.join("pnpm-lock.yaml").exists() {
        "pnpm run build".to_string()
    } else {
        "npm run build".to_string()
    }
}

/// Detect the appropriate Node.js lint command for a directory containing `package.json`.
///
/// Reads the `package.json` content to identify eslint. Falls back to
/// `npm run lint` or `pnpm run lint` based on which lock file is present.
fn detect_node_lint_cmd(pkg_dir: &Path) -> String {
    let has_pnpm = pkg_dir.join("pnpm-lock.yaml").exists();
    let content = std::fs::read_to_string(pkg_dir.join("package.json")).unwrap_or_default();
    if content.contains("\"eslint\"") {
        let pm = if has_pnpm { "pnpm" } else { "npx" };
        return format!("{pm} eslint .");
    }
    if has_pnpm {
        "pnpm run lint".to_string()
    } else {
        "npm run lint".to_string()
    }
}

/// Generate `script/build` content for a new project at `root`.
///
/// Inspects well-known marker files to identify which tech stacks are present
/// and emits the appropriate build commands. Multi-stack projects get combined
/// commands run sequentially. Falls back to a generic stub when no markers
/// are found so the scaffold is always valid.
///
/// For projects with a frontend in a known subdirectory (`frontend/`, `client/`),
/// the build command is detected from the presence of `pnpm-lock.yaml`.
pub fn detect_script_build(root: &Path) -> String {
    let mut commands: Vec<String> = Vec::new();

    if root.join("Cargo.toml").exists() {
        commands.push("cargo build --release".to_string());
    }

    if root.join("package.json").exists() {
        commands.push(detect_node_build_cmd(root));
    }

    // Detect frontend in known subdirectories (e.g. frontend/, client/)
    for subdir in &["frontend", "client"] {
        let sub_path = root.join(subdir);
        if sub_path.join("package.json").exists() {
            let cmd = detect_node_build_cmd(&sub_path);
            commands.push(format!("(cd {} && {})", subdir, cmd));
        }
    }

    if root.join("pyproject.toml").exists() {
        commands.push("python -m build".to_string());
    }

    if root.join("go.mod").exists() {
        commands.push("go build ./...".to_string());
    }

    if commands.is_empty() {
        return "#!/usr/bin/env bash\nset -euo pipefail\n\n# Add your project's build commands here.\necho \"No build configured\"\n".to_string();
    }

    let mut script = "#!/usr/bin/env bash\nset -euo pipefail\n\n".to_string();
    for cmd in commands {
        script.push_str(&cmd);
        script.push('\n');
    }
    script
}

/// Generate `script/lint` content for a new project at `root`.
///
/// Inspects well-known marker files to identify which linters are present
/// and emits the appropriate lint commands. Multi-stack projects get combined
/// commands run sequentially. Falls back to a generic stub when no markers
/// are found so the scaffold is always valid.
///
/// For projects with a frontend in a known subdirectory (`frontend/`, `client/`),
/// the lint command is detected from the `package.json` (eslint, npm, pnpm).
pub fn detect_script_lint(root: &Path) -> String {
    let mut commands: Vec<String> = Vec::new();

    if root.join("Cargo.toml").exists() {
        commands.push("cargo fmt --all --check".to_string());
        commands.push("cargo clippy -- -D warnings".to_string());
    }

    if root.join("package.json").exists() {
        commands.push(detect_node_lint_cmd(root));
    }

    // Detect frontend in known subdirectories (e.g. frontend/, client/)
    for subdir in &["frontend", "client"] {
        let sub_path = root.join(subdir);
        if sub_path.join("package.json").exists() {
            let cmd = detect_node_lint_cmd(&sub_path);
            commands.push(format!("(cd {} && {})", subdir, cmd));
        }
    }

    if root.join("pyproject.toml").exists() || root.join("requirements.txt").exists() {
        let mut content = std::fs::read_to_string(root.join("pyproject.toml")).unwrap_or_default();
        content
            .push_str(&std::fs::read_to_string(root.join("requirements.txt")).unwrap_or_default());
        if content.contains("ruff") {
            commands.push("ruff check .".to_string());
        } else {
            commands.push("flake8 .".to_string());
        }
    }

    if root.join("go.mod").exists() {
        commands.push("go vet ./...".to_string());
    }

    if commands.is_empty() {
        return "#!/usr/bin/env bash\nset -euo pipefail\n\n# Add your project's lint commands here.\necho \"No linters configured\"\n".to_string();
    }

    let mut script = "#!/usr/bin/env bash\nset -euo pipefail\n\n".to_string();
    for cmd in commands {
        script.push_str(&cmd);
        script.push('\n');
    }
    script
}

/// Generate `script/test` content for a new project at `root`.
///
/// Inspects well-known marker files to identify which tech stacks are present
/// and emits the appropriate test commands. Multi-stack projects get combined
/// commands run sequentially. Falls back to the generic stub when no markers
/// are found so the scaffold is always valid.
///
/// For projects with a frontend in a known subdirectory (`frontend/`, `client/`),
/// the test runner is detected from the `package.json` (vitest, jest, npm, pnpm).
pub fn detect_script_test(root: &Path) -> String {
    let mut commands: Vec<String> = Vec::new();

    if root.join("Cargo.toml").exists() {
        commands.push("cargo test".to_string());
    }

    if root.join("package.json").exists() {
        if root.join("pnpm-lock.yaml").exists() {
            commands.push("pnpm test".to_string());
        } else {
            commands.push("npm test".to_string());
        }
    }

    // Detect frontend in known subdirectories (e.g. frontend/, client/)
    for subdir in &["frontend", "client"] {
        let sub_path = root.join(subdir);
        if sub_path.join("package.json").exists() {
            let cmd = detect_node_test_cmd(&sub_path);
            commands.push(format!("(cd {} && {})", subdir, cmd));
        }
    }

    if root.join("pyproject.toml").exists() || root.join("requirements.txt").exists() {
        commands.push("pytest".to_string());
    }

    if root.join("go.mod").exists() {
        commands.push("go test ./...".to_string());
    }

    if commands.is_empty() {
        return STORY_KIT_SCRIPT_TEST.to_string();
    }

    let mut script = "#!/usr/bin/env bash\nset -euo pipefail\n\n".to_string();
    for cmd in commands {
        script.push_str(&cmd);
        script.push('\n');
    }
    script
}

/// Generate a `project.toml` for a new project at `root`.
///
/// Detects the tech stack via [`detect_components_toml`] and combines the
/// resulting `[[component]]` entries with the default project settings.
/// Agent definitions are written to `agents.toml` separately.
fn generate_project_toml(root: &Path) -> String {
    let components = detect_components_toml(root);
    format!("{components}\n{DEFAULT_PROJECT_SETTINGS_TOML}")
}

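/// Write `content` to `path` only if no file exists there yet, so user edits
/// to previously scaffolded files are never overwritten.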
fn write_file_if_missing(path: &Path, content: &str) -> Result<(), String> {
    if path.exists() {
        return Ok(());
    }
    fs::write(path, content).map_err(|e| format!("Failed to write file: {}", e))?;
    Ok(())
}

/// Write `content` to `path` if missing, then ensure the file is executable.
fn write_script_if_missing(path: &Path, content: &str) -> Result<(), String> {
    write_file_if_missing(path, content)?;

    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = fs::metadata(path)
            .map_err(|e| format!("Failed to read permissions for {}: {}", path.display(), e))?
            .permissions();
        perms.set_mode(0o755);
        fs::set_permissions(path, perms)
            .map_err(|e| format!("Failed to set permissions on {}: {}", path.display(), e))?;
    }

    Ok(())
}

/// Write (or idempotently update) `.huskies/.gitignore` with Story Kit–specific
/// ignore patterns for files that live inside the `.huskies/` directory.
/// Patterns are relative to `.huskies/` as git resolves `.gitignore` files
/// relative to the directory that contains them.
fn write_story_kit_gitignore(root: &Path) -> Result<(), String> {
    // Entries that belong inside .huskies/.gitignore (relative to .huskies/).
    let entries = [
        "bot.toml",
        "matrix_store/",
        "matrix_device_id",
        "matrix_history.json",
        "timers.json",
        "worktrees/",
        "merge_workspace/",
        "coverage/",
        "work/2_current/",
        "work/3_qa/",
        "work/4_merge/",
        "logs/",
        "token_usage.jsonl",
        "wizard_state.json",
        "store.json",
        "pipeline.db",
        "*.db",
    ];

    let gitignore_path = root.join(".huskies").join(".gitignore");
    let existing = if gitignore_path.exists() {
        fs::read_to_string(&gitignore_path)
            .map_err(|e| format!("Failed to read .huskies/.gitignore: {}", e))?
    } else {
        String::new()
    };

    let missing: Vec<&str> = entries
        .iter()
        .copied()
        .filter(|e| !existing.lines().any(|l| l.trim() == *e))
        .collect();

    if missing.is_empty() {
        return Ok(());
    }

    let mut new_content = existing;
    if !new_content.is_empty() && !new_content.ends_with('\n') {
        new_content.push('\n');
    }
    for entry in missing {
        new_content.push_str(entry);
        new_content.push('\n');
    }

    fs::write(&gitignore_path, new_content)
        .map_err(|e| format!("Failed to write .huskies/.gitignore: {}", e))?;

    Ok(())
}

/// Append root-level Story Kit entries to the project `.gitignore`.
/// Only `.huskies_port` and `.mcp.json` remain here because they live at
/// the project root and git does not support `../` patterns in `.gitignore`
/// files, so they cannot be expressed in `.huskies/.gitignore`.
/// `store.json` is excluded via `.huskies/.gitignore` since it now lives
/// inside the `.huskies/` directory.
fn append_root_gitignore_entries(root: &Path) -> Result<(), String> {
    let entries = [".huskies_port", ".mcp.json"];

    let gitignore_path = root.join(".gitignore");
    let existing = if gitignore_path.exists() {
        fs::read_to_string(&gitignore_path)
            .map_err(|e| format!("Failed to read .gitignore: {}", e))?
    } else {
        String::new()
    };

    let missing: Vec<&str> = entries
        .iter()
        .copied()
        .filter(|e| !existing.lines().any(|l| l.trim() == *e))
        .collect();

    if missing.is_empty() {
        return Ok(());
    }

    let mut new_content = existing;
    if !new_content.is_empty() && !new_content.ends_with('\n') {
        new_content.push('\n');
    }
    for entry in missing {
        new_content.push_str(entry);
        new_content.push('\n');
    }

    fs::write(&gitignore_path, new_content)
        .map_err(|e| format!("Failed to write .gitignore: {}", e))?;

    Ok(())
}

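/// Create the full Story Kit scaffold at `root`: the `.huskies/` tree with
/// pipeline work directories, specs, config files, and bot.toml examples;
/// the `script/` helpers; `.mcp.json` pointing at `port`; Claude settings;
/// and the gitignore entries. If `root` is not yet a git repository, it is
/// initialized and the scaffold is committed.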
pub(crate) fn scaffold_story_kit(root: &Path, port: u16) -> Result<(), String> {
    let story_kit_root = root.join(".huskies");
    let specs_root = story_kit_root.join("specs");
    let tech_root = specs_root.join("tech");
    let functional_root = specs_root.join("functional");
    let script_root = root.join("script");

    // Create the work/ pipeline directories, each with a .gitkeep so empty dirs survive git clone
    let work_stages = [
        "1_backlog",
        "2_current",
        "3_qa",
        "4_merge",
        "5_done",
        "6_archived",
    ];
    for stage in &work_stages {
        let dir = story_kit_root.join("work").join(stage);
        fs::create_dir_all(&dir).map_err(|e| format!("Failed to create work/{}: {}", stage, e))?;
        write_file_if_missing(&dir.join(".gitkeep"), "")?;
    }

    fs::create_dir_all(&tech_root).map_err(|e| format!("Failed to create specs/tech: {}", e))?;
    fs::create_dir_all(&functional_root)
        .map_err(|e| format!("Failed to create specs/functional: {}", e))?;
    fs::create_dir_all(&script_root)
        .map_err(|e| format!("Failed to create script/ directory: {}", e))?;

    write_file_if_missing(&story_kit_root.join("README.md"), STORY_KIT_README)?;
    let project_toml_content = generate_project_toml(root);
    write_file_if_missing(&story_kit_root.join("project.toml"), &project_toml_content)?;
    write_file_if_missing(&story_kit_root.join("agents.toml"), DEFAULT_AGENTS_TOML)?;
    write_file_if_missing(&specs_root.join("00_CONTEXT.md"), STORY_KIT_CONTEXT)?;
    write_file_if_missing(&tech_root.join("STACK.md"), STORY_KIT_STACK)?;
    let script_test_content = detect_script_test(root);
    write_script_if_missing(&script_root.join("test"), &script_test_content)?;
    let script_build_content = detect_script_build(root);
    write_script_if_missing(&script_root.join("build"), &script_build_content)?;
    let script_lint_content = detect_script_lint(root);
    write_script_if_missing(&script_root.join("lint"), &script_lint_content)?;
    write_file_if_missing(&root.join("CLAUDE.md"), STORY_KIT_CLAUDE_MD)?;

    // Write per-transport bot.toml example files so users can see all options.
    write_file_if_missing(
        &story_kit_root.join("bot.toml.matrix.example"),
        BOT_TOML_MATRIX_EXAMPLE,
    )?;
    write_file_if_missing(
        &story_kit_root.join("bot.toml.whatsapp-meta.example"),
        BOT_TOML_WHATSAPP_META_EXAMPLE,
    )?;
    write_file_if_missing(
        &story_kit_root.join("bot.toml.whatsapp-twilio.example"),
        BOT_TOML_WHATSAPP_TWILIO_EXAMPLE,
    )?;
    write_file_if_missing(
        &story_kit_root.join("bot.toml.slack.example"),
        BOT_TOML_SLACK_EXAMPLE,
    )?;

    // Write .mcp.json at the project root so agents can find the MCP server.
    // Only written when missing — never overwrites an existing file, because
    // the port is environment-specific and must not clobber a running instance.
    let mcp_content = format!(
        "{{\n \"mcpServers\": {{\n \"huskies\": {{\n \"type\": \"http\",\n \"url\": \"http://localhost:{port}/mcp\"\n }}\n }}\n}}\n"
    );
    write_file_if_missing(&root.join(".mcp.json"), &mcp_content)?;

    // Create .claude/settings.json with sensible permission defaults so that
    // Claude Code (both agents and web UI chat) can operate without constant
    // permission prompts.
    let claude_dir = root.join(".claude");
    fs::create_dir_all(&claude_dir)
        .map_err(|e| format!("Failed to create .claude/ directory: {}", e))?;
    write_file_if_missing(&claude_dir.join("settings.json"), STORY_KIT_CLAUDE_SETTINGS)?;

    write_story_kit_gitignore(root)?;
    append_root_gitignore_entries(root)?;

    // Run `git init` if the directory is not already a git repo, then make an initial commit
    if !root.join(".git").exists() {
        let init_status = std::process::Command::new("git")
            .args(["init"])
            .current_dir(root)
            .status()
            .map_err(|e| format!("Failed to run git init: {}", e))?;
        if !init_status.success() {
            return Err("git init failed".to_string());
        }

        let add_output = std::process::Command::new("git")
            .args([
                "add",
                ".huskies",
                "script",
                ".gitignore",
                "CLAUDE.md",
                ".claude",
            ])
            .current_dir(root)
            .output()
            .map_err(|e| format!("Failed to run git add: {}", e))?;
        if !add_output.status.success() {
            return Err(format!(
                "git add failed: {}",
                String::from_utf8_lossy(&add_output.stderr)
            ));
        }

        let commit_output = std::process::Command::new("git")
            .args([
                "-c",
                "user.email=huskies@localhost",
                "-c",
                "user.name=Story Kit",
                "commit",
                "-m",
                "Initial Story Kit scaffold",
            ])
            .current_dir(root)
            .output()
            .map_err(|e| format!("Failed to run git commit: {}", e))?;
        if !commit_output.status.success() {
            return Err(format!(
                "git commit failed: {}",
                String::from_utf8_lossy(&commit_output.stderr)
            ));
        }
    }

    Ok(())
}

#[cfg(test)]
mod tests;
File diff suppressed because it is too large