//! Integration tests for the LWW-map CRDT collections.
//!
//! The `tokens` collection is used as the representative collection for the
//! required CRDT semantics tests (insert, update, delete-via-tombstone, and
//! concurrent write semantics). The remaining four collections have smoke tests.

use super::super::state::init_for_test;
use super::*;
use bft_json_crdt::json_crdt::{BaseCrdt, JsonValue, OpState};
use bft_json_crdt::keypair::make_keypair;
use bft_json_crdt::op::ROOT_ID;
use serde_json::json;

use super::super::types::PipelineDoc;

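// The `write_*` / `read_*` / `delete_*` helpers exercised below come from the
// parent module via `use super::*;`. Each test starts with `init_for_test()`,
// which is assumed here to reset the shared in-process document state so the
// tests stay independent of one another.
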
// ── insert ────────────────────────────────────────────────────────

#[test]
fn token_insert_is_visible_via_read() {
    init_for_test();
    write_token_usage("coder-1:42", "42", 100.0, 200.0, 1_000_000.0);

    let view = read_token_usage("coder-1:42").expect("entry must exist after insert");
    assert_eq!(view.agent_id, "coder-1:42");
    assert_eq!(view.story_id.as_deref(), Some("42"));
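    // The numeric fields are f64; the values used here are exactly
    // representable, so the epsilon comparison is effectively an equality
    // check while staying robust to float round-tripping.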
    assert!((view.input_tokens - 100.0).abs() < f64::EPSILON);
    assert!((view.output_tokens - 200.0).abs() < f64::EPSILON);
    assert!((view.timestamp - 1_000_000.0).abs() < f64::EPSILON);
}

#[test]
fn token_read_all_returns_inserted_entries() {
    init_for_test();
    write_token_usage("coder-a:10", "10", 10.0, 20.0, 1.0);
    write_token_usage("coder-b:10", "10", 30.0, 40.0, 2.0);

    let all = read_all_token_usage().unwrap_or_default();
    let ids: Vec<&str> = all.iter().map(|v| v.agent_id.as_str()).collect();
    assert!(
        ids.contains(&"coder-a:10"),
        "coder-a:10 must be in read_all"
    );
    assert!(
        ids.contains(&"coder-b:10"),
        "coder-b:10 must be in read_all"
    );
}

// ── update ────────────────────────────────────────────────────────

#[test]
fn token_update_overwrites_fields() {
    init_for_test();
    write_token_usage("coder-2:55", "55", 50.0, 60.0, 2_000_000.0);
    // Update with new token counts.
    write_token_usage("coder-2:55", "55", 500.0, 600.0, 3_000_000.0);

    let view = read_token_usage("coder-2:55").expect("entry must exist after update");
    assert!((view.input_tokens - 500.0).abs() < f64::EPSILON);
    assert!((view.output_tokens - 600.0).abs() < f64::EPSILON);
    assert!((view.timestamp - 3_000_000.0).abs() < f64::EPSILON);
}

// ── delete-via-tombstone ──────────────────────────────────────────

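// Deletes in an LWW map are tombstones: a deleted key stops appearing in
// reads, but a marker is retained so the delete can win against earlier
// writes when ops are merged. (Stated as a reading aid; the exact tombstone
// representation is internal to the store.)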
#[test]
fn token_delete_removes_entry_from_read() {
    init_for_test();
    write_token_usage("coder-3:77", "77", 1.0, 2.0, 9_999.0);
    assert!(
        read_token_usage("coder-3:77").is_some(),
        "entry must exist before delete"
    );

    let deleted = delete_token_usage("coder-3:77");
    assert!(deleted, "delete must return true for a known entry");

    assert!(
        read_token_usage("coder-3:77").is_none(),
        "entry must be absent after tombstone"
    );
}

#[test]
fn token_delete_nonexistent_returns_false() {
    init_for_test();
    assert!(!delete_token_usage("no-such-agent"));
}

#[test]
fn token_delete_not_returned_by_read_all() {
    init_for_test();
    write_token_usage("coder-4:88", "88", 5.0, 10.0, 1.0);
    delete_token_usage("coder-4:88");

    let all = read_all_token_usage().unwrap_or_default();
    assert!(
        !all.iter().any(|v| v.agent_id == "coder-4:88"),
        "deleted entry must not appear in read_all"
    );
}

// ── concurrent write semantics (LWW convergence) ──────────────────

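// The convergence tests build two `BaseCrdt` instances directly (rather than
// going through the `write_*` helpers) so each side can sign and apply its
// own ops before seeing the other's. The asserts only check that the peers
// *agree* after exchanging ops; which of the concurrent values wins is left
// to the library's deterministic tie-break over ops (assumed, not asserted).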
#[test]
fn token_concurrent_writes_converge_via_lww() {
    // Two independent CRDTs simulate two nodes writing concurrently.
    let kp_a = make_keypair();
    let kp_b = make_keypair();
    let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
    let mut crdt_b = BaseCrdt::<PipelineDoc>::new(&kp_b);

    // Node A inserts a token entry.
    let entry_a: JsonValue = json!({
        "agent_id": "coder-x:99",
        "story_id": "99",
        "input_tokens": 10.0,
        "output_tokens": 20.0,
        "timestamp": 1.0,
    })
    .into();
    let insert_a = crdt_a.doc.tokens.insert(ROOT_ID, entry_a).sign(&kp_a);
    assert_eq!(crdt_a.apply(insert_a.clone()), OpState::Ok);

    // Node B inserts the same key with different values.
    let entry_b: JsonValue = json!({
        "agent_id": "coder-x:99",
        "story_id": "99",
        "input_tokens": 999.0,
        "output_tokens": 888.0,
        "timestamp": 1.0,
    })
    .into();
    let insert_b = crdt_b.doc.tokens.insert(ROOT_ID, entry_b).sign(&kp_b);
    assert_eq!(crdt_b.apply(insert_b.clone()), OpState::Ok);

    // Both nodes update input_tokens concurrently.
    let update_a = crdt_a.doc.tokens[0].input_tokens.set(111.0).sign(&kp_a);
    let update_b = crdt_b.doc.tokens[0].input_tokens.set(222.0).sign(&kp_b);

    assert_eq!(crdt_a.apply(update_a.clone()), OpState::Ok);
    assert_eq!(crdt_b.apply(update_b.clone()), OpState::Ok);

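    // Note: each peer still receives the remote author's ops in causal order
    // (insert before update); only the interleaving across authors differs.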
    // Cross-apply: A gets B's ops, B gets A's ops.
    assert_eq!(crdt_a.apply(insert_b), OpState::Ok);
    assert_eq!(crdt_a.apply(update_b), OpState::Ok);
    assert_eq!(crdt_b.apply(insert_a), OpState::Ok);
    assert_eq!(crdt_b.apply(update_a), OpState::Ok);

    // Both CRDTs must converge to the same view — compare field by field.
    assert_eq!(
        crdt_a.doc.tokens.view().len(),
        crdt_b.doc.tokens.view().len(),
        "both peers must have the same number of token entries"
    );
    assert_eq!(
        crdt_a.doc.tokens[0].input_tokens.view(),
        crdt_b.doc.tokens[0].input_tokens.view(),
        "concurrent writes to input_tokens must converge to the same value"
    );
    assert_eq!(
        crdt_a.doc.tokens[0].output_tokens.view(),
        crdt_b.doc.tokens[0].output_tokens.view(),
        "concurrent writes must converge for output_tokens"
    );
}

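// A minimal additional sketch (not one of the required semantics tests): a
// passive third peer that never writes should converge to the same view as
// the two writers, since convergence depends only on the set of ops applied,
// not on who authored them. The test name and scenario are illustrative, and
// only API calls already used above are exercised.
#[test]
fn token_passive_peer_converges() {
    let kp_a = make_keypair();
    let kp_b = make_keypair();
    let kp_c = make_keypair();
    let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
    let mut crdt_b = BaseCrdt::<PipelineDoc>::new(&kp_b);
    let mut crdt_c = BaseCrdt::<PipelineDoc>::new(&kp_c);

    // A inserts one entry; the signed op is delivered to B and C.
    let entry: JsonValue = json!({
        "agent_id": "coder-y:1",
        "story_id": "1",
        "input_tokens": 1.0,
        "output_tokens": 2.0,
        "timestamp": 1.0,
    })
    .into();
    let insert = crdt_a.doc.tokens.insert(ROOT_ID, entry).sign(&kp_a);
    assert_eq!(crdt_a.apply(insert.clone()), OpState::Ok);
    assert_eq!(crdt_b.apply(insert.clone()), OpState::Ok);
    assert_eq!(crdt_c.apply(insert), OpState::Ok);

    // A and B race on input_tokens; C only observes.
    let update_a = crdt_a.doc.tokens[0].input_tokens.set(111.0).sign(&kp_a);
    let update_b = crdt_b.doc.tokens[0].input_tokens.set(222.0).sign(&kp_b);
    assert_eq!(crdt_a.apply(update_a.clone()), OpState::Ok);
    assert_eq!(crdt_b.apply(update_b.clone()), OpState::Ok);

    // Deliver the racing updates to every peer, in differing orders.
    assert_eq!(crdt_a.apply(update_b.clone()), OpState::Ok);
    assert_eq!(crdt_b.apply(update_a.clone()), OpState::Ok);
    assert_eq!(crdt_c.apply(update_b), OpState::Ok);
    assert_eq!(crdt_c.apply(update_a), OpState::Ok);

    // All three peers must agree on the winning value.
    let winner = crdt_a.doc.tokens[0].input_tokens.view();
    assert_eq!(winner, crdt_b.doc.tokens[0].input_tokens.view());
    assert_eq!(winner, crdt_c.doc.tokens[0].input_tokens.view());
}
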
// ── smoke tests for the other four collections ────────────────────

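// Each smoke test covers one insert/update/delete round-trip through the
// public helpers for its collection.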
#[test]
fn merge_job_insert_update_delete() {
    init_for_test();
    write_merge_job("100", "pending", 1.0, None, None);
    let v = read_merge_job("100").expect("merge job must exist");
    assert_eq!(v.status, "pending");

    write_merge_job("100", "done", 1.0, Some(2.0), None);
    let v2 = read_merge_job("100").expect("merge job must exist after update");
    assert_eq!(v2.status, "done");
    assert_eq!(v2.finished_at, Some(2.0));

    assert!(delete_merge_job("100"));
    assert!(read_merge_job("100").is_none());
}

#[test]
fn active_agent_insert_update_delete() {
    init_for_test();
    write_active_agent("coder-5", "200", "node-abc", 5.0);
    let v = read_active_agent("coder-5").expect("active agent must exist");
    assert_eq!(v.story_id.as_deref(), Some("200"));

    write_active_agent("coder-5", "201", "node-abc", 6.0);
    let v2 = read_active_agent("coder-5").expect("active agent must exist after update");
    assert_eq!(v2.story_id.as_deref(), Some("201"));

    assert!(delete_active_agent("coder-5"));
    assert!(read_active_agent("coder-5").is_none());
}

#[test]
fn test_job_insert_update_delete() {
    init_for_test();
    write_test_job("300", "running", 7.0, None, None);
    let v = read_test_job("300").expect("test job must exist");
    assert_eq!(v.status, "running");

    write_test_job("300", "pass", 7.0, Some(8.0), Some("all green"));
    let v2 = read_test_job("300").expect("test job must exist after update");
    assert_eq!(v2.status, "pass");
    assert_eq!(v2.output.as_deref(), Some("all green"));

    assert!(delete_test_job("300"));
    assert!(read_test_job("300").is_none());
}

#[test]
fn agent_throttle_insert_update_delete() {
    init_for_test();
    write_agent_throttle("node-z", 1000.0, 2.0, 5.0);
    let v = read_agent_throttle("node-z").expect("throttle must exist");
    assert!((v.count - 2.0).abs() < f64::EPSILON);
    assert!((v.limit - 5.0).abs() < f64::EPSILON);

    write_agent_throttle("node-z", 1000.0, 4.0, 5.0);
    let v2 = read_agent_throttle("node-z").expect("throttle must exist after update");
    assert!((v2.count - 4.0).abs() < f64::EPSILON);

    assert!(delete_agent_throttle("node-z"));
    assert!(read_agent_throttle("node-z").is_none());
}

// ── merge_jobs: concurrent-write LWW resolution ───────────────────

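// This mirrors `token_concurrent_writes_converge_via_lww` for the
// `merge_jobs` collection: "done" vs. "failed" race on `status`, and the
// asserts check agreement between peers rather than a specific winner.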
#[test]
fn merge_job_concurrent_writes_converge_via_lww() {
    // Two independent CRDTs simulate two nodes writing concurrently.
    let kp_a = make_keypair();
    let kp_b = make_keypair();
    let mut crdt_a = BaseCrdt::<PipelineDoc>::new(&kp_a);
    let mut crdt_b = BaseCrdt::<PipelineDoc>::new(&kp_b);

    // Node A inserts a merge-job entry.
    let entry_a: JsonValue = json!({
        "story_id": "500_story_concurrent",
        "status": "pending",
        "started_at": 1.0,
        "finished_at": 0.0,
        "error": "",
    })
    .into();
    let insert_a = crdt_a.doc.merge_jobs.insert(ROOT_ID, entry_a).sign(&kp_a);
    assert_eq!(crdt_a.apply(insert_a.clone()), OpState::Ok);

    // Node B inserts the same story_id with a different status concurrently.
    let entry_b: JsonValue = json!({
        "story_id": "500_story_concurrent",
        "status": "running",
        "started_at": 1.0,
        "finished_at": 0.0,
        "error": "",
    })
    .into();
    let insert_b = crdt_b.doc.merge_jobs.insert(ROOT_ID, entry_b).sign(&kp_b);
    assert_eq!(crdt_b.apply(insert_b.clone()), OpState::Ok);

    // Both nodes concurrently update the status field.
    let update_a = crdt_a.doc.merge_jobs[0]
        .status
        .set("done".to_string())
        .sign(&kp_a);
    let update_b = crdt_b.doc.merge_jobs[0]
        .status
        .set("failed".to_string())
        .sign(&kp_b);

    assert_eq!(crdt_a.apply(update_a.clone()), OpState::Ok);
    assert_eq!(crdt_b.apply(update_b.clone()), OpState::Ok);

    // Cross-apply: A gets B's ops, B gets A's ops.
    assert_eq!(crdt_a.apply(insert_b), OpState::Ok);
    assert_eq!(crdt_a.apply(update_b), OpState::Ok);
    assert_eq!(crdt_b.apply(insert_a), OpState::Ok);
    assert_eq!(crdt_b.apply(update_a), OpState::Ok);

    // Both CRDTs must converge to the same view.
    assert_eq!(
        crdt_a.doc.merge_jobs.view().len(),
        crdt_b.doc.merge_jobs.view().len(),
        "both peers must have the same number of merge_job entries"
    );
    assert_eq!(
        crdt_a.doc.merge_jobs[0].status.view(),
        crdt_b.doc.merge_jobs[0].status.view(),
        "concurrent writes to status must converge to the same value via LWW"
    );
}