//! Peer mesh discovery — supplementary CRDT sync connections between build agents.
//!
//! When mesh discovery is enabled, a build agent periodically reads the CRDT
//! `nodes` list and opens supplementary sync connections to alive peers. These
//! mesh connections provide resilience: if the primary rendezvous server goes
//! down, agents can still exchange ops directly with each other.
//!
//! Op deduplication is handled by the CRDT layer's existing `AlreadySeen`
//! semantics — when the same op arrives via both the primary and a mesh path,
//! only the first application takes effect.
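//!
//! A minimal wiring sketch (the `agent_config` fields and the surrounding
//! startup code are illustrative assumptions, not part of this module):
//!
//! ```ignore
//! // The primary rendezvous connection is established elsewhere; mesh
//! // discovery runs alongside it as a supplementary background task.
//! let _mesh_task = spawn_mesh_discovery(
//!     agent_config.max_mesh_peers,          // e.g. 4; 0 disables mesh discovery
//!     agent_config.node_id.clone(),         // hex-encoded Ed25519 pubkey
//!     agent_config.rendezvous_url.clone(),
//!     agent_config.join_token.clone(),
//! );
//! ```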

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use tokio::task::JoinHandle;

use crate::crdt_state;
use crate::crdt_sync;
use crate::slog;

/// Interval between mesh discovery scans (seconds).
const MESH_SCAN_INTERVAL_SECS: u64 = 60;

/// Tracks active mesh peer connections and enforces the connection cap.
///
/// Each mesh connection is a background tokio task running `connect_and_sync`.
/// The manager periodically reconciles the set of active connections against
/// the CRDT node presence list: new alive peers are connected; disappeared
/// or dead peers are dropped.
pub struct MeshManager {
    /// Active mesh connections: peer_node_id -> JoinHandle.
    pub(crate) connections: HashMap<String, JoinHandle<()>>,
    /// Maximum number of supplementary mesh connections.
    max_peers: usize,
    /// This node's ID (hex-encoded Ed25519 pubkey).
    our_node_id: String,
    /// The primary rendezvous URL — retained for diagnostics/logging.
    _rendezvous_url: String,
    /// Join token for authenticating with peers.
    join_token: Option<String>,
}

impl MeshManager {
    /// Create a new mesh manager.
    pub fn new(
        max_peers: usize,
        our_node_id: String,
        rendezvous_url: String,
        join_token: Option<String>,
    ) -> Self {
        Self {
            connections: HashMap::new(),
            max_peers,
            our_node_id,
            _rendezvous_url: rendezvous_url,
            join_token,
        }
    }

    /// Run one reconciliation pass: drop dead peers, connect to new alive ones.
    ///
    /// Returns the number of active mesh connections after reconciliation.
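    ///
    /// A small usage sketch (illustrative only; in production this is driven
    /// by the background loop in `spawn_mesh_discovery`):
    ///
    /// ```ignore
    /// let mut mgr = MeshManager::new(4, our_node_id, rendezvous_url, None);
    /// let active = mgr.reconcile(); // one pass: prune stale peers, dial new ones
    /// assert!(active <= 4);
    /// ```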
    pub fn reconcile(&mut self) -> usize {
        // Read alive peers from the CRDT.
        let nodes = crdt_state::read_all_node_presence().unwrap_or_default();
        let now = chrono::Utc::now().timestamp() as f64;

        // Build the set of eligible peers (alive, fresh heartbeat, has address,
        // not self, not the rendezvous host).
        let alive_peers: Vec<(String, String)> = nodes
            .into_iter()
            .filter(|n| {
                let now_ms = now * 1000.0;
                // Prefer the millisecond timestamp when present; otherwise fall
                // back to the seconds field. Heartbeats older than 600 s
                // (10 minutes) are treated as stale.
                let last_ms = n.last_seen_ms.unwrap_or(n.last_seen * 1000.0);
                n.alive
                    && !n.address.is_empty()
                    && n.node_id != self.our_node_id
                    && (now_ms - last_ms) / 1000.0 < 600.0
                    && !self.is_rendezvous_peer(&n.address)
            })
            .map(|n| (n.node_id, n.address))
            .collect();

        // Drop connections to peers that are no longer alive or have disappeared.
        let alive_ids: std::collections::HashSet<&str> =
            alive_peers.iter().map(|(id, _)| id.as_str()).collect();

        let stale: Vec<String> = self
            .connections
            .keys()
            .filter(|id| !alive_ids.contains(id.as_str()))
            .cloned()
            .collect();

        for id in stale {
            if let Some(handle) = self.connections.remove(&id) {
                slog!(
                    "[mesh] Dropping connection to disappeared peer {:.12}…",
                    &id
                );
                handle.abort();
            }
        }

        // Also drop connections whose tasks have finished (peer disconnected).
        let finished: Vec<String> = self
            .connections
            .iter()
            .filter(|(_, h)| h.is_finished())
            .map(|(id, _)| id.clone())
            .collect();

        for id in finished {
            self.connections.remove(&id);
            slog!("[mesh] Mesh connection to {:.12}… ended; slot freed", &id);
        }

        // Connect to new peers up to the cap.
        for (node_id, address) in &alive_peers {
            if self.connections.len() >= self.max_peers {
                break;
            }
            if self.connections.contains_key(node_id) {
                continue;
            }

            slog!(
                "[mesh] Opening supplementary mesh connection to {:.12}… at {address}",
                node_id
            );
            let url = address.clone();
            let token = self.join_token.clone();
            let peer_id = node_id.clone();
            let handle = tokio::spawn(async move {
                match crdt_sync::connect_and_sync(&url, token.as_deref()).await {
                    Ok(()) => {
                        slog!(
                            "[mesh] Supplementary connection to {:.12}… closed cleanly",
                            &peer_id
                        );
                    }
                    Err(e) => {
                        slog!(
                            "[mesh] Supplementary connection to {:.12}… failed: {e}",
                            &peer_id
                        );
                    }
                }
            });
            self.connections.insert(node_id.clone(), handle);
        }

        self.connections.len()
    }

    /// Check whether an address belongs to the primary rendezvous peer.
    ///
    /// Address-based matching is currently not attempted: the CRDT address may
    /// advertise `ws://0.0.0.0:PORT/crdt-sync` while we reach the rendezvous
    /// server via a hostname, so host:port comparison is unreliable. This
    /// always returns `false`; any redundant connection to the rendezvous node
    /// is tolerated because the CRDT dedup layer absorbs the duplicate ops.
    fn is_rendezvous_peer(&self, _address: &str) -> bool {
        // The rendezvous URL is the server we're already connected to via the
        // primary channel. We cannot reliably match by address because the
        // server advertises `ws://0.0.0.0:PORT/crdt-sync` while we connect
        // via a hostname, and we don't know the server's node_id at this
        // point. So we skip address-based filtering and let the CRDT dedup
        // layer handle the redundancy.
        false
    }

    /// Number of currently active mesh connections.
    pub fn active_count(&self) -> usize {
        self.connections.len()
    }
}

/// Spawn the mesh discovery background loop.
///
/// Returns a handle that can be used to abort the loop (though in practice
/// the agent runs until SIGTERM). The loop runs `reconcile()` every
/// [`MESH_SCAN_INTERVAL_SECS`] seconds.
///
/// Mesh connections are explicitly logged as **supplementary** and do not
/// block startup if they fail. The primary `--rendezvous` connection
/// remains the canonical channel.
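///
/// A usage sketch (the literal values are illustrative):
///
/// ```ignore
/// let handle = spawn_mesh_discovery(
///     4,                                        // cap supplementary connections
///     our_node_id.clone(),
///     "ws://rendezvous:3001/crdt-sync".to_string(),
///     join_token.clone(),
/// );
/// // Passing max_peers = 0 disables discovery and the returned task exits
/// // immediately; otherwise the loop runs until aborted via handle.abort().
/// ```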
pub fn spawn_mesh_discovery(
    max_peers: usize,
    our_node_id: String,
    rendezvous_url: String,
    join_token: Option<String>,
) -> JoinHandle<()> {
    if max_peers == 0 {
        slog!("[mesh] Mesh discovery disabled (max_mesh_peers=0)");
        return tokio::spawn(async {});
    }

    slog!(
        "[mesh] Starting mesh discovery (max_mesh_peers={max_peers}, scan_interval={}s)",
        MESH_SCAN_INTERVAL_SECS
    );

    let manager = Arc::new(Mutex::new(MeshManager::new(
        max_peers,
        our_node_id,
        rendezvous_url,
        join_token,
    )));

    tokio::spawn(async move {
        // Initial delay: let the primary rendezvous connection establish and
        // the CRDT node list populate before scanning for mesh peers.
        tokio::time::sleep(std::time::Duration::from_secs(10)).await;

        let mut interval =
            tokio::time::interval(std::time::Duration::from_secs(MESH_SCAN_INTERVAL_SECS));
        loop {
            interval.tick().await;
            let mut mgr = manager.lock().await;
            let active = mgr.reconcile();
            if active > 0 {
                slog!("[mesh] Active mesh connections: {active}/{}", mgr.max_peers);
            }
        }
    })
}

// ── Tests ────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mesh_manager_new_has_no_connections() {
        let mgr = MeshManager::new(
            3,
            "node-a".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );
        assert_eq!(mgr.active_count(), 0);
        assert_eq!(mgr.max_peers, 3);
    }

    #[test]
    fn mesh_manager_respects_zero_max_peers() {
        let mgr = MeshManager::new(
            0,
            "node-a".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );
        assert_eq!(mgr.max_peers, 0);
    }

    /// Verify that reconcile returns 0 when CRDT is not initialised
    /// (read_all_node_presence returns None → empty list).
    #[test]
    fn mesh_manager_reconcile_no_crdt_returns_zero() {
        let mut mgr = MeshManager::new(
            3,
            "node-a".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );
        let active = mgr.reconcile();
        assert_eq!(active, 0);
    }

    #[test]
    fn mesh_scan_interval_is_60_seconds() {
        assert_eq!(MESH_SCAN_INTERVAL_SECS, 60);
    }

    /// AC7 (mesh storm cap): the `max_mesh_peers` cap is enforced by
    /// `reconcile()` when it opens new connections; the `connections` map
    /// itself does not reject inserts.
    ///
    /// We simulate a storm by inserting fake JoinHandles for six
    /// already-connected peers, confirm they are all counted, and then verify
    /// that `reconcile()` drops every peer absent from the CRDT alive set.
    #[tokio::test]
    async fn mesh_storm_cap_enforced() {
        let mut mgr = MeshManager::new(
            3, // max 3 mesh connections
            "self-node".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );

        // Simulate 6 alive peers by inserting fake tasks that sleep forever.
        // Since CRDT is not initialised, reconcile() won't add new connections,
        // so we manually insert to test the bookkeeping.
        for i in 0..6 {
            let node_id = format!("peer-{i}");
            let handle = tokio::spawn(async {
                tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
            });
            mgr.connections.insert(node_id, handle);
        }

        // Manual inserts bypass the cap; in real operation reconcile() enforces
        // it when dialling, so all 6 are visible here.
        assert_eq!(mgr.active_count(), 6);

        // Now run reconcile — since CRDT has no nodes, all 6 "unknown" peers
        // should be dropped (they're not in the alive set).
        mgr.reconcile();
        assert_eq!(mgr.active_count(), 0);
    }

    /// AC8 (connection lifecycle): When a peer's task finishes (simulating
    /// disconnect), reconcile() should detect it and free the slot.
    #[tokio::test]
    async fn finished_tasks_are_cleaned_up() {
        let mut mgr = MeshManager::new(
            3,
            "self-node".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );

        // Insert a task that finishes immediately.
        let handle = tokio::spawn(async {});
        mgr.connections.insert("peer-a".to_string(), handle);

        // Give the task a moment to complete.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;

        // reconcile() should detect the finished task and remove it.
        mgr.reconcile();
        assert_eq!(mgr.active_count(), 0);
    }

    /// AC8: Aborted connections are also cleaned up.
    #[tokio::test]
    async fn aborted_connections_are_cleaned_up() {
        let mut mgr = MeshManager::new(
            3,
            "self-node".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );

        let handle = tokio::spawn(async {
            tokio::time::sleep(std::time::Duration::from_secs(3600)).await;
        });
        handle.abort();
        mgr.connections.insert("peer-a".to_string(), handle);

        // Give abort a moment to take effect.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;

        mgr.reconcile();
        assert_eq!(mgr.active_count(), 0);
    }

    /// AC3: max_mesh_peers=0 means no connections are ever opened.
    #[test]
    fn zero_max_peers_stays_empty_after_reconcile() {
        let mut mgr = MeshManager::new(
            0,
            "self-node".to_string(),
            "ws://server:3001/crdt-sync".to_string(),
            None,
        );
        let active = mgr.reconcile();
        assert_eq!(active, 0);
    }
}