//! Event bus for pipeline state transitions.

use chrono::{DateTime, Utc};
use std::sync::OnceLock;
use tokio::sync::broadcast;

use super::{PipelineEvent, Stage, StoryId};
// ── Static transition broadcast channel ─────────────────────────────────────
// Process-wide broadcast channel for transition events. Lazily initialized on
// first use via `get_or_init_tx`; `OnceLock` makes that initialization
// thread-safe without a dedicated init call.
static TRANSITION_TX: OnceLock<broadcast::Sender<TransitionFired>> = OnceLock::new();
/// Return the process-wide transition sender, creating the broadcast
/// channel (capacity 256) on first call.
fn get_or_init_tx() -> &'static broadcast::Sender<TransitionFired> {
    // `broadcast::channel` returns (Sender, Receiver); only the sender is
    // retained — subscribers create their own receivers on demand.
    TRANSITION_TX.get_or_init(|| broadcast::channel(256).0)
}
/// Subscribe to all pipeline stage transitions.
///
/// Every call to [`apply_transition`][super::apply_transition] broadcasts the
/// resulting [`TransitionFired`] on this channel. The returned receiver only
/// observes events sent after this call; a receiver that falls behind silently
/// skips the missed events, so callers should handle
/// [`broadcast::error::RecvError::Lagged`].
pub fn subscribe_transitions() -> broadcast::Receiver<TransitionFired> {
    let tx = get_or_init_tx();
    tx.subscribe()
}
/// Broadcast `fired` to all active transition subscribers.
///
/// Called from [`apply_transition`][super::apply_transition] after writing the
/// new stage to the CRDT. `broadcast::Sender::send` errors only when no
/// receivers exist; that result is deliberately discarded so broadcasting is a
/// safe no-op with zero subscribers.
pub(super) fn try_broadcast(fired: &TransitionFired) {
let _ = get_or_init_tx().send(fired.clone());
}
2026-05-13 23:46:30 +00:00
/// Replay the current CRDT pipeline state as a burst of synthetic
/// [`TransitionFired`] events at server startup.
///
/// Reads every item from the CRDT and broadcasts a self-transition
/// (`before == after`) for each one so that all existing subscribers
/// (worktree lifecycle, merge-failure auto-spawn, auto-assign) react
/// identically to a live event. This replaces the legacy scan-based
/// `reconcile_on_startup` path.
///
/// Idempotent: a second call produces another burst of events, but every
/// subscriber already guards against duplicate work (e.g.
/// `is_story_assigned_for_stage` returns true once an agent is running,
/// and worktree creation is a no-op when the worktree already exists).
pub fn replay_current_pipeline_state() {
for item in super::read_all_typed() {
let fired = TransitionFired {
story_id: item.story_id.clone(),
before: item.stage.clone(),
after: item.stage,
event: super::PipelineEvent::DepsMet,
at: chrono::Utc::now(),
};
try_broadcast(&fired);
}
}
/// Fired when a pipeline stage transition completes.
#[derive(Debug, Clone)]
pub struct TransitionFired {
/// Story whose pipeline stage changed.
pub story_id: StoryId,
/// Stage before the transition (equals `after` for startup replays).
pub before: Stage,
/// Stage after the transition.
pub after: Stage,
/// The pipeline event that triggered this transition.
pub event: PipelineEvent,
/// Timestamp taken when the transition event was constructed.
pub at: DateTime<Utc>,
}
/// Trait for side-effect handlers that react to pipeline transitions.
pub trait TransitionSubscriber: Send + Sync {
/// Short static identifier for this subscriber.
fn name(&self) -> &'static str;
/// Invoked synchronously by [`EventBus::fire`] for every transition.
fn on_transition(&self, fired: &TransitionFired);
}
/// Collects [`TransitionSubscriber`]s and dispatches [`TransitionFired`] events to each.
pub struct EventBus {
// Subscribers are invoked in registration order by `fire`.
subscribers: Vec<Box<dyn TransitionSubscriber>>,
}
impl EventBus {
2026-04-29 10:41:32 +00:00
/// Create an empty event bus with no subscribers.
pub fn new() -> Self {
Self {
subscribers: Vec::new(),
}
}
2026-04-29 10:41:32 +00:00
/// Register a subscriber to receive all future transition events.
pub fn subscribe<S: TransitionSubscriber + 'static>(&mut self, subscriber: S) {
self.subscribers.push(Box::new(subscriber));
}
2026-04-29 10:41:32 +00:00
/// Fire a transition event, calling every registered subscriber in order.
pub fn fire(&self, event: TransitionFired) {
for sub in &self.subscribers {
sub.on_transition(&event);
}
}
}
impl Default for EventBus {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
    use super::super::{BranchName, PlanState};
    use super::*;
    use std::num::NonZeroU32;

    #[test]
    fn event_bus_fires_to_all_subscribers() {
        use std::sync::Arc;
        use std::sync::atomic::{AtomicU32, Ordering};

        // Counts invocations of `on_transition` via a shared atomic.
        struct CountingSub(Arc<AtomicU32>);
        impl TransitionSubscriber for CountingSub {
            fn name(&self) -> &'static str {
                "counter"
            }
            fn on_transition(&self, _: &TransitionFired) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }

        let counter = Arc::new(AtomicU32::new(0));
        let mut bus = EventBus::new();
        bus.subscribe(CountingSub(counter.clone()));
        bus.subscribe(CountingSub(counter.clone()));
        bus.fire(TransitionFired {
            story_id: StoryId("test".into()),
            before: Stage::Backlog,
            after: Stage::Coding {
                claim: None,
                plan: PlanState::Missing,
                retries: 0,
            },
            event: PipelineEvent::DepsMet,
            at: Utc::now(),
        });
        // Both registered subscribers must observe the single fired event.
        assert_eq!(counter.load(Ordering::SeqCst), 2);
    }

    // ── Bug 502 regression: agent field is not part of Stage ────────────

    #[test]
    fn bug_502_agent_not_in_stage() {
        // Bug 502 was caused by a coder agent being assigned to a story in
        // Merge stage. In the typed system, Stage has no `agent` field at all.
        // Agent assignment is per-node ExecutionState. This test documents that
        // the old failure mode is structurally impossible.
        let merge = Stage::Merge {
            feature_branch: BranchName("feature/story-1".into()),
            commits_ahead: NonZeroU32::new(3).unwrap(),
            claim: None,
            retries: 0,
        };
        // Stage::Merge carries feature_branch, commits_ahead, claim and
        // retries — but no agent field. There is no way to attach an agent
        // name to it; the type system prevents bug 502 by construction.
        assert!(matches!(merge, Stage::Merge { .. }));
    }

    // ── replay_current_pipeline_state ──────────────────────────────────

    /// AC1: replay broadcasts a synthetic event for every item in the CRDT.
    #[test]
    fn replay_broadcasts_event_for_crdt_item_in_coding_stage() {
        crate::crdt_state::init_for_test();
        crate::db::ensure_content_store();
        let story_id = "9901_replay_coding";
        crate::db::write_item_with_content(
            story_id,
            "2_current",
            "---\nname: Replay Coding\n---\n",
            crate::db::ItemMeta::named("Replay Coding"),
        );
        let mut rx = subscribe_transitions();
        replay_current_pipeline_state();
        // The global channel may carry events from other tests; drain the
        // buffer and filter by story id rather than asserting on the first.
        let mut found = false;
        while let Ok(fired) = rx.try_recv() {
            if fired.story_id.0 == story_id && matches!(fired.after, Stage::Coding { .. }) {
                found = true;
            }
        }
        assert!(
            found,
            "replay must broadcast a Coding event for a story in 2_current"
        );
    }

    /// AC3: calling replay_current_pipeline_state twice fires events both times.
    ///
    /// Pool-state idempotency (no duplicate agents) is enforced by subscribers,
    /// not by the replay function itself. This test verifies that replay is safe
    /// to call multiple times without panicking.
    #[test]
    fn replay_twice_does_not_panic() {
        crate::crdt_state::init_for_test();
        crate::db::ensure_content_store();
        let story_id = "9902_replay_idem";
        crate::db::write_item_with_content(
            story_id,
            "3_qa",
            "---\nname: Replay QA\n---\n",
            crate::db::ItemMeta::named("Replay QA"),
        );
        // Two successive replays must not panic.
        replay_current_pipeline_state();
        replay_current_pipeline_state();
    }
}