huskies: merge 791
@@ -0,0 +1,370 @@
//! Chat-transport context construction, startup announcements, stage-transition
//! notification listeners, and shutdown notifications.

use crate::chat;
use crate::config;
use crate::rebuild::{BotShutdownNotifier, ShutdownReason};
use crate::service;
use crate::services::Services;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::broadcast;

/// All chat-transport contexts built at startup, plus the Matrix shutdown channel.
pub(crate) struct BotContexts {
    pub(crate) whatsapp_ctx: Option<Arc<chat::transport::whatsapp::WhatsAppWebhookContext>>,
    pub(crate) slack_ctx: Option<Arc<chat::transport::slack::SlackWebhookContext>>,
    pub(crate) discord_ctx: Option<Arc<chat::transport::discord::DiscordContext>>,
    /// Best-effort shutdown notifier for Slack / Discord (fixed channel list).
    pub(crate) shutdown_notifier: Option<Arc<BotShutdownNotifier>>,
    /// Retained for shutdown-time WhatsApp notifications (reads ambient_rooms).
    pub(crate) whatsapp_ctx_for_shutdown:
        Option<Arc<chat::transport::whatsapp::WhatsAppWebhookContext>>,
    /// Sender used to signal the Matrix bot task at shutdown.
    pub(crate) matrix_shutdown_tx: Arc<tokio::sync::watch::Sender<Option<ShutdownReason>>>,
}

/// Build WhatsApp, Slack, and Discord contexts from `bot.toml`, along with
/// the shutdown notifier and Matrix watch channel.
///
/// Returns the `BotContexts` struct and the Matrix shutdown receiver separately
/// so the receiver can be moved into `spawn_bot` without partially moving the struct.
pub(crate) fn build_bot_contexts(
    startup_root: &Option<PathBuf>,
    services: &Arc<Services>,
) -> (
    BotContexts,
    tokio::sync::watch::Receiver<Option<ShutdownReason>>,
) {
    let whatsapp_ctx: Option<Arc<chat::transport::whatsapp::WhatsAppWebhookContext>> = startup_root
        .as_ref()
        .and_then(|root| chat::transport::matrix::BotConfig::load(root))
        .filter(|cfg| cfg.transport == "whatsapp")
        .map(|cfg| {
            let provider = cfg.whatsapp_provider.clone();
            let transport: Arc<dyn crate::chat::ChatTransport> = if provider == "twilio" {
                Arc::new(chat::transport::whatsapp::TwilioWhatsAppTransport::new(
                    cfg.twilio_account_sid.clone().unwrap_or_default(),
                    cfg.twilio_auth_token.clone().unwrap_or_default(),
                    cfg.twilio_whatsapp_number.clone().unwrap_or_default(),
                ))
            } else {
                let template_name = cfg
                    .whatsapp_notification_template
                    .clone()
                    .unwrap_or_else(|| "pipeline_notification".to_string());
                Arc::new(chat::transport::whatsapp::WhatsAppTransport::new(
                    cfg.whatsapp_phone_number_id.clone().unwrap_or_default(),
                    cfg.whatsapp_access_token.clone().unwrap_or_default(),
                    template_name,
                ))
            };
            let root = startup_root.clone().unwrap();
            let history = chat::transport::whatsapp::load_whatsapp_history(&root);
            Arc::new(chat::transport::whatsapp::WhatsAppWebhookContext {
                services: Arc::clone(services),
                verify_token: cfg.whatsapp_verify_token.clone().unwrap_or_default(),
                provider,
                transport,
                history: std::sync::Arc::new(tokio::sync::Mutex::new(history)),
                history_size: cfg.history_size,
                window_tracker: Arc::new(chat::transport::whatsapp::MessagingWindowTracker::new()),
                allowed_phones: cfg.whatsapp_allowed_phones.clone(),
            })
        });

    let slack_ctx: Option<Arc<chat::transport::slack::SlackWebhookContext>> = startup_root
        .as_ref()
        .and_then(|root| chat::transport::matrix::BotConfig::load(root))
        .filter(|cfg| cfg.transport == "slack")
        .map(|cfg| {
            let transport = Arc::new(chat::transport::slack::SlackTransport::new(
                cfg.slack_bot_token.clone().unwrap_or_default(),
            ));
            let root = startup_root.clone().unwrap();
            let history = chat::transport::slack::load_slack_history(&root);
            let channel_ids: std::collections::HashSet<String> =
                cfg.slack_channel_ids.iter().cloned().collect();
            Arc::new(chat::transport::slack::SlackWebhookContext {
                services: Arc::clone(services),
                signing_secret: cfg.slack_signing_secret.clone().unwrap_or_default(),
                transport,
                history: std::sync::Arc::new(tokio::sync::Mutex::new(history)),
                history_size: cfg.history_size,
                channel_ids,
            })
        });

    let discord_ctx: Option<Arc<chat::transport::discord::DiscordContext>> = startup_root
        .as_ref()
        .and_then(|root| chat::transport::matrix::BotConfig::load(root))
        .filter(|cfg| cfg.transport == "discord")
        .map(|cfg| {
            let transport = Arc::new(chat::transport::discord::DiscordTransport::new(
                cfg.discord_bot_token.clone().unwrap_or_default(),
            ));
            let root = startup_root.clone().unwrap();
            let history = chat::transport::discord::load_discord_history(&root);
            let channel_ids: std::collections::HashSet<String> =
                cfg.discord_channel_ids.iter().cloned().collect();
            let allowed_users: std::collections::HashSet<String> =
                cfg.discord_allowed_users.iter().cloned().collect();
            Arc::new(chat::transport::discord::DiscordContext {
                services: Arc::clone(services),
                bot_token: cfg.discord_bot_token.clone().unwrap_or_default(),
                transport,
                history: std::sync::Arc::new(tokio::sync::Mutex::new(history)),
                history_size: cfg.history_size,
                channel_ids,
                allowed_users,
            })
        });

    // Build a best-effort shutdown notifier.
    // Slack and Discord have fixed channel lists; WhatsApp rooms are tracked at
    // runtime via ambient_rooms and handled separately in `notify_shutdown`.
    let shutdown_notifier: Option<Arc<BotShutdownNotifier>> = if let Some(ref ctx) = slack_ctx {
        let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        Some(Arc::new(BotShutdownNotifier::new(
            Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
            channels,
            ctx.services.bot_name.clone(),
        )))
    } else if let Some(ref ctx) = discord_ctx {
        let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        Some(Arc::new(BotShutdownNotifier::new(
            Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
            channels,
            ctx.services.bot_name.clone(),
        )))
    } else {
        None
    };

    let whatsapp_ctx_for_shutdown = whatsapp_ctx.clone();
    let (matrix_shutdown_tx, matrix_shutdown_rx) =
        tokio::sync::watch::channel::<Option<ShutdownReason>>(None);
    let matrix_shutdown_tx = Arc::new(matrix_shutdown_tx);

    (
        BotContexts {
            whatsapp_ctx,
            slack_ctx,
            discord_ctx,
            shutdown_notifier,
            whatsapp_ctx_for_shutdown,
            matrix_shutdown_tx,
        },
        matrix_shutdown_rx,
    )
}
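
// A minimal caller sketch (hypothetical; `spawn_bot` and the surrounding startup
// code are assumptions, not part of this commit):
//
//     let (ctxs, matrix_shutdown_rx) = build_bot_contexts(&startup_root, &services);
//     tokio::spawn(spawn_bot(matrix_shutdown_rx /* , ... */));
//     spawn_startup_announcements(&ctxs);
//
// Returning the receiver outside `BotContexts` lets it move into the bot task
// while `ctxs` stays whole for the later shutdown path.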

/// Send `"{bot_name} is online."` to all known WhatsApp, Slack, and Discord
/// contacts so users know the bot is ready after a (re)start.
pub(crate) fn spawn_startup_announcements(ctxs: &BotContexts) {
    use chat::transport::whatsapp::WhatsAppConversationHistory;

    if let Some(ref ctx) = ctxs.whatsapp_ctx {
        let transport = Arc::clone(&ctx.transport);
        let bot_name = ctx.services.bot_name.clone();
        let history: WhatsAppConversationHistory = Arc::clone(&ctx.history);
        tokio::spawn(async move {
            let senders: Vec<String> = history.lock().await.keys().cloned().collect();
            if senders.is_empty() {
                return;
            }
            let notifier = BotShutdownNotifier::new(transport, senders, bot_name);
            notifier.notify_startup().await;
        });
    }

    if let Some(ref ctx) = ctxs.slack_ctx {
        let transport = Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>;
        let bot_name = ctx.services.bot_name.clone();
        let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        tokio::spawn(async move {
            if channels.is_empty() {
                return;
            }
            let notifier = BotShutdownNotifier::new(transport, channels, bot_name);
            notifier.notify_startup().await;
        });
    }

    if let Some(ref ctx) = ctxs.discord_ctx {
        let transport = Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>;
        let bot_name = ctx.services.bot_name.clone();
        let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        tokio::spawn(async move {
            if channels.is_empty() {
                return;
            }
            let notifier = BotShutdownNotifier::new(transport, channels, bot_name);
            notifier.notify_startup().await;
        });
    }
}
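
// Note: each announcement above runs on its own `tokio::spawn` task, so a slow or
// failing transport cannot block server startup. Recipients are a best-effort set:
// WhatsApp history keys, or the configured Slack/Discord channel lists.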

/// Spawn stage-transition notification listeners and status-event consumers for
/// all configured chat transports (WhatsApp, Slack, Discord).
///
/// Watcher receivers that have no matching transport are dropped immediately.
pub(crate) fn spawn_notification_listeners(
    ctxs: &BotContexts,
    startup_root: &Option<PathBuf>,
    watcher_rx_for_whatsapp: broadcast::Receiver<crate::io::watcher::WatcherEvent>,
    watcher_rx_for_slack: broadcast::Receiver<crate::io::watcher::WatcherEvent>,
    watcher_rx_for_discord: broadcast::Receiver<crate::io::watcher::WatcherEvent>,
) {
    if let (Some(ctx), Some(root)) = (&ctxs.whatsapp_ctx, startup_root) {
        let ambient_rooms = Arc::clone(&ctx.services.ambient_rooms);
        service::notifications::spawn_notification_listener(
            Arc::clone(&ctx.transport),
            move || ambient_rooms.lock().unwrap().iter().cloned().collect(),
            watcher_rx_for_whatsapp,
            root.clone(),
        );

        {
            use crate::service::status::format::format_status_event;

            let status_enabled = config::ProjectConfig::load(root)
                .map(|c| c.whatsapp_status_consumer)
                .unwrap_or(true);

            if status_enabled {
                let mut sub = ctx.services.status.subscribe();
                let transport = Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>;
                let ambient_rooms = Arc::clone(&ctx.services.ambient_rooms);
                tokio::spawn(async move {
                    while let Some(event) = sub.recv().await {
                        let plain = format_status_event(&event);
                        let rooms: Vec<String> =
                            ambient_rooms.lock().unwrap().iter().cloned().collect();
                        for room in &rooms {
                            if let Err(e) = transport.send_message(room, &plain, "").await {
                                crate::slog!(
                                    "[whatsapp] Failed to send status event to {room}: {e}"
                                );
                            }
                        }
                    }
                    crate::slog!("[whatsapp] Status subscriber task exiting — broadcaster dropped");
                });
            }
        }
    } else {
        drop(watcher_rx_for_whatsapp);
    }

    if let (Some(ctx), Some(root)) = (&ctxs.slack_ctx, startup_root) {
        let channel_ids: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        service::notifications::spawn_notification_listener(
            Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
            move || channel_ids.clone(),
            watcher_rx_for_slack,
            root.clone(),
        );

        {
            use crate::service::status::format::format_status_event;

            let status_enabled = config::ProjectConfig::load(root)
                .map(|c| c.slack_status_consumer)
                .unwrap_or(true);

            if status_enabled {
                let mut sub = ctx.services.status.subscribe();
                let transport = Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>;
                let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
                tokio::spawn(async move {
                    while let Some(event) = sub.recv().await {
                        let plain = format_status_event(&event);
                        for channel in &channels {
                            if let Err(e) = transport.send_message(channel, &plain, "").await {
                                crate::slog!(
                                    "[slack] Failed to send status event to {channel}: {e}"
                                );
                            }
                        }
                    }
                    crate::slog!("[slack] Status subscriber task exiting — broadcaster dropped");
                });
            }
        }
    } else {
        drop(watcher_rx_for_slack);
    }

    if let (Some(ctx), Some(root)) = (&ctxs.discord_ctx, startup_root) {
        chat::transport::discord::gateway::spawn_gateway(Arc::clone(ctx));

        let channel_ids: Vec<String> = ctx.channel_ids.iter().cloned().collect();
        service::notifications::spawn_notification_listener(
            Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
            move || channel_ids.clone(),
            watcher_rx_for_discord,
            root.clone(),
        );

        {
            use crate::service::status::format::format_status_event;

            let status_enabled = config::ProjectConfig::load(root)
                .map(|c| c.discord_status_consumer)
                .unwrap_or(true);

            if status_enabled {
                let mut sub = ctx.services.status.subscribe();
                let transport = Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>;
                let channels: Vec<String> = ctx.channel_ids.iter().cloned().collect();
                tokio::spawn(async move {
                    while let Some(event) = sub.recv().await {
                        let plain = format_status_event(&event);
                        for channel in &channels {
                            if let Err(e) = transport.send_message(channel, &plain, "").await {
                                crate::slog!(
                                    "[discord] Failed to send status event to {channel}: {e}"
                                );
                            }
                        }
                    }
                    crate::slog!("[discord] Status subscriber task exiting — broadcaster dropped");
                });
            }
        }
    } else {
        drop(watcher_rx_for_discord);
    }
}
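
// A caller-side sketch (hypothetical; channel capacity is illustrative): each
// listener gets its own subscription to the single watcher broadcast channel,
// so a slow consumer can only lag independently.
//
//     let (watcher_tx, _) = broadcast::channel::<crate::io::watcher::WatcherEvent>(64);
//     spawn_notification_listeners(
//         &ctxs,
//         &startup_root,
//         watcher_tx.subscribe(),
//         watcher_tx.subscribe(),
//         watcher_tx.subscribe(),
//     );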

/// Send shutdown notifications to all active bot channels (best-effort).
///
/// Called after the HTTP server stops accepting connections so that
/// network I/O can still complete before PTY children are killed.
pub(crate) async fn notify_shutdown(ctxs: &BotContexts) {
    // Slack / Discord: notifier holds the fixed channel list.
    if let Some(ref notifier) = ctxs.shutdown_notifier {
        notifier.notify(ShutdownReason::Manual).await;
    }

    // WhatsApp: read the current set of ambient rooms and notify each sender.
    if let Some(ref ctx) = ctxs.whatsapp_ctx_for_shutdown {
        let rooms: Vec<String> = ctx
            .services
            .ambient_rooms
            .lock()
            .unwrap()
            .iter()
            .cloned()
            .collect();
        if !rooms.is_empty() {
            let wa_notifier = BotShutdownNotifier::new(
                Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
                rooms,
                ctx.services.bot_name.clone(),
            );
            wa_notifier.notify(ShutdownReason::Manual).await;
        }
    }
}
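
// Shutdown-sequence sketch (hypothetical ordering; the Matrix side is signalled
// through the watch channel rather than through a notifier):
//
//     // 1. Stop accepting new HTTP connections ...
//     notify_shutdown(&ctxs).await;
//     let _ = ctxs.matrix_shutdown_tx.send(Some(ShutdownReason::Manual));
//     // 2. ... then kill PTY children.
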
@@ -0,0 +1,5 @@
//! Server startup helpers: project initialisation, background tasks, and bot setup.

pub(crate) mod bots;
pub(crate) mod project;
pub(crate) mod tick_loop;
@@ -0,0 +1,213 @@
//! Project-root discovery, subsystem initialisation (log, identity, DB, CRDT),
//! and CRDT-sync configuration.

use crate::config;
use crate::crdt_state;
use crate::crdt_sync;
use crate::db;
use crate::io::fs::find_story_kit_root;
use crate::log_buffer;
use crate::node_identity;
use crate::state::SessionState;
use crate::store::JsonFileStore;
use crate::worktree;
use std::path::{Path, PathBuf};
use std::sync::Arc;

/// Open (or scaffold) the project root according to the CLI flags and CWD.
///
/// Handles `--init`, an explicit path argument, and the default auto-detect
/// behaviour. Modifies `app_state.project_root` as a side effect.
pub(crate) async fn open_project_root(
    is_init: bool,
    explicit_path: Option<PathBuf>,
    cwd: &Path,
    app_state: &Arc<SessionState>,
    store: &Arc<JsonFileStore>,
    port: u16,
) {
    if is_init {
        let init_root = explicit_path.unwrap_or_else(|| cwd.to_path_buf());
        if !init_root.exists() {
            std::fs::create_dir_all(&init_root).unwrap_or_else(|e| {
                eprintln!(
                    "error: cannot create directory {}: {e}",
                    init_root.display()
                );
                std::process::exit(1);
            });
        }
        match crate::io::fs::open_project(
            init_root.to_string_lossy().to_string(),
            app_state,
            store.as_ref(),
            port,
        )
        .await
        {
            Ok(_) => {
                if let Some(root) = app_state.project_root.lock().unwrap().as_ref() {
                    config::ProjectConfig::load(root)
                        .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
                    crate::io::wizard::WizardState::init_if_missing(root);
                }
            }
            Err(e) => {
                eprintln!("error: {e}");
                std::process::exit(1);
            }
        }
    } else if let Some(explicit_root) = explicit_path {
        match crate::io::fs::open_project(
            explicit_root.to_string_lossy().to_string(),
            app_state,
            store.as_ref(),
            port,
        )
        .await
        {
            Ok(_) => {
                if let Some(root) = app_state.project_root.lock().unwrap().as_ref() {
                    config::ProjectConfig::load(root)
                        .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
                }
            }
            Err(e) => {
                eprintln!("error: {e}");
                std::process::exit(1);
            }
        }
    } else if let Some(project_root) = find_story_kit_root(cwd) {
        crate::io::fs::open_project(
            project_root.to_string_lossy().to_string(),
            app_state,
            store.as_ref(),
            port,
        )
        .await
        .unwrap_or_else(|e| {
            crate::slog!("Warning: failed to auto-open project at {project_root:?}: {e}");
            project_root.to_string_lossy().to_string()
        });

        config::ProjectConfig::load(&project_root)
            .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
    } else {
        crate::io::fs::open_project(
            cwd.to_string_lossy().to_string(),
            app_state,
            store.as_ref(),
            port,
        )
        .await
        .unwrap_or_else(|e| {
            crate::slog!("Warning: failed to scaffold project at {cwd:?}: {e}");
            cwd.to_string_lossy().to_string()
        });
    }
}
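
// Resolution precedence, summarised (illustrative invocations; the binary name
// and exact flag spelling are assumptions based on `is_init` / `explicit_path`):
//
//     huskies --init [PATH]   -> scaffold PATH (or CWD), then the wizard-state check
//     huskies PATH            -> open PATH, failing hard on an invalid project.toml
//     huskies                 -> walk up from CWD via find_story_kit_root, else scaffold CWD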

/// Set up the server log file, node identity keypair, pipeline DB, and CRDT state.
pub(crate) async fn init_subsystems(app_state: &Arc<SessionState>, cwd: &Path) {
    // Enable persistent server log file now that the project root is known.
    if let Some(ref root) = *app_state.project_root.lock().unwrap() {
        let log_dir = root.join(".huskies").join("logs");
        let _ = std::fs::create_dir_all(&log_dir);
        log_buffer::global().set_log_file(log_dir.join("server.log"));
    }

    // Initialise the node's Ed25519 identity keypair (file-based, mode 0600).
    // The key is stored at .huskies/node_identity.key and persisted across restarts.
    {
        let key_path = app_state
            .project_root
            .lock()
            .unwrap()
            .as_ref()
            .map(|root| root.join(".huskies").join("node_identity.key"))
            .unwrap_or_else(|| cwd.join(".huskies").join("node_identity.key"));
        if let Err(e) = node_identity::init_identity(&key_path) {
            crate::slog!("[identity] Failed to initialise node identity keypair: {e}");
        } else if let Some(id) = node_identity::get_identity() {
            crate::slog!("[identity] Node ID: {}", id.node_id);
        }
    }

    // Initialise the SQLite pipeline shadow-write database and CRDT state layer.
    // Clone the path out before the await so we don't hold the MutexGuard across
    // an await point.
    let pipeline_db_path = app_state
        .project_root
        .lock()
        .unwrap()
        .as_ref()
        .map(|root| root.join(".huskies").join("pipeline.db"));

    if let Some(ref db_path) = pipeline_db_path {
        if let Err(e) = db::init(db_path).await {
            crate::slog!("[db] Failed to initialise pipeline.db: {e}");
        }
        if let Err(e) = crdt_state::init(db_path).await {
            crate::slog!("[crdt] Failed to initialise CRDT state layer: {e}");
        } else {
            crdt_state::migrate_names_from_slugs();
            let id_migrations = crdt_state::migrate_story_ids_to_numeric();
            if !id_migrations.is_empty()
                && let Some(project_root) = db_path.parent().and_then(|p| p.parent())
            {
                worktree::migrate_slug_paths(project_root, &id_migrations);
            }
        }
    }
}
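
// Resulting on-disk layout under the project root (paths taken from the code above):
//
//     .huskies/logs/server.log      persistent server log
//     .huskies/node_identity.key    Ed25519 keypair, mode 0600
//     .huskies/pipeline.db          SQLite shadow-write DB + CRDT state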

/// Wire up CRDT sync: trusted keys, token auth, and the rendezvous client.
///
/// In agent mode the rendezvous URL comes from the CLI; otherwise it is read
/// from `project.toml`.
pub(crate) fn configure_crdt_sync(
    app_state: &Arc<SessionState>,
    is_agent: bool,
    agent_rendezvous: Option<String>,
    crdt_join_token: Option<String>,
) {
    let sync_config = if is_agent {
        agent_rendezvous
            .clone()
            .map(|url| (url, Vec::new(), false, Vec::new()))
    } else {
        app_state
            .project_root
            .lock()
            .unwrap()
            .as_ref()
            .and_then(|root| config::ProjectConfig::load(root).ok())
            .and_then(|cfg| {
                cfg.rendezvous.map(|url| {
                    (
                        url,
                        cfg.trusted_keys,
                        cfg.crdt_require_token,
                        cfg.crdt_tokens,
                    )
                })
            })
    };

    if let Some((rendezvous_url, trusted_keys, require_token, crdt_tokens)) = sync_config {
        crdt_sync::init_trusted_keys(trusted_keys);
        crdt_sync::init_token_auth(require_token, crdt_tokens);
        crdt_sync::spawn_rendezvous_client(rendezvous_url, crdt_join_token);
    } else {
        let (keys, require_token, crdt_tokens) = app_state
            .project_root
            .lock()
            .unwrap()
            .as_ref()
            .and_then(|root| config::ProjectConfig::load(root).ok())
            .map(|cfg| (cfg.trusted_keys, cfg.crdt_require_token, cfg.crdt_tokens))
            .unwrap_or_default();
        crdt_sync::init_trusted_keys(keys);
        crdt_sync::init_token_auth(require_token, crdt_tokens);
    }
}
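
// Corresponding `project.toml` keys, assuming the TOML field names mirror the
// `ProjectConfig` fields read above (a sketch, not a verified schema):
//
//     rendezvous = "wss://rendezvous.example/sync"
//     trusted_keys = ["ed25519:AAAA..."]
//     crdt_require_token = true
//     crdt_tokens = ["s3cret"]
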
@@ -0,0 +1,187 @@
//! Background tasks: CRDT-event bridge, auto-assign subscriber, unified tick
//! loop, gateway relay, and startup reconciliation.

use crate::agents::{AgentPool, ReconciliationEvent};
use crate::config;
use crate::gateway_relay;
use crate::io;
use crate::pipeline_state;
use crate::service;
use crate::service::status::StatusBroadcaster;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::broadcast;

/// Bridge CRDT state-transition events to the watcher broadcast channel and
/// spawn the auto-assign subscriber that triggers on active-stage transitions.
pub(crate) fn spawn_event_bridges(
    watcher_tx: broadcast::Sender<io::watcher::WatcherEvent>,
    project_root: Option<PathBuf>,
    agents: Arc<AgentPool>,
) {
    // CRDT → watcher bridge: translate CRDT stage-transition events into
    // WatcherEvent::WorkItem so downstream consumers (WebSocket, auto-assign)
    // see a uniform stream regardless of whether the event originated from the
    // filesystem watcher or from a CRDT sync peer.
    {
        let crdt_watcher_tx = watcher_tx.clone();
        let crdt_prune_root = project_root.clone();
        if let Some(mut crdt_rx) = crate::crdt_state::subscribe() {
            tokio::spawn(async move {
                while let Ok(evt) = crdt_rx.recv().await {
                    if crate::pipeline_state::Stage::from_dir(&evt.to_stage)
                        .is_some_and(|s| matches!(s, crate::pipeline_state::Stage::Archived { .. }))
                        && let Some(root) = crdt_prune_root.as_ref().cloned()
                    {
                        let story_id = evt.story_id.clone();
                        tokio::task::spawn_blocking(move || {
                            if let Err(e) = crate::worktree::prune_worktree_sync(&root, &story_id) {
                                crate::slog!("[crdt] worktree prune failed for {story_id}: {e}");
                            }
                        });
                    }
                    let (action, commit_msg) =
                        io::watcher::stage_metadata(&evt.to_stage, &evt.story_id)
                            .unwrap_or(("update", format!("huskies: update {}", evt.story_id)));
                    let watcher_evt = io::watcher::WatcherEvent::WorkItem {
                        stage: evt.to_stage,
                        item_id: evt.story_id,
                        action: action.to_string(),
                        commit_msg,
                        from_stage: evt.from_stage,
                    };
                    let _ = crdt_watcher_tx.send(watcher_evt);
                }
            });
        }
    }

    // Auto-assign: trigger `auto_assign_available_work` whenever a work item
    // enters an active pipeline stage (2_current/, 3_qa/, 4_merge/).
    if let Some(root) = project_root {
        let watcher_auto_rx = watcher_tx.subscribe();
        let watcher_auto_agents = Arc::clone(&agents);
        tokio::spawn(async move {
            let mut rx = watcher_auto_rx;
            while let Ok(event) = rx.recv().await {
                if let io::watcher::WatcherEvent::WorkItem { ref stage, .. } = event
                    && pipeline_state::Stage::from_dir(stage.as_str())
                        .is_some_and(|s| s.is_active())
                {
                    crate::slog!(
                        "[auto-assign] CRDT transition detected in {stage}/; \
                         triggering auto-assign."
                    );
                    watcher_auto_agents.auto_assign_available_work(&root).await;
                }
            }
        });
    }
}
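
// For illustration, a CRDT transition into `3_qa/` for story `42` would be
// re-broadcast roughly as follows (values mirror the fallback branch above;
// the exact output of `stage_metadata` is not shown in this commit):
//
//     io::watcher::WatcherEvent::WorkItem {
//         stage: "3_qa".into(),
//         item_id: "42".into(),
//         action: "update".to_string(),
//         commit_msg: "huskies: update 42".to_string(),
//         from_stage: /* the stage the item left */,
//     }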

/// Spawn the unified 1-second background tick loop.
///
/// Fires due timers, runs the agent watchdog every 30 ticks, and promotes
/// done→archived items every `sweep_interval_secs` ticks.
pub(crate) fn spawn_tick_loop(
    agents: Arc<AgentPool>,
    timer_store: Arc<service::timer::TimerStore>,
    root: Option<PathBuf>,
) {
    let sweep_cfg = root
        .as_ref()
        .and_then(|r| config::ProjectConfig::load(r).ok())
        .map(|c| c.watcher)
        .unwrap_or_default();
    let sweep_every = sweep_cfg.sweep_interval_secs.max(1);
    let done_retention = std::time::Duration::from_secs(sweep_cfg.done_retention_secs);
    let pending_count = timer_store.list().len();
    crate::slog!("[tick] Unified tick loop started; {pending_count} pending timer(s)");

    tokio::spawn(async move {
        let mut interval = tokio::time::interval(std::time::Duration::from_secs(1));
        let mut tick_count: u64 = 0;
        loop {
            interval.tick().await;
            tick_count = tick_count.wrapping_add(1);

            // Timer: fire due timers every second.
            if let Some(ref r) = root {
                let result = service::timer::tick_once(&timer_store, &agents, r).await;
                if let Err(msg) = result {
                    crate::slog_error!("[tick] Timer tick panicked: {msg}");
                }
            }

            // Watchdog: detect orphaned Running agents every 30 ticks.
            if tick_count.is_multiple_of(30) {
                let found = agents.run_watchdog_pass(root.as_deref());
                if found > 0 {
                    crate::slog!(
                        "[tick] {found} orphaned agent(s) detected; triggering auto-assign."
                    );
                    if let Some(ref r) = root {
                        agents.auto_assign_available_work(r).await;
                    }
                }
            }

            // Sweep: promote done→archived every sweep_interval_secs ticks.
            if tick_count.is_multiple_of(sweep_every) {
                io::watcher::sweep_done_to_archived(done_retention);
            }
        }
    });
}
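
// The sweep cadence comes from a `[watcher]` section of `project.toml`, assuming
// the TOML keys mirror the config fields read above (illustrative values):
//
//     [watcher]
//     sweep_interval_secs = 60    # clamped to >= 1 by the code above
//     done_retention_secs = 3600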

/// Spawn the gateway relay task if `gateway_url` is configured in
/// `project.toml` or the `HUSKIES_GATEWAY_URL` environment variable.
pub(crate) fn spawn_gateway_relay(startup_root: &Option<PathBuf>, status: Arc<StatusBroadcaster>) {
    let relay_gateway_url = startup_root
        .as_ref()
        .and_then(|r| config::ProjectConfig::load(r).ok())
        .and_then(|c| c.gateway_url)
        .or_else(|| std::env::var("HUSKIES_GATEWAY_URL").ok())
        .unwrap_or_default();

    if !relay_gateway_url.is_empty() {
        let relay_project_name = startup_root
            .as_ref()
            .and_then(|r| config::ProjectConfig::load(r).ok())
            .and_then(|c| c.gateway_project)
            .or_else(|| std::env::var("HUSKIES_GATEWAY_PROJECT").ok())
            .or_else(|| {
                startup_root
                    .as_ref()
                    .and_then(|r| r.file_name())
                    .map(|n| n.to_string_lossy().into_owned())
            })
            .unwrap_or_else(|| "project".to_string());

        gateway_relay::spawn_relay_task(
            relay_gateway_url,
            relay_project_name,
            status,
            reqwest::Client::new(),
        );
    }
}
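
// Environment-variable fallback, as read above (the URL value and the `huskies
// serve` invocation are illustrative):
//
//     HUSKIES_GATEWAY_URL=https://gateway.example \
//     HUSKIES_GATEWAY_PROJECT=my-project \
//     huskies serve
//
// Project-name resolution order: project.toml `gateway_project`, then the env
// var, then the root directory name, then the literal "project".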

/// Spawn the startup reconciliation task: reconcile any stories whose agent
/// work was committed while the server was offline, then auto-assign free agents.
pub(crate) fn spawn_startup_reconciliation(
    startup_root: Option<PathBuf>,
    startup_agents: Arc<AgentPool>,
    startup_reconciliation_tx: broadcast::Sender<ReconciliationEvent>,
) {
    if let Some(root) = startup_root {
        tokio::spawn(async move {
            crate::slog!("[startup] Reconciling completed worktrees from previous session.");
            startup_agents
                .reconcile_on_startup(&root, &startup_reconciliation_tx)
                .await;
            crate::slog!("[auto-assign] Scanning pipeline stages for unassigned work.");
            startup_agents.auto_assign_available_work(&root).await;
        });
    }
}
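
// Putting the pieces together: a plausible server-startup order for this module
// (hypothetical; the actual call site is outside this commit):
//
//     spawn_event_bridges(watcher_tx.clone(), startup_root.clone(), Arc::clone(&agents));
//     spawn_tick_loop(Arc::clone(&agents), Arc::clone(&timer_store), startup_root.clone());
//     spawn_gateway_relay(&startup_root, Arc::clone(&status));
//     spawn_startup_reconciliation(startup_root, Arc::clone(&agents), reconciliation_tx);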