Files
storkit/server/src/io/watcher.rs

1239 lines
49 KiB
Rust
Raw Normal View History

//! Filesystem watcher for `.story_kit/work/` and `.story_kit/project.toml`.
//!
//! Watches the work pipeline directories for file changes, infers the lifecycle
//! stage from the target directory name, auto-commits with a deterministic message,
//! and broadcasts a [`WatcherEvent`] to all connected WebSocket clients.
//!
//! Also watches `.story_kit/project.toml` for modifications and broadcasts
//! [`WatcherEvent::ConfigChanged`] so the frontend can reload the agent roster
//! without a server restart.
//!
//! # Debouncing
//! Events are buffered for 300 ms after the last activity. All changes within the
//! window are batched into a single `git add + commit`. This avoids double-commits
//! when `fs::rename` fires both a remove and a create event.
//!
//! # Race conditions
//! If a mutation handler (e.g. `move_story_to_current`) already committed the
//! change, `git commit` will return "nothing to commit". The watcher detects this
//! via exit-code inspection and silently skips the commit while still broadcasting
//! the event so connected clients stay in sync.
use crate::config::{ProjectConfig, WatcherConfig};
use crate::io::story_metadata::clear_front_matter_field;
use crate::slog;
use notify::{EventKind, RecommendedWatcher, RecursiveMode, Watcher, recommended_watcher};
use serde::Serialize;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::mpsc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::broadcast;
/// A lifecycle event emitted by the filesystem watcher.
///
/// Serialized for WebSocket clients as internally-tagged JSON: the variant
/// name becomes a snake_case `"type"` field (e.g. `{"type":"work_item",...}`).
#[derive(Clone, Debug, Serialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum WatcherEvent {
    /// A work-pipeline file was created, modified, or deleted.
    WorkItem {
        /// Pipeline stage directory (e.g. `"2_current"`, `"6_archived"`).
        stage: String,
        /// Work item ID (filename stem without extension, e.g. `"42_story_my_feature"`).
        item_id: String,
        /// Semantic action inferred from the stage (e.g. `"start"`, `"accept"`).
        action: String,
        /// The deterministic git commit message used (or that would have been used
        /// — intermediate stages broadcast without committing).
        commit_msg: String,
    },
    /// `.story_kit/project.toml` was modified at the project root (not inside a worktree).
    ConfigChanged,
    /// An agent's state changed (started, stopped, completed, etc.).
    /// Triggers a pipeline state refresh so the frontend can update agent
    /// assignments without waiting for a filesystem event.
    AgentStateChanged,
    /// A story encountered a failure (e.g. merge failure).
    /// Triggers an error notification to configured Matrix rooms.
    MergeFailure {
        /// Work item ID (e.g. `"42_story_my_feature"`).
        story_id: String,
        /// Human-readable description of the failure.
        reason: String,
    },
}
/// Return `true` if `path` is the root-level `.story_kit/project.toml`, i.e.
/// `{git_root}/.story_kit/project.toml`.
///
/// Returns `false` for paths inside worktree directories (paths containing
/// a `worktrees` component).
pub fn is_config_file(path: &Path, git_root: &Path) -> bool {
    // Worktree copies of project.toml must never be mistaken for the root config.
    let inside_worktree = path
        .components()
        .any(|component| component.as_os_str() == "worktrees");
    !inside_worktree && path == git_root.join(".story_kit").join("project.toml")
}
/// Map a pipeline directory name to a (action, commit-message-prefix) pair.
///
/// Returns `None` for directory names outside the known pipeline stages.
fn stage_metadata(stage: &str, item_id: &str) -> Option<(&'static str, String)> {
    match stage {
        "1_upcoming" => Some(("create", format!("story-kit: create {item_id}"))),
        "2_current" => Some(("start", format!("story-kit: start {item_id}"))),
        "3_qa" => Some(("qa", format!("story-kit: queue {item_id} for QA"))),
        "4_merge" => Some(("merge", format!("story-kit: queue {item_id} for merge"))),
        "5_done" => Some(("done", format!("story-kit: done {item_id}"))),
        "6_archived" => Some(("accept", format!("story-kit: accept {item_id}"))),
        _ => None,
    }
}
/// Return the pipeline stage name for a path if it is a `.md` file living
/// directly inside one of the known work subdirectories, otherwise `None`.
///
/// Explicitly returns `None` for any path under `.story_kit/worktrees/` so
/// that code changes made by agents in their isolated worktrees are never
/// auto-committed to master by the watcher.
fn stage_for_path(path: &Path) -> Option<String> {
    // Anything routed through the worktrees directory is off-limits.
    if path.components().any(|c| c.as_os_str() == "worktrees") {
        return None;
    }
    // Only Markdown work items count; no extension is also a rejection.
    match path.extension() {
        Some(ext) if ext == "md" => {}
        _ => return None,
    }
    // The stage is the name of the immediate parent directory.
    let parent_name = path.parent()?.file_name()?.to_str()?;
    match parent_name {
        "1_upcoming" | "2_current" | "3_qa" | "4_merge" | "5_done" | "6_archived" => {
            Some(parent_name.to_string())
        }
        _ => None,
    }
}
/// Stage all changes in the work directory and commit with the given message.
///
/// Uses `git add -A .story_kit/work/` to catch both additions and deletions in
/// a single commit. Returns `Ok(true)` if a commit was made, `Ok(false)` if
/// there was nothing to commit, and `Err` for unexpected failures.
fn git_add_work_and_commit(git_root: &Path, message: &str) -> Result<bool, String> {
    let work_rel = PathBuf::from(".story_kit").join("work");
    // Step 1: stage everything under .story_kit/work/ (adds AND deletions).
    let add = std::process::Command::new("git")
        .args(["add", "-A"])
        .arg(&work_rel)
        .current_dir(git_root)
        .output()
        .map_err(|e| format!("git add: {e}"))?;
    if !add.status.success() {
        return Err(format!(
            "git add failed: {}",
            String::from_utf8_lossy(&add.stderr)
        ));
    }
    // Step 2: commit; distinguish "nothing to commit" from real failures.
    let commit = std::process::Command::new("git")
        .args(["commit", "-m", message])
        .current_dir(git_root)
        .output()
        .map_err(|e| format!("git commit: {e}"))?;
    if commit.status.success() {
        Ok(true)
    } else {
        let stderr = String::from_utf8_lossy(&commit.stderr);
        let stdout = String::from_utf8_lossy(&commit.stdout);
        // Git reports "nothing to commit" on either stream depending on version.
        if stdout.contains("nothing to commit") || stderr.contains("nothing to commit") {
            Ok(false)
        } else {
            Err(format!("git commit failed: {stderr}"))
        }
    }
}
/// Stages that represent meaningful git checkpoints (creation and archival).
/// Intermediate stages (current, qa, merge, done) are transient pipeline state
/// that don't need to be committed — they're only relevant while the server is
/// running and are broadcast to WebSocket clients for real-time UI updates.
const COMMIT_WORTHY_STAGES: &[&str] = &["1_upcoming", "6_archived"];

/// Return `true` if changes in `stage` should be committed to git.
fn should_commit_stage(stage: &str) -> bool {
    COMMIT_WORTHY_STAGES.iter().any(|&candidate| candidate == stage)
}
/// Process a batch of pending (path → stage) entries: commit and broadcast.
///
/// Only files that still exist on disk are used to derive the commit message
/// (they represent the destination of a move or a new file). Deletions are
/// captured by `git add -A .story_kit/work/` automatically.
///
/// Only terminal stages (`1_upcoming` and `6_archived`) trigger git commits.
/// All stages broadcast a [`WatcherEvent`] so the frontend stays in sync.
///
/// NOTE(review): `pending` is a `HashMap`, so when a batch contains several
/// additions the entry chosen for the commit message / broadcast is arbitrary
/// — TODO confirm multi-file batches are acceptable with this behavior.
fn flush_pending(
    pending: &HashMap<PathBuf, String>,
    git_root: &Path,
    event_tx: &broadcast::Sender<WatcherEvent>,
) {
    // Separate into files that exist (additions) vs gone (deletions).
    let mut additions: Vec<(&PathBuf, &str)> = Vec::new();
    for (path, stage) in pending {
        if path.exists() {
            additions.push((path, stage.as_str()));
        }
    }
    // Pick the commit message from the first addition (the meaningful side of a move).
    // If there are only deletions, use a generic message.
    let (action, item_id, commit_msg) = if let Some((path, stage)) = additions.first() {
        let item = path.file_stem().and_then(|s| s.to_str()).unwrap_or("unknown");
        if let Some((act, msg)) = stage_metadata(stage, item) {
            (act, item.to_string(), msg)
        } else {
            // Unknown stage directory: nothing to commit or broadcast.
            return;
        }
    } else {
        // Only deletions — pick any pending path for the item name.
        let Some((path, _)) = pending.iter().next() else {
            // Empty batch: nothing to do.
            return;
        };
        let item = path.file_stem().and_then(|s| s.to_str()).unwrap_or("unknown");
        ("remove", item.to_string(), format!("story-kit: remove {item}"))
    };
    // Strip stale merge_failure front matter from any story that has left 4_merge/.
    // Best-effort: a failure here is logged and does not abort the flush.
    for (path, stage) in &additions {
        if *stage != "4_merge"
            && let Err(e) = clear_front_matter_field(path, "merge_failure")
        {
            slog!("[watcher] Warning: could not clear merge_failure from {}: {e}", path.display());
        }
    }
    // Only commit for terminal stages; intermediate moves are broadcast-only.
    // NOTE(review): for deletion-only batches `dest_stage` is "unknown", so pure
    // deletions are never committed and are broadcast with stage "unknown" even
    // though the source stage is known — confirm the frontend expects this.
    let dest_stage = additions.first().map_or("unknown", |(_, s)| *s);
    let should_commit = should_commit_stage(dest_stage);
    if should_commit {
        slog!("[watcher] flush: {commit_msg}");
        match git_add_work_and_commit(git_root, &commit_msg) {
            Ok(committed) => {
                if committed {
                    slog!("[watcher] committed: {commit_msg}");
                } else {
                    // A mutation handler already committed this change; see module docs.
                    slog!("[watcher] skipped (already committed): {commit_msg}");
                }
            }
            Err(e) => {
                // On a git failure, skip the broadcast too: clients should not
                // see an event for a change that was not persisted.
                slog!("[watcher] git error: {e}");
                return;
            }
        }
    } else {
        slog!("[watcher] flush (broadcast-only): {commit_msg}");
    }
    // Always broadcast the event so connected WebSocket clients stay in sync.
    // A send error (no receivers connected) is deliberately ignored.
    let evt = WatcherEvent::WorkItem {
        stage: dest_stage.to_string(),
        item_id,
        action: action.to_string(),
        commit_msg,
    };
    let _ = event_tx.send(evt);
}
/// Scan `work/5_done/` and move any `.md` files whose mtime is older than
/// `done_retention` to `work/6_archived/`. After each successful promotion,
/// removes the associated git worktree (if any) via [`crate::worktree::prune_worktree_sync`].
///
/// Also scans `work/6_archived/` for stories that still have a live worktree
/// and removes them (catches items that were archived before this sweep was
/// added).
///
/// Worktree removal failures are logged but never block the file move or other
/// cleanup work.
///
/// Called periodically from the watcher thread. File moves will trigger normal
/// watcher events, which `flush_pending` will commit and broadcast.
fn sweep_done_to_archived(work_dir: &Path, git_root: &Path, done_retention: Duration) {
    // ── Part 1: promote old items from 5_done/ → 6_archived/ ───────────────
    let done_dir = work_dir.join("5_done");
    if done_dir.exists() {
        let archived_dir = work_dir.join("6_archived");
        match std::fs::read_dir(&done_dir) {
            Err(e) => slog!("[watcher] sweep: failed to read 5_done/: {e}"),
            Ok(entries) => {
                // flatten() silently drops unreadable directory entries.
                for entry in entries.flatten() {
                    let path = entry.path();
                    // Only Markdown work items participate in the sweep.
                    if path.extension().is_none_or(|e| e != "md") {
                        continue;
                    }
                    // Entries whose mtime can't be read are retried next sweep.
                    let mtime = match entry.metadata().and_then(|m| m.modified()) {
                        Ok(t) => t,
                        Err(_) => continue,
                    };
                    // A future mtime (clock skew) counts as age zero.
                    let age = SystemTime::now()
                        .duration_since(mtime)
                        .unwrap_or_default();
                    if age >= done_retention {
                        if let Err(e) = std::fs::create_dir_all(&archived_dir) {
                            slog!("[watcher] sweep: failed to create 6_archived/: {e}");
                            continue;
                        }
                        let dest = archived_dir.join(entry.file_name());
                        match std::fs::rename(&path, &dest) {
                            Ok(()) => {
                                let item_id = path
                                    .file_stem()
                                    .and_then(|s| s.to_str())
                                    .unwrap_or("unknown");
                                slog!("[watcher] sweep: promoted {item_id} → 6_archived/");
                                // Prune the worktree for this story (best effort).
                                if let Err(e) =
                                    crate::worktree::prune_worktree_sync(git_root, item_id)
                                {
                                    slog!(
                                        "[watcher] sweep: worktree prune failed for {item_id}: {e}"
                                    );
                                }
                            }
                            Err(e) => {
                                // The file stays in 5_done/ and is retried next sweep.
                                slog!("[watcher] sweep: failed to move {}: {e}", path.display());
                            }
                        }
                    }
                }
            }
        }
    }
    // ── Part 2: prune stale worktrees for items already in 6_archived/ ──────
    let archived_dir = work_dir.join("6_archived");
    if archived_dir.exists()
        && let Ok(entries) = std::fs::read_dir(&archived_dir)
    {
        for entry in entries.flatten() {
            let path = entry.path();
            if path.extension().is_none_or(|e| e != "md") {
                continue;
            }
            // Prune is best-effort; failures are logged, never propagated.
            if let Some(item_id) = path.file_stem().and_then(|s| s.to_str())
                && let Err(e) = crate::worktree::prune_worktree_sync(git_root, item_id)
            {
                slog!("[watcher] sweep: worktree prune failed for {item_id}: {e}");
            }
        }
    }
}
/// Start the filesystem watcher on a dedicated OS thread.
///
/// `work_dir` — absolute path to `.story_kit/work/` (watched recursively).
/// `git_root` — project root (passed to `git` commands as cwd, and used to
/// derive the config file path `.story_kit/project.toml`).
/// `event_tx` — broadcast sender; each connected WebSocket client holds a receiver.
/// `watcher_config` — initial sweep configuration loaded from `project.toml`.
///
/// The thread runs until the notify channel disconnects (watcher dropped).
pub fn start_watcher(
    work_dir: PathBuf,
    git_root: PathBuf,
    event_tx: broadcast::Sender<WatcherEvent>,
    watcher_config: WatcherConfig,
) {
    std::thread::spawn(move || {
        // Bridge notify's callback API onto a plain mpsc channel so the loop
        // below can use recv_timeout for debouncing.
        let (notify_tx, notify_rx) = mpsc::channel::<notify::Result<notify::Event>>();
        let mut watcher: RecommendedWatcher = match recommended_watcher(move |res| {
            let _ = notify_tx.send(res);
        }) {
            Ok(w) => w,
            Err(e) => {
                slog!("[watcher] failed to create watcher: {e}");
                return;
            }
        };
        if let Err(e) = watcher.watch(&work_dir, RecursiveMode::Recursive) {
            slog!("[watcher] failed to watch {}: {e}", work_dir.display());
            return;
        }
        // Also watch .story_kit/project.toml for hot-reload of agent config.
        // Missing file or watch failure is non-fatal: work-dir watching continues.
        let config_file = git_root.join(".story_kit").join("project.toml");
        if config_file.exists()
            && let Err(e) = watcher.watch(&config_file, RecursiveMode::NonRecursive)
        {
            slog!("[watcher] failed to watch config file {}: {e}", config_file.display());
        }
        slog!("[watcher] watching {}", work_dir.display());
        // Events are batched until 300 ms pass with no new activity (see module docs).
        const DEBOUNCE: Duration = Duration::from_millis(300);
        // Mutable sweep config — hot-reloaded when project.toml changes.
        let mut sweep_interval = Duration::from_secs(watcher_config.sweep_interval_secs);
        let mut done_retention = Duration::from_secs(watcher_config.done_retention_secs);
        slog!(
            "[watcher] sweep_interval={}s done_retention={}s",
            watcher_config.sweep_interval_secs,
            watcher_config.done_retention_secs
        );
        // Map path → stage for pending (uncommitted) work-item changes.
        let mut pending: HashMap<PathBuf, String> = HashMap::new();
        // Whether a config file change is pending in the current debounce window.
        let mut config_changed_pending = false;
        // End of the current debounce window; None when no batch is open.
        let mut deadline: Option<Instant> = None;
        // Track when we last swept 5_done/ → 6_archived/.
        // Initialise to "now minus interval" so the first sweep runs on startup.
        let mut last_sweep = Instant::now()
            .checked_sub(sweep_interval)
            .unwrap_or_else(Instant::now);
        loop {
            // How long until the debounce window closes (or wait for next event).
            // The 60 s fallback bounds the wait when idle so periodic sweeps still run.
            let timeout = deadline.map_or(Duration::from_secs(60), |d| {
                d.saturating_duration_since(Instant::now())
            });
            let flush = match notify_rx.recv_timeout(timeout) {
                Ok(Ok(event)) => {
                    // Track creates, modifies, AND removes. Removes are needed so
                    // that standalone deletions trigger a flush, and so that moves
                    // (which fire Remove + Create) land in the same debounce window.
                    let is_relevant_kind = matches!(
                        event.kind,
                        EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_)
                    );
                    if is_relevant_kind {
                        for path in event.paths {
                            if is_config_file(&path, &git_root) {
                                slog!("[watcher] config change detected: {}", path.display());
                                config_changed_pending = true;
                                // Each event extends the debounce window.
                                deadline = Some(Instant::now() + DEBOUNCE);
                            } else if let Some(stage) = stage_for_path(&path) {
                                pending.insert(path, stage);
                                deadline = Some(Instant::now() + DEBOUNCE);
                            }
                        }
                    }
                    false
                }
                Ok(Err(e)) => {
                    slog!("[watcher] notify error: {e}");
                    false
                }
                // Debounce window expired — time to flush.
                Err(mpsc::RecvTimeoutError::Timeout) => true,
                Err(mpsc::RecvTimeoutError::Disconnected) => {
                    slog!("[watcher] channel disconnected, shutting down");
                    break;
                }
            };
            if flush {
                if !pending.is_empty() {
                    flush_pending(&pending, &git_root, &event_tx);
                    pending.clear();
                }
                if config_changed_pending {
                    slog!("[watcher] broadcasting agent_config_changed");
                    let _ = event_tx.send(WatcherEvent::ConfigChanged);
                    // Hot-reload sweep config from project.toml.
                    // Parse errors keep the previous values in effect.
                    match ProjectConfig::load(&git_root) {
                        Ok(cfg) => {
                            let new_sweep =
                                Duration::from_secs(cfg.watcher.sweep_interval_secs);
                            let new_retention =
                                Duration::from_secs(cfg.watcher.done_retention_secs);
                            if new_sweep != sweep_interval
                                || new_retention != done_retention
                            {
                                slog!(
                                    "[watcher] hot-reload: sweep_interval={}s done_retention={}s",
                                    cfg.watcher.sweep_interval_secs,
                                    cfg.watcher.done_retention_secs
                                );
                                sweep_interval = new_sweep;
                                done_retention = new_retention;
                            }
                        }
                        Err(e) => {
                            slog!("[watcher] hot-reload: failed to parse config: {e}");
                        }
                    }
                    config_changed_pending = false;
                }
                deadline = None;
                // Periodically promote old items from 5_done/ to 6_archived/.
                let now = Instant::now();
                if now.duration_since(last_sweep) >= sweep_interval {
                    last_sweep = now;
                    sweep_done_to_archived(&work_dir, &git_root, done_retention);
                }
            }
        }
    });
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashMap;
use std::fs;
use tempfile::TempDir;
/// Initialise a minimal git repo so commit operations work.
fn init_git_repo(dir: &std::path::Path) {
    use std::process::Command;
    // Run the same bootstrap sequence the production code relies on:
    // init, identity config, and an initial empty commit.
    let steps: &[(&[&str], &str)] = &[
        (&["init"], "git init"),
        (&["config", "user.email", "test@example.com"], "git config email"),
        (&["config", "user.name", "Test"], "git config name"),
        (&["commit", "--allow-empty", "-m", "init"], "git initial commit"),
    ];
    for (args, what) in steps {
        Command::new("git")
            .args(*args)
            .current_dir(dir)
            .output()
            .expect(what);
    }
}
/// Create the `.story_kit/work/{stage}/` dir tree inside `root`.
fn make_stage_dir(root: &std::path::Path, stage: &str) -> PathBuf {
    let stage_dir = [".story_kit", "work", stage]
        .iter()
        .fold(root.to_path_buf(), |acc, part| acc.join(part));
    fs::create_dir_all(&stage_dir).expect("create stage dir");
    stage_dir
}
// ── git_add_work_and_commit ───────────────────────────────────────────────
#[test]
fn git_commit_returns_true_when_file_added() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "2_current");
    // An untracked file under .story_kit/work/ — `git add -A` must stage it.
    fs::write(
        stage_dir.join("42_story_foo.md"),
        "---\nname: test\n---\n",
    )
    .unwrap();
    let result = git_add_work_and_commit(tmp.path(), "story-kit: start 42_story_foo");
    assert_eq!(result, Ok(true), "should return Ok(true) when a commit was made");
}
#[test]
fn git_commit_returns_false_when_nothing_to_commit() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "2_current");
    fs::write(
        stage_dir.join("42_story_foo.md"),
        "---\nname: test\n---\n",
    )
    .unwrap();
    // First commit — should succeed.
    git_add_work_and_commit(tmp.path(), "story-kit: start 42_story_foo").unwrap();
    // Second call with no changes — should return Ok(false).
    // This is how the watcher detects "a mutation handler already committed".
    let result = git_add_work_and_commit(tmp.path(), "story-kit: start 42_story_foo");
    assert_eq!(
        result,
        Ok(false),
        "should return Ok(false) when nothing to commit"
    );
}
// ── flush_pending ─────────────────────────────────────────────────────────
#[test]
fn flush_pending_commits_and_broadcasts_for_terminal_stage() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "1_upcoming");
    let story_path = stage_dir.join("42_story_foo.md");
    fs::write(&story_path, "---\nname: test\n---\n").unwrap();
    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path, "1_upcoming".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // 1_upcoming is a terminal (commit-worthy) stage: expect event AND commit.
    let evt = rx.try_recv().expect("expected a broadcast event");
    match evt {
        WatcherEvent::WorkItem {
            stage,
            item_id,
            action,
            commit_msg,
        } => {
            assert_eq!(stage, "1_upcoming");
            assert_eq!(item_id, "42_story_foo");
            assert_eq!(action, "create");
            assert_eq!(commit_msg, "story-kit: create 42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }
    // Verify the file was actually committed.
    let log = std::process::Command::new("git")
        .args(["log", "--oneline", "-1"])
        .current_dir(tmp.path())
        .output()
        .expect("git log");
    let log_msg = String::from_utf8_lossy(&log.stdout);
    assert!(
        log_msg.contains("story-kit: create 42_story_foo"),
        "terminal stage should produce a git commit"
    );
}
#[test]
fn flush_pending_broadcasts_without_commit_for_intermediate_stage() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "2_current");
    let story_path = stage_dir.join("42_story_foo.md");
    fs::write(&story_path, "---\nname: test\n---\n").unwrap();
    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path, "2_current".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // Event should still be broadcast for frontend sync.
    let evt = rx.try_recv().expect("expected a broadcast event");
    match evt {
        WatcherEvent::WorkItem {
            stage,
            item_id,
            action,
            commit_msg,
        } => {
            assert_eq!(stage, "2_current");
            assert_eq!(item_id, "42_story_foo");
            assert_eq!(action, "start");
            assert_eq!(commit_msg, "story-kit: start 42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }
    // Verify NO git commit was made (only the initial empty commit should exist).
    // 2_current is not in COMMIT_WORTHY_STAGES, so the flush is broadcast-only.
    let log = std::process::Command::new("git")
        .args(["log", "--oneline"])
        .current_dir(tmp.path())
        .output()
        .expect("git log");
    let log_msg = String::from_utf8_lossy(&log.stdout);
    assert!(
        !log_msg.contains("story-kit:"),
        "intermediate stage should NOT produce a git commit"
    );
}
#[test]
fn flush_pending_broadcasts_for_all_pipeline_stages() {
    // (stage dir, expected action, expected commit message) for each stage.
    let stages = [
        ("1_upcoming", "create", "story-kit: create 10_story_x"),
        ("3_qa", "qa", "story-kit: queue 10_story_x for QA"),
        ("4_merge", "merge", "story-kit: queue 10_story_x for merge"),
        ("5_done", "done", "story-kit: done 10_story_x"),
        ("6_archived", "accept", "story-kit: accept 10_story_x"),
    ];
    for (stage, expected_action, expected_msg) in stages {
        let tmp = TempDir::new().unwrap();
        init_git_repo(tmp.path());
        let stage_dir = make_stage_dir(tmp.path(), stage);
        let story_path = stage_dir.join("10_story_x.md");
        fs::write(&story_path, "---\nname: test\n---\n").unwrap();
        let (tx, mut rx) = tokio::sync::broadcast::channel(16);
        let mut pending = HashMap::new();
        pending.insert(story_path, stage.to_string());
        flush_pending(&pending, tmp.path(), &tx);
        // All stages should broadcast events regardless of commit behavior.
        // Fix: `expect("... {stage}")` does NOT interpolate — use an explicit
        // panic! so the failing stage actually appears in the message.
        let evt = rx
            .try_recv()
            .unwrap_or_else(|e| panic!("expected broadcast for stage {stage}: {e}"));
        match evt {
            WatcherEvent::WorkItem {
                action, commit_msg, ..
            } => {
                assert_eq!(action, expected_action, "stage {stage}");
                assert_eq!(commit_msg, expected_msg, "stage {stage}");
            }
            other => panic!("unexpected event for stage {stage}: {other:?}"),
        }
    }
}
#[test]
fn flush_pending_deletion_only_broadcasts_remove_event() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    // Create the work dir tree but NOT the file (simulates a deletion).
    make_stage_dir(tmp.path(), "2_current");
    let deleted_path = tmp
        .path()
        .join(".story_kit")
        .join("work")
        .join("2_current")
        .join("42_story_foo.md");
    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(deleted_path, "2_current".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // Even when nothing was committed (file never existed), an event is broadcast.
    // Note: only action/item_id are asserted; deletion-only batches report the
    // synthetic "remove" action.
    let evt = rx.try_recv().expect("expected a broadcast event for deletion");
    match evt {
        WatcherEvent::WorkItem {
            action, item_id, ..
        } => {
            assert_eq!(action, "remove");
            assert_eq!(item_id, "42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
#[test]
fn flush_pending_skips_unknown_stage_for_addition() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    // File sits in an unrecognised directory.
    let unknown_dir = tmp.path().join(".story_kit").join("work").join("9_unknown");
    fs::create_dir_all(&unknown_dir).unwrap();
    let path = unknown_dir.join("42_story_foo.md");
    fs::write(&path, "---\nname: test\n---\n").unwrap();
    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(path, "9_unknown".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // No event should be broadcast because stage_metadata returns None for unknown stages.
    // flush_pending returns early before committing or broadcasting anything.
    assert!(
        rx.try_recv().is_err(),
        "no event should be broadcast for unknown stage"
    );
}
#[test]
fn flush_pending_empty_pending_does_nothing() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    make_stage_dir(tmp.path(), "2_current");
    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    // An empty batch exercises the `pending.iter().next()` early-return path.
    let pending: HashMap<PathBuf, String> = HashMap::new();
    // Should not panic and should not broadcast anything.
    flush_pending(&pending, tmp.path(), &tx);
    assert!(rx.try_recv().is_err(), "no event for empty pending map");
}
// ── flush_pending clears merge_failure ─────────────────────────────────────
#[test]
fn flush_pending_clears_merge_failure_when_leaving_merge_stage() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "2_current");
    let story_path = stage_dir.join("50_story_retry.md");
    // Story carries stale merge_failure front matter from a previous attempt.
    fs::write(
        &story_path,
        "---\nname: Retry Story\nmerge_failure: \"conflicts detected\"\n---\n# Story\n",
    )
    .unwrap();
    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path.clone(), "2_current".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    let contents = fs::read_to_string(&story_path).unwrap();
    assert!(
        !contents.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 2_current"
    );
    // Other front-matter fields must survive the strip.
    assert!(contents.contains("name: Retry Story"));
}
#[test]
fn flush_pending_clears_merge_failure_when_moving_to_upcoming() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "1_upcoming");
    let story_path = stage_dir.join("51_story_reset.md");
    fs::write(
        &story_path,
        "---\nname: Reset Story\nmerge_failure: \"gate failed\"\n---\n# Story\n",
    )
    .unwrap();
    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path.clone(), "1_upcoming".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // Any stage other than 4_merge must strip the stale failure marker.
    let contents = fs::read_to_string(&story_path).unwrap();
    assert!(
        !contents.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 1_upcoming"
    );
}
#[test]
fn flush_pending_clears_merge_failure_when_moving_to_done() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "5_done");
    let story_path = stage_dir.join("52_story_done.md");
    fs::write(
        &story_path,
        "---\nname: Done Story\nmerge_failure: \"stale error\"\n---\n# Story\n",
    )
    .unwrap();
    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path.clone(), "5_done".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // A story that reached 5_done has left merge; the marker is stale.
    let contents = fs::read_to_string(&story_path).unwrap();
    assert!(
        !contents.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 5_done"
    );
}
#[test]
fn flush_pending_preserves_merge_failure_when_in_merge_stage() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "4_merge");
    let story_path = stage_dir.join("53_story_merging.md");
    fs::write(
        &story_path,
        "---\nname: Merging Story\nmerge_failure: \"conflicts\"\n---\n# Story\n",
    )
    .unwrap();
    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path.clone(), "4_merge".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // 4_merge is the one stage where the failure marker is still live state.
    let contents = fs::read_to_string(&story_path).unwrap();
    assert!(
        contents.contains("merge_failure"),
        "merge_failure should be preserved when story is in 4_merge"
    );
}
#[test]
fn flush_pending_no_op_when_no_merge_failure() {
    let tmp = TempDir::new().unwrap();
    init_git_repo(tmp.path());
    let stage_dir = make_stage_dir(tmp.path(), "2_current");
    let story_path = stage_dir.join("54_story_clean.md");
    let original = "---\nname: Clean Story\n---\n# Story\n";
    fs::write(&story_path, original).unwrap();
    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let mut pending = HashMap::new();
    pending.insert(story_path.clone(), "2_current".to_string());
    flush_pending(&pending, tmp.path(), &tx);
    // Stripping a field that isn't present must leave the file byte-identical.
    let contents = fs::read_to_string(&story_path).unwrap();
    assert_eq!(contents, original, "file without merge_failure should be unchanged");
}
// ── stage_for_path (additional edge cases) ────────────────────────────────
#[test]
fn stage_for_path_recognises_pipeline_dirs() {
    let base = PathBuf::from("/proj/.story_kit/work");
    // Known stage dirs holding .md files resolve to their stage name.
    for (rel, stage) in [
        ("2_current/42_story_foo.md", "2_current"),
        ("5_done/10_bug_bar.md", "5_done"),
        ("6_archived/10_bug_bar.md", "6_archived"),
    ] {
        assert_eq!(stage_for_path(&base.join(rel)), Some(stage.to_string()));
    }
    // Unknown directory name and non-.md extension are both rejected.
    assert_eq!(stage_for_path(&base.join("other/file.md")), None);
    assert_eq!(stage_for_path(&base.join("2_current/42_story_foo.txt")), None);
}
#[test]
fn stage_for_path_ignores_worktree_paths() {
    let worktrees = PathBuf::from("/proj/.story_kit/worktrees");
    // Code changes inside a worktree must be ignored.
    let code_file = worktrees.join("42_story_foo/server/src/main.rs");
    assert_eq!(stage_for_path(&code_file), None);
    // Even a worktree path that embeds a pipeline-stage component is ignored.
    let nested_stage =
        worktrees.join("42_story_foo/.story_kit/work/2_current/42_story_foo.md");
    assert_eq!(stage_for_path(&nested_stage), None);
    // "worktrees" as a substring of a longer component must NOT filter the path.
    let lookalike =
        PathBuf::from("/proj/.story_kit/work/2_current/not_worktrees_story.md");
    assert_eq!(stage_for_path(&lookalike), Some("2_current".to_string()));
}
#[test]
fn should_commit_stage_only_for_terminal_stages() {
    // Terminal stages — the only ones that trigger git commits.
    for stage in ["1_upcoming", "6_archived"] {
        assert!(should_commit_stage(stage), "{stage} should commit");
    }
    // Intermediate and unknown stages are broadcast-only.
    for stage in ["2_current", "3_qa", "4_merge", "5_done", "unknown"] {
        assert!(!should_commit_stage(stage), "{stage} should not commit");
    }
}
#[test]
fn stage_metadata_returns_correct_actions() {
    // Representative stage → (action, message) pairs.
    let cases = [
        ("2_current", "start", "story-kit: start 42_story_foo"),
        ("5_done", "done", "story-kit: done 42_story_foo"),
        ("6_archived", "accept", "story-kit: accept 42_story_foo"),
    ];
    for (stage, want_action, want_msg) in cases {
        let (action, msg) = stage_metadata(stage, "42_story_foo").unwrap();
        assert_eq!(action, want_action);
        assert_eq!(msg, want_msg);
    }
    // Anything outside the pipeline yields no metadata.
    assert!(stage_metadata("unknown", "id").is_none());
}
#[test]
fn is_config_file_identifies_root_project_toml() {
    // The canonical {git_root}/.story_kit/project.toml path must match.
    let git_root = PathBuf::from("/proj");
    let config = git_root.join(".story_kit").join("project.toml");
    assert!(is_config_file(config.as_path(), git_root.as_path()));
}
#[test]
fn is_config_file_rejects_worktree_copies() {
    // project.toml inside a worktree must NOT be treated as the root config,
    // even though it ends with the same relative path.
    let git_root = PathBuf::from("/proj");
    let worktree_config = PathBuf::from(
        "/proj/.story_kit/worktrees/42_story_foo/.story_kit/project.toml",
    );
    let matched = is_config_file(&worktree_config, &git_root);
    assert!(!matched);
}
#[test]
fn is_config_file_rejects_other_files() {
    let git_root = PathBuf::from("/proj");
    // Random files under .story_kit/ must not match the config path.
    let non_config = [
        "/proj/.story_kit/work/2_current/42_story_foo.md",
        "/proj/.story_kit/README.md",
    ];
    for candidate in non_config {
        assert!(!is_config_file(&PathBuf::from(candidate), &git_root));
    }
}
#[test]
fn is_config_file_rejects_wrong_root() {
    // A project.toml under a different git root is not our config.
    let git_root = PathBuf::from("/proj");
    let foreign = PathBuf::from("/other/.story_kit/project.toml");
    assert!(!is_config_file(foreign.as_path(), git_root.as_path()));
}
// ── sweep_done_to_archived ────────────────────────────────────────────────
#[test]
fn sweep_moves_old_items_to_archived() {
    let tmp = TempDir::new().unwrap();
    let work_dir = tmp.path().join(".story_kit").join("work");
    let done_dir = work_dir.join("5_done");
    let archived_dir = work_dir.join("6_archived");
    fs::create_dir_all(&done_dir).unwrap();
    // Write a file and backdate its mtime to 5 hours ago.
    let story_path = done_dir.join("10_story_old.md");
    fs::write(&story_path, "---\nname: old\n---\n").unwrap();
    let past = SystemTime::now()
        .checked_sub(Duration::from_secs(5 * 60 * 60))
        .unwrap();
    filetime::set_file_mtime(&story_path, filetime::FileTime::from_system_time(past))
        .unwrap();
    // 5-hour-old file vs 4-hour retention → must be promoted.
    let retention = Duration::from_secs(4 * 60 * 60);
    // tmp.path() has no worktrees dir — prune_worktree_sync is a no-op.
    sweep_done_to_archived(&work_dir, tmp.path(), retention);
    assert!(!story_path.exists(), "old item should be moved out of 5_done/");
    assert!(
        archived_dir.join("10_story_old.md").exists(),
        "old item should appear in 6_archived/"
    );
}
#[test]
fn sweep_keeps_recent_items_in_done() {
    let tmp = TempDir::new().unwrap();
    let work_dir = tmp.path().join(".story_kit").join("work");
    let done = work_dir.join("5_done");
    fs::create_dir_all(&done).unwrap();
    // A freshly written file has mtime ≈ now, well inside the retention window.
    let story = done.join("11_story_new.md");
    fs::write(&story, "---\nname: new\n---\n").unwrap();
    sweep_done_to_archived(&work_dir, tmp.path(), Duration::from_secs(4 * 60 * 60));
    assert!(story.exists(), "recent item should remain in 5_done/");
}
#[test]
fn sweep_respects_custom_retention() {
    let tmp = TempDir::new().unwrap();
    let work_dir = tmp.path().join(".story_kit").join("work");
    let done = work_dir.join("5_done");
    let archived = work_dir.join("6_archived");
    fs::create_dir_all(&done).unwrap();
    // Backdate the story's mtime to 2 minutes ago.
    let story = done.join("12_story_custom.md");
    fs::write(&story, "---\nname: custom\n---\n").unwrap();
    let two_minutes_ago = SystemTime::now()
        .checked_sub(Duration::from_secs(120))
        .unwrap();
    filetime::set_file_mtime(&story, filetime::FileTime::from_system_time(two_minutes_ago))
        .unwrap();
    // A 1-minute retention makes the 2-minute-old file eligible.
    sweep_done_to_archived(&work_dir, tmp.path(), Duration::from_secs(60));
    assert!(
        !story.exists(),
        "item older than custom retention should be moved"
    );
    assert!(
        archived.join("12_story_custom.md").exists(),
        "item should appear in 6_archived/"
    );
}
#[test]
fn sweep_custom_retention_keeps_younger_items() {
    let tmp = TempDir::new().unwrap();
    let work_dir = tmp.path().join(".story_kit").join("work");
    let done = work_dir.join("5_done");
    fs::create_dir_all(&done).unwrap();
    // Backdate the story's mtime to 30 seconds ago.
    let story = done.join("13_story_young.md");
    fs::write(&story, "---\nname: young\n---\n").unwrap();
    let thirty_secs_ago = SystemTime::now()
        .checked_sub(Duration::from_secs(30))
        .unwrap();
    filetime::set_file_mtime(&story, filetime::FileTime::from_system_time(thirty_secs_ago))
        .unwrap();
    // With a 1-minute retention the 30-second-old file stays put.
    sweep_done_to_archived(&work_dir, tmp.path(), Duration::from_secs(60));
    assert!(
        story.exists(),
        "item younger than custom retention should remain"
    );
}
// ── sweep worktree pruning ─────────────────────────────────────────────
/// Helper: create a real git worktree at `wt_path` on a new branch.
///
/// Panics with git's stderr when `git worktree add` fails, so tests surface
/// the underlying git error instead of a later, opaque `wt_path.exists()`
/// assertion failure.
fn create_git_worktree(git_root: &std::path::Path, wt_path: &std::path::Path, branch: &str) {
    use std::process::Command;
    // Create the branch first (ignore errors if it already exists).
    let _ = Command::new("git")
        .args(["branch", branch])
        .current_dir(git_root)
        .output();
    let out = Command::new("git")
        .args(["worktree", "add", &wt_path.to_string_lossy(), branch])
        .current_dir(git_root)
        .output()
        .expect("git worktree add");
    // `output()` returns Ok even when the command exits non-zero, so the
    // exit status must be checked explicitly for the failure to be loud.
    assert!(
        out.status.success(),
        "git worktree add failed: {}",
        String::from_utf8_lossy(&out.stderr)
    );
}
#[test]
fn sweep_prunes_worktree_when_story_promoted_to_archived() {
    let tmp = TempDir::new().unwrap();
    let git_root = tmp.path().to_path_buf();
    init_git_repo(&git_root);
    let work_dir = git_root.join(".story_kit").join("work");
    let done = work_dir.join("5_done");
    fs::create_dir_all(&done).unwrap();
    // Seed a done story that is old enough (5 h) to exceed the 4 h retention.
    let story_id = "60_story_prune_on_promote";
    let story = done.join(format!("{story_id}.md"));
    fs::write(&story, "---\nname: test\n---\n").unwrap();
    let stale_mtime = SystemTime::now()
        .checked_sub(Duration::from_secs(5 * 60 * 60))
        .unwrap();
    filetime::set_file_mtime(&story, filetime::FileTime::from_system_time(stale_mtime))
        .unwrap();
    // Register a genuine git worktree for the story.
    let wt = crate::worktree::worktree_path(&git_root, story_id);
    fs::create_dir_all(wt.parent().unwrap()).unwrap();
    create_git_worktree(&git_root, &wt, &format!("feature/story-{story_id}"));
    assert!(wt.exists(), "worktree must exist before sweep");
    sweep_done_to_archived(&work_dir, &git_root, Duration::from_secs(4 * 60 * 60));
    // The story is promoted to 6_archived …
    assert!(!story.exists(), "story should be moved out of 5_done/");
    assert!(
        work_dir
            .join("6_archived")
            .join(format!("{story_id}.md"))
            .exists(),
        "story should appear in 6_archived/"
    );
    // … and its worktree is cleaned up.
    assert!(!wt.exists(), "worktree should be removed after archiving");
}
#[test]
fn sweep_prunes_worktrees_for_already_archived_stories() {
    let tmp = TempDir::new().unwrap();
    let git_root = tmp.path().to_path_buf();
    init_git_repo(&git_root);
    let work_dir = git_root.join(".story_kit").join("work");
    let archived = work_dir.join("6_archived");
    fs::create_dir_all(&archived).unwrap();
    // The story already sits in 6_archived, but its worktree lingers.
    let story_id = "61_story_stale_worktree";
    let story = archived.join(format!("{story_id}.md"));
    fs::write(&story, "---\nname: stale\n---\n").unwrap();
    let wt = crate::worktree::worktree_path(&git_root, story_id);
    fs::create_dir_all(wt.parent().unwrap()).unwrap();
    create_git_worktree(&git_root, &wt, &format!("feature/story-{story_id}"));
    assert!(wt.exists(), "stale worktree must exist before sweep");
    // An empty 5_done/ means only the pruning half of the sweep does work.
    fs::create_dir_all(work_dir.join("5_done")).unwrap();
    sweep_done_to_archived(&work_dir, &git_root, Duration::from_secs(4 * 60 * 60));
    assert!(!wt.exists(), "stale worktree should be pruned by sweep");
    assert!(story.exists(), "archived story file must not be removed");
}
#[test]
fn sweep_archives_story_even_when_worktree_removal_fails() {
    // A real git repo lets prune_worktree_sync attempt removal, but the plain
    // directory created below is not a registered git worktree, so
    // `git worktree remove` fails — archiving must still proceed.
    let tmp = TempDir::new().unwrap();
    let git_root = tmp.path().to_path_buf();
    init_git_repo(&git_root);
    let work_dir = git_root.join(".story_kit").join("work");
    let done = work_dir.join("5_done");
    fs::create_dir_all(&done).unwrap();
    let story_id = "62_story_fake_worktree";
    let story = done.join(format!("{story_id}.md"));
    fs::write(&story, "---\nname: test\n---\n").unwrap();
    // Backdate the story 5 hours so it exceeds the 4-hour retention.
    let stale_mtime = SystemTime::now()
        .checked_sub(Duration::from_secs(5 * 60 * 60))
        .unwrap();
    filetime::set_file_mtime(&story, filetime::FileTime::from_system_time(stale_mtime))
        .unwrap();
    // Plain directory at the expected worktree path — not a real git worktree.
    fs::create_dir_all(crate::worktree::worktree_path(&git_root, story_id)).unwrap();
    sweep_done_to_archived(&work_dir, &git_root, Duration::from_secs(4 * 60 * 60));
    // Worktree removal fails, yet the story must be archived anyway.
    assert!(
        !story.exists(),
        "story should be archived even when worktree removal fails"
    );
    assert!(
        work_dir
            .join("6_archived")
            .join(format!("{story_id}.md"))
            .exists(),
        "story should appear in 6_archived/ despite worktree removal failure"
    );
}
}