//! Tests for the huskies work watcher: git commit helpers, `flush_pending`
//! broadcast/commit behaviour, path/stage classification, config-file
//! detection, and the done→archived sweep.
use super::*;
|
||
|
|
use std::collections::HashMap;
|
||
|
|
use std::fs;
|
||
|
|
use tempfile::TempDir;
|
||
|
|
|
||
|
|
/// Initialise a minimal git repo so commit operations work.
///
/// Configures a throwaway identity and creates an initial empty commit so
/// later `git commit` calls have a parent. Each git step's exit status is
/// checked: the original version only caught spawn failures, so a failing
/// git command (nonzero exit) was silently ignored and surfaced later as a
/// confusing downstream test failure.
fn init_git_repo(dir: &std::path::Path) {
    use std::process::Command;

    // Run one git command in `dir` and panic with stderr if it failed.
    let git = |args: &[&str]| {
        let out = Command::new("git")
            .args(args)
            .current_dir(dir)
            .output()
            .unwrap_or_else(|e| panic!("failed to spawn git {args:?}: {e}"));
        assert!(
            out.status.success(),
            "git {:?} failed: {}",
            args,
            String::from_utf8_lossy(&out.stderr)
        );
    };

    git(&["init"]);
    git(&["config", "user.email", "test@example.com"]);
    git(&["config", "user.name", "Test"]);
    git(&["commit", "--allow-empty", "-m", "init"]);
}
|
||
|
|
|
||
|
|
/// Create the `.huskies/work/{stage}/` dir tree inside `root`.
fn make_stage_dir(root: &std::path::Path, stage: &str) -> PathBuf {
    let mut dir = root.to_path_buf();
    for part in [".huskies", "work", stage] {
        dir.push(part);
    }
    fs::create_dir_all(&dir).expect("create stage dir");
    dir
}
|
||
|
|
|
||
|
|
// ── git_add_work_and_commit ───────────────────────────────────────────────
|
||
|
|
|
||
|
|
#[test]
fn git_commit_returns_true_when_file_added() {
    // Arrange: a fresh repo with one new story file in the work tree.
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let work_dir = make_stage_dir(repo.path(), "2_current");
    fs::write(work_dir.join("42_story_foo.md"), "---\nname: test\n---\n").unwrap();

    // Act + assert: the helper commits the new file and reports it did so.
    let outcome = git_add_work_and_commit(repo.path(), "huskies: start 42_story_foo");
    assert_eq!(
        outcome,
        Ok(true),
        "should return Ok(true) when a commit was made"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn git_commit_returns_false_when_nothing_to_commit() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let work_dir = make_stage_dir(repo.path(), "2_current");
    fs::write(work_dir.join("42_story_foo.md"), "---\nname: test\n---\n").unwrap();

    // First commit — should succeed.
    git_add_work_and_commit(repo.path(), "huskies: start 42_story_foo").unwrap();

    // Second call with no changes — should return Ok(false).
    let second = git_add_work_and_commit(repo.path(), "huskies: start 42_story_foo");
    assert_eq!(
        second,
        Ok(false),
        "should return Ok(false) when nothing to commit"
    );
}
|
||
|
|
|
||
|
|
// ── flush_pending ─────────────────────────────────────────────────────────
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_commits_and_broadcasts_for_terminal_stage() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "1_backlog").join("42_story_foo.md");
    fs::write(&story, "---\nname: test\n---\n").unwrap();

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story, "1_backlog".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // A WorkItem event with the terminal-stage metadata must be broadcast.
    match rx.try_recv().expect("expected a broadcast event") {
        WatcherEvent::WorkItem {
            stage,
            item_id,
            action,
            commit_msg,
            ..
        } => {
            assert_eq!(stage, "1_backlog");
            assert_eq!(item_id, "42_story_foo");
            assert_eq!(action, "create");
            assert_eq!(commit_msg, "huskies: create 42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }

    // Verify the file was actually committed.
    let out = std::process::Command::new("git")
        .args(["log", "--oneline", "-1"])
        .current_dir(repo.path())
        .output()
        .expect("git log");
    assert!(
        String::from_utf8_lossy(&out.stdout).contains("huskies: create 42_story_foo"),
        "terminal stage should produce a git commit"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_broadcasts_without_commit_for_intermediate_stage() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "2_current").join("42_story_foo.md");
    fs::write(&story, "---\nname: test\n---\n").unwrap();

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story, "2_current".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // Event should still be broadcast for frontend sync.
    match rx.try_recv().expect("expected a broadcast event") {
        WatcherEvent::WorkItem {
            stage,
            item_id,
            action,
            commit_msg,
            ..
        } => {
            assert_eq!(stage, "2_current");
            assert_eq!(item_id, "42_story_foo");
            assert_eq!(action, "start");
            assert_eq!(commit_msg, "huskies: start 42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }

    // Verify NO git commit was made (only the initial empty commit should exist).
    let out = std::process::Command::new("git")
        .args(["log", "--oneline"])
        .current_dir(repo.path())
        .output()
        .expect("git log");
    assert!(
        !String::from_utf8_lossy(&out.stdout).contains("huskies:"),
        "intermediate stage should NOT produce a git commit"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_broadcasts_for_all_pipeline_stages() {
    // (stage dir, expected action, expected commit message)
    let stages = [
        ("1_backlog", "create", "huskies: create 10_story_x"),
        ("3_qa", "qa", "huskies: queue 10_story_x for QA"),
        ("4_merge", "merge", "huskies: queue 10_story_x for merge"),
        ("5_done", "done", "huskies: done 10_story_x"),
        ("6_archived", "accept", "huskies: accept 10_story_x"),
    ];

    for (stage, expected_action, expected_msg) in stages {
        let tmp = TempDir::new().unwrap();
        init_git_repo(tmp.path());
        let stage_dir = make_stage_dir(tmp.path(), stage);
        let story_path = stage_dir.join("10_story_x.md");
        fs::write(&story_path, "---\nname: test\n---\n").unwrap();

        let (tx, mut rx) = tokio::sync::broadcast::channel(16);
        let mut pending = HashMap::new();
        pending.insert(story_path, stage.to_string());

        flush_pending(&pending, tmp.path(), &tx);

        // All stages should broadcast events regardless of commit behavior.
        // NOTE: `expect` takes a plain &str and does NOT interpolate `{stage}`;
        // use unwrap_or_else + panic! so the failing stage is actually named.
        let evt = rx
            .try_recv()
            .unwrap_or_else(|e| panic!("expected broadcast for stage {stage}: {e:?}"));
        match evt {
            WatcherEvent::WorkItem {
                action, commit_msg, ..
            } => {
                assert_eq!(action, expected_action, "stage {stage}");
                assert_eq!(commit_msg, expected_msg, "stage {stage}");
            }
            other => panic!("unexpected event for stage {stage}: {other:?}"),
        }
    }
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_deletion_only_broadcasts_remove_event() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());

    // Create the work dir tree but NOT the file (simulates a deletion).
    let missing = make_stage_dir(repo.path(), "2_current").join("42_story_foo.md");

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(missing, "2_current".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // Even when nothing was committed (file never existed), an event is broadcast.
    let event = rx
        .try_recv()
        .expect("expected a broadcast event for deletion");
    match event {
        WatcherEvent::WorkItem {
            action, item_id, ..
        } => {
            assert_eq!(action, "remove");
            assert_eq!(item_id, "42_story_foo");
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_skips_unknown_stage_for_addition() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());

    // File sits in an unrecognised directory.
    let odd_dir = repo.path().join(".huskies").join("work").join("9_unknown");
    fs::create_dir_all(&odd_dir).unwrap();
    let story = odd_dir.join("42_story_foo.md");
    fs::write(&story, "---\nname: test\n---\n").unwrap();

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story, "9_unknown".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // No event should be broadcast because stage_metadata returns None for unknown stages.
    assert!(
        rx.try_recv().is_err(),
        "no event should be broadcast for unknown stage"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_empty_pending_does_nothing() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    make_stage_dir(repo.path(), "2_current");

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let empty: HashMap<PathBuf, String> = HashMap::default();

    // Should not panic and should not broadcast anything.
    flush_pending(&empty, repo.path(), &tx);
    assert!(rx.try_recv().is_err(), "no event for empty pending map");
}
|
||
|
|
|
||
|
|
// ── flush_pending clears merge_failure ─────────────────────────────────────
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_clears_merge_failure_when_leaving_merge_stage() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "2_current").join("50_story_retry.md");
    fs::write(
        &story,
        "---\nname: Retry Story\nmerge_failure: \"conflicts detected\"\n---\n# Story\n",
    )
    .unwrap();

    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story.clone(), "2_current".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // Leaving the merge stage must strip the stale failure note but keep the rest.
    let after = fs::read_to_string(&story).unwrap();
    assert!(
        !after.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 2_current"
    );
    assert!(after.contains("name: Retry Story"));
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_clears_merge_failure_when_moving_to_backlog() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "1_backlog").join("51_story_reset.md");
    fs::write(
        &story,
        "---\nname: Reset Story\nmerge_failure: \"gate failed\"\n---\n# Story\n",
    )
    .unwrap();

    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story.clone(), "1_backlog".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    let after = fs::read_to_string(&story).unwrap();
    assert!(
        !after.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 1_backlog"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_clears_merge_failure_when_moving_to_done() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "5_done").join("52_story_done.md");
    fs::write(
        &story,
        "---\nname: Done Story\nmerge_failure: \"stale error\"\n---\n# Story\n",
    )
    .unwrap();

    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story.clone(), "5_done".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    let after = fs::read_to_string(&story).unwrap();
    assert!(
        !after.contains("merge_failure"),
        "merge_failure should be stripped when story lands in 5_done"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_preserves_merge_failure_when_in_merge_stage() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "4_merge").join("53_story_merging.md");
    fs::write(
        &story,
        "---\nname: Merging Story\nmerge_failure: \"conflicts\"\n---\n# Story\n",
    )
    .unwrap();

    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story.clone(), "4_merge".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    // While still in 4_merge the failure note must survive the flush.
    let after = fs::read_to_string(&story).unwrap();
    assert!(
        after.contains("merge_failure"),
        "merge_failure should be preserved when story is in 4_merge"
    );
}
|
||
|
|
|
||
|
|
#[test]
fn flush_pending_no_op_when_no_merge_failure() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());
    let story = make_stage_dir(repo.path(), "2_current").join("54_story_clean.md");
    let original = "---\nname: Clean Story\n---\n# Story\n";
    fs::write(&story, original).unwrap();

    let (tx, _rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story.clone(), "2_current".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    assert_eq!(
        fs::read_to_string(&story).unwrap(),
        original,
        "file without merge_failure should be unchanged"
    );
}
|
||
|
|
|
||
|
|
// ── flush_pending from_stage ─────────────────────────────────────────────
|
||
|
|
|
||
|
|
/// AC3: when a pending map contains both a deletion (source stage) and a
/// creation (dest stage) for the same item_id, the broadcast event should
/// have `from_stage` set to the source stage key.
#[test]
fn flush_pending_sets_from_stage_for_move_operations() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());

    // Destination exists (file moved here).
    let dest = make_stage_dir(repo.path(), "4_merge").join("42_story_foo.md");
    fs::write(&dest, "---\nname: test\n---\n").unwrap();

    // Source path does NOT exist (file was moved away).
    let source = make_stage_dir(repo.path(), "3_qa").join("42_story_foo.md");

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([
        (dest, "4_merge".to_string()), // addition
        (source, "3_qa".to_string()),  // deletion
    ]);

    flush_pending(&pending, repo.path(), &tx);

    match rx.try_recv().expect("expected event") {
        WatcherEvent::WorkItem {
            stage, from_stage, ..
        } => {
            assert_eq!(stage, "4_merge");
            assert_eq!(from_stage, Some("3_qa".to_string()));
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
|
||
|
|
|
||
|
|
/// AC3: when a pending map has only an addition (creation, not a move),
/// `from_stage` should be `None`.
#[test]
fn flush_pending_sets_from_stage_to_none_for_creations() {
    let repo = TempDir::new().unwrap();
    init_git_repo(repo.path());

    let story = make_stage_dir(repo.path(), "2_current").join("55_story_new.md");
    fs::write(&story, "---\nname: New Story\n---\n").unwrap();

    let (tx, mut rx) = tokio::sync::broadcast::channel(16);
    let pending = HashMap::from([(story, "2_current".to_string())]);

    flush_pending(&pending, repo.path(), &tx);

    match rx.try_recv().expect("expected event") {
        WatcherEvent::WorkItem { from_stage, .. } => {
            assert_eq!(from_stage, None, "creation should have no from_stage");
        }
        other => panic!("unexpected event: {other:?}"),
    }
}
|
||
|
|
|
||
|
|
// ── stage_for_path (additional edge cases) ────────────────────────────────
|
||
|
|
|
||
|
|
#[test]
fn stage_for_path_recognises_pipeline_dirs() {
    let work = PathBuf::from("/proj/.huskies/work");

    // Known pipeline dirs with .md files map to their stage key.
    for (rel, stage) in [
        ("2_current/42_story_foo.md", "2_current"),
        ("5_done/10_bug_bar.md", "5_done"),
        ("6_archived/10_bug_bar.md", "6_archived"),
    ] {
        assert_eq!(stage_for_path(&work.join(rel)), Some(stage.to_string()));
    }

    // Unrecognised dir, or wrong extension, yields no stage.
    assert_eq!(stage_for_path(&work.join("other/file.md")), None);
    assert_eq!(
        stage_for_path(&work.join("2_current/42_story_foo.txt")),
        None
    );
}
|
||
|
|
|
||
|
|
#[test]
fn stage_for_path_ignores_worktree_paths() {
    let wt = PathBuf::from("/proj/.huskies/worktrees");

    // Code changes inside a worktree must be ignored.
    assert_eq!(
        stage_for_path(&wt.join("42_story_foo/server/src/main.rs")),
        None,
    );

    // Even if a worktree happens to contain a path component that looks
    // like a pipeline stage, it must still be ignored.
    assert_eq!(
        stage_for_path(&wt.join("42_story_foo/.huskies/work/2_current/42_story_foo.md")),
        None,
    );

    // A path that only contains the word "worktrees" as part of a longer
    // segment (not an exact component) must NOT be filtered out.
    let near_miss = PathBuf::from("/proj/.huskies/work/2_current/not_worktrees_story.md");
    assert_eq!(stage_for_path(&near_miss), Some("2_current".to_string()));
}
|
||
|
|
|
||
|
|
#[test]
fn should_commit_stage_only_for_terminal_stages() {
    // Terminal stages — should commit.
    for terminal in ["1_backlog", "5_done", "6_archived"] {
        assert!(should_commit_stage(terminal));
    }
    // Intermediate stages (broadcast-only) and unknown stages — no commit.
    for non_terminal in ["2_current", "3_qa", "4_merge", "unknown"] {
        assert!(!should_commit_stage(non_terminal));
    }
}
|
||
|
|
|
||
|
|
#[test]
fn stage_metadata_returns_correct_actions() {
    // (stage, expected action, expected commit message)
    let cases = [
        ("2_current", "start", "huskies: start 42_story_foo"),
        ("5_done", "done", "huskies: done 42_story_foo"),
        ("6_archived", "accept", "huskies: accept 42_story_foo"),
    ];
    for (stage, expected_action, expected_msg) in cases {
        let (action, msg) = stage_metadata(stage, "42_story_foo").unwrap();
        assert_eq!(action, expected_action);
        assert_eq!(msg, expected_msg);
    }

    // Unknown stages produce no metadata.
    assert!(stage_metadata("unknown", "id").is_none());
}
|
||
|
|
|
||
|
|
#[test]
fn is_config_file_identifies_root_project_toml() {
    let root = PathBuf::from("/proj");
    let project_toml = root.join(".huskies").join("project.toml");
    assert!(is_config_file(&project_toml, &root));
}
|
||
|
|
|
||
|
|
#[test]
fn is_config_file_identifies_root_agents_toml() {
    let root = PathBuf::from("/proj");
    let agents_toml = root.join(".huskies").join("agents.toml");
    assert!(is_config_file(&agents_toml, &root));
}
|
||
|
|
|
||
|
|
#[test]
fn is_config_file_rejects_worktree_copies() {
    let root = PathBuf::from("/proj");
    // project.toml inside a worktree must NOT be treated as the root config.
    let inside_worktree =
        PathBuf::from("/proj/.huskies/worktrees/42_story_foo/.huskies/project.toml");
    assert!(!is_config_file(&inside_worktree, &root));
}
|
||
|
|
|
||
|
|
#[test]
fn is_config_file_rejects_other_files() {
    let root = PathBuf::from("/proj");
    // Random files must not match.
    for candidate in [
        "/proj/.huskies/work/2_current/42_story_foo.md",
        "/proj/.huskies/README.md",
    ] {
        assert!(!is_config_file(&PathBuf::from(candidate), &root));
    }
}
|
||
|
|
|
||
|
|
#[test]
fn is_config_file_rejects_wrong_root() {
    let root = PathBuf::from("/proj");
    let foreign = PathBuf::from("/other/.huskies/project.toml");
    assert!(!is_config_file(&foreign, &root));
}
|
||
|
|
|
||
|
|
// ── sweep_done_to_archived (CRDT-based) ─────────────────────────────────
|
||
|
|
//
|
||
|
|
// The sweep function reads from `read_all_typed()` and checks
|
||
|
|
// `Stage::Done { merged_at, .. }`. Items created via
|
||
|
|
// `write_item_with_content("5_done")` project `merged_at = Utc::now()`,
|
||
|
|
// so we test with Duration::ZERO to sweep immediately and with a long
|
||
|
|
// retention to verify items are kept. No filesystem access is involved.
|
||
|
|
|
||
|
|
#[test]
fn sweep_moves_old_items_to_archived() {
    crate::db::ensure_content_store();
    crate::db::write_item_with_content("9880_story_sweep_old", "5_done", "---\nname: old\n---\n");

    // With ZERO retention, any Done item should be swept.
    sweep_done_to_archived(Duration::ZERO);

    // Verify the item was moved to 6_archived in the CRDT.
    let archived = crate::pipeline_state::read_all_typed()
        .into_iter()
        .find(|i| i.story_id.0 == "9880_story_sweep_old")
        .is_some_and(|i| matches!(i.stage, crate::pipeline_state::Stage::Archived { .. }));
    assert!(archived, "item should be archived after sweep");
}
|
||
|
|
|
||
|
|
#[test]
fn sweep_keeps_recent_items_in_done() {
    crate::db::ensure_content_store();
    crate::db::write_item_with_content("9881_story_sweep_new", "5_done", "---\nname: new\n---\n");

    // With a very long retention, the item (merged_at ≈ now) should stay.
    sweep_done_to_archived(Duration::from_secs(999_999));

    let still_done = crate::pipeline_state::read_all_typed()
        .into_iter()
        .find(|i| i.story_id.0 == "9881_story_sweep_new")
        .is_some_and(|i| matches!(i.stage, crate::pipeline_state::Stage::Done { .. }));
    assert!(still_done, "item should remain in Done with long retention");
}
|
||
|
|
|
||
|
|
#[test]
fn sweep_respects_custom_retention() {
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9882_story_sweep_custom",
        "5_done",
        "---\nname: custom\n---\n",
    );

    // With ZERO retention, sweep should promote.
    sweep_done_to_archived(Duration::ZERO);

    let archived = crate::pipeline_state::read_all_typed()
        .into_iter()
        .find(|i| i.story_id.0 == "9882_story_sweep_custom")
        .is_some_and(|i| matches!(i.stage, crate::pipeline_state::Stage::Archived { .. }));
    assert!(archived, "item should be archived with zero retention");
}
|
||
|
|
|
||
|
|
/// Prove that the sweep reads `merged_at` from the CRDT (not `Utc::now()`).
///
/// This test sets `merged_at` to 10 seconds in the past and uses a 5-second
/// retention. If the sweep were still using `Utc::now()` as the start time
/// (the original bug), the elapsed time would be ~0 and the item would NOT
/// be swept. With the fix, the item is swept because 10s > 5s retention.
#[test]
fn sweep_uses_crdt_merged_at_not_utc_now() {
    crate::db::ensure_content_store();

    let past_ts = (chrono::Utc::now() - chrono::Duration::seconds(10)).timestamp() as f64;

    // Write item in 5_done with an explicit past merged_at timestamp.
    crate::crdt_state::write_item(
        "9883_story_sweep_merged_at",
        "5_done",
        Some("merged_at test"),
        None,
        None,
        None,
        None,
        None,
        None,
        Some(past_ts),
    );

    // 5-second retention: item is 10s old → should be swept.
    sweep_done_to_archived(Duration::from_secs(5));

    let archived = crate::pipeline_state::read_all_typed()
        .into_iter()
        .find(|i| i.story_id.0 == "9883_story_sweep_merged_at")
        .is_some_and(|i| matches!(i.stage, crate::pipeline_state::Stage::Archived { .. }));
    assert!(
        archived,
        "item with merged_at 10s ago should be archived with 5s retention"
    );
}
|
||
|
|
|
||
|
|
/// Prove that an item with merged_at NEWER than done_retention is NOT swept.
#[test]
fn sweep_keeps_item_newer_than_retention() {
    crate::db::ensure_content_store();

    let just_now = (chrono::Utc::now() - chrono::Duration::seconds(1)).timestamp() as f64;

    crate::crdt_state::write_item(
        "9884_story_sweep_recent",
        "5_done",
        Some("recent merged_at test"),
        None,
        None,
        None,
        None,
        None,
        None,
        Some(just_now),
    );

    // 1-hour retention: item is only 1s old → should NOT be swept.
    sweep_done_to_archived(Duration::from_secs(3600));

    let still_done = crate::pipeline_state::read_all_typed()
        .into_iter()
        .find(|i| i.story_id.0 == "9884_story_sweep_recent")
        .is_some_and(|i| matches!(i.stage, crate::pipeline_state::Stage::Done { .. }));
    assert!(
        still_done,
        "item with merged_at 1s ago should stay in Done with 1-hour retention"
    );
}
|