huskies: merge 530_story_eliminate_filesystem_markdown_shadows_entirely_crdt_db_is_the_only_story_store

This commit is contained in:
dave
2026-04-10 14:56:13 +00:00
parent 1dd675796b
commit 11d19d8902
26 changed files with 966 additions and 1668 deletions
+62 -127
View File
@@ -1,8 +1,7 @@
use crate::io::story_metadata::parse_front_matter;
use std::fs;
use std::path::Path;
use super::{next_item_number, slugify_name, write_story_content_with_fs};
use super::{next_item_number, slugify_name, write_story_content};
/// Create a bug file and store it in the database.
///
@@ -52,14 +51,8 @@ pub fn create_bug_file(
content.push_str("- [ ] Bug is fixed and verified\n");
}
// Write to database content store.
write_story_content_with_fs(root, &bug_id, "1_backlog", &content);
// Also write to filesystem for backwards compatibility.
let bugs_dir = root.join(".huskies").join("work").join("1_backlog");
if let Ok(()) = fs::create_dir_all(&bugs_dir) {
let _ = fs::write(bugs_dir.join(format!("{bug_id}.md")), &content);
}
// Write to database content store and CRDT.
write_story_content(root, &bug_id, "1_backlog", &content);
Ok(bug_id)
}
@@ -105,14 +98,8 @@ pub fn create_spike_file(
content.push_str("## Recommendation\n\n");
content.push_str("- TBD\n");
// Write to database content store.
write_story_content_with_fs(root, &spike_id, "1_backlog", &content);
// Also write to filesystem for backwards compatibility.
let backlog_dir = root.join(".huskies").join("work").join("1_backlog");
if let Ok(()) = fs::create_dir_all(&backlog_dir) {
let _ = fs::write(backlog_dir.join(format!("{spike_id}.md")), &content);
}
// Write to database content store and CRDT.
write_story_content(root, &spike_id, "1_backlog", &content);
Ok(spike_id)
}
@@ -162,14 +149,8 @@ pub fn create_refactor_file(
content.push_str("## Out of Scope\n\n");
content.push_str("- TBD\n");
// Write to database content store.
write_story_content_with_fs(root, &refactor_id, "1_backlog", &content);
// Also write to filesystem for backwards compatibility.
let backlog_dir = root.join(".huskies").join("work").join("1_backlog");
if let Ok(()) = fs::create_dir_all(&backlog_dir) {
let _ = fs::write(backlog_dir.join(format!("{refactor_id}.md")), &content);
}
// Write to database content store and CRDT.
write_story_content(root, &refactor_id, "1_backlog", &content);
Ok(refactor_id)
}
@@ -195,14 +176,12 @@ fn extract_bug_name_from_content(content: &str) -> Option<String> {
None
}
/// List all open bugs from CRDT + content store, falling back to filesystem.
/// List all open bugs from CRDT + content store.
///
/// Returns a sorted list of `(bug_id, name)` pairs.
pub fn list_bug_files(root: &Path) -> Result<Vec<(String, String)>, String> {
pub fn list_bug_files(_root: &Path) -> Result<Vec<(String, String)>, String> {
let mut bugs = Vec::new();
let mut seen = std::collections::HashSet::new();
// First: typed projection items in backlog that are bugs.
for item in crate::pipeline_state::read_all_typed() {
if !matches!(item.stage, crate::pipeline_state::Stage::Backlog) || !is_bug_item(&item.story_id.0) {
continue;
@@ -214,41 +193,9 @@ pub fn list_bug_files(root: &Path) -> Result<Vec<(String, String)>, String> {
.and_then(|c| extract_bug_name_from_content(&c))
})
.unwrap_or_else(|| sid.clone());
seen.insert(sid.clone());
bugs.push((sid, name));
}
// Then: filesystem fallback.
let backlog_dir = root.join(".huskies").join("work").join("1_backlog");
if backlog_dir.exists() {
for entry in
fs::read_dir(&backlog_dir).map_err(|e| format!("Failed to read backlog directory: {e}"))?
{
let entry = entry.map_err(|e| format!("Failed to read entry: {e}"))?;
let path = entry.path();
if path.is_dir() || path.extension().and_then(|ext| ext.to_str()) != Some("md") {
continue;
}
let stem = path
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| "Invalid file name.".to_string())?;
if !is_bug_item(stem) || seen.contains(stem) {
continue;
}
let bug_id = stem.to_string();
let name = fs::read_to_string(&path)
.ok()
.and_then(|c| extract_bug_name_from_content(&c))
.unwrap_or_else(|| bug_id.clone());
bugs.push((bug_id, name));
}
}
bugs.sort_by(|a, b| a.0.cmp(&b.0));
Ok(bugs)
}
@@ -259,14 +206,12 @@ fn is_refactor_item(stem: &str) -> bool {
after_num.starts_with("_refactor_")
}
/// List all open refactors from CRDT + content store, falling back to filesystem.
/// List all open refactors from CRDT + content store.
///
/// Returns a sorted list of `(refactor_id, name)` pairs.
pub fn list_refactor_files(root: &Path) -> Result<Vec<(String, String)>, String> {
pub fn list_refactor_files(_root: &Path) -> Result<Vec<(String, String)>, String> {
let mut refactors = Vec::new();
let mut seen = std::collections::HashSet::new();
// First: typed projection items.
for item in crate::pipeline_state::read_all_typed() {
if !matches!(item.stage, crate::pipeline_state::Stage::Backlog) || !is_refactor_item(&item.story_id.0) {
continue;
@@ -279,42 +224,9 @@ pub fn list_refactor_files(root: &Path) -> Result<Vec<(String, String)>, String>
.and_then(|m| m.name)
})
.unwrap_or_else(|| sid.clone());
seen.insert(sid.clone());
refactors.push((sid, name));
}
// Then: filesystem fallback.
let backlog_dir = root.join(".huskies").join("work").join("1_backlog");
if backlog_dir.exists() {
for entry in fs::read_dir(&backlog_dir)
.map_err(|e| format!("Failed to read backlog directory: {e}"))?
{
let entry = entry.map_err(|e| format!("Failed to read entry: {e}"))?;
let path = entry.path();
if path.is_dir() || path.extension().and_then(|ext| ext.to_str()) != Some("md") {
continue;
}
let stem = path
.file_stem()
.and_then(|s| s.to_str())
.ok_or_else(|| "Invalid file name.".to_string())?;
if !is_refactor_item(stem) || seen.contains(stem) {
continue;
}
let refactor_id = stem.to_string();
let name = fs::read_to_string(&path)
.ok()
.and_then(|contents| parse_front_matter(&contents).ok())
.and_then(|m| m.name)
.unwrap_or_else(|| refactor_id.clone());
refactors.push((refactor_id, name));
}
}
refactors.sort_by(|a, b| a.0.cmp(&b.0));
Ok(refactors)
}
@@ -322,6 +234,7 @@ pub fn list_refactor_files(root: &Path) -> Result<Vec<(String, String)>, String>
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
fn setup_git_repo(root: &std::path::Path) {
std::process::Command::new("git")
@@ -376,42 +289,63 @@ mod tests {
}
#[test]
fn list_bug_files_empty_when_no_bugs_dir() {
fn list_bug_files_no_crash_on_missing_dir() {
// list_bug_files now reads from the global CRDT, not the filesystem.
// Verify it does not panic when called with a non-existent project root.
let tmp = tempfile::tempdir().unwrap();
let result = list_bug_files(tmp.path()).unwrap();
assert!(result.is_empty());
let result = list_bug_files(tmp.path());
assert!(result.is_ok());
}
#[test]
fn list_bug_files_excludes_archive_subdir() {
let tmp = tempfile::tempdir().unwrap();
let backlog_dir = tmp.path().join(".huskies/work/1_backlog");
let archived_dir = tmp.path().join(".huskies/work/5_done");
fs::create_dir_all(&backlog_dir).unwrap();
fs::create_dir_all(&archived_dir).unwrap();
fs::write(backlog_dir.join("1_bug_open.md"), "# Bug 1: Open Bug\n").unwrap();
fs::write(archived_dir.join("2_bug_closed.md"), "# Bug 2: Closed Bug\n").unwrap();
crate::db::ensure_content_store();
// Bug in backlog (should appear).
crate::db::write_item_with_content(
"7001_bug_open",
"1_backlog",
"---\nname: Open Bug\n---\n# Bug 7001: Open Bug\n",
);
// Bug in done (should NOT appear — list_bug_files only returns Backlog).
crate::db::write_item_with_content(
"7002_bug_closed",
"5_done",
"---\nname: Closed Bug\n---\n# Bug 7002: Closed Bug\n",
);
let result = list_bug_files(tmp.path()).unwrap();
assert_eq!(result.len(), 1);
assert_eq!(result[0].0, "1_bug_open");
assert_eq!(result[0].1, "Open Bug");
assert!(result.iter().any(|(id, name)| id == "7001_bug_open" && name == "Open Bug"));
assert!(!result.iter().any(|(id, _)| id == "7002_bug_closed"));
}
#[test]
fn list_bug_files_sorted_by_id() {
let tmp = tempfile::tempdir().unwrap();
let backlog_dir = tmp.path().join(".huskies/work/1_backlog");
fs::create_dir_all(&backlog_dir).unwrap();
fs::write(backlog_dir.join("3_bug_third.md"), "# Bug 3: Third\n").unwrap();
fs::write(backlog_dir.join("1_bug_first.md"), "# Bug 1: First\n").unwrap();
fs::write(backlog_dir.join("2_bug_second.md"), "# Bug 2: Second\n").unwrap();
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"7013_bug_third",
"1_backlog",
"---\nname: Third\n---\n# Bug 7013: Third\n",
);
crate::db::write_item_with_content(
"7011_bug_first",
"1_backlog",
"---\nname: First\n---\n# Bug 7011: First\n",
);
crate::db::write_item_with_content(
"7012_bug_second",
"1_backlog",
"---\nname: Second\n---\n# Bug 7012: Second\n",
);
let result = list_bug_files(tmp.path()).unwrap();
assert_eq!(result.len(), 3);
assert_eq!(result[0].0, "1_bug_first");
assert_eq!(result[1].0, "2_bug_second");
assert_eq!(result[2].0, "3_bug_third");
// Find positions of our three bugs in the sorted result.
let pos_first = result.iter().position(|(id, _)| id == "7011_bug_first").unwrap();
let pos_second = result.iter().position(|(id, _)| id == "7012_bug_second").unwrap();
let pos_third = result.iter().position(|(id, _)| id == "7013_bug_third").unwrap();
assert!(pos_first < pos_second);
assert!(pos_second < pos_third);
}
#[test]
@@ -593,16 +527,17 @@ mod tests {
#[test]
fn create_spike_file_increments_from_existing_items() {
let tmp = tempfile::tempdir().unwrap();
let backlog = tmp.path().join(".huskies/work/1_backlog");
fs::create_dir_all(&backlog).unwrap();
fs::write(backlog.join("5_story_existing.md"), "").unwrap();
crate::db::ensure_content_store();
// Seed a high-numbered item into the CRDT so next_item_number goes beyond it.
crate::db::write_item_with_content(
"7050_story_existing",
"1_backlog",
"---\nname: Existing\n---\n",
);
let spike_id = create_spike_file(tmp.path(), "My Spike", None).unwrap();
// The spike number must be > 5 (the highest filesystem item) but the global
// content store may have higher-numbered items from parallel tests, so we
// only assert the suffix and that the prefix is a number >= 6.
assert!(spike_id.ends_with("_spike_my_spike"), "expected ID to end with _spike_my_spike, got: {spike_id}");
let num: u32 = spike_id.chars().take_while(|c| c.is_ascii_digit()).collect::<String>().parse().unwrap();
assert!(num >= 6, "expected spike number >= 6, got: {spike_id}");
assert!(num >= 7051, "expected spike number >= 7051, got: {spike_id}");
}
}
+270 -470
View File
@@ -18,6 +18,7 @@ use crate::http::context::AppContext;
use crate::io::story_metadata::parse_front_matter;
use serde::Serialize;
use std::collections::HashMap;
use std::path::Path;
/// Agent assignment embedded in a pipeline stage item.
@@ -74,137 +75,91 @@ pub struct PipelineState {
///
/// Reads from the CRDT document and enriches with content from the
/// in-memory content store. Agent assignments are overlaid from the
/// in-memory agent pool. Falls back to filesystem for items not yet
/// migrated to the database.
/// in-memory agent pool.
pub fn load_pipeline_state(ctx: &AppContext) -> Result<PipelineState, String> {
let agent_map = build_active_agent_map(ctx);
// Try CRDT-first read via the typed projection layer.
use crate::pipeline_state::Stage;
let typed_items = crate::pipeline_state::read_all_typed();
if !typed_items.is_empty() {
use crate::pipeline_state::Stage;
let mut state = PipelineState {
backlog: Vec::new(),
current: Vec::new(),
qa: Vec::new(),
merge: Vec::new(),
done: Vec::new(),
let mut state = PipelineState {
backlog: Vec::new(),
current: Vec::new(),
qa: Vec::new(),
merge: Vec::new(),
done: Vec::new(),
};
for item in typed_items {
let sid = &item.story_id.0;
let agent = agent_map.get(sid).cloned();
// Enrich with content-derived metadata (merge_failure, review_hold, qa).
let (merge_failure, review_hold, qa) = crate::db::read_content(sid)
.and_then(|c| parse_front_matter(&c).ok())
.map(|meta| {
(
meta.merge_failure,
meta.review_hold,
meta.qa.map(|m| m.as_str().to_string()),
)
})
.unwrap_or((None, None, None));
let story = UpcomingStory {
story_id: sid.clone(),
name: if item.name.is_empty() {
None
} else {
Some(item.name.clone())
},
error: None,
merge_failure,
agent,
review_hold,
qa,
retry_count: if item.retry_count > 0 {
Some(item.retry_count)
} else {
None
},
blocked: if item.stage.is_blocked() {
Some(true)
} else {
None
},
depends_on: if item.depends_on.is_empty() {
None
} else {
Some(
item.depends_on
.iter()
.filter_map(|d| d.0.split('_').next()?.parse::<u32>().ok())
.collect(),
)
},
};
for item in typed_items {
let sid = &item.story_id.0;
let agent = agent_map.get(sid).cloned();
// Enrich with content-derived metadata (merge_failure, review_hold, qa).
let (merge_failure, review_hold, qa) = crate::db::read_content(sid)
.and_then(|c| parse_front_matter(&c).ok())
.map(|meta| {
(
meta.merge_failure,
meta.review_hold,
meta.qa.map(|m| m.as_str().to_string()),
)
})
.unwrap_or((None, None, None));
let story = UpcomingStory {
story_id: sid.clone(),
name: if item.name.is_empty() {
None
} else {
Some(item.name.clone())
},
error: None,
merge_failure,
agent,
review_hold,
qa,
retry_count: if item.retry_count > 0 {
Some(item.retry_count)
} else {
None
},
blocked: if item.stage.is_blocked() {
Some(true)
} else {
None
},
depends_on: if item.depends_on.is_empty() {
None
} else {
Some(
item.depends_on
.iter()
.filter_map(|d| d.0.split('_').next()?.parse::<u32>().ok())
.collect(),
)
},
};
match &item.stage {
Stage::Backlog => state.backlog.push(story),
Stage::Coding => state.current.push(story),
Stage::Qa => state.qa.push(story),
Stage::Merge { .. } => state.merge.push(story),
Stage::Done { .. } => state.done.push(story),
Stage::Archived { .. } => {} // skip archived
}
match &item.stage {
Stage::Backlog => state.backlog.push(story),
Stage::Coding => state.current.push(story),
Stage::Qa => state.qa.push(story),
Stage::Merge { .. } => state.merge.push(story),
Stage::Done { .. } => state.done.push(story),
Stage::Archived { .. } => {} // skip archived
}
// Sort each stage for deterministic output.
state.backlog.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.current.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.qa.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.merge.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.done.sort_by(|a, b| a.story_id.cmp(&b.story_id));
// Merge in any filesystem-only items not yet in the CRDT (migration fallback).
merge_filesystem_items(ctx, &mut state, &agent_map)?;
return Ok(state);
}
// Fallback: filesystem-only read (CRDT not initialised).
Ok(PipelineState {
backlog: load_stage_items_from_fs(ctx, "1_backlog", &HashMap::new())?,
current: load_stage_items_from_fs(ctx, "2_current", &agent_map)?,
qa: load_stage_items_from_fs(ctx, "3_qa", &agent_map)?,
merge: load_stage_items_from_fs(ctx, "4_merge", &agent_map)?,
done: load_stage_items_from_fs(ctx, "5_done", &HashMap::new())?,
})
// Sort each stage for deterministic output.
state.backlog.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.current.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.qa.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.merge.sort_by(|a, b| a.story_id.cmp(&b.story_id));
state.done.sort_by(|a, b| a.story_id.cmp(&b.story_id));
Ok(state)
}
/// Merge filesystem items that are not already present in the CRDT state.
fn merge_filesystem_items(
ctx: &AppContext,
state: &mut PipelineState,
agent_map: &HashMap<String, AgentAssignment>,
) -> Result<(), String> {
let stages = [
("1_backlog", &mut state.backlog),
("2_current", &mut state.current),
("3_qa", &mut state.qa),
("4_merge", &mut state.merge),
("5_done", &mut state.done),
];
for (stage_dir, stage_vec) in stages {
let empty_map = HashMap::new();
let map = if stage_dir == "2_current" || stage_dir == "3_qa" || stage_dir == "4_merge" {
agent_map
} else {
&empty_map
};
let fs_items = load_stage_items_from_fs(ctx, stage_dir, map)?;
for fs_item in fs_items {
if !stage_vec.iter().any(|s| s.story_id == fs_item.story_id) {
stage_vec.push(fs_item);
}
}
stage_vec.sort_by(|a, b| a.story_id.cmp(&b.story_id));
}
Ok(())
}
/// Build a map from story_id → AgentAssignment for all pending/running agents.
fn build_active_agent_map(ctx: &AppContext) -> HashMap<String, AgentAssignment> {
@@ -240,141 +195,71 @@ fn build_active_agent_map(ctx: &AppContext) -> HashMap<String, AgentAssignment>
map
}
/// Load work items from filesystem (fallback for backwards compatibility).
fn load_stage_items_from_fs(
ctx: &AppContext,
stage_dir: &str,
agent_map: &HashMap<String, AgentAssignment>,
) -> Result<Vec<UpcomingStory>, String> {
let root = ctx.state.get_project_root()?;
let dir = root.join(".huskies").join("work").join(stage_dir);
let mut stories = Vec::new();
pub fn load_upcoming_stories(_ctx: &AppContext) -> Result<Vec<UpcomingStory>, String> {
use crate::pipeline_state::Stage;
if dir.exists() {
for entry in std::fs::read_dir(&dir)
.map_err(|e| format!("Failed to read {stage_dir} directory: {e}"))?
{
let entry = entry.map_err(|e| format!("Failed to read {stage_dir} entry: {e}"))?;
let path = entry.path();
if path.extension().and_then(|ext| ext.to_str()) != Some("md") {
continue;
}
let story_id = path
.file_stem()
.and_then(|stem| stem.to_str())
.ok_or_else(|| "Invalid story file name.".to_string())?
.to_string();
let contents = std::fs::read_to_string(&path)
.map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?;
let (name, error, merge_failure, review_hold, qa, retry_count, blocked, depends_on) = match parse_front_matter(&contents) {
Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string()), meta.retry_count, meta.blocked, meta.depends_on),
Err(e) => (None, Some(e.to_string()), None, None, None, None, None, None),
};
let agent = agent_map.get(&story_id).cloned();
stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa, retry_count, blocked, depends_on });
}
}
let typed_items = crate::pipeline_state::read_all_typed();
let mut stories: Vec<UpcomingStory> = typed_items
.into_iter()
.filter(|item| matches!(item.stage, Stage::Backlog))
.map(|item| UpcomingStory {
story_id: item.story_id.0,
name: if item.name.is_empty() {
None
} else {
Some(item.name)
},
error: None,
merge_failure: None,
agent: None,
review_hold: None,
qa: None,
retry_count: if item.retry_count > 0 {
Some(item.retry_count)
} else {
None
},
blocked: if item.stage.is_blocked() {
Some(true)
} else {
None
},
depends_on: if item.depends_on.is_empty() {
None
} else {
Some(
item.depends_on
.iter()
.filter_map(|d| d.0.split('_').next()?.parse::<u32>().ok())
.collect(),
)
},
})
.collect();
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));
Ok(stories)
}
pub fn load_upcoming_stories(ctx: &AppContext) -> Result<Vec<UpcomingStory>, String> {
// Try typed projection first.
let typed_items = crate::pipeline_state::read_all_typed();
if !typed_items.is_empty() {
use crate::pipeline_state::Stage;
let mut stories: Vec<UpcomingStory> = typed_items
.into_iter()
.filter(|item| matches!(item.stage, Stage::Backlog))
.map(|item| UpcomingStory {
story_id: item.story_id.0,
name: if item.name.is_empty() {
None
} else {
Some(item.name)
},
error: None,
merge_failure: None,
agent: None,
review_hold: None,
qa: None,
retry_count: if item.retry_count > 0 {
Some(item.retry_count)
} else {
None
},
blocked: if item.stage.is_blocked() {
Some(true)
} else {
None
},
depends_on: if item.depends_on.is_empty() {
None
} else {
Some(
item.depends_on
.iter()
.filter_map(|d| d.0.split('_').next()?.parse::<u32>().ok())
.collect(),
)
},
})
.collect();
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));
// Merge filesystem fallback.
let fs_stories = load_stage_items_from_fs(ctx, "1_backlog", &HashMap::new())?;
for fs_item in fs_stories {
if !stories.iter().any(|s| s.story_id == fs_item.story_id) {
stories.push(fs_item);
}
}
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));
return Ok(stories);
}
load_stage_items_from_fs(ctx, "1_backlog", &HashMap::new())
}
pub fn validate_story_dirs(
root: &std::path::Path,
_root: &std::path::Path,
) -> Result<Vec<StoryValidationResult>, String> {
use crate::pipeline_state::Stage;
let mut results = Vec::new();
// Validate from filesystem shadows under the given root.
// NOTE: We intentionally read the filesystem here (not the global CRDT
// singleton) so that tests can pass an isolated tempdir and get
// deterministic results. See bug 525.
let dirs_to_validate = vec![
root.join(".huskies").join("work").join("2_current"),
root.join(".huskies").join("work").join("1_backlog"),
];
for dir in &dirs_to_validate {
let subdir = dir.file_name().map(|n| n.to_string_lossy().into_owned()).unwrap_or_default();
if !dir.exists() {
let typed_items = crate::pipeline_state::read_all_typed();
for item in typed_items {
// Only validate backlog and current items (matching the old behaviour).
if !matches!(item.stage, Stage::Backlog | Stage::Coding) {
continue;
}
for entry in
std::fs::read_dir(dir).map_err(|e| format!("Failed to read {subdir} directory: {e}"))?
{
let entry = entry.map_err(|e| format!("Failed to read entry: {e}"))?;
let path = entry.path();
if path.extension().and_then(|ext| ext.to_str()) != Some("md") {
continue;
}
let story_id = path
.file_stem()
.and_then(|stem| stem.to_str())
.unwrap_or_default()
.to_string();
let story_id = item.story_id.0.clone();
let contents = std::fs::read_to_string(&path)
.map_err(|e| format!("Failed to read {}: {e}", path.display()))?;
match parse_front_matter(&contents) {
match crate::db::read_content(&story_id) {
Some(contents) => match parse_front_matter(&contents) {
Ok(meta) => {
let mut errors = Vec::new();
if meta.name.is_none() {
@@ -399,7 +284,12 @@ pub fn validate_story_dirs(
valid: false,
error: Some(e.to_string()),
}),
}
},
None => results.push(StoryValidationResult {
story_id,
valid: false,
error: Some("No content found in content store".to_string()),
}),
}
}
@@ -409,38 +299,17 @@ pub fn validate_story_dirs(
// ── Shared utilities used by submodules ──────────────────────────
/// Read story content from the database content store, falling back to
/// the filesystem if not yet migrated.
/// Read story content from the database content store.
///
/// Returns the story content or an error if not found.
pub(super) fn read_story_content(project_root: &Path, story_id: &str) -> Result<String, String> {
// Try content store first.
if let Some(content) = crate::db::read_content(story_id) {
return Ok(content);
}
// Filesystem fallback.
let path = find_story_file_on_disk(project_root, story_id)?;
let content = std::fs::read_to_string(&path)
.map_err(|e| format!("Failed to read story file: {e}"))?;
// Import into content store for future reads.
crate::db::write_content(story_id, &content);
Ok(content)
pub(super) fn read_story_content(_project_root: &Path, story_id: &str) -> Result<String, String> {
crate::db::read_content(story_id)
.ok_or_else(|| format!("Story '{story_id}' not found in any pipeline stage."))
}
/// Write story content to both DB and filesystem (backwards compat).
///
/// Use this variant when a project_root is available to keep the filesystem
/// in sync during the migration period.
pub(super) fn write_story_content_with_fs(project_root: &Path, story_id: &str, stage: &str, content: &str) {
/// Write story content to the DB content store and CRDT.
pub(super) fn write_story_content(_project_root: &Path, story_id: &str, stage: &str, content: &str) {
crate::db::write_item_with_content(story_id, stage, content);
// Also write to filesystem if the file exists.
if let Ok(path) = find_story_file_on_disk(project_root, story_id) {
let _ = std::fs::write(&path, content);
}
}
/// Determine what stage a story is in (from CRDT).
@@ -451,22 +320,6 @@ pub(super) fn story_stage(story_id: &str) -> Option<String> {
.map(|item| item.stage.dir_name().to_string())
}
/// Locate a work item file by searching all active pipeline stages on disk.
///
/// This is a filesystem fallback used during migration.
pub(crate) fn find_story_file_on_disk(project_root: &Path, story_id: &str) -> Result<std::path::PathBuf, String> {
let filename = format!("{story_id}.md");
let sk = project_root.join(".huskies").join("work");
for stage in &["2_current", "1_backlog", "3_qa", "4_merge", "5_done", "6_archived"] {
let path = sk.join(stage).join(&filename);
if path.exists() {
return Ok(path);
}
}
Err(format!(
"Story '{story_id}' not found in any pipeline stage."
))
}
/// Replace the content of a named `## Section` in a story file.
///
@@ -641,88 +494,54 @@ pub(super) fn slugify_name(name: &str) -> String {
result
}
/// Get the next available item number by scanning both the database and filesystem.
pub(super) fn next_item_number(root: &std::path::Path) -> Result<u32, String> {
let mut max_num = crate::db::next_item_number().saturating_sub(1); // db returns next, we want max
// Also scan filesystem for backwards compatibility.
let work_base = root.join(".huskies").join("work");
for subdir in &["1_backlog", "2_current", "3_qa", "4_merge", "5_done", "6_archived"] {
let dir = work_base.join(subdir);
if !dir.exists() {
continue;
}
for entry in
std::fs::read_dir(&dir).map_err(|e| format!("Failed to read {subdir} directory: {e}"))?
{
let entry = entry.map_err(|e| format!("Failed to read entry: {e}"))?;
let name = entry.file_name();
let name_str = name.to_string_lossy();
let num_str: String = name_str.chars().take_while(|c| c.is_ascii_digit()).collect();
if let Ok(n) = num_str.parse::<u32>()
&& n > max_num
{
max_num = n;
}
}
}
Ok(max_num + 1)
/// Get the next available item number from the database/CRDT.
pub(super) fn next_item_number(_root: &std::path::Path) -> Result<u32, String> {
Ok(crate::db::next_item_number())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
#[test]
fn load_pipeline_state_loads_all_stages() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path().to_path_buf();
crate::db::ensure_content_store();
for (stage, id) in &[
("1_backlog", "10_story_upcoming"),
("2_current", "20_story_current"),
("3_qa", "30_story_qa"),
("4_merge", "40_story_merge"),
("5_done", "50_story_done"),
("1_backlog", "9810_story_upcoming"),
("2_current", "9820_story_current"),
("3_qa", "9830_story_qa"),
("4_merge", "9840_story_merge"),
("5_done", "9850_story_done"),
] {
let dir = root.join(".huskies").join("work").join(stage);
fs::create_dir_all(&dir).unwrap();
fs::write(
dir.join(format!("{id}.md")),
format!("---\nname: {id}\n---\n"),
)
.unwrap();
crate::db::write_item_with_content(
id,
stage,
&format!("---\nname: {id}\n---\n"),
);
}
let ctx = crate::http::context::AppContext::new_test(root);
let state = load_pipeline_state(&ctx).unwrap();
assert_eq!(state.backlog.len(), 1);
assert_eq!(state.backlog[0].story_id, "10_story_upcoming");
assert_eq!(state.current.len(), 1);
assert_eq!(state.current[0].story_id, "20_story_current");
assert_eq!(state.qa.len(), 1);
assert_eq!(state.qa[0].story_id, "30_story_qa");
assert_eq!(state.merge.len(), 1);
assert_eq!(state.merge[0].story_id, "40_story_merge");
assert_eq!(state.done.len(), 1);
assert_eq!(state.done[0].story_id, "50_story_done");
assert!(state.backlog.iter().any(|s| s.story_id == "9810_story_upcoming"));
assert!(state.current.iter().any(|s| s.story_id == "9820_story_current"));
assert!(state.qa.iter().any(|s| s.story_id == "9830_story_qa"));
assert!(state.merge.iter().any(|s| s.story_id == "9840_story_merge"));
assert!(state.done.iter().any(|s| s.story_id == "9850_story_done"));
}
#[test]
fn load_upcoming_returns_empty_when_no_dir() {
// With CRDT there is no filesystem dependency. The function should
// succeed even without a .huskies directory. Other tests may have
// inserted items into the global CRDT, so we only assert no error.
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path().to_path_buf();
// No .huskies directory at all
let ctx = crate::http::context::AppContext::new_test(root);
let result = load_upcoming_stories(&ctx).unwrap();
assert!(result.is_empty());
let _result = load_upcoming_stories(&ctx).unwrap();
}
#[test]
@@ -730,21 +549,19 @@ mod tests {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path().to_path_buf();
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("10_story_test.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"9860_story_test",
"2_current",
"---\nname: Test Story\n---\n# Story\n",
)
.unwrap();
);
let ctx = crate::http::context::AppContext::new_test(root);
ctx.agents.inject_test_agent("10_story_test", "coder-1", crate::agents::AgentStatus::Running);
ctx.agents.inject_test_agent("9860_story_test", "coder-1", crate::agents::AgentStatus::Running);
let state = load_pipeline_state(&ctx).unwrap();
assert_eq!(state.current.len(), 1);
let item = &state.current[0];
let item = state.current.iter().find(|s| s.story_id == "9860_story_test").unwrap();
assert!(item.agent.is_some(), "running agent should appear on work item");
let agent = item.agent.as_ref().unwrap();
assert_eq!(agent.agent_name, "coder-1");
@@ -756,22 +573,21 @@ mod tests {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path().to_path_buf();
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("11_story_done.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"9861_story_done",
"2_current",
"---\nname: Done Story\n---\n# Story\n",
)
.unwrap();
);
let ctx = crate::http::context::AppContext::new_test(root);
ctx.agents.inject_test_agent("11_story_done", "coder-1", crate::agents::AgentStatus::Completed);
ctx.agents.inject_test_agent("9861_story_done", "coder-1", crate::agents::AgentStatus::Completed);
let state = load_pipeline_state(&ctx).unwrap();
assert_eq!(state.current.len(), 1);
let item = state.current.iter().find(|s| s.story_id == "9861_story_done").unwrap();
assert!(
state.current[0].agent.is_none(),
item.agent.is_none(),
"completed agent should not appear on work item"
);
}
@@ -781,150 +597,148 @@ mod tests {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path().to_path_buf();
let current = root.join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("12_story_pending.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"9862_story_pending",
"2_current",
"---\nname: Pending Story\n---\n# Story\n",
)
.unwrap();
);
let ctx = crate::http::context::AppContext::new_test(root);
ctx.agents.inject_test_agent("12_story_pending", "coder-1", crate::agents::AgentStatus::Pending);
ctx.agents.inject_test_agent("9862_story_pending", "coder-1", crate::agents::AgentStatus::Pending);
let state = load_pipeline_state(&ctx).unwrap();
assert_eq!(state.current.len(), 1);
let item = &state.current[0];
let item = state.current.iter().find(|s| s.story_id == "9862_story_pending").unwrap();
assert!(item.agent.is_some(), "pending agent should appear on work item");
assert_eq!(item.agent.as_ref().unwrap().status, "pending");
}
#[test]
fn pipeline_state_includes_depends_on() {
    // Items with a `depends_on` front-matter list should surface it on the
    // pipeline state; items without one should report `None`.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9863_story_dependent",
        "1_backlog",
        "---\nname: Dependent Story\ndepends_on: [10, 11]\n---\n",
    );
    crate::db::write_item_with_content(
        "9864_story_independent",
        "1_backlog",
        "---\nname: Independent Story\n---\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let ctx = crate::http::context::AppContext::new_test(tmp.path().to_path_buf());
    let state = load_pipeline_state(&ctx).unwrap();
    // Look items up by id rather than position/length: the CRDT store is
    // shared global state in tests, so other items may be present.
    let dependent = state.backlog.iter().find(|s| s.story_id == "9863_story_dependent").unwrap();
    assert_eq!(dependent.depends_on, Some(vec![10, 11]));
    let independent = state.backlog.iter().find(|s| s.story_id == "9864_story_independent").unwrap();
    assert_eq!(independent.depends_on, None);
}
#[test]
fn load_upcoming_parses_metadata() {
    // Upcoming stories written to the CRDT store should come back with their
    // front-matter `name` parsed.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9870_story_view_upcoming",
        "1_backlog",
        "---\nname: View Upcoming\n---\n# Story\n",
    );
    crate::db::write_item_with_content(
        "9871_story_worktree",
        "1_backlog",
        "---\nname: Worktree Orchestration\n---\n# Story\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let ctx = crate::http::context::AppContext::new_test(tmp.path().to_path_buf());
    let stories = load_upcoming_stories(&ctx).unwrap();
    // Find by id; the shared CRDT state in tests may contain other items.
    let s1 = stories.iter().find(|s| s.story_id == "9870_story_view_upcoming").unwrap();
    assert_eq!(s1.name.as_deref(), Some("View Upcoming"));
    let s2 = stories.iter().find(|s| s.story_id == "9871_story_worktree").unwrap();
    assert_eq!(s2.name.as_deref(), Some("Worktree Orchestration"));
}
#[test]
fn load_upcoming_skips_non_md_files() {
    // Non-.md files are a filesystem concept. With CRDT, only real items
    // appear. Just verify the CRDT item is returned.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9872_story_example",
        "1_backlog",
        "---\nname: A Story\n---\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let ctx = crate::http::context::AppContext::new_test(tmp.path().to_path_buf());
    let stories = load_upcoming_stories(&ctx).unwrap();
    assert!(stories.iter().any(|s| s.story_id == "9872_story_example"));
}
#[test]
fn validate_story_dirs_valid_files() {
    // Items with well-formed front matter validate cleanly.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9873_story_todos",
        "2_current",
        "---\nname: Show TODOs\n---\n# Story\n",
    );
    crate::db::write_item_with_content(
        "9874_story_front_matter",
        "1_backlog",
        "---\nname: Enforce Front Matter\n---\n# Story\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let results = validate_story_dirs(tmp.path()).unwrap();
    // Assert per-item (not on the whole result set): shared CRDT test state
    // may contain unrelated — possibly invalid — items.
    let r1 = results.iter().find(|r| r.story_id == "9873_story_todos").unwrap();
    assert!(r1.valid);
    let r2 = results.iter().find(|r| r.story_id == "9874_story_front_matter").unwrap();
    assert!(r2.valid);
}
#[test]
fn validate_story_dirs_missing_front_matter() {
    // An item with no front-matter block is reported invalid with a
    // specific error message.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9875_story_no_fm",
        "2_current",
        "# No front matter\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let results = validate_story_dirs(tmp.path()).unwrap();
    let r = results.iter().find(|r| r.story_id == "9875_story_no_fm").unwrap();
    assert!(!r.valid);
    assert_eq!(r.error.as_deref(), Some("Missing front matter"));
}
#[test]
fn validate_story_dirs_missing_required_fields() {
    // Front matter exists but has no `name` field: invalid, with an error
    // naming the missing field.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9876_story_no_name",
        "2_current",
        "---\n---\n# Story\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    let results = validate_story_dirs(tmp.path()).unwrap();
    let r = results.iter().find(|r| r.story_id == "9876_story_no_name").unwrap();
    assert!(!r.valid);
    let err = r.error.as_deref().unwrap();
    assert!(err.contains("Missing 'name' field"));
}
#[test]
fn validate_story_dirs_empty_when_no_dirs() {
    // With CRDT there's always global state; this test just ensures no panic.
    let tmp = tempfile::tempdir().unwrap();
    let _results = validate_story_dirs(tmp.path());
}
// --- slugify_name tests ---

// --- next_item_number tests ---
#[test]
fn next_item_number_returns_at_least_1() {
    let tmp = tempfile::tempdir().unwrap();
    // May be higher due to shared global CRDT state in tests.
    assert!(next_item_number(tmp.path()).unwrap() >= 1);
}
#[test]
fn next_item_number_increments_beyond_existing() {
    // After writing item 9877 into the CRDT store, the next item number
    // must be past it.
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "9877_story_foo",
        "1_backlog",
        "---\nname: Foo\n---\n",
    );
    let tmp = tempfile::tempdir().unwrap();
    // >= (not ==): shared global CRDT state in tests may hold higher ids.
    assert!(next_item_number(tmp.path()).unwrap() >= 9878);
}
// --- read_story_content tests ---
#[test]
fn read_story_content_from_content_store() {
    // Content written to the db content store is read back verbatim;
    // no filesystem copy is involved.
    crate::db::ensure_content_store();
    let content = "---\nname: Test\n---\n# Story\n";
    crate::db::write_content("9878_story_read_test", content);
    let tmp = tempfile::tempdir().unwrap();
    let result = read_story_content(tmp.path(), "9878_story_read_test").unwrap();
    assert_eq!(result, content);
}
#[test]
fn read_story_content_not_found_returns_error() {
    // Unknown story ids produce an error mentioning "not found".
    let tmp = tempfile::tempdir().unwrap();
    let result = read_story_content(tmp.path(), "99999_missing");
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("not found"));
}
+9 -15
View File
@@ -3,7 +3,7 @@ use serde_json::Value;
use std::collections::HashMap;
use std::path::Path;
use super::{create_section_content, next_item_number, read_story_content, replace_section_content, slugify_name, story_stage, write_story_content_with_fs};
use super::{create_section_content, next_item_number, read_story_content, replace_section_content, slugify_name, story_stage, write_story_content};
/// Shared create-story logic used by both the OpenApi and MCP handlers.
///
@@ -66,14 +66,8 @@ pub fn create_story_file(
content.push_str("## Out of Scope\n\n");
content.push_str("- TBD\n");
// Write to database content store.
write_story_content_with_fs(root, &story_id, "1_backlog", &content);
// Also write to filesystem for backwards compatibility during migration.
let backlog_dir = root.join(".huskies").join("work").join("1_backlog");
if let Ok(()) = std::fs::create_dir_all(&backlog_dir) {
let _ = std::fs::write(backlog_dir.join(format!("{story_id}.md")), &content);
}
// Write to database content store and CRDT.
write_story_content(root, &story_id, "1_backlog", &content);
Ok(story_id)
}
@@ -123,7 +117,7 @@ pub fn check_criterion_in_file(
// Write back to content store.
let stage = story_stage(story_id).unwrap_or_else(|| "2_current".to_string());
write_story_content_with_fs(project_root, story_id, &stage, &new_str);
write_story_content(project_root, story_id, &stage, &new_str);
Ok(())
}
@@ -177,7 +171,7 @@ pub fn add_criterion_to_file(
// Write back to content store.
let stage = story_stage(story_id).unwrap_or_else(|| "2_current".to_string());
write_story_content_with_fs(project_root, story_id, &stage, &new_str);
write_story_content(project_root, story_id, &stage, &new_str);
Ok(())
}
@@ -263,7 +257,7 @@ pub fn update_story_in_file(
// Write back to content store.
let stage = story_stage(story_id).unwrap_or_else(|| "2_current".to_string());
write_story_content_with_fs(project_root, story_id, &stage, &contents);
write_story_content(project_root, story_id, &stage, &contents);
Ok(())
}
@@ -732,13 +726,13 @@ mod tests {
#[test]
fn update_story_native_integer_written_unquoted() {
let tmp = tempfile::tempdir().unwrap();
setup_story_in_fs(tmp.path(), "33_test", "---\nname: T\n---\n\nNo sections.\n");
setup_story_in_fs(tmp.path(), "33b_test", "---\nname: T\n---\n\nNo sections.\n");
let mut fields = HashMap::new();
fields.insert("retry_count".to_string(), serde_json::json!(3));
update_story_in_file(tmp.path(), "33_test", None, None, Some(&fields)).unwrap();
update_story_in_file(tmp.path(), "33b_test", None, None, Some(&fields)).unwrap();
let result = read_story_content(tmp.path(), "33_test").unwrap();
let result = read_story_content(tmp.path(), "33b_test").unwrap();
assert!(result.contains("retry_count: 3"), "native integer should be unquoted: {result}");
assert!(!result.contains("retry_count: \"3\""), "must not be quoted: {result}");
}
+45 -63
View File
@@ -2,7 +2,7 @@ use crate::io::story_metadata::set_front_matter_field;
use crate::workflow::{StoryTestResults, TestCaseResult, TestStatus};
use std::path::Path;
use super::{read_story_content, replace_or_append_section, story_stage, write_story_content_with_fs};
use super::{read_story_content, replace_or_append_section, story_stage, write_story_content};
const TEST_RESULTS_MARKER: &str = "<!-- huskies-test-results:";
@@ -24,14 +24,9 @@ pub fn write_test_results_to_story_file(
let section = build_test_results_section(&json, results);
let new_contents = replace_or_append_section(&contents, "## Test Results", &section);
// Write back to content store.
// Write back to content store and CRDT.
let stage = story_stage(story_id).unwrap_or_else(|| "2_current".to_string());
write_story_content_with_fs(project_root, story_id, &stage, &new_contents);
// Also write to filesystem if the file exists (backwards compat).
if let Ok(path) = super::find_story_file_on_disk(project_root, story_id) {
let _ = std::fs::write(&path, &new_contents);
}
write_story_content(project_root, story_id, &stage, &new_contents);
Ok(())
}
@@ -63,12 +58,7 @@ pub fn write_coverage_baseline_to_story_file(
let updated = set_front_matter_field(&contents, "coverage_baseline", &format!("{coverage_pct:.1}%"));
let stage = story_stage(story_id).unwrap_or_else(|| "2_current".to_string());
write_story_content_with_fs(project_root, story_id, &stage, &updated);
// Also update filesystem if the file exists (backwards compat).
if let Ok(path) = super::find_story_file_on_disk(project_root, story_id) {
let _ = std::fs::write(&path, &updated);
}
write_story_content(project_root, story_id, &stage, &updated);
Ok(())
}
@@ -149,7 +139,6 @@ fn parse_test_results_from_contents(contents: &str) -> Option<StoryTestResults>
mod tests {
use super::*;
use crate::workflow::{StoryTestResults, TestCaseResult, TestStatus};
use std::fs;
fn make_results() -> StoryTestResults {
StoryTestResults {
@@ -176,18 +165,17 @@ mod tests {
#[test]
fn write_and_read_test_results_roundtrip() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("1_story_test.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"8001_story_test",
"2_current",
"---\nname: Test\n---\n# Story\n",
)
.unwrap();
);
let results = make_results();
write_test_results_to_story_file(tmp.path(), "1_story_test", &results).unwrap();
write_test_results_to_story_file(tmp.path(), "8001_story_test", &results).unwrap();
let read_back = read_test_results_from_story_file(tmp.path(), "1_story_test")
let read_back = read_test_results_from_story_file(tmp.path(), "8001_story_test")
.expect("should read back results");
assert_eq!(read_back.unit.len(), 2);
assert_eq!(read_back.integration.len(), 1);
@@ -202,19 +190,17 @@ mod tests {
#[test]
fn write_test_results_creates_readable_section() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
let story_path = current.join("2_story_check.md");
fs::write(
&story_path,
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"8002_story_check",
"2_current",
"---\nname: Check\n---\n# Story\n\n## Acceptance Criteria\n\n- [ ] AC1\n",
)
.unwrap();
);
let results = make_results();
write_test_results_to_story_file(tmp.path(), "2_story_check", &results).unwrap();
write_test_results_to_story_file(tmp.path(), "8002_story_check", &results).unwrap();
let contents = read_story_content(tmp.path(), "2_story_check").unwrap();
let contents = read_story_content(tmp.path(), "8002_story_check").unwrap();
assert!(contents.contains("## Test Results"));
assert!(contents.contains("✅ unit-pass"));
assert!(contents.contains("❌ unit-fail"));
@@ -226,18 +212,17 @@ mod tests {
#[test]
fn write_test_results_overwrites_existing_section() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("3_story_overwrite.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"8003_story_overwrite",
"2_current",
"---\nname: Overwrite\n---\n# Story\n\n## Test Results\n\n<!-- huskies-test-results: {} -->\n\n### Unit Tests (0 passed, 0 failed)\n\n*No unit tests recorded.*\n",
)
.unwrap();
);
let results = make_results();
write_test_results_to_story_file(tmp.path(), "3_story_overwrite", &results).unwrap();
write_test_results_to_story_file(tmp.path(), "8003_story_overwrite", &results).unwrap();
let contents = read_story_content(tmp.path(), "3_story_overwrite").unwrap();
let contents = read_story_content(tmp.path(), "8003_story_overwrite").unwrap();
assert!(contents.contains("✅ unit-pass"));
let count = contents.matches("## Test Results").count();
assert_eq!(count, 1, "should have exactly one ## Test Results section");
@@ -246,15 +231,14 @@ mod tests {
#[test]
fn read_test_results_returns_none_when_no_section() {
    // A story with no "## Test Results" section yields None.
    let tmp = tempfile::tempdir().unwrap();
    crate::db::ensure_content_store();
    crate::db::write_item_with_content(
        "8004_story_empty",
        "2_current",
        "---\nname: Empty\n---\n# Story\n",
    );
    let result = read_test_results_from_story_file(tmp.path(), "8004_story_empty");
    assert!(result.is_none());
}
@@ -268,13 +252,12 @@ mod tests {
#[test]
fn write_test_results_finds_story_in_any_stage() {
let tmp = tempfile::tempdir().unwrap();
let qa_dir = tmp.path().join(".huskies/work/3_qa");
fs::create_dir_all(&qa_dir).unwrap();
fs::write(
qa_dir.join("5_story_qa.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"8005_story_qa",
"3_qa",
"---\nname: QA Story\n---\n# Story\n",
)
.unwrap();
);
let results = StoryTestResults {
unit: vec![TestCaseResult {
@@ -284,26 +267,25 @@ mod tests {
}],
integration: vec![],
};
write_test_results_to_story_file(tmp.path(), "5_story_qa", &results).unwrap();
write_test_results_to_story_file(tmp.path(), "8005_story_qa", &results).unwrap();
let read_back = read_test_results_from_story_file(tmp.path(), "5_story_qa").unwrap();
let read_back = read_test_results_from_story_file(tmp.path(), "8005_story_qa").unwrap();
assert_eq!(read_back.unit.len(), 1);
}
#[test]
fn write_coverage_baseline_to_story_file_updates_front_matter() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".huskies/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(
current.join("6_story_cov.md"),
crate::db::ensure_content_store();
crate::db::write_item_with_content(
"8006_story_cov",
"2_current",
"---\nname: Cov Story\n---\n# Story\n",
)
.unwrap();
);
write_coverage_baseline_to_story_file(tmp.path(), "6_story_cov", 75.4).unwrap();
write_coverage_baseline_to_story_file(tmp.path(), "8006_story_cov", 75.4).unwrap();
let contents = read_story_content(tmp.path(), "6_story_cov").unwrap();
let contents = read_story_content(tmp.path(), "8006_story_cov").unwrap();
assert!(
contents.contains("coverage_baseline: 75.4%"),
"got: {contents}"