diff --git a/.gitignore b/.gitignore index 356450cd..e081b693 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ script/local-release # App specific (root-level; huskies subdirectory patterns live in .huskies/.gitignore) store.json +_merge_parsed.json .huskies_port .huskies/bot.toml.bak .huskies/build_hash diff --git a/server/src/http/mcp/diagnostics.rs b/server/src/http/mcp/diagnostics.rs index d6312561..5f498b03 100644 --- a/server/src/http/mcp/diagnostics.rs +++ b/server/src/http/mcp/diagnostics.rs @@ -1,10 +1,15 @@ //! MCP diagnostic tools — server logs, CRDT dump, and story movement helpers. +//! +//! This file is a thin adapter: it deserialises MCP payloads, delegates to +//! `crate::service::diagnostics` for all business logic, and serialises responses. use crate::agents::move_story_to_stage; use crate::http::context::AppContext; use crate::log_buffer; +use crate::service::diagnostics::{add_permission_rule, generate_permission_rule}; use crate::slog; use crate::slog_warn; use serde_json::{Value, json}; +#[allow(unused_imports)] use std::fs; pub(super) fn tool_get_server_logs(args: &Value) -> Result { @@ -44,94 +49,6 @@ pub(super) async fn tool_rebuild_and_restart(ctx: &AppContext) -> Result String { - if tool_name == "Bash" { - // Extract command from tool_input.command and use first word as prefix - let command_str = tool_input - .get("command") - .and_then(|v| v.as_str()) - .unwrap_or(""); - let first_word = command_str.split_whitespace().next().unwrap_or("unknown"); - format!("Bash({first_word} *)") - } else { - // For Edit, Write, Read, Glob, Grep, MCP tools, etc. — use the tool name directly - tool_name.to_string() - } -} - -/// Add a permission rule to `.claude/settings.json` in the project root. -/// Does nothing if the rule already exists. Creates the file if missing. 
-pub(super) fn add_permission_rule( - project_root: &std::path::Path, - rule: &str, -) -> Result<(), String> { - let claude_dir = project_root.join(".claude"); - fs::create_dir_all(&claude_dir) - .map_err(|e| format!("Failed to create .claude/ directory: {e}"))?; - - let settings_path = claude_dir.join("settings.json"); - let mut settings: Value = if settings_path.exists() { - let content = fs::read_to_string(&settings_path) - .map_err(|e| format!("Failed to read settings.json: {e}"))?; - serde_json::from_str(&content).map_err(|e| format!("Failed to parse settings.json: {e}"))? - } else { - json!({ "permissions": { "allow": [] } }) - }; - - let allow_arr = settings - .pointer_mut("/permissions/allow") - .and_then(|v| v.as_array_mut()); - - let allow = match allow_arr { - Some(arr) => arr, - None => { - // Ensure the structure exists - settings - .as_object_mut() - .unwrap() - .entry("permissions") - .or_insert(json!({ "allow": [] })); - settings - .pointer_mut("/permissions/allow") - .unwrap() - .as_array_mut() - .unwrap() - } - }; - - // Check for duplicates — exact string match - let rule_value = Value::String(rule.to_string()); - if allow.contains(&rule_value) { - return Ok(()); - } - - // Also check for wildcard coverage: if "mcp__huskies__*" exists, don't add - // a more specific "mcp__huskies__create_story". - let dominated = allow.iter().any(|existing| { - if let Some(pat) = existing.as_str() - && let Some(prefix) = pat.strip_suffix('*') - { - return rule.starts_with(prefix); - } - false - }); - if dominated { - return Ok(()); - } - - allow.push(rule_value); - - let pretty = - serde_json::to_string_pretty(&settings).map_err(|e| format!("Failed to serialize: {e}"))?; - fs::write(&settings_path, pretty).map_err(|e| format!("Failed to write settings.json: {e}"))?; - Ok(()) -} - /// MCP tool called by Claude Code via `--permission-prompt-tool`. 
/// /// Forwards the permission request through the shared channel to the active diff --git a/server/src/http/mcp/git_tools.rs b/server/src/http/mcp/git_tools.rs index 5295319d..062f702e 100644 --- a/server/src/http/mcp/git_tools.rs +++ b/server/src/http/mcp/git_tools.rs @@ -1,68 +1,34 @@ //! MCP git tools — status, diff, add, commit, and log operations on agent worktrees. +//! +//! This file is a thin adapter: it deserialises MCP payloads, delegates to +//! `crate::service::git_ops` for all business logic, and serialises responses. use crate::http::context::AppContext; use serde_json::{Value, json}; use std::path::PathBuf; /// Validates that `worktree_path` exists and is inside the project's /// `.huskies/worktrees/` directory. Returns the canonicalized path. +/// +/// Thin wrapper that obtains the project root from `ctx` and delegates to +/// `service::git_ops::io::validate_worktree_path`. fn validate_worktree_path(worktree_path: &str, ctx: &AppContext) -> Result { - let wd = PathBuf::from(worktree_path); - - if !wd.is_absolute() { - return Err("worktree_path must be an absolute path".to_string()); - } - if !wd.exists() { - return Err(format!("worktree_path does not exist: {worktree_path}")); - } - let project_root = ctx.agents.get_project_root(&ctx.state)?; - let worktrees_root = project_root.join(".huskies").join("worktrees"); - - let canonical_wd = wd - .canonicalize() - .map_err(|e| format!("Cannot canonicalize worktree_path: {e}"))?; - - let canonical_wt = if worktrees_root.exists() { - worktrees_root - .canonicalize() - .map_err(|e| format!("Cannot canonicalize worktrees root: {e}"))? - } else { - return Err("No worktrees directory found in project".to_string()); - }; - - if !canonical_wd.starts_with(&canonical_wt) { - return Err(format!( - "worktree_path must be inside .huskies/worktrees/. 
Got: {worktree_path}" - )); - } - - Ok(canonical_wd) + crate::service::git_ops::io::validate_worktree_path(worktree_path, &project_root) + .map_err(|e| e.to_string()) } /// Run a git command in the given directory and return its output. async fn run_git(args: Vec<&'static str>, dir: PathBuf) -> Result { - tokio::task::spawn_blocking(move || { - std::process::Command::new("git") - .args(&args) - .current_dir(&dir) - .output() - }) - .await - .map_err(|e| format!("Task join error: {e}"))? - .map_err(|e| format!("Failed to run git: {e}")) + crate::service::git_ops::io::run_git(args, dir) + .await + .map_err(|e| e.to_string()) } /// Run a git command with owned args in the given directory. async fn run_git_owned(args: Vec, dir: PathBuf) -> Result { - tokio::task::spawn_blocking(move || { - std::process::Command::new("git") - .args(&args) - .current_dir(&dir) - .output() - }) - .await - .map_err(|e| format!("Task join error: {e}"))? - .map_err(|e| format!("Failed to run git: {e}")) + crate::service::git_ops::io::run_git_owned(args, dir) + .await + .map_err(|e| e.to_string()) } /// git_status — returns working tree status (staged, unstaged, untracked files). 
@@ -86,29 +52,8 @@ pub(super) async fn tool_git_status(args: &Value, ctx: &AppContext) -> Result = Vec::new(); - let mut unstaged: Vec = Vec::new(); - let mut untracked: Vec = Vec::new(); - - for line in stdout.lines() { - if line.len() < 3 { - continue; - } - let x = line.chars().next().unwrap_or(' '); - let y = line.chars().nth(1).unwrap_or(' '); - let path = line[3..].to_string(); - - match (x, y) { - ('?', '?') => untracked.push(path), - (' ', _) => unstaged.push(path), - (_, ' ') => staged.push(path), - _ => { - // Both staged and unstaged modifications - staged.push(path.clone()); - unstaged.push(path); - } - } - } + let (staged, unstaged, untracked) = + crate::service::git_ops::parse_git_status_porcelain(&stdout); serde_json::to_string_pretty(&json!({ "staged": staged, diff --git a/server/src/http/mcp/qa_tools.rs b/server/src/http/mcp/qa_tools.rs index 67b74db7..130c8fba 100644 --- a/server/src/http/mcp/qa_tools.rs +++ b/server/src/http/mcp/qa_tools.rs @@ -1,8 +1,12 @@ //! MCP QA tools — request, approve, and reject QA reviews for stories. +//! +//! This file is a thin adapter: it deserialises MCP payloads, delegates to +//! `crate::service::qa` for all business logic, and serialises responses. use crate::agents::{ move_story_to_done, move_story_to_merge, move_story_to_qa, reject_story_from_qa, }; use crate::http::context::AppContext; +use crate::service::qa::{find_free_port, is_spike, merge_spike_branch_to_master}; use crate::slog; use crate::slog_warn; use serde_json::{Value, json}; @@ -57,8 +61,7 @@ pub(super) async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result Result Result Result { - use std::process::Command; - - // Check the branch exists and has unmerged changes. - if !crate::agents::lifecycle::feature_branch_has_unmerged_changes(project_root, story_id) { - slog!("[qa] Spike '{story_id}': feature branch has no unmerged changes, skipping merge."); - return Ok(false); - } - - // Ensure we are on master. 
- let checkout = Command::new("git") - .args(["checkout", "master"]) - .current_dir(project_root) - .output() - .map_err(|e| format!("git checkout master failed: {e}"))?; - if !checkout.status.success() { - return Err(format!( - "Failed to checkout master: {}", - String::from_utf8_lossy(&checkout.stderr) - )); - } - - // Try fast-forward first, then fall back to a regular merge. - let ff = Command::new("git") - .args(["merge", "--ff-only", branch]) - .current_dir(project_root) - .output() - .map_err(|e| format!("git merge --ff-only failed: {e}"))?; - - if ff.status.success() { - slog!("[qa] Spike '{story_id}': fast-forward merged '{branch}' into master."); - return Ok(true); - } - - // Fast-forward failed (diverged history) — fall back to a regular merge. - let merge = Command::new("git") - .args([ - "merge", - "--no-ff", - branch, - "-m", - &format!("Merge spike branch '{branch}' into master"), - ]) - .current_dir(project_root) - .output() - .map_err(|e| format!("git merge failed: {e}"))?; - - if merge.status.success() { - slog!("[qa] Spike '{story_id}': merged '{branch}' into master (no-ff)."); - Ok(true) - } else { - Err(format!( - "Failed to merge spike branch '{branch}' into master: {}", - String::from_utf8_lossy(&merge.stderr) - )) - } -} - pub(super) async fn tool_reject_qa(args: &Value, ctx: &AppContext) -> Result { let story_id = args .get("story_id") @@ -294,16 +231,6 @@ pub(super) async fn tool_launch_qa_app(args: &Value, ctx: &AppContext) -> Result .map_err(|e| format!("Serialization error: {e}")) } -/// Find a free TCP port starting from `start`. 
-pub(super) fn find_free_port(start: u16) -> u16 { - for port in start..start + 100 { - if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() { - return port; - } - } - start // fallback -} - #[cfg(test)] mod tests { use super::*; diff --git a/server/src/http/mcp/shell_tools.rs b/server/src/http/mcp/shell_tools.rs index 81e0d8b5..a1b06f59 100644 --- a/server/src/http/mcp/shell_tools.rs +++ b/server/src/http/mcp/shell_tools.rs @@ -1,5 +1,10 @@ //! MCP shell tools — run commands, execute tests, and stream output via MCP. +//! +//! This file is a thin adapter: it deserialises MCP payloads, delegates to +//! `crate::service::shell` for all business logic, and serialises responses. use crate::http::context::AppContext; +#[allow(unused_imports)] +use crate::service::shell::{extract_count, is_dangerous, parse_test_counts, truncate_output}; use bytes::Bytes; use futures::StreamExt; use poem::{Body, Response}; @@ -11,92 +16,15 @@ const MAX_TIMEOUT_SECS: u64 = 600; const TEST_TIMEOUT_SECS: u64 = 1200; const MAX_OUTPUT_LINES: usize = 100; -/// Patterns that are unconditionally blocked regardless of context. -static BLOCKED_PATTERNS: &[&str] = &[ - "rm -rf /", - "rm -fr /", - "rm -rf /*", - "rm -fr /*", - "rm --no-preserve-root", - ":(){ :|:& };:", - "> /dev/sda", - "dd if=/dev", -]; - -/// Binaries that are unconditionally blocked. -static BLOCKED_BINARIES: &[&str] = &[ - "sudo", "su", "shutdown", "reboot", "halt", "poweroff", "mkfs", -]; - -/// Returns an error message if the command matches a blocked pattern or binary. 
-fn is_dangerous(command: &str) -> Option { - let trimmed = command.trim(); - - // Check each blocked pattern (substring match) - for &pattern in BLOCKED_PATTERNS { - if trimmed.contains(pattern) { - return Some(format!( - "Command blocked: dangerous pattern '{pattern}' detected" - )); - } - } - - // Check first token of the command against blocked binaries - if let Some(first_token) = trimmed.split_whitespace().next() { - let binary = std::path::Path::new(first_token) - .file_name() - .and_then(|n| n.to_str()) - .unwrap_or(first_token); - if BLOCKED_BINARIES.contains(&binary) { - return Some(format!("Command blocked: '{binary}' is not permitted")); - } - } - - None -} - /// Validates that `working_dir` exists and is inside the project's /// `.huskies/worktrees/` directory. Returns the canonicalized path. +/// +/// Thin wrapper that obtains the project root from `ctx` and delegates to +/// `service::shell::io::validate_working_dir`. fn validate_working_dir(working_dir: &str, ctx: &AppContext) -> Result { - let wd = PathBuf::from(working_dir); - - if !wd.is_absolute() { - return Err("working_dir must be an absolute path".to_string()); - } - if !wd.exists() { - return Err(format!("working_dir does not exist: {working_dir}")); - } - let project_root = ctx.agents.get_project_root(&ctx.state)?; - let worktrees_root = project_root.join(".huskies").join("worktrees"); - - let canonical_wd = wd - .canonicalize() - .map_err(|e| format!("Cannot canonicalize working_dir: {e}"))?; - - // If worktrees_root doesn't exist yet, we can't allow anything - let canonical_wt = if worktrees_root.exists() { - worktrees_root - .canonicalize() - .map_err(|e| format!("Cannot canonicalize worktrees root: {e}"))? - } else { - return Err("No worktrees directory found in project".to_string()); - }; - - // Also allow the merge workspace so mergemaster can fix conflicts. 
- let merge_workspace = project_root.join(".huskies").join("merge_workspace"); - let canonical_mw = merge_workspace.canonicalize().unwrap_or_default(); - - let in_worktrees = canonical_wd.starts_with(&canonical_wt); - let in_merge_ws = - !canonical_mw.as_os_str().is_empty() && canonical_wd.starts_with(&canonical_mw); - if !in_worktrees && !in_merge_ws { - return Err(format!( - "working_dir must be inside .huskies/worktrees/ or .huskies/merge_workspace/. Got: {working_dir}" - )); - } - - Ok(canonical_wd) + crate::service::shell::io::validate_working_dir(working_dir, &project_root) + .map_err(|e| e.to_string()) } /// Regular (non-SSE) run_command: runs the bash command to completion and @@ -328,51 +256,6 @@ pub(super) fn handle_run_command_sse( .body(Body::from_bytes_stream(stream.map(|r| r.map(Bytes::from)))) } -/// Truncate output to at most `max_lines` lines, keeping the tail. -fn truncate_output(output: &str, max_lines: usize) -> String { - let lines: Vec<&str> = output.lines().collect(); - if lines.len() <= max_lines { - return output.to_string(); - } - let omitted = lines.len() - max_lines; - let tail = lines[lines.len() - max_lines..].join("\n"); - format!("[... {omitted} lines omitted ...]\n{tail}") -} - -/// Parse cumulative passed/failed counts from `cargo test` output lines like: -/// `"test result: ok. 5 passed; 0 failed; ..."` -fn parse_test_counts(output: &str) -> (u64, u64) { - let mut total_passed = 0u64; - let mut total_failed = 0u64; - for line in output.lines() { - if line.contains("test result:") { - if let Some(p) = extract_count(line, "passed") { - total_passed += p; - } - if let Some(f) = extract_count(line, "failed") { - total_failed += f; - } - } - } - (total_passed, total_failed) -} - -/// Extract a count immediately before `label` in `line` (e.g. `"5 passed"` → 5). 
-fn extract_count(line: &str, label: &str) -> Option { - let pos = line.find(label)?; - let before = line[..pos].trim_end(); - let num_str: String = before - .chars() - .rev() - .take_while(|c| c.is_ascii_digit()) - .collect(); - if num_str.is_empty() { - return None; - } - let num_str: String = num_str.chars().rev().collect(); - num_str.parse().ok() -} - /// Run the project's test suite (`script/test`) and block until complete. /// /// Spawns the test process, then polls every second server-side until the diff --git a/server/src/http/mcp/story_tools.rs b/server/src/http/mcp/story_tools.rs index ebad1416..a41b0a20 100644 --- a/server/src/http/mcp/story_tools.rs +++ b/server/src/http/mcp/story_tools.rs @@ -1,4 +1,8 @@ //! MCP story tools — create, update, move, and manage stories, bugs, and refactors via MCP. +//! +//! This file is a thin adapter: it deserialises MCP payloads, delegates to +//! `crate::service::story` and `crate::http::workflow` for business logic, +//! and serialises responses. 
use crate::agents::{ close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_done, }; @@ -12,7 +16,9 @@ use crate::http::workflow::{ use crate::io::story_metadata::{ check_archived_deps, check_archived_deps_from_list, parse_front_matter, parse_unchecked_todos, }; +use crate::service::story::parse_test_cases; use crate::slog_warn; +#[allow(unused_imports)] use crate::workflow::{TestCaseResult, TestStatus, evaluate_acceptance_with_coverage}; use serde_json::{Value, json}; use std::collections::HashMap; @@ -702,46 +708,6 @@ pub(super) fn tool_list_refactors(ctx: &AppContext) -> Result { .map_err(|e| format!("Serialization error: {e}")) } -pub(super) fn parse_test_cases(value: Option<&Value>) -> Result, String> { - let arr = match value { - Some(Value::Array(a)) => a, - Some(Value::Null) | None => return Ok(Vec::new()), - _ => return Err("Expected array for test cases".to_string()), - }; - - arr.iter() - .map(|item| { - let name = item - .get("name") - .and_then(|v| v.as_str()) - .ok_or("Test case missing 'name'")? - .to_string(); - let status_str = item - .get("status") - .and_then(|v| v.as_str()) - .ok_or("Test case missing 'status'")?; - let status = match status_str { - "pass" => TestStatus::Pass, - "fail" => TestStatus::Fail, - other => { - return Err(format!( - "Invalid test status '{other}'. Use 'pass' or 'fail'." - )); - } - }; - let details = item - .get("details") - .and_then(|v| v.as_str()) - .map(String::from); - Ok(TestCaseResult { - name, - status, - details, - }) - }) - .collect() -} - #[cfg(test)] mod tests { use super::*; diff --git a/server/src/service/diagnostics/io.rs b/server/src/service/diagnostics/io.rs new file mode 100644 index 00000000..6f1ed555 --- /dev/null +++ b/server/src/service/diagnostics/io.rs @@ -0,0 +1,72 @@ +//! Diagnostics I/O — the ONLY place in `service::diagnostics/` that may perform side effects. +//! +//! Side effects here include: reading and writing `.claude/settings.json` via `std::fs`. +//! 
Pure permission-rule logic (pattern derivation, wildcard domination checks) lives in +//! `permission.rs`. + +use serde_json::{Value, json}; +use std::fs; +use std::path::Path; + +/// Add a permission rule to `.claude/settings.json` in the project root. +/// +/// Does nothing if the rule already exists (exact match) or is already covered +/// by a wildcard pattern in the allow list. Creates the file and any missing +/// parent directories if they do not yet exist. +/// +/// # Errors +/// Returns `Err(String)` if the directory cannot be created, the file cannot be +/// read or written, or the JSON cannot be parsed or serialised. +pub fn add_permission_rule(project_root: &Path, rule: &str) -> Result<(), String> { + let claude_dir = project_root.join(".claude"); + fs::create_dir_all(&claude_dir) + .map_err(|e| format!("Failed to create .claude/ directory: {e}"))?; + + let settings_path = claude_dir.join("settings.json"); + let mut settings: Value = if settings_path.exists() { + let content = fs::read_to_string(&settings_path) + .map_err(|e| format!("Failed to read settings.json: {e}"))?; + serde_json::from_str(&content).map_err(|e| format!("Failed to parse settings.json: {e}"))? + } else { + json!({ "permissions": { "allow": [] } }) + }; + + let allow_arr = settings + .pointer_mut("/permissions/allow") + .and_then(|v| v.as_array_mut()); + + let allow = match allow_arr { + Some(arr) => arr, + None => { + settings + .as_object_mut() + .unwrap() + .entry("permissions") + .or_insert(json!({ "allow": [] })); + settings + .pointer_mut("/permissions/allow") + .unwrap() + .as_array_mut() + .unwrap() + } + }; + + let rule_value = Value::String(rule.to_string()); + + // Exact duplicate check. + if allow.contains(&rule_value) { + return Ok(()); + } + + // Wildcard-coverage check: if "mcp__huskies__*" exists, skip more-specific rules. 
+ if super::permission::is_dominated_by_wildcard(rule, allow) { + return Ok(()); + } + + allow.push(rule_value); + + let pretty = + serde_json::to_string_pretty(&settings).map_err(|e| format!("Failed to serialize: {e}"))?; + fs::write(&settings_path, pretty).map_err(|e| format!("Failed to write settings.json: {e}"))?; + Ok(()) +} diff --git a/server/src/service/diagnostics/mod.rs b/server/src/service/diagnostics/mod.rs new file mode 100644 index 00000000..1be5170a --- /dev/null +++ b/server/src/service/diagnostics/mod.rs @@ -0,0 +1,89 @@ +//! Diagnostics service — server logs, CRDT dump, permission management, and story movement. +//! +//! Extracted from `http/mcp/diagnostics.rs` following the conventions in +//! `docs/architecture/service-modules.md`: +//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration +//! - `io.rs` — the ONLY place that performs side effects (filesystem reads/writes) +//! - `permission.rs` — pure permission-rule generation and wildcard checks + +pub mod io; +pub mod permission; + +pub use io::add_permission_rule; +pub use permission::generate_permission_rule; +#[allow(unused_imports)] +pub use permission::is_dominated_by_wildcard; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::diagnostics` functions. +/// +/// HTTP handlers map these to status codes: +/// - [`Error::NotFound`] → 404 Not Found +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Conflict`] → 409 Conflict +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The requested resource was not found. + NotFound(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// The operation cannot proceed due to a conflicting state. + Conflict(String), + /// A filesystem read or write operation failed. 
+ Io(String), + /// An upstream dependency returned an unexpected error. + UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound(msg) => write!(f, "Not found: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Conflict(msg) => write!(f, "Conflict: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_not_found() { + let e = Error::NotFound("log file missing".to_string()); + assert!(e.to_string().contains("Not found")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("invalid filter".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_conflict() { + let e = Error::Conflict("story in wrong stage".to_string()); + assert!(e.to_string().contains("Conflict")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("settings.json write failed".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { + let e = Error::UpstreamFailure("rebuild failed".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } +} diff --git a/server/src/service/diagnostics/permission.rs b/server/src/service/diagnostics/permission.rs new file mode 100644 index 00000000..45b3aa63 --- /dev/null +++ b/server/src/service/diagnostics/permission.rs @@ -0,0 +1,105 @@ +//! Pure permission-rule generation for `service::diagnostics`. +//! +//! These functions produce Claude Code permission-rule strings from tool call +//! metadata. No I/O: they take `&str` / `&Value` and return `String`. 
+ +use serde_json::Value; + +/// Generate a Claude Code permission rule string for the given tool name and input. +/// +/// - `Bash` tools → `Bash(first_word *)` derived from the `command` field. +/// - All other tools → the tool name verbatim (e.g. `Edit`, `mcp__huskies__create_story`). +pub fn generate_permission_rule(tool_name: &str, tool_input: &Value) -> String { + if tool_name == "Bash" { + let command_str = tool_input + .get("command") + .and_then(|v| v.as_str()) + .unwrap_or(""); + let first_word = command_str.split_whitespace().next().unwrap_or("unknown"); + format!("Bash({first_word} *)") + } else { + tool_name.to_string() + } +} + +/// Return `true` if `rule` is already covered by an existing wildcard in `allow_list`. +/// +/// For example, if `allow_list` contains `"mcp__huskies__*"`, then the more +/// specific rule `"mcp__huskies__create_story"` is already covered. +pub fn is_dominated_by_wildcard(rule: &str, allow_list: &[Value]) -> bool { + allow_list.iter().any(|existing| { + if let Some(pat) = existing.as_str() + && let Some(prefix) = pat.strip_suffix('*') + { + return rule.starts_with(prefix); + } + false + }) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn generate_rule_for_edit_tool() { + let rule = generate_permission_rule("Edit", &json!({})); + assert_eq!(rule, "Edit"); + } + + #[test] + fn generate_rule_for_write_tool() { + let rule = generate_permission_rule("Write", &json!({})); + assert_eq!(rule, "Write"); + } + + #[test] + fn generate_rule_for_bash_git() { + let rule = generate_permission_rule("Bash", &json!({"command": "git status"})); + assert_eq!(rule, "Bash(git *)"); + } + + #[test] + fn generate_rule_for_bash_cargo() { + let rule = generate_permission_rule("Bash", &json!({"command": "cargo test --all"})); + assert_eq!(rule, "Bash(cargo *)"); + } + + #[test] + fn generate_rule_for_bash_empty_command() { + let 
rule = generate_permission_rule("Bash", &json!({})); + assert_eq!(rule, "Bash(unknown *)"); + } + + #[test] + fn generate_rule_for_mcp_tool() { + let rule = generate_permission_rule("mcp__huskies__create_story", &json!({"name": "foo"})); + assert_eq!(rule, "mcp__huskies__create_story"); + } + + #[test] + fn is_dominated_by_exact_wildcard() { + let allow = vec![json!("mcp__huskies__*")]; + assert!(is_dominated_by_wildcard( + "mcp__huskies__create_story", + &allow + )); + } + + #[test] + fn is_not_dominated_by_different_prefix() { + let allow = vec![json!("mcp__other__*")]; + assert!(!is_dominated_by_wildcard( + "mcp__huskies__create_story", + &allow + )); + } + + #[test] + fn is_not_dominated_when_list_is_empty() { + assert!(!is_dominated_by_wildcard("Edit", &[])); + } +} diff --git a/server/src/service/git_ops/io.rs b/server/src/service/git_ops/io.rs new file mode 100644 index 00000000..8c004d45 --- /dev/null +++ b/server/src/service/git_ops/io.rs @@ -0,0 +1,90 @@ +//! Git I/O — the ONLY place in `service::git_ops/` that may perform side effects. +//! +//! Side effects here include: spawning git processes via `std::process::Command` +//! (wrapped in `tokio::task::spawn_blocking`), and filesystem existence and +//! canonicalization checks for path validation. +//! All pure logic (path-prefix checks, porcelain parsing) lives in `path_guard.rs` +//! and `porcelain.rs`. + +use super::Error; +use std::path::{Path, PathBuf}; +use std::process::Output; + +/// Validate that `worktree_path` is an absolute path that exists on disk and +/// lies inside the project's `.huskies/worktrees/` directory. Returns the +/// canonicalized path on success. +/// +/// # Errors +/// - [`Error::Validation`] if the path is relative or does not exist. +/// - [`Error::PathNotAllowed`] if the path is outside `.huskies/worktrees/`. +/// - [`Error::Io`] if canonicalization fails. 
+pub fn validate_worktree_path(worktree_path: &str, project_root: &Path) -> Result { + let wd = PathBuf::from(worktree_path); + + if !wd.is_absolute() { + return Err(Error::Validation( + "worktree_path must be an absolute path".to_string(), + )); + } + if !wd.exists() { + return Err(Error::Validation(format!( + "worktree_path does not exist: {worktree_path}" + ))); + } + + let worktrees_root = project_root.join(".huskies").join("worktrees"); + + let canonical_wd = wd + .canonicalize() + .map_err(|e| Error::Io(format!("Cannot canonicalize worktree_path: {e}")))?; + + let canonical_wt = if worktrees_root.exists() { + worktrees_root + .canonicalize() + .map_err(|e| Error::Io(format!("Cannot canonicalize worktrees root: {e}")))? + } else { + return Err(Error::PathNotAllowed( + "No worktrees directory found in project".to_string(), + )); + }; + + if !super::path_guard::is_under_root(&canonical_wd, &canonical_wt) { + return Err(Error::PathNotAllowed(format!( + "worktree_path must be inside .huskies/worktrees/. Got: {worktree_path}" + ))); + } + + Ok(canonical_wd) +} + +/// Run a git command with static arg slices in `dir` and return the process output. +/// +/// # Errors +/// - [`Error::UpstreamFailure`] if the task panics or git cannot be spawned. +pub async fn run_git(args: Vec<&'static str>, dir: PathBuf) -> Result { + tokio::task::spawn_blocking(move || { + std::process::Command::new("git") + .args(&args) + .current_dir(&dir) + .output() + }) + .await + .map_err(|e| Error::UpstreamFailure(format!("Task join error: {e}")))? + .map_err(|e| Error::Io(format!("Failed to run git: {e}"))) +} + +/// Run a git command with owned `String` args in `dir` and return the process output. +/// +/// # Errors +/// - [`Error::UpstreamFailure`] if the task panics or git cannot be spawned. 
+pub async fn run_git_owned(args: Vec, dir: PathBuf) -> Result { + tokio::task::spawn_blocking(move || { + std::process::Command::new("git") + .args(&args) + .current_dir(&dir) + .output() + }) + .await + .map_err(|e| Error::UpstreamFailure(format!("Task join error: {e}")))? + .map_err(|e| Error::Io(format!("Failed to run git: {e}"))) +} diff --git a/server/src/service/git_ops/mod.rs b/server/src/service/git_ops/mod.rs new file mode 100644 index 00000000..049104f6 --- /dev/null +++ b/server/src/service/git_ops/mod.rs @@ -0,0 +1,100 @@ +//! Git operations service — worktree path validation and git command execution. +//! +//! Extracted from `http/mcp/git_tools.rs` following the conventions in +//! `docs/architecture/service-modules.md`: +//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration +//! - `io.rs` — the ONLY place that performs side effects (git processes, filesystem) +//! - `path_guard.rs` — pure path-prefix safety checks +//! - `porcelain.rs` — pure git porcelain output parsers + +pub mod io; +pub mod path_guard; +pub mod porcelain; + +#[allow(unused_imports)] +pub use path_guard::is_under_root; +pub use porcelain::parse_git_status_porcelain; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::git_ops` functions. +/// +/// HTTP handlers map these to status codes: +/// - [`Error::NotFound`] → 404 Not Found +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Conflict`] → 409 Conflict +/// - [`Error::PathNotAllowed`] → 400 Bad Request (sandbox violation) +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The requested worktree or path does not exist. + NotFound(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// The git operation cannot proceed due to a conflicting state. 
+ Conflict(String), + /// The path is outside the allowed sandbox. + PathNotAllowed(String), + /// A filesystem or git I/O operation failed. + Io(String), + /// An upstream git command returned an unexpected error. + UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound(msg) => write!(f, "Not found: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Conflict(msg) => write!(f, "Conflict: {msg}"), + Self::PathNotAllowed(msg) => write!(f, "Path not allowed: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_not_found() { + let e = Error::NotFound("worktree missing".to_string()); + assert!(e.to_string().contains("Not found")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("relative path".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_conflict() { + let e = Error::Conflict("uncommitted changes".to_string()); + assert!(e.to_string().contains("Conflict")); + } + + #[test] + fn error_display_path_not_allowed() { + let e = Error::PathNotAllowed("outside sandbox".to_string()); + assert!(e.to_string().contains("Path not allowed")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("permission denied".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { + let e = Error::UpstreamFailure("git not found".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } +} diff --git a/server/src/service/git_ops/path_guard.rs b/server/src/service/git_ops/path_guard.rs new file mode 100644 index 00000000..f8cde399 --- 
/dev/null +++ b/server/src/service/git_ops/path_guard.rs @@ -0,0 +1,58 @@ +//! Pure path-guard helpers for `service::git_ops`. +//! +//! These functions are free of side effects — they operate on already-resolved +//! `Path` values and perform no filesystem I/O. Path existence checks and +//! canonicalization belong in `io.rs`. + +use std::path::Path; + +/// Return `true` if `canonical_path` starts with (i.e. is under) `root`. +/// +/// Both paths must already be canonicalized so that symlinks, `.`, and `..` +/// components do not cause false negatives. +pub fn is_under_root(canonical_path: &Path, root: &Path) -> bool { + canonical_path.starts_with(root) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use std::path::PathBuf; + + #[test] + fn is_under_root_returns_true_for_child() { + let root = PathBuf::from("/project/.huskies/worktrees"); + let child = PathBuf::from("/project/.huskies/worktrees/42_story_foo"); + assert!(is_under_root(&child, &root)); + } + + #[test] + fn is_under_root_returns_false_for_sibling() { + let root = PathBuf::from("/project/.huskies/worktrees"); + let sibling = PathBuf::from("/project/.huskies/other"); + assert!(!is_under_root(&sibling, &root)); + } + + #[test] + fn is_under_root_returns_false_for_parent() { + let root = PathBuf::from("/project/.huskies/worktrees"); + let parent = PathBuf::from("/project/.huskies"); + assert!(!is_under_root(&parent, &root)); + } + + #[test] + fn is_under_root_returns_true_for_exact_match() { + let root = PathBuf::from("/project/.huskies/worktrees"); + assert!(is_under_root(&root, &root)); + } + + #[test] + fn is_under_root_returns_false_for_path_with_shared_prefix_but_not_child() { + // /foo/bar-extra is NOT under /foo/bar + let root = PathBuf::from("/foo/bar"); + let other = PathBuf::from("/foo/bar-extra"); + assert!(!is_under_root(&other, &root)); + } +} diff --git a/server/src/service/git_ops/porcelain.rs 
b/server/src/service/git_ops/porcelain.rs new file mode 100644 index 00000000..3038481b --- /dev/null +++ b/server/src/service/git_ops/porcelain.rs @@ -0,0 +1,107 @@ +//! Pure git porcelain output parsers for `service::git_ops`. +//! +//! These functions parse the text output of `git status --porcelain=v1` +//! and similar commands. No I/O: they take `&str` and return structured data. + +/// Parse `git status --porcelain=v1 -u` output into three file lists. +/// +/// Returns `(staged, unstaged, untracked)` where each entry is the file path +/// string from the porcelain line. +pub fn parse_git_status_porcelain(stdout: &str) -> (Vec, Vec, Vec) { + let mut staged: Vec = Vec::new(); + let mut unstaged: Vec = Vec::new(); + let mut untracked: Vec = Vec::new(); + + for line in stdout.lines() { + if line.len() < 3 { + continue; + } + let x = line.chars().next().unwrap_or(' '); + let y = line.chars().nth(1).unwrap_or(' '); + let path = line[3..].to_string(); + + match (x, y) { + ('?', '?') => untracked.push(path), + (' ', _) => unstaged.push(path), + (_, ' ') => staged.push(path), + _ => { + // Both staged and unstaged modifications. + staged.push(path.clone()); + unstaged.push(path); + } + } + } + + (staged, unstaged, untracked) +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_empty_output_returns_empty_vecs() { + let (s, u, t) = parse_git_status_porcelain(""); + assert!(s.is_empty()); + assert!(u.is_empty()); + assert!(t.is_empty()); + } + + #[test] + fn parse_untracked_file() { + let output = "?? 
new_file.txt\n"; + let (staged, unstaged, untracked) = parse_git_status_porcelain(output); + assert!(staged.is_empty()); + assert!(unstaged.is_empty()); + assert_eq!(untracked, vec!["new_file.txt"]); + } + + #[test] + fn parse_staged_file() { + let output = "A staged.txt\n"; + let (staged, unstaged, untracked) = parse_git_status_porcelain(output); + assert_eq!(staged, vec!["staged.txt"]); + assert!(unstaged.is_empty()); + assert!(untracked.is_empty()); + } + + #[test] + fn parse_unstaged_modified_file() { + // 'M' in second column = unstaged modification + let output = " M modified.txt\n"; + let (staged, unstaged, untracked) = parse_git_status_porcelain(output); + assert!(staged.is_empty()); + assert_eq!(unstaged, vec!["modified.txt"]); + assert!(untracked.is_empty()); + } + + #[test] + fn parse_both_staged_and_unstaged() { + // 'MM' = staged + unstaged in same file + let output = "MM both.txt\n"; + let (staged, unstaged, untracked) = parse_git_status_porcelain(output); + assert_eq!(staged, vec!["both.txt"]); + assert_eq!(unstaged, vec!["both.txt"]); + assert!(untracked.is_empty()); + } + + #[test] + fn parse_mixed_output() { + let output = "A staged.rs\n M unstaged.rs\n?? untracked.rs\n"; + let (staged, unstaged, untracked) = parse_git_status_porcelain(output); + assert_eq!(staged, vec!["staged.rs"]); + assert_eq!(unstaged, vec!["unstaged.rs"]); + assert_eq!(untracked, vec!["untracked.rs"]); + } + + #[test] + fn parse_skips_short_lines() { + // Lines shorter than 3 chars should be skipped. + let output = "A \nMM both.txt\n"; + let (staged, _unstaged, _untracked) = parse_git_status_porcelain(output); + // Only "both.txt" should appear — the 2-char "A " line is skipped. + assert_eq!(staged, vec!["both.txt"]); + } +} diff --git a/server/src/service/merge/io.rs b/server/src/service/merge/io.rs new file mode 100644 index 00000000..12f51c23 --- /dev/null +++ b/server/src/service/merge/io.rs @@ -0,0 +1,5 @@ +//! 
Merge I/O — the ONLY place in `service::merge/` that may perform side effects. +//! +//! Currently, the bulk of the merge I/O is handled by `crate::agents::merge` +//! and `crate::io::story_metadata`. This file is the designated home for any +//! future I/O helpers that are extracted from merge-related MCP handlers. diff --git a/server/src/service/merge/mod.rs b/server/src/service/merge/mod.rs new file mode 100644 index 00000000..1789f083 --- /dev/null +++ b/server/src/service/merge/mod.rs @@ -0,0 +1,87 @@ +//! Merge service — domain logic for merging agent work to master. +//! +//! Extracted from `http/mcp/merge_tools.rs` following the conventions in +//! `docs/architecture/service-modules.md`: +//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration +//! - `io.rs` — the ONLY place that performs side effects +//! - `status.rs` — pure merge-status message formatting + +pub mod io; +pub mod status; + +#[allow(unused_imports)] +pub use status::format_merge_status_message; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::merge` functions. +/// +/// HTTP handlers map these to status codes: +/// - [`Error::NotFound`] → 404 Not Found +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Conflict`] → 409 Conflict +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The requested story or merge job was not found. + NotFound(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// The merge cannot proceed due to a conflicting state. + Conflict(String), + /// A filesystem or process I/O operation failed. + Io(String), + /// An upstream dependency (agents, git) returned an unexpected error. 
+ UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound(msg) => write!(f, "Not found: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Conflict(msg) => write!(f, "Conflict: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_not_found() { + let e = Error::NotFound("merge job missing".to_string()); + assert!(e.to_string().contains("Not found")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("story_id required".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_conflict() { + let e = Error::Conflict("story already merged".to_string()); + assert!(e.to_string().contains("Conflict")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("write failed".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { + let e = Error::UpstreamFailure("git crashed".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } +} diff --git a/server/src/service/merge/status.rs b/server/src/service/merge/status.rs new file mode 100644 index 00000000..bc854d3b --- /dev/null +++ b/server/src/service/merge/status.rs @@ -0,0 +1,89 @@ +//! Pure merge-status message formatting for `service::merge`. +//! +//! These functions transform a completed merge report into human-readable +//! status messages. No I/O: they are pure functions over plain data. + +use crate::agents::merge::MergeReport; + +#[allow(dead_code)] +/// Derive a human-readable status message from a completed [`MergeReport`]. 
+/// +/// The message explains what happened and (on failure) what the caller +/// should do next. +pub fn format_merge_status_message(report: &MergeReport) -> &'static str { + if report.success && report.gates_passed && report.conflicts_resolved { + "Merge complete: conflicts were auto-resolved and all quality gates passed. Story moved to done and worktree cleaned up." + } else if report.success && report.gates_passed { + "Merge complete: all quality gates passed. Story moved to done and worktree cleaned up." + } else if report.had_conflicts && !report.conflicts_resolved { + "Merge failed: conflicts detected that could not be auto-resolved. Merge was aborted — master is untouched. Call report_merge_failure with the conflict details so the human can resolve them. Do NOT manually move the story file or call accept_story." + } else if report.success && !report.gates_passed { + "Merge committed but quality gates failed. Review gate_output and fix issues before re-running." + } else { + "Merge failed. Review gate_output for details. Call report_merge_failure to record the failure. Do NOT manually move the story file or call accept_story." 
+ } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + fn report( + success: bool, + had_conflicts: bool, + conflicts_resolved: bool, + gates_passed: bool, + ) -> MergeReport { + MergeReport { + story_id: String::new(), + success, + had_conflicts, + conflicts_resolved, + conflict_details: None, + gates_passed, + gate_output: String::new(), + worktree_cleaned_up: false, + story_archived: false, + } + } + + #[test] + fn clean_merge_message() { + let r = report(true, false, false, true); + let msg = format_merge_status_message(&r); + assert!(msg.contains("quality gates passed")); + assert!(msg.contains("done")); + } + + #[test] + fn conflicts_resolved_message() { + let r = report(true, true, true, true); + let msg = format_merge_status_message(&r); + assert!(msg.contains("auto-resolved")); + } + + #[test] + fn unresolved_conflicts_message() { + let r = report(false, true, false, false); + let msg = format_merge_status_message(&r); + assert!(msg.contains("could not be auto-resolved")); + assert!(msg.contains("report_merge_failure")); + } + + #[test] + fn gates_failed_message() { + let r = report(true, false, false, false); + let msg = format_merge_status_message(&r); + assert!(msg.contains("quality gates failed")); + } + + #[test] + fn general_failure_message() { + let r = report(false, false, false, false); + let msg = format_merge_status_message(&r); + assert!(msg.contains("Merge failed")); + assert!(msg.contains("report_merge_failure")); + } +} diff --git a/server/src/service/mod.rs b/server/src/service/mod.rs index e10e396b..5acbb5d9 100644 --- a/server/src/service/mod.rs +++ b/server/src/service/mod.rs @@ -8,16 +8,21 @@ pub mod agents; pub mod anthropic; pub mod bot_command; +pub mod diagnostics; pub mod events; pub mod file_io; +pub mod gateway; +pub mod git_ops; pub mod health; +pub mod merge; pub mod notifications; pub mod oauth; +pub mod pipeline; pub mod project; +pub mod 
qa; pub mod settings; +pub mod shell; +pub mod story; pub mod timer; pub mod wizard; pub mod ws; - -pub mod gateway; -pub mod pipeline; diff --git a/server/src/service/qa/io.rs b/server/src/service/qa/io.rs new file mode 100644 index 00000000..4cbf669c --- /dev/null +++ b/server/src/service/qa/io.rs @@ -0,0 +1,92 @@ +//! QA I/O — the ONLY place in `service::qa/` that may perform side effects. +//! +//! Side effects here include: spawning git processes via `std::process::Command`, +//! binding TCP sockets to discover free ports, and launching the QA app process. + +use super::Error; + +/// Find a free TCP port by attempting to bind starting from `start`. +/// +/// Scans up to 100 ports above `start` and returns the first available one. +/// Falls back to `start` if none are found (unlikely in practice). +pub fn find_free_port(start: u16) -> u16 { + for port in start..start + 100 { + if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() { + return port; + } + } + start +} + +/// Merge a spike's feature branch into master using a fast-forward or simple merge. +/// +/// Unlike the squash-merge pipeline used for stories, spikes skip quality gates +/// and preserve their commit history. Returns `true` if a merge was performed, +/// `false` if the branch had no unmerged commits (already up to date). +/// +/// # Errors +/// - [`Error::Conflict`] if the merge fails due to conflicts. +/// - [`Error::UpstreamFailure`] if a git command cannot be run. +pub fn merge_spike_branch_to_master( + project_root: &std::path::Path, + branch: &str, + story_id: &str, +) -> Result { + use std::process::Command; + + // Check the branch exists and has unmerged changes. + if !crate::agents::lifecycle::feature_branch_has_unmerged_changes(project_root, story_id) { + crate::slog!( + "[qa] Spike '{story_id}': feature branch has no unmerged changes, skipping merge." + ); + return Ok(false); + } + + // Ensure we are on master. 
+ let checkout = Command::new("git") + .args(["checkout", "master"]) + .current_dir(project_root) + .output() + .map_err(|e| Error::UpstreamFailure(format!("git checkout master failed: {e}")))?; + if !checkout.status.success() { + return Err(Error::UpstreamFailure(format!( + "Failed to checkout master: {}", + String::from_utf8_lossy(&checkout.stderr) + ))); + } + + // Try fast-forward first, then fall back to a regular merge. + let ff = Command::new("git") + .args(["merge", "--ff-only", branch]) + .current_dir(project_root) + .output() + .map_err(|e| Error::UpstreamFailure(format!("git merge --ff-only failed: {e}")))?; + + if ff.status.success() { + crate::slog!("[qa] Spike '{story_id}': fast-forward merged '{branch}' into master."); + return Ok(true); + } + + // Fast-forward failed (diverged history) — fall back to a regular merge. + let merge = Command::new("git") + .args([ + "merge", + "--no-ff", + branch, + "-m", + &format!("Merge spike branch '{branch}' into master"), + ]) + .current_dir(project_root) + .output() + .map_err(|e| Error::UpstreamFailure(format!("git merge failed: {e}")))?; + + if merge.status.success() { + crate::slog!("[qa] Spike '{story_id}': merged '{branch}' into master (no-ff)."); + Ok(true) + } else { + Err(Error::Conflict(format!( + "Failed to merge spike branch '{branch}' into master: {}", + String::from_utf8_lossy(&merge.stderr) + ))) + } +} diff --git a/server/src/service/qa/lifecycle.rs b/server/src/service/qa/lifecycle.rs new file mode 100644 index 00000000..9ca73a10 --- /dev/null +++ b/server/src/service/qa/lifecycle.rs @@ -0,0 +1,42 @@ +//! Pure QA lifecycle helpers for `service::qa`. +//! +//! These functions classify work items and make routing decisions without +//! performing any I/O. The spike-vs-story distinction determines whether a +//! QA approval goes through the merge pipeline or bypasses it. + +use crate::agents::lifecycle::item_type_from_id; + +/// Return `true` if `story_id` identifies a spike (e.g. `"42_spike_foo"`). 
+/// +/// Spikes bypass the normal merge pipeline: when approved from QA they are +/// merged directly to master and moved straight to done. +pub fn is_spike(story_id: &str) -> bool { + item_type_from_id(story_id) == "spike" +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_spike_returns_true_for_spike_id() { + assert!(is_spike("42_spike_my_research")); + } + + #[test] + fn is_spike_returns_false_for_story_id() { + assert!(!is_spike("42_story_my_feature")); + } + + #[test] + fn is_spike_returns_false_for_bug_id() { + assert!(!is_spike("42_bug_login_crash")); + } + + #[test] + fn is_spike_returns_false_for_refactor_id() { + assert!(!is_spike("42_refactor_cleanup")); + } +} diff --git a/server/src/service/qa/mod.rs b/server/src/service/qa/mod.rs new file mode 100644 index 00000000..da4aa375 --- /dev/null +++ b/server/src/service/qa/mod.rs @@ -0,0 +1,97 @@ +//! QA service — domain logic for requesting, approving, and rejecting QA reviews. +//! +//! Extracted from `http/mcp/qa_tools.rs` following the conventions in +//! `docs/architecture/service-modules.md`: +//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration +//! - `io.rs` — the ONLY place that performs side effects (git, TCP, process) +//! - `lifecycle.rs` — pure QA routing decisions (spike vs. normal story) + +pub mod io; +pub mod lifecycle; + +pub use io::{find_free_port, merge_spike_branch_to_master}; +pub use lifecycle::is_spike; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::qa` functions. 
+/// +/// HTTP handlers map these to status codes: +/// - [`Error::NotFound`] → 404 Not Found +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Conflict`] → 409 Conflict (merge conflicts) +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The requested story or worktree was not found. + NotFound(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// The QA approval cannot proceed due to a git conflict. + Conflict(String), + /// A filesystem or process I/O operation failed. + Io(String), + /// An upstream dependency (git, agents) returned an unexpected error. + UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound(msg) => write!(f, "Not found: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Conflict(msg) => write!(f, "Conflict: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_not_found() { + let e = Error::NotFound("story missing".to_string()); + assert!(e.to_string().contains("Not found")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("notes required".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_conflict() { + let e = Error::Conflict("merge conflict".to_string()); + assert!(e.to_string().contains("Conflict")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("port bind failed".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { 
+ let e = Error::UpstreamFailure("git not found".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } + + #[test] + fn find_free_port_returns_bindable_port() { + let port = find_free_port(3100); + // The returned port must be bindable. + assert!( + std::net::TcpListener::bind(("127.0.0.1", port)).is_ok(), + "port {port} should be bindable" + ); + } +} diff --git a/server/src/service/shell/io.rs b/server/src/service/shell/io.rs new file mode 100644 index 00000000..8c4903e5 --- /dev/null +++ b/server/src/service/shell/io.rs @@ -0,0 +1,64 @@ +//! Shell I/O — the ONLY place in `service::shell/` that may perform side effects. +//! +//! Side effects here include: filesystem existence and canonicalization checks, +//! process spawning via `std::process::Command`, and reading pipe output. +//! All pure logic (pattern matching, output truncation, count parsing) lives in +//! `path_guard.rs`. + +use super::Error; +use std::path::{Path, PathBuf}; + +/// Validate that `working_dir` is an absolute path that exists on disk and +/// lies inside the project's `.huskies/worktrees/` or `.huskies/merge_workspace/` +/// directory. Returns the canonicalized path on success. +/// +/// # Errors +/// - [`Error::Validation`] if the path is relative or does not exist. +/// - [`Error::PathNotAllowed`] if the path is outside the allowed roots. +/// - [`Error::Io`] if canonicalization fails. 
+pub fn validate_working_dir(working_dir: &str, project_root: &Path) -> Result { + let wd = PathBuf::from(working_dir); + + if !wd.is_absolute() { + return Err(Error::Validation( + "working_dir must be an absolute path".to_string(), + )); + } + if !wd.exists() { + return Err(Error::Validation(format!( + "working_dir does not exist: {working_dir}" + ))); + } + + let worktrees_root = project_root.join(".huskies").join("worktrees"); + + let canonical_wd = wd + .canonicalize() + .map_err(|e| Error::Io(format!("Cannot canonicalize working_dir: {e}")))?; + + let canonical_wt = if worktrees_root.exists() { + worktrees_root + .canonicalize() + .map_err(|e| Error::Io(format!("Cannot canonicalize worktrees root: {e}")))? + } else { + return Err(Error::PathNotAllowed( + "No worktrees directory found in project".to_string(), + )); + }; + + // Also allow the merge workspace so mergemaster can fix conflicts. + let merge_workspace = project_root.join(".huskies").join("merge_workspace"); + let canonical_mw = merge_workspace.canonicalize().unwrap_or_default(); + + let in_worktrees = canonical_wd.starts_with(&canonical_wt); + let in_merge_ws = + !canonical_mw.as_os_str().is_empty() && canonical_wd.starts_with(&canonical_mw); + + if !in_worktrees && !in_merge_ws { + return Err(Error::PathNotAllowed(format!( + "working_dir must be inside .huskies/worktrees/ or .huskies/merge_workspace/. Got: {working_dir}" + ))); + } + + Ok(canonical_wd) +} diff --git a/server/src/service/shell/mod.rs b/server/src/service/shell/mod.rs new file mode 100644 index 00000000..50b286b1 --- /dev/null +++ b/server/src/service/shell/mod.rs @@ -0,0 +1,90 @@ +//! Shell service — command safety, path sandboxing, and output helpers. +//! +//! Extracted from `http/mcp/shell_tools.rs` following the conventions in +//! `docs/architecture/service-modules.md`: +//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration +//! - `io.rs` — the ONLY place that performs side effects (filesystem checks) +//! 
- `path_guard.rs` — pure command-safety checks and output utilities + +pub mod io; +pub mod path_guard; + +#[allow(unused_imports)] +pub use path_guard::{ + BLOCKED_BINARIES, BLOCKED_PATTERNS, extract_count, is_dangerous, parse_test_counts, + truncate_output, +}; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::shell` functions. +/// +/// HTTP handlers map these to status codes: +/// - [`Error::DangerousCommand`] → 400 Bad Request +/// - [`Error::PathNotAllowed`] → 400 Bad Request (sandbox violation) +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The command matches a blocked pattern or binary. + DangerousCommand(String), + /// The working directory is outside the allowed sandbox. + PathNotAllowed(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// A filesystem or process I/O operation failed. + Io(String), + /// An upstream dependency (e.g. the shell) returned an unexpected error. 
+ UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::DangerousCommand(msg) => write!(f, "Dangerous command: {msg}"), + Self::PathNotAllowed(msg) => write!(f, "Path not allowed: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_dangerous_command() { + let e = Error::DangerousCommand("rm -rf / detected".to_string()); + assert!(e.to_string().contains("Dangerous command")); + } + + #[test] + fn error_display_path_not_allowed() { + let e = Error::PathNotAllowed("outside worktrees".to_string()); + assert!(e.to_string().contains("Path not allowed")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("must be absolute".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("disk full".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { + let e = Error::UpstreamFailure("bash not found".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } +} diff --git a/server/src/service/shell/path_guard.rs b/server/src/service/shell/path_guard.rs new file mode 100644 index 00000000..33cdc596 --- /dev/null +++ b/server/src/service/shell/path_guard.rs @@ -0,0 +1,223 @@ +//! Pure command safety and output helpers for `service::shell`. +//! +//! All functions here are free of side effects: no filesystem access, +//! no process spawning, no I/O of any kind. They may be tested without +//! temporary directories or an async runtime. 
+ +/// Patterns that are unconditionally blocked regardless of context. +pub static BLOCKED_PATTERNS: &[&str] = &[ + "rm -rf /", + "rm -fr /", + "rm -rf /*", + "rm -fr /*", + "rm --no-preserve-root", + ":(){ :|:& };:", + "> /dev/sda", + "dd if=/dev", +]; + +/// Binaries that are unconditionally blocked. +pub static BLOCKED_BINARIES: &[&str] = &[ + "sudo", "su", "shutdown", "reboot", "halt", "poweroff", "mkfs", +]; + +/// Returns an error message if the command matches a blocked pattern or binary, +/// or `None` if the command is safe to run. +/// +/// Checks are purely string-based — no I/O or process inspection. +pub fn is_dangerous(command: &str) -> Option { + let trimmed = command.trim(); + + for &pattern in BLOCKED_PATTERNS { + if trimmed.contains(pattern) { + return Some(format!( + "Command blocked: dangerous pattern '{pattern}' detected" + )); + } + } + + if let Some(first_token) = trimmed.split_whitespace().next() { + let binary = std::path::Path::new(first_token) + .file_name() + .and_then(|n| n.to_str()) + .unwrap_or(first_token); + if BLOCKED_BINARIES.contains(&binary) { + return Some(format!("Command blocked: '{binary}' is not permitted")); + } + } + + None +} + +/// Truncate `output` to at most `max_lines` lines, keeping the tail. +/// +/// If the output fits within `max_lines`, it is returned unchanged. +/// Otherwise a `"[... N lines omitted ...]"` header is prepended to the +/// last `max_lines` lines so callers still see the most recent output. +pub fn truncate_output(output: &str, max_lines: usize) -> String { + let lines: Vec<&str> = output.lines().collect(); + if lines.len() <= max_lines { + return output.to_string(); + } + let omitted = lines.len() - max_lines; + let tail = lines[lines.len() - max_lines..].join("\n"); + format!("[... {omitted} lines omitted ...]\n{tail}") +} + +/// Parse cumulative passed/failed counts from `cargo test` output. 
+/// +/// Scans each line for `"test result:"` summaries and accumulates the +/// `passed` and `failed` counts across all crates in the output. +pub fn parse_test_counts(output: &str) -> (u64, u64) { + let mut total_passed = 0u64; + let mut total_failed = 0u64; + for line in output.lines() { + if line.contains("test result:") { + if let Some(p) = extract_count(line, "passed") { + total_passed += p; + } + if let Some(f) = extract_count(line, "failed") { + total_failed += f; + } + } + } + (total_passed, total_failed) +} + +/// Extract the integer immediately before `label` in `line`. +/// +/// For example, `extract_count("5 passed; 0 failed", "passed")` returns +/// `Some(5)`. Returns `None` if no digit sequence precedes `label`. +pub fn extract_count(line: &str, label: &str) -> Option { + let pos = line.find(label)?; + let before = line[..pos].trim_end(); + let num_str: String = before + .chars() + .rev() + .take_while(|c| c.is_ascii_digit()) + .collect(); + if num_str.is_empty() { + return None; + } + let num_str: String = num_str.chars().rev().collect(); + num_str.parse().ok() +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── is_dangerous ────────────────────────────────────────────────────────── + + #[test] + fn is_dangerous_blocks_rm_rf_root() { + assert!(is_dangerous("rm -rf /").is_some()); + assert!(is_dangerous(" rm -rf / ").is_some()); + } + + #[test] + fn is_dangerous_blocks_rm_fr_root() { + assert!(is_dangerous("rm -fr /").is_some()); + } + + #[test] + fn is_dangerous_blocks_rm_rf_star() { + assert!(is_dangerous("rm -rf /*").is_some()); + assert!(is_dangerous("rm -fr /*").is_some()); + } + + #[test] + fn is_dangerous_blocks_sudo() { + assert!(is_dangerous("sudo ls").is_some()); + } + + #[test] + fn is_dangerous_blocks_shutdown() { + assert!(is_dangerous("shutdown -h now").is_some()); + } + + #[test] + fn is_dangerous_blocks_mkfs() { + assert!(is_dangerous("mkfs 
/dev/sda1").is_some()); + } + + #[test] + fn is_dangerous_blocks_fork_bomb() { + assert!(is_dangerous(":(){ :|:& };:").is_some()); + } + + #[test] + fn is_dangerous_allows_safe_commands() { + assert!(is_dangerous("cargo build").is_none()); + assert!(is_dangerous("npm test").is_none()); + assert!(is_dangerous("git status").is_none()); + assert!(is_dangerous("ls -la").is_none()); + assert!(is_dangerous("rm -rf target/").is_none()); + } + + // ── truncate_output ─────────────────────────────────────────────────────── + + #[test] + fn truncate_short_text_unchanged() { + let text = "line1\nline2\nline3"; + assert_eq!(truncate_output(text, 10), text); + } + + #[test] + fn truncate_long_text_keeps_tail() { + let lines: Vec = (1..=200).map(|i| format!("line {i}")).collect(); + let text = lines.join("\n"); + let result = truncate_output(&text, 50); + assert!( + result.contains("line 200"), + "should keep last line: {result}" + ); + assert!( + result.contains("omitted"), + "should note omitted lines: {result}" + ); + assert!( + !result.contains("line 1\n"), + "should not keep first line: {result}" + ); + } + + #[test] + fn truncate_exact_max_unchanged() { + let text = "a\nb\nc"; + assert_eq!(truncate_output(text, 3), text); + } + + // ── parse_test_counts ───────────────────────────────────────────────────── + + #[test] + fn parse_counts_extracts_passed_and_failed() { + let output = "test result: ok. 5 passed; 0 failed; 0 ignored\ntest result: FAILED. 
2 passed; 3 failed;"; + let (passed, failed) = parse_test_counts(output); + assert_eq!(passed, 7); + assert_eq!(failed, 3); + } + + #[test] + fn parse_counts_no_results_returns_zeros() { + let (passed, failed) = parse_test_counts("no test output here"); + assert_eq!(passed, 0); + assert_eq!(failed, 0); + } + + // ── extract_count ───────────────────────────────────────────────────────── + + #[test] + fn extract_count_finds_number_before_label() { + assert_eq!(extract_count("5 passed; 0 failed", "passed"), Some(5)); + assert_eq!(extract_count("0 failed", "failed"), Some(0)); + assert_eq!(extract_count("no number here passed", "passed"), None); + } + + #[test] + fn extract_count_returns_none_for_missing_label() { + assert_eq!(extract_count("5 passed", "failed"), None); + } +} diff --git a/server/src/service/story/criteria.rs b/server/src/service/story/criteria.rs new file mode 100644 index 00000000..90407cbd --- /dev/null +++ b/server/src/service/story/criteria.rs @@ -0,0 +1,129 @@ +//! Pure criterion helpers for `service::story`. +//! +//! These functions parse, validate, and manipulate story acceptance criteria +//! without performing any I/O. + +use crate::workflow::{TestCaseResult, TestStatus}; +use serde_json::Value; + +/// Parse an optional JSON array of test-case objects into a `Vec`. +/// +/// Each object must have `"name"` (string) and `"status"` (`"pass"` or `"fail"`) +/// fields. The optional `"details"` field is preserved when present. +/// +/// Returns an empty vector for `None` or `Value::Null` inputs. +/// +/// # Errors +/// Returns `Err(String)` if the value is not an array, or if any item is +/// missing a required field or has an unrecognised status string. 
+pub fn parse_test_cases(value: Option<&Value>) -> Result, String> { + let arr = match value { + Some(Value::Array(a)) => a, + Some(Value::Null) | None => return Ok(Vec::new()), + _ => return Err("Expected array for test cases".to_string()), + }; + + arr.iter() + .map(|item| { + let name = item + .get("name") + .and_then(|v| v.as_str()) + .ok_or("Test case missing 'name'")? + .to_string(); + let status_str = item + .get("status") + .and_then(|v| v.as_str()) + .ok_or("Test case missing 'status'")?; + let status = match status_str { + "pass" => TestStatus::Pass, + "fail" => TestStatus::Fail, + other => { + return Err(format!( + "Invalid test status '{other}'. Use 'pass' or 'fail'." + )); + } + }; + let details = item + .get("details") + .and_then(|v| v.as_str()) + .map(String::from); + Ok(TestCaseResult { + name, + status, + details, + }) + }) + .collect() +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use serde_json::json; + + #[test] + fn parse_none_returns_empty() { + let result = parse_test_cases(None).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn parse_null_value_returns_empty() { + let null_val = json!(null); + let result = parse_test_cases(Some(&null_val)).unwrap(); + assert!(result.is_empty()); + } + + #[test] + fn parse_valid_cases() { + let input = json!([ + {"name": "test1", "status": "pass"}, + {"name": "test2", "status": "fail", "details": "assertion failed"} + ]); + let result = parse_test_cases(Some(&input)).unwrap(); + assert_eq!(result.len(), 2); + assert_eq!(result[0].name, "test1"); + assert_eq!(result[0].status, TestStatus::Pass); + assert_eq!(result[1].status, TestStatus::Fail); + assert_eq!(result[1].details, Some("assertion failed".to_string())); + } + + #[test] + fn parse_invalid_status_returns_error() { + let input = json!([{"name": "t", "status": "maybe"}]); + assert!(parse_test_cases(Some(&input)).is_err()); + } + + #[test] + fn 
parse_non_array_returns_error() { + let obj = json!({"invalid": "input"}); + let result = parse_test_cases(Some(&obj)); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("Expected array")); + } + + #[test] + fn parse_missing_name_returns_error() { + let input = json!([{"status": "pass"}]); + let result = parse_test_cases(Some(&input)); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("name")); + } + + #[test] + fn parse_missing_status_returns_error() { + let input = json!([{"name": "test1"}]); + let result = parse_test_cases(Some(&input)); + assert!(result.is_err()); + assert!(result.unwrap_err().contains("status")); + } + + #[test] + fn parse_details_is_optional() { + let input = json!([{"name": "no_details", "status": "pass"}]); + let result = parse_test_cases(Some(&input)).unwrap(); + assert_eq!(result[0].details, None); + } +} diff --git a/server/src/service/story/front_matter.rs b/server/src/service/story/front_matter.rs new file mode 100644 index 00000000..be0ee24c --- /dev/null +++ b/server/src/service/story/front_matter.rs @@ -0,0 +1,77 @@ +//! Pure front-matter helpers for `service::story`. +//! +//! These functions validate and inspect story front-matter field values +//! without performing any I/O. Parsing is delegated to `crate::io::story_metadata`. + +#[allow(dead_code)] +/// Return `true` if `stage` is a recognised pipeline stage directory name. +/// +/// Valid stage names match the `.huskies/work/N_name/` directory scheme. +pub fn is_valid_stage(stage: &str) -> bool { + matches!( + stage, + "1_backlog" | "2_current" | "3_qa" | "4_merge" | "5_done" | "6_archived" + ) +} + +#[allow(dead_code)] +/// Map a human-readable stage alias (e.g. `"backlog"`) to its directory name +/// (e.g. `"1_backlog"`). Returns `None` for unrecognised aliases. 
+pub fn stage_alias_to_dir(alias: &str) -> Option<&'static str> { + match alias { + "backlog" | "1_backlog" => Some("1_backlog"), + "current" | "2_current" => Some("2_current"), + "qa" | "3_qa" => Some("3_qa"), + "merge" | "4_merge" => Some("4_merge"), + "done" | "5_done" => Some("5_done"), + "archived" | "6_archived" => Some("6_archived"), + _ => None, + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_valid_stage_accepts_all_known_stages() { + assert!(is_valid_stage("1_backlog")); + assert!(is_valid_stage("2_current")); + assert!(is_valid_stage("3_qa")); + assert!(is_valid_stage("4_merge")); + assert!(is_valid_stage("5_done")); + assert!(is_valid_stage("6_archived")); + } + + #[test] + fn is_valid_stage_rejects_unknown() { + assert!(!is_valid_stage("current")); + assert!(!is_valid_stage("backlog")); + assert!(!is_valid_stage("7_future")); + assert!(!is_valid_stage("")); + } + + #[test] + fn stage_alias_maps_short_names() { + assert_eq!(stage_alias_to_dir("backlog"), Some("1_backlog")); + assert_eq!(stage_alias_to_dir("current"), Some("2_current")); + assert_eq!(stage_alias_to_dir("qa"), Some("3_qa")); + assert_eq!(stage_alias_to_dir("merge"), Some("4_merge")); + assert_eq!(stage_alias_to_dir("done"), Some("5_done")); + assert_eq!(stage_alias_to_dir("archived"), Some("6_archived")); + } + + #[test] + fn stage_alias_maps_full_dir_names() { + assert_eq!(stage_alias_to_dir("1_backlog"), Some("1_backlog")); + assert_eq!(stage_alias_to_dir("6_archived"), Some("6_archived")); + } + + #[test] + fn stage_alias_returns_none_for_unknown() { + assert_eq!(stage_alias_to_dir("unknown"), None); + assert_eq!(stage_alias_to_dir(""), None); + } +} diff --git a/server/src/service/story/io.rs b/server/src/service/story/io.rs new file mode 100644 index 00000000..e52fb75b --- /dev/null +++ b/server/src/service/story/io.rs @@ -0,0 +1,7 @@ +//! 
// Story I/O — the ONLY place in `service::story/` that may perform side effects.
//
// Currently, the bulk of story file I/O is handled by `crate::http::workflow`
// (story file creation, criterion editing, stage moves) and
// `crate::io::story_metadata` (front-matter parsing, merge-failure writes).
// This file is the designated home for any future story-specific I/O helpers
// that are extracted from those modules.

// ── file: server/src/service/story/lifecycle.rs (new file in this patch) ─────
// Pure story-lifecycle helpers for `service::story`.
//
// These functions reason about story IDs and dependencies without performing
// any I/O. They inform routing decisions in `mod.rs` and the MCP adapter.

#[allow(dead_code)]
/// Extract the numeric prefix from a story ID (e.g. `"42"` from `"42_story_foo"`).
///
/// Returns `None` if the ID has no leading digit sequence.
pub fn story_number(story_id: &str) -> Option<&str> {
    // The candidate prefix is everything before the first '_' (or the whole ID
    // when no '_' is present); it qualifies only when it is non-empty and made
    // entirely of ASCII digits — byte-wise check is equivalent for ASCII digits.
    match story_id.split('_').next() {
        Some(prefix) if !prefix.is_empty() && prefix.bytes().all(|b| b.is_ascii_digit()) => {
            Some(prefix)
        }
        _ => None,
    }
}

#[allow(dead_code)]
/// Return `true` if `story_id` has a valid `{digits}_` prefix format.
///
/// Valid: `"42_story_foo"`, `"1_bug_bar"`.
/// Invalid: `"story_without_number"`, `""`, `"abc_story"`.
pub fn has_valid_id_prefix(story_id: &str) -> bool {
    story_number(story_id).is_some()
}

// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn story_number_extracts_prefix() {
        assert_eq!(story_number("42_story_foo"), Some("42"));
        assert_eq!(story_number("1_bug_bar"), Some("1"));
        assert_eq!(story_number("100_refactor_baz"), Some("100"));
    }

    #[test]
    fn story_number_returns_none_for_no_numeric_prefix() {
        assert_eq!(story_number("story_without_number"), None);
        assert_eq!(story_number("abc_story"), None);
        assert_eq!(story_number(""), None);
    }

    #[test]
    fn has_valid_id_prefix_returns_true_for_valid_ids() {
        assert!(has_valid_id_prefix("42_story_foo"));
        assert!(has_valid_id_prefix("1_bug_bar"));
    }

    #[test]
    fn has_valid_id_prefix_returns_false_for_invalid_ids() {
        assert!(!has_valid_id_prefix("story_no_number"));
        assert!(!has_valid_id_prefix(""));
    }
}

// ── file: server/src/service/story/mod.rs (new file in this patch) ────────────
// Story service — domain logic for creating, updating, and managing pipeline
// work items.
//
// Extracted from `http/mcp/story_tools.rs` following the conventions in
// `docs/architecture/service-modules.md`:
// - `mod.rs` — public API, typed `Error`, orchestration
// - `io.rs` — the ONLY place that performs side effects (story file I/O)
// - `criteria.rs` — pure criterion parsing and validation
// - `lifecycle.rs` — pure story-ID and lifecycle helpers
// - `front_matter.rs` — pure front-matter field validation
// - `validation.rs` — pure story content validation
//
// # State-Mutation Inventory (Axis 3)
//
// The story service orchestrates the following state writes across all
// lifecycle operations:
//
// ## CRDT Writes
- `crdt_state::write_item` — on story/bug/spike/refactor creation (via `http::workflow`) +//! - `crdt_state::evict_item` — on story deletion or purge (tombstone op) +//! - `crdt_state::move_item` — on stage transitions (backlog → current → qa → merge → done → archived) +//! +//! ## Filesystem Shadow Writes +//! - `.huskies/work/1_backlog/.md` — written on story/bug/spike/refactor creation +//! - `.huskies/work//.md` — moved between stages on lifecycle transitions +//! - `.huskies/work//.md` front matter — updated by `update_story`, `check_criterion`, +//! `edit_criterion`, `add_criterion`, `remove_criterion`, `accept_story`, `reject_qa` +//! - `.huskies/bugs/archive/.md` — written on `close_bug` +//! - `.huskies/work/4_merge/.md` merge_failure field — written by `report_merge_failure` +//! +//! ## Database Shadow Table +//! - `pipeline_items` row — updated on stage transitions and item creation/deletion +//! - `content_store` entry — updated on story content changes, deleted on purge/delete + +pub mod criteria; +pub mod front_matter; +pub mod io; +pub mod lifecycle; +pub mod validation; + +pub use criteria::parse_test_cases; +#[allow(unused_imports)] +pub use front_matter::{is_valid_stage, stage_alias_to_dir}; +#[allow(unused_imports)] +pub use lifecycle::{has_valid_id_prefix, story_number}; +#[allow(unused_imports)] +pub use validation::{is_valid_story_name, validate_criterion_index}; + +// ── Error type ──────────────────────────────────────────────────────────────── + +/// Typed errors returned by `service::story` functions. +/// +/// HTTP handlers map these to status codes: +/// - [`Error::NotFound`] → 404 Not Found +/// - [`Error::Validation`] → 400 Bad Request +/// - [`Error::Conflict`] → 409 Conflict +/// - [`Error::Io`] → 500 Internal Server Error +/// - [`Error::UpstreamFailure`] → 500 Internal Server Error +#[allow(dead_code)] +#[derive(Debug)] +pub enum Error { + /// The requested story or work item was not found. 
+ NotFound(String), + /// A required argument is missing or has an invalid value. + Validation(String), + /// The operation cannot proceed due to a conflicting state (e.g. unmerged branch). + Conflict(String), + /// A filesystem read or write operation failed. + Io(String), + /// An upstream dependency (agents, CRDT, git) returned an unexpected error. + UpstreamFailure(String), +} + +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound(msg) => write!(f, "Not found: {msg}"), + Self::Validation(msg) => write!(f, "Validation error: {msg}"), + Self::Conflict(msg) => write!(f, "Conflict: {msg}"), + Self::Io(msg) => write!(f, "I/O error: {msg}"), + Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"), + } + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn error_display_not_found() { + let e = Error::NotFound("story 42 not found".to_string()); + assert!(e.to_string().contains("Not found")); + } + + #[test] + fn error_display_validation() { + let e = Error::Validation("name is required".to_string()); + assert!(e.to_string().contains("Validation error")); + } + + #[test] + fn error_display_conflict() { + let e = Error::Conflict("unmerged changes".to_string()); + assert!(e.to_string().contains("Conflict")); + } + + #[test] + fn error_display_io() { + let e = Error::Io("story file write failed".to_string()); + assert!(e.to_string().contains("I/O error")); + } + + #[test] + fn error_display_upstream_failure() { + let e = Error::UpstreamFailure("CRDT eviction failed".to_string()); + assert!(e.to_string().contains("Upstream failure")); + } +} diff --git a/server/src/service/story/validation.rs b/server/src/service/story/validation.rs new file mode 100644 index 00000000..d36fbcba --- /dev/null +++ b/server/src/service/story/validation.rs @@ -0,0 +1,70 @@ +//! 
Pure story validation helpers for `service::story`. +//! +//! These functions validate story content and metadata without performing +//! any I/O. + +#[allow(dead_code)] +/// Return `true` if `name` is a valid story name. +/// +/// A valid name must contain at least one alphanumeric character (letters or +/// digits). Pure punctuation, symbols, or blank names are rejected. +pub fn is_valid_story_name(name: &str) -> bool { + name.chars().any(|c| c.is_alphanumeric()) +} + +#[allow(dead_code)] +/// Validate that `criterion_index` is within bounds for `total_criteria`. +/// +/// Returns `Ok(())` if valid, or `Err(message)` if out of range. +pub fn validate_criterion_index( + criterion_index: usize, + total_criteria: usize, +) -> Result<(), String> { + if criterion_index >= total_criteria { + Err(format!( + "criterion_index {criterion_index} is out of range — story has {total_criteria} criteria (0-based)" + )) + } else { + Ok(()) + } +} + +// ── Tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_valid_story_name_accepts_alphanumeric() { + assert!(is_valid_story_name("My Story")); + assert!(is_valid_story_name("story123")); + assert!(is_valid_story_name("Test")); + } + + #[test] + fn is_valid_story_name_rejects_symbols_only() { + assert!(!is_valid_story_name("!!!")); + assert!(!is_valid_story_name("---")); + assert!(!is_valid_story_name("")); + } + + #[test] + fn validate_criterion_index_in_range() { + assert!(validate_criterion_index(0, 3).is_ok()); + assert!(validate_criterion_index(2, 3).is_ok()); + } + + #[test] + fn validate_criterion_index_out_of_range() { + let e = validate_criterion_index(3, 3).unwrap_err(); + assert!(e.contains("out of range")); + assert!(e.contains("3")); + } + + #[test] + fn validate_criterion_index_zero_criteria() { + let e = validate_criterion_index(0, 0).unwrap_err(); + assert!(e.contains("out of range")); + } +}