Files
storkit/server/src/http/mcp.rs

4736 lines
185 KiB
Rust

use crate::agents::{close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived, move_story_to_merge, move_story_to_qa, reject_story_from_qa, AgentStatus, PipelineStage};
use crate::config::ProjectConfig;
use crate::log_buffer;
use crate::slog;
use crate::slog_warn;
use crate::http::context::AppContext;
use crate::http::settings::get_editor_command_from_store;
use crate::http::workflow::{
add_criterion_to_file, check_criterion_in_file, create_bug_file, create_refactor_file,
create_spike_file, create_story_file, list_bug_files, list_refactor_files,
load_pipeline_state, load_upcoming_stories, update_story_in_file, validate_story_dirs,
};
use crate::worktree;
use crate::io::story_metadata::{parse_front_matter, parse_unchecked_todos, write_merge_failure};
use crate::workflow::{evaluate_acceptance_with_coverage, TestCaseResult, TestStatus};
use poem::handler;
use poem::http::StatusCode;
use poem::web::Data;
use poem::{Body, Request, Response};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::fs;
use std::sync::Arc;
/// Returns true when the request's Accept header includes `text/event-stream`.
/// A missing Accept header counts as "no SSE requested".
fn wants_sse(req: &Request) -> bool {
    matches!(req.header("accept"), Some(accept) if accept.contains("text/event-stream"))
}
// ── JSON-RPC structs ──────────────────────────────────────────────
/// Incoming JSON-RPC 2.0 request envelope, deserialized from the POST body.
#[derive(Deserialize)]
struct JsonRpcRequest {
    // Protocol version field; the handler rejects anything but "2.0".
    jsonrpc: String,
    // Request id; absent (or null) marks a notification.
    id: Option<Value>,
    // Method name, e.g. "initialize", "tools/list", "tools/call".
    method: String,
    // Method parameters; defaults to Value::Null when the field is omitted.
    #[serde(default)]
    params: Value,
}
/// Outgoing JSON-RPC 2.0 response envelope.
/// Exactly one of `result` / `error` is populated (see the ctor methods below);
/// the unused one is skipped during serialization per the JSON-RPC spec.
#[derive(Serialize)]
struct JsonRpcResponse {
    // Always "2.0"; static str avoids an allocation per response.
    jsonrpc: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    id: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<JsonRpcError>,
}
/// JSON-RPC 2.0 error object (the `error` member of a response).
#[derive(Serialize)]
struct JsonRpcError {
    // Standard JSON-RPC codes are used: -32700 parse, -32600 invalid request,
    // -32601 method not found, -32602 invalid params.
    code: i64,
    message: String,
    // Optional extra payload; never set by this module today.
    #[serde(skip_serializing_if = "Option::is_none")]
    data: Option<Value>,
}
impl JsonRpcResponse {
fn success(id: Option<Value>, result: Value) -> Self {
Self {
jsonrpc: "2.0",
id,
result: Some(result),
error: None,
}
}
fn error(id: Option<Value>, code: i64, message: String) -> Self {
Self {
jsonrpc: "2.0",
id,
result: None,
error: Some(JsonRpcError {
code,
message,
data: None,
}),
}
}
}
// ── Poem handlers ─────────────────────────────────────────────────
/// GET on the MCP endpoint — the protocol only speaks JSON-RPC over POST,
/// so plain GETs are answered with 405 Method Not Allowed.
#[handler]
pub async fn mcp_get_handler() -> Response {
    Response::builder()
        .status(StatusCode::METHOD_NOT_ALLOWED)
        .body(Body::empty())
}
/// POST entry point for the MCP endpoint: parses the JSON-RPC envelope,
/// validates it, and dispatches by method.
///
/// Responses are plain JSON unless the client's Accept header asks for
/// `text/event-stream`, in which case the single response is wrapped as one
/// SSE `data:` frame (and `get_agent_output` gets a true streaming path).
#[handler]
pub async fn mcp_post_handler(req: &Request, body: Body, ctx: Data<&Arc<AppContext>>) -> Response {
    // Validate Content-Type. An absent header is tolerated; a present header
    // that is not application/json is rejected up front.
    let content_type = req.header("content-type").unwrap_or("");
    if !content_type.is_empty() && !content_type.contains("application/json") {
        return json_rpc_error_response(
            None,
            -32700,
            "Unsupported Content-Type; expected application/json".into(),
        );
    }
    // Read the full body, then parse it as a JSON-RPC request.
    let bytes = match body.into_bytes().await {
        Ok(b) => b,
        Err(_) => return json_rpc_error_response(None, -32700, "Parse error".into()),
    };
    let rpc: JsonRpcRequest = match serde_json::from_slice(&bytes) {
        Ok(r) => r,
        Err(_) => return json_rpc_error_response(None, -32700, "Parse error".into()),
    };
    if rpc.jsonrpc != "2.0" {
        return json_rpc_error_response(rpc.id, -32600, "Invalid JSON-RPC version".into());
    }
    // Notifications (no id) — accept "notifications/*" silently with 202;
    // any other id-less request is an invalid request.
    if rpc.id.is_none() || rpc.id.as_ref() == Some(&Value::Null) {
        if rpc.method.starts_with("notifications/") {
            return Response::builder()
                .status(StatusCode::ACCEPTED)
                .body(Body::empty());
        }
        return json_rpc_error_response(None, -32600, "Missing id".into());
    }
    let sse = wants_sse(req);
    // Streaming agent output over SSE: get_agent_output bypasses the normal
    // dispatch and emits incremental notifications instead of one response.
    if sse && rpc.method == "tools/call" {
        let tool_name = rpc
            .params
            .get("name")
            .and_then(|v| v.as_str())
            .unwrap_or("");
        if tool_name == "get_agent_output" {
            return handle_agent_output_sse(rpc.id, &rpc.params, &ctx);
        }
    }
    // Regular single-response dispatch.
    let resp = match rpc.method.as_str() {
        "initialize" => handle_initialize(rpc.id, &rpc.params),
        "tools/list" => handle_tools_list(rpc.id),
        "tools/call" => handle_tools_call(rpc.id, &rpc.params, &ctx).await,
        _ => JsonRpcResponse::error(rpc.id, -32601, format!("Unknown method: {}", rpc.method)),
    };
    if sse {
        to_sse_response(resp)
    } else {
        to_json_response(resp)
    }
}
fn json_rpc_error_response(id: Option<Value>, code: i64, message: String) -> Response {
to_json_response(JsonRpcResponse::error(id, code, message))
}
/// Serialize a JSON-RPC response as a 200 OK `application/json` body.
fn to_json_response(resp: JsonRpcResponse) -> Response {
    let payload = serde_json::to_vec(&resp).unwrap_or_default();
    Response::builder()
        .header("Content-Type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(payload))
}
/// Wrap a single JSON-RPC response as one SSE `data:` frame.
fn to_sse_response(resp: JsonRpcResponse) -> Response {
    let json = serde_json::to_string(&resp).unwrap_or_default();
    let frame = format!("data: {json}\n\n");
    Response::builder()
        .status(StatusCode::OK)
        .header("Cache-Control", "no-cache")
        .header("Content-Type", "text/event-stream")
        .body(Body::from_string(frame))
}
/// Stream agent events as SSE — each event is a separate JSON-RPC notification,
/// followed by a final JSON-RPC response with the matching request id.
///
/// The stream ends when the agent emits a terminal event (Done/Error), the
/// broadcast channel closes, or the overall timeout budget is exhausted.
fn handle_agent_output_sse(
    id: Option<Value>,
    params: &Value,
    ctx: &AppContext,
) -> Response {
    let args = params.get("arguments").cloned().unwrap_or(json!({}));
    // story_id + agent_name identify the agent's event channel; both required.
    let story_id = match args.get("story_id").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return to_sse_response(JsonRpcResponse::error(
            id,
            -32602,
            "Missing required argument: story_id".into(),
        )),
    };
    let agent_name = match args.get("agent_name").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return to_sse_response(JsonRpcResponse::error(
            id,
            -32602,
            "Missing required argument: agent_name".into(),
        )),
    };
    // Overall streaming budget: default 10s, hard cap 30s.
    let timeout_ms = args
        .get("timeout_ms")
        .and_then(|v| v.as_u64())
        .unwrap_or(10000)
        .min(30000);
    // Subscription failure (e.g. no such agent) is reported as a tool-level
    // error payload (isError: true), not a JSON-RPC protocol error.
    let mut rx = match ctx.agents.subscribe(&story_id, &agent_name) {
        Ok(rx) => rx,
        Err(e) => return to_sse_response(JsonRpcResponse::success(
            id,
            json!({ "content": [{"type": "text", "text": e}], "isError": true }),
        )),
    };
    let final_id = id;
    let stream = async_stream::stream! {
        let deadline = tokio::time::Instant::now()
            + std::time::Duration::from_millis(timeout_ms);
        let mut done = false;
        loop {
            // Recompute the remaining budget each iteration so the deadline
            // applies to the whole stream, not per event.
            let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
            if remaining.is_zero() {
                break;
            }
            match tokio::time::timeout(remaining, rx.recv()).await {
                Ok(Ok(event)) => {
                    // Done/Error are terminal — forward them, then stop.
                    let is_terminal = matches!(
                        &event,
                        crate::agents::AgentEvent::Done { .. }
                            | crate::agents::AgentEvent::Error { .. }
                    );
                    // Send each event as a JSON-RPC notification (no id)
                    if let Ok(event_json) = serde_json::to_value(&event) {
                        let notification = json!({
                            "jsonrpc": "2.0",
                            "method": "notifications/tools/progress",
                            "params": { "event": event_json }
                        });
                        if let Ok(s) = serde_json::to_string(&notification) {
                            yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
                        }
                    }
                    if is_terminal {
                        done = true;
                        break;
                    }
                }
                // Slow consumer: the broadcast buffer wrapped. Surface the gap
                // as a warning notification and keep streaming.
                Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(n))) => {
                    let notification = json!({
                        "jsonrpc": "2.0",
                        "method": "notifications/tools/progress",
                        "params": { "event": {"type": "warning", "message": format!("Skipped {n} events")} }
                    });
                    if let Ok(s) = serde_json::to_string(&notification) {
                        yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
                    }
                }
                // Sender dropped — treat like a terminal event.
                Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => {
                    done = true;
                    break;
                }
                Err(_) => break, // timeout
            }
        }
        // Final response with the request id
        let final_resp = JsonRpcResponse::success(
            final_id,
            json!({
                "content": [{
                    "type": "text",
                    "text": if done { "Agent stream ended." } else { "Stream timed out; call again to continue." }
                }]
            }),
        );
        if let Ok(s) = serde_json::to_string(&final_resp) {
            yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
        }
    };
    Response::builder()
        .status(StatusCode::OK)
        .header("Content-Type", "text/event-stream")
        .header("Cache-Control", "no-cache")
        .body(Body::from_bytes_stream(
            futures::StreamExt::map(stream, |r| r.map(bytes::Bytes::from)),
        ))
}
// ── MCP protocol handlers ─────────────────────────────────────────
/// MCP `initialize`: advertise the protocol version, server capabilities
/// (tools only) and server identity.
///
/// The client-requested protocolVersion is read but not negotiated — the
/// server always answers with "2025-03-26".
fn handle_initialize(id: Option<Value>, params: &Value) -> JsonRpcResponse {
    let _protocol_version = params
        .get("protocolVersion")
        .and_then(|v| v.as_str())
        .unwrap_or("2025-03-26");
    let result = json!({
        "protocolVersion": "2025-03-26",
        "capabilities": { "tools": {} },
        "serverInfo": { "name": "story-kit", "version": "1.0.0" }
    });
    JsonRpcResponse::success(id, result)
}
/// MCP `tools/list`: return the static catalogue of tools this server exposes.
/// Each entry carries a JSON-Schema `inputSchema` describing its arguments.
/// The names here must match the dispatch arms in `handle_tools_call`.
fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
    JsonRpcResponse::success(
        id,
        json!({
            "tools": [
                // ── Story workflow ──
                {
                    "name": "create_story",
                    "description": "Create a new story file with front matter in upcoming/. Returns the story_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string", "description": "Human-readable story name" },
                            "user_story": { "type": "string", "description": "Optional user story text (As a..., I want..., so that...)" },
                            "acceptance_criteria": { "type": "array", "items": { "type": "string" }, "description": "Optional list of acceptance criteria" },
                            // NOTE(review): the implementation currently ignores this flag
                            // (Spike 61 — the filesystem watcher auto-commits instead).
                            "commit": { "type": "boolean", "description": "If true, git-add and git-commit the new story file to the current branch" }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "validate_stories",
                    "description": "Validate front matter on all current and upcoming story files.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "list_upcoming",
                    "description": "List all upcoming stories with their names and any parsing errors.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "get_story_todos",
                    "description": "Get unchecked acceptance criteria (todos) for a story file in current/.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "record_tests",
                    "description": "Record test results for a story. Only one failing test at a time is allowed.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" },
                            "unit": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": { "type": "string" },
                                        "status": { "type": "string", "enum": ["pass", "fail"] },
                                        "details": { "type": "string" }
                                    },
                                    "required": ["name", "status"]
                                },
                                "description": "Unit test results"
                            },
                            "integration": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": { "type": "string" },
                                        "status": { "type": "string", "enum": ["pass", "fail"] },
                                        "details": { "type": "string" }
                                    },
                                    "required": ["name", "status"]
                                },
                                "description": "Integration test results"
                            }
                        },
                        "required": ["story_id", "unit", "integration"]
                    }
                },
                {
                    "name": "ensure_acceptance",
                    "description": "Check whether a story can be accepted. Returns acceptance status with reasons if blocked.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Agent lifecycle ──
                {
                    "name": "start_agent",
                    "description": "Start an agent for a story. Creates a worktree, runs setup, and spawns the agent process.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '28_my_story')" },
                            "agent_name": { "type": "string", "description": "Agent name from project.toml config. If omitted, uses the first coder agent (stage = \"coder\"). Supervisor must be requested explicitly by name." }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "stop_agent",
                    "description": "Stop a running agent. Worktree is preserved for inspection.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" },
                            "agent_name": { "type": "string", "description": "Agent name to stop" }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                {
                    "name": "list_agents",
                    "description": "List all agents with their current status, story assignment, and worktree path.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "get_agent_config",
                    "description": "Get the configured agent roster from project.toml (names, roles, models, allowed tools, limits).",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "reload_agent_config",
                    "description": "Reload project.toml and return the updated agent roster.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "get_agent_output",
                    // NOTE(review): the SSE streaming path for this tool uses a
                    // default timeout of 10000 ms (cap 30000) — confirm whether the
                    // "default: 2000, max: 10000" advertised here matches the
                    // non-SSE polling implementation.
                    "description": "Poll recent output from a running agent. Subscribes to the agent's event stream and collects events for up to 2 seconds. Returns text output and status events. Call repeatedly to follow progress.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" },
                            "agent_name": { "type": "string", "description": "Agent name" },
                            "timeout_ms": { "type": "integer", "description": "How long to wait for events in milliseconds (default: 2000, max: 10000)" }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                {
                    "name": "wait_for_agent",
                    "description": "Block until the agent reaches a terminal state (completed, failed, stopped). Returns final status and summary including session_id, worktree_path, and any commits made. Use this instead of polling get_agent_output when you want to fire-and-forget and be notified on completion.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" },
                            "agent_name": { "type": "string", "description": "Agent name to wait for" },
                            "timeout_ms": { "type": "integer", "description": "Maximum time to wait in milliseconds (default: 300000 = 5 minutes)" }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                // ── Worktrees ──
                {
                    "name": "create_worktree",
                    "description": "Create a git worktree for a story under .story_kit/worktrees/{story_id} with deterministic naming. Writes .mcp.json and runs component setup. Returns the worktree path.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '42_my_story')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "list_worktrees",
                    "description": "List all worktrees under .story_kit/worktrees/ for the current project.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "remove_worktree",
                    "description": "Remove a git worktree and its feature branch for a story.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "get_editor_command",
                    "description": "Get the open-in-editor command for a worktree. Returns a ready-to-paste shell command like 'zed /path/to/worktree'. Requires the editor preference to be configured via PUT /api/settings/editor.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "worktree_path": { "type": "string", "description": "Absolute path to the worktree directory" }
                        },
                        "required": ["worktree_path"]
                    }
                },
                // ── Story mutation & lifecycle ──
                {
                    "name": "accept_story",
                    "description": "Accept a story: moves it from current/ to done/ and auto-commits to master.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "check_criterion",
                    "description": "Check off an acceptance criterion (- [ ] → - [x]) by 0-based index among unchecked items, then auto-commit to master. Use get_story_todos to see the current list of unchecked criteria.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" },
                            "criterion_index": { "type": "integer", "description": "0-based index of the unchecked criterion to check off" }
                        },
                        "required": ["story_id", "criterion_index"]
                    }
                },
                {
                    "name": "add_criterion",
                    "description": "Add an acceptance criterion to an existing story file. Appends '- [ ] {criterion}' after the last existing criterion in the '## Acceptance Criteria' section. Auto-commits via the filesystem watcher.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" },
                            "criterion": { "type": "string", "description": "The acceptance criterion text to add (without the '- [ ] ' prefix)" }
                        },
                        "required": ["story_id", "criterion"]
                    }
                },
                {
                    "name": "update_story",
                    "description": "Update an existing story file. Can replace the '## User Story' and/or '## Description' section content, and/or set YAML front matter fields (e.g. agent, manual_qa). Auto-commits via the filesystem watcher.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" },
                            "user_story": { "type": "string", "description": "New user story text to replace the '## User Story' section content" },
                            "description": { "type": "string", "description": "New description text to replace the '## Description' section content" },
                            "agent": { "type": "string", "description": "Set or change the 'agent' YAML front matter field" },
                            "front_matter": {
                                "type": "object",
                                "description": "Arbitrary YAML front matter key-value pairs to set or update",
                                "additionalProperties": { "type": "string" }
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Spikes / bugs / refactors ──
                {
                    "name": "create_spike",
                    "description": "Create a spike file in .story_kit/work/1_backlog/ with a deterministic filename and YAML front matter. Returns the spike_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string", "description": "Human-readable spike name" },
                            "description": { "type": "string", "description": "Optional description / question the spike aims to answer" }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "create_bug",
                    "description": "Create a bug file in work/1_backlog/ with a deterministic filename and auto-commit to master. Returns the bug_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string", "description": "Short human-readable bug name" },
                            "description": { "type": "string", "description": "Description of the bug" },
                            "steps_to_reproduce": { "type": "string", "description": "Steps to reproduce the bug" },
                            "actual_result": { "type": "string", "description": "What actually happens" },
                            "expected_result": { "type": "string", "description": "What should happen" },
                            "acceptance_criteria": { "type": "array", "items": { "type": "string" }, "description": "Optional list of acceptance criteria for the fix" }
                        },
                        "required": ["name", "description", "steps_to_reproduce", "actual_result", "expected_result"]
                    }
                },
                {
                    "name": "list_bugs",
                    "description": "List all open bugs in work/1_backlog/ matching the _bug_ naming convention.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "create_refactor",
                    "description": "Create a refactor work item in work/1_backlog/ with a deterministic filename and YAML front matter. Returns the refactor_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string", "description": "Short human-readable refactor name" },
                            "description": { "type": "string", "description": "Optional description of the desired state after refactoring" },
                            "acceptance_criteria": { "type": "array", "items": { "type": "string" }, "description": "Optional list of acceptance criteria" }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "list_refactors",
                    "description": "List all open refactors in work/1_backlog/ matching the _refactor_ naming convention.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "close_bug",
                    "description": "Archive a bug from work/2_current/ or work/1_backlog/ to work/5_done/ and auto-commit to master.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "bug_id": { "type": "string", "description": "Bug identifier (e.g. 'bug-3-login_crash')" }
                        },
                        "required": ["bug_id"]
                    }
                },
                // ── Merge pipeline ──
                {
                    "name": "merge_agent_work",
                    "description": "Start the mergemaster pipeline for a completed story as a background job. Returns immediately — poll get_merge_status(story_id) until the merge completes or fails. The pipeline squash-merges the feature branch into master, runs quality gates, moves the story to done, and cleans up.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '52_story_mergemaster_agent_role')" },
                            "agent_name": { "type": "string", "description": "Optional: name of the coder agent whose work is being merged (for logging)" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "get_merge_status",
                    "description": "Check the status of a merge_agent_work background job. Returns running/completed/failed. When completed, includes the full merge report with conflict details, gate output, and whether the story was archived.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (same as passed to merge_agent_work)" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "move_story_to_merge",
                    "description": "Move a story or bug from work/2_current/ to work/4_merge/ to queue it for the mergemaster pipeline and automatically spawn the mergemaster agent to squash-merge, run quality gates, and archive.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (filename stem, e.g. '28_my_story')" },
                            "agent_name": { "type": "string", "description": "Agent name to use for merging (defaults to 'mergemaster')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "report_merge_failure",
                    "description": "Report that a merge failed for a story. Leaves the story in work/4_merge/ and logs the failure reason. Use this when merge_agent_work returns success=false instead of manually moving the story file.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '52_story_mergemaster_agent_role')" },
                            "reason": { "type": "string", "description": "Human-readable explanation of why the merge failed" }
                        },
                        "required": ["story_id", "reason"]
                    }
                },
                // ── QA ──
                {
                    "name": "request_qa",
                    "description": "Trigger QA review of a completed story worktree: moves the item from work/2_current/ to work/3_qa/ and starts the qa agent to run quality gates, tests, and generate a manual testing plan.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '53_story_qa_agent_role')" },
                            "agent_name": { "type": "string", "description": "Agent name to use for QA (defaults to 'qa')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "approve_qa",
                    "description": "Approve a story that passed machine QA and is awaiting human review. Moves the story from work/3_qa/ to work/4_merge/ and starts the mergemaster agent.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '247_story_human_qa_gate')" }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "reject_qa",
                    "description": "Reject a story during human QA review. Moves the story from work/3_qa/ back to work/2_current/ with rejection notes so the coder agent can fix the issues.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier (e.g. '247_story_human_qa_gate')" },
                            "notes": { "type": "string", "description": "Explanation of what is broken or needs fixing" }
                        },
                        "required": ["story_id", "notes"]
                    }
                },
                {
                    "name": "launch_qa_app",
                    "description": "Launch the app from a story's worktree for manual QA testing. Automatically assigns a free port, writes it to .story_kit_port, and starts the backend server. Only one QA app instance runs at a time.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Story identifier whose worktree app to launch" }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Status & diagnostics ──
                {
                    "name": "get_pipeline_status",
                    "description": "Return a structured snapshot of the full work item pipeline. Includes all active stages (current, qa, merge, done) with each item's stage, name, and assigned agent. Also includes upcoming backlog items.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "get_server_logs",
                    "description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "lines": { "type": "integer", "description": "Number of recent lines to return (default 100, max 1000)" },
                            "filter": { "type": "string", "description": "Optional substring filter (e.g. 'watcher', 'mcp', 'permission')" },
                            "severity": { "type": "string", "description": "Filter by severity level: ERROR, WARN, or INFO. Returns only entries at that level." }
                        }
                    }
                },
                {
                    "name": "rebuild_and_restart",
                    "description": "Rebuild the server binary from source and re-exec with the new binary. Gracefully stops all running agents before restart. If the build fails, the server stays up and returns the build error.",
                    "inputSchema": { "type": "object", "properties": {} }
                },
                {
                    "name": "prompt_permission",
                    "description": "Present a permission request to the user via the web UI. Used by Claude Code's --permission-prompt-tool to delegate permission decisions to the frontend dialog. Returns on approval; returns an error on denial.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "tool_name": { "type": "string", "description": "The tool requesting permission (e.g. 'Bash', 'Write')" },
                            "input": { "type": "object", "description": "The tool's input arguments" }
                        },
                        "required": ["tool_name", "input"]
                    }
                },
                {
                    "name": "get_token_usage",
                    "description": "Return per-agent token usage records from the persistent log. Shows input tokens, output tokens, cache tokens, and cost in USD for each agent session. Optionally filter by story_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": { "type": "string", "description": "Optional: filter records to a specific story (e.g. '42_my_story')" }
                        }
                    }
                }
            ]
        }),
    )
}
// ── Tool dispatch ─────────────────────────────────────────────────
/// MCP `tools/call`: dispatch to the named tool implementation.
///
/// Per MCP convention, tool failures are reported as a *successful* JSON-RPC
/// response whose payload carries `isError: true` — protocol-level errors are
/// reserved for malformed requests (handled in `mcp_post_handler`).
async fn handle_tools_call(
    id: Option<Value>,
    params: &Value,
    ctx: &AppContext,
) -> JsonRpcResponse {
    let tool_name = params
        .get("name")
        .and_then(|v| v.as_str())
        .unwrap_or("");
    let args = params.get("arguments").cloned().unwrap_or(json!({}));
    let result = match tool_name {
        // Workflow tools
        "create_story" => tool_create_story(&args, ctx),
        "validate_stories" => tool_validate_stories(ctx),
        "list_upcoming" => tool_list_upcoming(ctx),
        "get_story_todos" => tool_get_story_todos(&args, ctx),
        "record_tests" => tool_record_tests(&args, ctx),
        "ensure_acceptance" => tool_ensure_acceptance(&args, ctx),
        // Agent tools (async)
        "start_agent" => tool_start_agent(&args, ctx).await,
        "stop_agent" => tool_stop_agent(&args, ctx).await,
        "list_agents" => tool_list_agents(ctx),
        "get_agent_config" => tool_get_agent_config(ctx),
        // NOTE(review): "reload_agent_config" maps to the same getter as
        // "get_agent_config" — confirm tool_get_agent_config re-reads
        // project.toml on every call, otherwise this tool never reloads.
        "reload_agent_config" => tool_get_agent_config(ctx),
        "get_agent_output" => tool_get_agent_output_poll(&args, ctx).await,
        "wait_for_agent" => tool_wait_for_agent(&args, ctx).await,
        // Worktree tools
        "create_worktree" => tool_create_worktree(&args, ctx).await,
        "list_worktrees" => tool_list_worktrees(ctx),
        "remove_worktree" => tool_remove_worktree(&args, ctx).await,
        // Editor tools
        "get_editor_command" => tool_get_editor_command(&args, ctx),
        // Lifecycle tools
        "accept_story" => tool_accept_story(&args, ctx),
        // Story mutation tools (auto-commit to master)
        "check_criterion" => tool_check_criterion(&args, ctx),
        "add_criterion" => tool_add_criterion(&args, ctx),
        "update_story" => tool_update_story(&args, ctx),
        // Spike lifecycle tools
        "create_spike" => tool_create_spike(&args, ctx),
        // Bug lifecycle tools
        "create_bug" => tool_create_bug(&args, ctx),
        "list_bugs" => tool_list_bugs(ctx),
        "close_bug" => tool_close_bug(&args, ctx),
        // Refactor lifecycle tools
        "create_refactor" => tool_create_refactor(&args, ctx),
        "list_refactors" => tool_list_refactors(ctx),
        // Mergemaster tools
        "merge_agent_work" => tool_merge_agent_work(&args, ctx),
        "get_merge_status" => tool_get_merge_status(&args, ctx),
        "move_story_to_merge" => tool_move_story_to_merge(&args, ctx).await,
        "report_merge_failure" => tool_report_merge_failure(&args, ctx),
        // QA tools
        "request_qa" => tool_request_qa(&args, ctx).await,
        "approve_qa" => tool_approve_qa(&args, ctx).await,
        "reject_qa" => tool_reject_qa(&args, ctx).await,
        "launch_qa_app" => tool_launch_qa_app(&args, ctx).await,
        // Pipeline status
        "get_pipeline_status" => tool_get_pipeline_status(ctx),
        // Diagnostics
        "get_server_logs" => tool_get_server_logs(&args),
        // Server lifecycle
        "rebuild_and_restart" => tool_rebuild_and_restart(ctx).await,
        // Permission bridge (Claude Code → frontend dialog)
        "prompt_permission" => tool_prompt_permission(&args, ctx).await,
        // Token usage
        "get_token_usage" => tool_get_token_usage(&args, ctx),
        _ => Err(format!("Unknown tool: {tool_name}")),
    };
    match result {
        Ok(content) => JsonRpcResponse::success(
            id,
            json!({
                "content": [{ "type": "text", "text": content }]
            }),
        ),
        Err(msg) => {
            slog_warn!("[mcp] Tool call failed: tool={tool_name} error={msg}");
            // Tool errors ride in-band as isError, not as JSON-RPC errors.
            JsonRpcResponse::success(
                id,
                json!({
                    "content": [{ "type": "text", "text": msg }],
                    "isError": true
                }),
            )
        }
    }
}
// ── Tool implementations ──────────────────────────────────────────
fn tool_create_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
let name = args
.get("name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: name")?;
let user_story = args.get("user_story").and_then(|v| v.as_str());
let acceptance_criteria: Option<Vec<String>> = args
.get("acceptance_criteria")
.and_then(|v| serde_json::from_value(v.clone()).ok());
// Spike 61: write the file only — the filesystem watcher detects the new
// .md file in work/1_backlog/ and auto-commits with a deterministic message.
let commit = false;
let root = ctx.state.get_project_root()?;
let story_id = create_story_file(
&root,
name,
user_story,
acceptance_criteria.as_deref(),
commit,
)?;
Ok(format!("Created story: {story_id}"))
}
fn tool_validate_stories(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let results = validate_story_dirs(&root)?;
serde_json::to_string_pretty(&json!(results
.iter()
.map(|r| json!({
"story_id": r.story_id,
"valid": r.valid,
"error": r.error,
}))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
fn tool_list_upcoming(ctx: &AppContext) -> Result<String, String> {
let stories = load_upcoming_stories(ctx)?;
serde_json::to_string_pretty(&json!(stories
.iter()
.map(|s| json!({
"story_id": s.story_id,
"name": s.name,
"error": s.error,
}))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: snapshot the work pipeline — active items (current/qa/merge/done,
/// each tagged with its stage and any assigned agent) plus the backlog.
fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
    let state = load_pipeline_state(ctx)?;
    // Convert one stage's items into JSON rows tagged with the stage name.
    let to_rows = |items: &[crate::http::workflow::UpcomingStory], stage: &str| -> Vec<Value> {
        items
            .iter()
            .map(|s| {
                let agent = s.agent.as_ref().map(|a| {
                    json!({
                        "agent_name": a.agent_name,
                        "model": a.model,
                        "status": a.status,
                    })
                });
                json!({
                    "story_id": s.story_id,
                    "name": s.name,
                    "stage": stage,
                    "agent": agent,
                })
            })
            .collect()
    };
    let mut active = to_rows(&state.current, "current");
    active.extend(to_rows(&state.qa, "qa"));
    active.extend(to_rows(&state.merge, "merge"));
    active.extend(to_rows(&state.done, "done"));
    let backlog: Vec<Value> = state
        .backlog
        .iter()
        .map(|s| json!({ "story_id": s.story_id, "name": s.name }))
        .collect();
    serde_json::to_string_pretty(&json!({
        "active": active,
        "backlog": backlog,
        "backlog_count": backlog.len(),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: return the unchecked acceptance criteria (todos) for a story in
/// work/2_current/, along with the story's name from its front matter.
///
/// Returns an error string when `story_id` is missing, the file does not
/// exist, or the file cannot be read.
fn tool_get_story_todos(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let root = ctx.state.get_project_root()?;
    let filepath = root
        .join(".story_kit")
        .join("work")
        .join("2_current")
        .join(format!("{story_id}.md"));
    // Single read instead of exists()-then-read: avoids the TOCTOU race where
    // the file disappears between the check and the read, and saves a syscall.
    let contents = match fs::read_to_string(&filepath) {
        Ok(c) => c,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            return Err(format!("Story file not found: {story_id}.md"));
        }
        Err(e) => return Err(format!("Failed to read story file: {e}")),
    };
    // Front-matter parse failures are tolerated: the name is simply omitted.
    let story_name = parse_front_matter(&contents)
        .ok()
        .and_then(|m| m.name);
    let todos = parse_unchecked_todos(&contents);
    serde_json::to_string_pretty(&json!({
        "story_id": story_id,
        "story_name": story_name,
        "todos": todos,
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: record unit/integration test results for a story.
///
/// Results are validated and stored in the shared workflow state first;
/// mirroring them into the story file afterwards is best-effort only (a
/// failed write logs a warning instead of failing the call).
fn tool_record_tests(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    // Both arrays are optional; absent/null parses to an empty list.
    let unit = parse_test_cases(args.get("unit"))?;
    let integration = parse_test_cases(args.get("integration"))?;
    let mut workflow = ctx
        .workflow
        .lock()
        .map_err(|e| format!("Lock error: {e}"))?;
    workflow.record_test_results_validated(story_id.to_string(), unit, integration)?;
    // Persist to story file (best-effort — file write errors are warnings, not failures).
    if let Ok(project_root) = ctx.state.get_project_root()
        && let Some(results) = workflow.results.get(story_id)
        && let Err(e) =
            crate::http::workflow::write_test_results_to_story_file(&project_root, story_id, results)
    {
        slog_warn!("[record_tests] Could not persist results to story file: {e}");
    }
    Ok("Test results recorded.".to_string())
}
/// MCP tool: check whether a story currently passes all acceptance gates.
///
/// Test results come from the in-memory workflow state when present,
/// otherwise from the results persisted in the story file; coverage data is
/// only read from the in-memory state. Returns `Ok` with a success message
/// when the story can be accepted, or `Err` listing every blocking reason.
fn tool_ensure_acceptance(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let workflow = ctx
        .workflow
        .lock()
        .map_err(|e| format!("Lock error: {e}"))?;
    // Use in-memory results if present; otherwise fall back to file-persisted results.
    // `file_results` is declared before the `if` so the reference produced in
    // the else-branch can outlive that branch.
    let file_results;
    let results = if let Some(r) = workflow.results.get(story_id) {
        r
    } else {
        let project_root = ctx.state.get_project_root().ok();
        file_results = project_root.as_deref().and_then(|root| {
            crate::http::workflow::read_test_results_from_story_file(root, story_id)
        });
        file_results.as_ref().map_or_else(
            || {
                // No results anywhere — use empty default for the acceptance check
                // (it will fail with "No test results recorded")
                static EMPTY: std::sync::OnceLock<crate::workflow::StoryTestResults> =
                    std::sync::OnceLock::new();
                EMPTY.get_or_init(Default::default)
            },
            |r| r,
        )
    };
    let coverage = workflow.coverage.get(story_id);
    let decision = evaluate_acceptance_with_coverage(results, coverage);
    if decision.can_accept {
        Ok("Story can be accepted. All gates pass.".to_string())
    } else {
        // Surface every blocking reason (plus any warning) in a single error.
        let mut parts = decision.reasons;
        if let Some(w) = decision.warning {
            parts.push(w);
        }
        Err(format!("Acceptance blocked: {}", parts.join("; ")))
    }
}
// ── Agent tool implementations ────────────────────────────────────
/// MCP tool: start an agent on a story and return its runtime info as JSON.
///
/// After the agent starts, the current overall coverage percentage (if a
/// report file exists) is snapshotted into the story file as a baseline;
/// failures there are logged as warnings and do not fail the call.
async fn tool_start_agent(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    // agent_name is optional — passed through as None when absent.
    let agent_name = args.get("agent_name").and_then(|v| v.as_str());
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    let info = ctx
        .agents
        .start_agent(&project_root, story_id, agent_name, None)
        .await?;
    // Snapshot coverage baseline from the most recent coverage report (best-effort).
    if let Some(pct) = read_coverage_percent_from_json(&project_root)
        && let Err(e) = crate::http::workflow::write_coverage_baseline_to_story_file(
            &project_root,
            story_id,
            pct,
        )
    {
        slog_warn!("[start_agent] Could not write coverage baseline to story file: {e}");
    }
    serde_json::to_string_pretty(&json!({
        "story_id": info.story_id,
        "agent_name": info.agent_name,
        "status": info.status.to_string(),
        "session_id": info.session_id,
        "worktree_path": info.worktree_path,
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// Try to read the overall line coverage percentage from the llvm-cov JSON report.
///
/// Expects the file at `{project_root}/.story_kit/coverage/server.json`.
/// Returns `None` if the file is absent, unreadable, or cannot be parsed.
fn read_coverage_percent_from_json(project_root: &std::path::Path) -> Option<f64> {
    let report = project_root
        .join(".story_kit")
        .join("coverage")
        .join("server.json");
    let raw = std::fs::read_to_string(&report).ok()?;
    let parsed: Value = serde_json::from_str(&raw).ok()?;
    // cargo llvm-cov --json format: data[0].totals.lines.percent
    parsed.pointer("/data/0/totals/lines/percent")?.as_f64()
}
async fn tool_stop_agent(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let agent_name = args
.get("agent_name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: agent_name")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
ctx.agents
.stop_agent(&project_root, story_id, agent_name)
.await?;
Ok(format!("Agent '{agent_name}' for story '{story_id}' stopped."))
}
fn tool_list_agents(ctx: &AppContext) -> Result<String, String> {
let project_root = ctx.agents.get_project_root(&ctx.state).ok();
let agents = ctx.agents.list_agents()?;
serde_json::to_string_pretty(&json!(agents
.iter()
.filter(|a| {
project_root
.as_deref()
.map(|root| !crate::http::agents::story_is_archived(root, &a.story_id))
.unwrap_or(true)
})
.map(|a| json!({
"story_id": a.story_id,
"agent_name": a.agent_name,
"status": a.status.to_string(),
"session_id": a.session_id,
"worktree_path": a.worktree_path,
}))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
async fn tool_get_agent_output_poll(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let agent_name = args
.get("agent_name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: agent_name")?;
// Try draining in-memory events first.
match ctx.agents.drain_events(story_id, agent_name) {
Ok(drained) => {
let done = drained.iter().any(|e| {
matches!(
e,
crate::agents::AgentEvent::Done { .. }
| crate::agents::AgentEvent::Error { .. }
)
});
let events: Vec<serde_json::Value> = drained
.into_iter()
.filter_map(|e| serde_json::to_value(&e).ok())
.collect();
serde_json::to_string_pretty(&json!({
"events": events,
"done": done,
"event_count": events.len(),
"message": if done { "Agent stream ended." } else if events.is_empty() { "No new events. Call again to continue." } else { "Events returned. Call again to continue." }
}))
.map_err(|e| format!("Serialization error: {e}"))
}
Err(_) => {
// Agent not in memory — fall back to persistent log file.
get_agent_output_from_log(story_id, agent_name, ctx)
}
}
}
/// Fall back to reading agent output from the persistent log file on disk.
///
/// Tries to find the log file via the agent's stored log_session_id first,
/// then falls back to `find_latest_log` scanning the log directory.
fn get_agent_output_from_log(
    story_id: &str,
    agent_name: &str,
    ctx: &AppContext,
) -> Result<String, String> {
    use crate::agent_log;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Try to find the log file: first from in-memory agent info, then by scanning.
    // `.filter(|p| p.exists())` guards against a stored session id whose log
    // file is absent on disk.
    let log_path = ctx
        .agents
        .get_log_info(story_id, agent_name)
        .map(|(session_id, root)| agent_log::log_file_path(&root, story_id, agent_name, &session_id))
        .filter(|p| p.exists())
        .or_else(|| agent_log::find_latest_log(&project_root, story_id, agent_name));
    let log_path = match log_path {
        Some(p) => p,
        None => {
            // No log anywhere: report an empty, terminal ("done") event
            // stream rather than an error, so pollers stop cleanly.
            return serde_json::to_string_pretty(&json!({
                "events": [],
                "done": true,
                "event_count": 0,
                "message": format!("No agent '{agent_name}' for story '{story_id}' and no log file found."),
                "source": "none",
            }))
            .map_err(|e| format!("Serialization error: {e}"));
        }
    };
    match agent_log::read_log(&log_path) {
        Ok(entries) => {
            // Flatten each log entry to its raw event, injecting the entry's
            // timestamp into the event when it is a JSON object.
            let events: Vec<serde_json::Value> = entries
                .into_iter()
                .map(|e| {
                    let mut val = e.event;
                    if let serde_json::Value::Object(ref mut map) = val {
                        map.insert(
                            "timestamp".to_string(),
                            serde_json::Value::String(e.timestamp),
                        );
                    }
                    val
                })
                .collect();
            let count = events.len();
            serde_json::to_string_pretty(&json!({
                "events": events,
                "done": true,
                "event_count": count,
                "message": "Events loaded from persistent log file.",
                "source": "log_file",
                "log_file": log_path.to_string_lossy(),
            }))
            .map_err(|e| format!("Serialization error: {e}"))
        }
        Err(e) => Err(format!("Failed to read log file: {e}")),
    }
}
fn tool_get_agent_config(ctx: &AppContext) -> Result<String, String> {
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let config = ProjectConfig::load(&project_root)?;
// Collect available (idle) agent names across all stages so the caller can
// see at a glance which agents are free to start (story 190).
let mut available_names: std::collections::HashSet<String> =
std::collections::HashSet::new();
for stage in &[
PipelineStage::Coder,
PipelineStage::Qa,
PipelineStage::Mergemaster,
PipelineStage::Other,
] {
if let Ok(names) = ctx.agents.available_agents_for_stage(&config, stage) {
available_names.extend(names);
}
}
serde_json::to_string_pretty(&json!(config
.agent
.iter()
.map(|a| json!({
"name": a.name,
"role": a.role,
"model": a.model,
"allowed_tools": a.allowed_tools,
"max_turns": a.max_turns,
"max_budget_usd": a.max_budget_usd,
"available": available_names.contains(&a.name),
}))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
async fn tool_wait_for_agent(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let agent_name = args
.get("agent_name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: agent_name")?;
let timeout_ms = args
.get("timeout_ms")
.and_then(|v| v.as_u64())
.unwrap_or(300_000); // default: 5 minutes
let info = ctx
.agents
.wait_for_agent(story_id, agent_name, timeout_ms)
.await?;
let commits = match (&info.worktree_path, &info.base_branch) {
(Some(wt_path), Some(base)) => get_worktree_commits(wt_path, base).await,
_ => None,
};
let completion = info.completion.as_ref().map(|r| json!({
"summary": r.summary,
"gates_passed": r.gates_passed,
"gate_output": r.gate_output,
}));
serde_json::to_string_pretty(&json!({
"story_id": info.story_id,
"agent_name": info.agent_name,
"status": info.status.to_string(),
"session_id": info.session_id,
"worktree_path": info.worktree_path,
"base_branch": info.base_branch,
"commits": commits,
"completion": completion,
}))
.map_err(|e| format!("Serialization error: {e}"))
}
// ── Worktree tool implementations ────────────────────────────────
async fn tool_create_worktree(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let info = ctx.agents.create_worktree(&project_root, story_id).await?;
serde_json::to_string_pretty(&json!({
"story_id": story_id,
"worktree_path": info.path.to_string_lossy(),
"branch": info.branch,
"base_branch": info.base_branch,
}))
.map_err(|e| format!("Serialization error: {e}"))
}
fn tool_list_worktrees(ctx: &AppContext) -> Result<String, String> {
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let entries = worktree::list_worktrees(&project_root)?;
serde_json::to_string_pretty(&json!(entries
.iter()
.map(|e| json!({
"story_id": e.story_id,
"path": e.path.to_string_lossy(),
}))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
async fn tool_remove_worktree(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let config = ProjectConfig::load(&project_root)?;
worktree::remove_worktree_by_story_id(&project_root, story_id, &config).await?;
Ok(format!("Worktree for story '{story_id}' removed."))
}
// ── Editor tool implementations ───────────────────────────────────
fn tool_get_editor_command(args: &Value, ctx: &AppContext) -> Result<String, String> {
let worktree_path = args
.get("worktree_path")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: worktree_path")?;
let editor = get_editor_command_from_store(ctx)
.ok_or_else(|| "No editor configured. Set one via PUT /api/settings/editor.".to_string())?;
Ok(format!("{editor} {worktree_path}"))
}
fn tool_accept_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
// Bug 226: Refuse to accept if the feature branch has unmerged code.
// The code must be squash-merged via merge_agent_work first.
if feature_branch_has_unmerged_changes(&project_root, story_id) {
return Err(format!(
"Cannot accept story '{story_id}': feature branch 'feature/story-{story_id}' \
has unmerged changes. Use merge_agent_work to squash-merge the code into \
master first."
));
}
move_story_to_archived(&project_root, story_id)?;
ctx.agents.remove_agents_for_story(story_id);
Ok(format!(
"Story '{story_id}' accepted, moved to done/, and committed to master."
))
}
fn tool_check_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let criterion_index = args
.get("criterion_index")
.and_then(|v| v.as_u64())
.ok_or("Missing required argument: criterion_index")? as usize;
let root = ctx.state.get_project_root()?;
check_criterion_in_file(&root, story_id, criterion_index)?;
Ok(format!(
"Criterion {criterion_index} checked for story '{story_id}'. Committed to master."
))
}
fn tool_add_criterion(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let criterion = args
.get("criterion")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: criterion")?;
let root = ctx.state.get_project_root()?;
add_criterion_to_file(&root, story_id, criterion)?;
Ok(format!(
"Added criterion to story '{story_id}': - [ ] {criterion}"
))
}
/// MCP tool: patch a story file's user story, description, and front matter.
///
/// Front matter updates come from the explicit `agent` parameter plus an
/// arbitrary `front_matter` object; non-string JSON values are stored in
/// their serialized form.
fn tool_update_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let Some(story_id) = args.get("story_id").and_then(Value::as_str) else {
        return Err("Missing required argument: story_id".to_string());
    };
    let user_story = args.get("user_story").and_then(Value::as_str);
    let description = args.get("description").and_then(Value::as_str);
    let mut front_matter = HashMap::new();
    if let Some(agent) = args.get("agent").and_then(Value::as_str) {
        front_matter.insert("agent".to_string(), agent.to_string());
    }
    if let Some(obj) = args.get("front_matter").and_then(Value::as_object) {
        for (key, value) in obj {
            let rendered = if let Value::String(s) = value {
                s.clone()
            } else {
                value.to_string()
            };
            front_matter.insert(key.clone(), rendered);
        }
    }
    let root = ctx.state.get_project_root()?;
    // Pass None when nothing was provided so the file's front matter is untouched.
    let fm = (!front_matter.is_empty()).then_some(&front_matter);
    update_story_in_file(&root, story_id, user_story, description, fm)?;
    Ok(format!("Updated story '{story_id}'."))
}
// ── Spike lifecycle tool implementations ─────────────────────────
fn tool_create_spike(args: &Value, ctx: &AppContext) -> Result<String, String> {
let name = args
.get("name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: name")?;
let description = args.get("description").and_then(|v| v.as_str());
let root = ctx.state.get_project_root()?;
let spike_id = create_spike_file(&root, name, description)?;
Ok(format!("Created spike: {spike_id}"))
}
// ── Bug lifecycle tool implementations ───────────────────────────
fn tool_create_bug(args: &Value, ctx: &AppContext) -> Result<String, String> {
let name = args
.get("name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: name")?;
let description = args
.get("description")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: description")?;
let steps_to_reproduce = args
.get("steps_to_reproduce")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: steps_to_reproduce")?;
let actual_result = args
.get("actual_result")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: actual_result")?;
let expected_result = args
.get("expected_result")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: expected_result")?;
let acceptance_criteria: Option<Vec<String>> = args
.get("acceptance_criteria")
.and_then(|v| serde_json::from_value(v.clone()).ok());
let root = ctx.state.get_project_root()?;
let bug_id = create_bug_file(
&root,
name,
description,
steps_to_reproduce,
actual_result,
expected_result,
acceptance_criteria.as_deref(),
)?;
Ok(format!("Created bug: {bug_id}"))
}
fn tool_list_bugs(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let bugs = list_bug_files(&root)?;
serde_json::to_string_pretty(&json!(bugs
.iter()
.map(|(id, name)| json!({ "bug_id": id, "name": name }))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
fn tool_close_bug(args: &Value, ctx: &AppContext) -> Result<String, String> {
let bug_id = args
.get("bug_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: bug_id")?;
let root = ctx.agents.get_project_root(&ctx.state)?;
close_bug_to_archive(&root, bug_id)?;
ctx.agents.remove_agents_for_story(bug_id);
Ok(format!(
"Bug '{bug_id}' closed, moved to bugs/archive/, and committed to master."
))
}
// ── Refactor lifecycle tool implementations ───────────────────────
fn tool_create_refactor(args: &Value, ctx: &AppContext) -> Result<String, String> {
let name = args
.get("name")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: name")?;
let description = args.get("description").and_then(|v| v.as_str());
let acceptance_criteria: Option<Vec<String>> = args
.get("acceptance_criteria")
.and_then(|v| serde_json::from_value(v.clone()).ok());
let root = ctx.state.get_project_root()?;
let refactor_id = create_refactor_file(
&root,
name,
description,
acceptance_criteria.as_deref(),
)?;
Ok(format!("Created refactor: {refactor_id}"))
}
fn tool_list_refactors(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let refactors = list_refactor_files(&root)?;
serde_json::to_string_pretty(&json!(refactors
.iter()
.map(|(id, name)| json!({ "refactor_id": id, "name": name }))
.collect::<Vec<_>>()))
.map_err(|e| format!("Serialization error: {e}"))
}
// ── Mergemaster tool implementations ─────────────────────────────
fn tool_merge_agent_work(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
ctx.agents.start_merge_agent_work(&project_root, story_id)?;
serde_json::to_string_pretty(&json!({
"story_id": story_id,
"status": "started",
"message": "Merge pipeline started. Poll get_merge_status(story_id) every 10-15 seconds until status is 'completed' or 'failed'."
}))
.map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: report the state of a story's merge job.
///
/// Errors when no merge job exists for the story. Otherwise returns JSON
/// with a top-level `status` of "running", "completed", or "failed"; the
/// completed case carries the full merge report plus a human-readable
/// `message` selected from the report's flags.
fn tool_get_merge_status(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let job = ctx.agents.get_merge_status(story_id)
        .ok_or_else(|| format!("No merge job found for story '{story_id}'. Call merge_agent_work first."))?;
    match &job.status {
        crate::agents::merge::MergeJobStatus::Running => {
            serde_json::to_string_pretty(&json!({
                "story_id": story_id,
                "status": "running",
                "message": "Merge pipeline is still running. Poll again in 10-15 seconds."
            }))
            .map_err(|e| format!("Serialization error: {e}"))
        }
        crate::agents::merge::MergeJobStatus::Completed(report) => {
            // Pick the message by outcome; branch order matters — the fully
            // successful cases are checked before the failure cases.
            let status_msg = if report.success && report.gates_passed && report.conflicts_resolved {
                "Merge complete: conflicts were auto-resolved and all quality gates passed. Story moved to done and worktree cleaned up."
            } else if report.success && report.gates_passed {
                "Merge complete: all quality gates passed. Story moved to done and worktree cleaned up."
            } else if report.had_conflicts && !report.conflicts_resolved {
                "Merge failed: conflicts detected that could not be auto-resolved. Merge was aborted — master is untouched. Call report_merge_failure with the conflict details so the human can resolve them. Do NOT manually move the story file or call accept_story."
            } else if report.success && !report.gates_passed {
                "Merge committed but quality gates failed. Review gate_output and fix issues before re-running."
            } else {
                "Merge failed. Review gate_output for details. Call report_merge_failure to record the failure. Do NOT manually move the story file or call accept_story."
            };
            serde_json::to_string_pretty(&json!({
                "story_id": story_id,
                "status": "completed",
                "success": report.success,
                "had_conflicts": report.had_conflicts,
                "conflicts_resolved": report.conflicts_resolved,
                "conflict_details": report.conflict_details,
                "gates_passed": report.gates_passed,
                "gate_output": report.gate_output,
                "worktree_cleaned_up": report.worktree_cleaned_up,
                "story_archived": report.story_archived,
                "message": status_msg,
            }))
            .map_err(|e| format!("Serialization error: {e}"))
        }
        crate::agents::merge::MergeJobStatus::Failed(err) => {
            serde_json::to_string_pretty(&json!({
                "story_id": story_id,
                "status": "failed",
                "error": err,
                "message": format!("Merge pipeline failed: {err}. Call report_merge_failure to record the failure.")
            }))
            .map_err(|e| format!("Serialization error: {e}"))
        }
    }
}
async fn tool_move_story_to_merge(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let agent_name = args
.get("agent_name")
.and_then(|v| v.as_str())
.unwrap_or("mergemaster");
let project_root = ctx.agents.get_project_root(&ctx.state)?;
// Move story from work/2_current/ to work/4_merge/
move_story_to_merge(&project_root, story_id)?;
// Start the mergemaster agent on the story worktree
let info = ctx
.agents
.start_agent(&project_root, story_id, Some(agent_name), None)
.await?;
serde_json::to_string_pretty(&json!({
"story_id": info.story_id,
"agent_name": info.agent_name,
"status": info.status.to_string(),
"worktree_path": info.worktree_path,
"message": format!(
"Story '{story_id}' moved to work/4_merge/ and mergemaster agent '{}' started.",
info.agent_name
),
}))
.map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: record a merge failure for a story.
///
/// Marks the failure in the agent manager, broadcasts it on the watcher
/// channel, and (best-effort) persists the reason into the story file's
/// front matter in work/4_merge/. The story file itself is not moved.
fn tool_report_merge_failure(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let reason = args
        .get("reason")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: reason")?;
    slog!("[mergemaster] Merge failure reported for '{story_id}': {reason}");
    ctx.agents.set_merge_failure_reported(story_id);
    // Broadcast the failure so the Matrix notification listener can post an
    // error message to configured rooms without coupling this tool to the bot.
    let _ = ctx.watcher_tx.send(crate::io::watcher::WatcherEvent::MergeFailure {
        story_id: story_id.to_string(),
        reason: reason.to_string(),
    });
    // Persist the failure reason to the story file's front matter so it
    // survives server restarts and is visible in the web UI.
    if let Ok(project_root) = ctx.state.get_project_root() {
        let story_file = project_root
            .join(".story_kit")
            .join("work")
            .join("4_merge")
            .join(format!("{story_id}.md"));
        if story_file.exists() {
            if let Err(e) = write_merge_failure(&story_file, reason) {
                // Persistence is best-effort: warn, but still report success.
                slog_warn!(
                    "[mergemaster] Failed to persist merge_failure to story file for '{story_id}': {e}"
                );
            }
        } else {
            slog_warn!(
                "[mergemaster] Story file not found in 4_merge/ for '{story_id}'; \
                merge_failure not persisted to front matter"
            );
        }
    }
    Ok(format!(
        "Merge failure for '{story_id}' recorded. Story remains in work/4_merge/. Reason: {reason}"
    ))
}
// ── QA tool implementations ───────────────────────────────────────
async fn tool_request_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let agent_name = args
.get("agent_name")
.and_then(|v| v.as_str())
.unwrap_or("qa");
let project_root = ctx.agents.get_project_root(&ctx.state)?;
// Move story from work/2_current/ to work/3_qa/
move_story_to_qa(&project_root, story_id)?;
// Start the QA agent on the story worktree
let info = ctx
.agents
.start_agent(&project_root, story_id, Some(agent_name), None)
.await?;
serde_json::to_string_pretty(&json!({
"story_id": info.story_id,
"agent_name": info.agent_name,
"status": info.status.to_string(),
"worktree_path": info.worktree_path,
"message": format!(
"Story '{story_id}' moved to work/3_qa/ and QA agent '{}' started.",
info.agent_name
),
}))
.map_err(|e| format!("Serialization error: {e}"))
}
async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
// Clear review_hold before moving
let qa_path = project_root
.join(".story_kit/work/3_qa")
.join(format!("{story_id}.md"));
if qa_path.exists() {
let _ = crate::io::story_metadata::clear_front_matter_field(&qa_path, "review_hold");
}
// Move story from work/3_qa/ to work/4_merge/
move_story_to_merge(&project_root, story_id)?;
// Start the mergemaster agent
let info = ctx
.agents
.start_agent(&project_root, story_id, Some("mergemaster"), None)
.await?;
serde_json::to_string_pretty(&json!({
"story_id": info.story_id,
"agent_name": info.agent_name,
"status": info.status.to_string(),
"message": format!(
"Story '{story_id}' approved. Moved to work/4_merge/ and mergemaster agent '{}' started.",
info.agent_name
),
}))
.map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: reject a story in QA and send it back to the coder.
///
/// Moves the story from work/3_qa/ back to work/2_current/ with the
/// rejection notes, then restarts its coder agent (best-effort) with the
/// notes appended as extra context. The agent name is read from the story's
/// front matter, defaulting to "coder-opus".
async fn tool_reject_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let notes = args
        .get("notes")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: notes")?;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Move story from work/3_qa/ back to work/2_current/ with rejection notes
    reject_story_from_qa(&project_root, story_id, notes)?;
    // Restart the coder agent with rejection context.
    // Read the agent name from the story file now sitting in 2_current/.
    let story_path = project_root
        .join(".story_kit/work/2_current")
        .join(format!("{story_id}.md"));
    let agent_name = if story_path.exists() {
        let contents = std::fs::read_to_string(&story_path).unwrap_or_default();
        crate::io::story_metadata::parse_front_matter(&contents)
            .ok()
            .and_then(|meta| meta.agent)
    } else {
        None
    };
    let agent_name = agent_name.as_deref().unwrap_or("coder-opus");
    let context = format!(
        "\n\n---\n## QA Rejection\n\
        Your previous implementation was rejected during human QA review.\n\
        Rejection notes:\n{notes}\n\n\
        Please fix the issues described above and try again."
    );
    // A failed restart only logs a warning — the story move above stands.
    if let Err(e) = ctx
        .agents
        .start_agent(&project_root, story_id, Some(agent_name), Some(&context))
        .await
    {
        slog_warn!("[qa] Failed to restart coder for '{story_id}' after rejection: {e}");
    }
    Ok(format!(
        "Story '{story_id}' rejected and moved back to work/2_current/. Coder agent '{agent_name}' restarted with rejection notes."
    ))
}
/// MCP tool: launch the app from a story's worktree for manual QA.
///
/// Stops any previously launched QA instance, picks a free port (scanning
/// from 3100), writes `.story_kit_port` into the worktree so the frontend
/// dev server knows where to connect, then spawns `cargo run` with its
/// stdio detached.
///
/// Fix: the QA-app process mutex was locked with `.unwrap()`, so a poisoned
/// mutex would panic the handler; lock errors are now reported as tool
/// errors, consistent with every other lock site in this module.
async fn tool_launch_qa_app(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Find the worktree path for this story.
    let worktrees = crate::worktree::list_worktrees(&project_root)?;
    let wt = worktrees
        .iter()
        .find(|w| w.story_id == story_id)
        .ok_or_else(|| format!("No worktree found for story '{story_id}'"))?;
    let wt_path = wt.path.clone();
    // Stop any existing QA app instance.
    {
        let mut guard = ctx
            .qa_app_process
            .lock()
            .map_err(|e| format!("Lock error: {e}"))?;
        if let Some(mut child) = guard.take() {
            let _ = child.kill();
            let _ = child.wait();
            slog!("[qa-app] Stopped previous QA app instance.");
        }
    }
    // Find a free port starting from 3100.
    let port = find_free_port(3100);
    // Write .story_kit_port so the frontend dev server knows where to connect.
    let port_file = wt_path.join(".story_kit_port");
    std::fs::write(&port_file, port.to_string())
        .map_err(|e| format!("Failed to write .story_kit_port: {e}"))?;
    // Launch the server from the worktree.
    let child = std::process::Command::new("cargo")
        .args(["run"])
        .env("STORYKIT_PORT", port.to_string())
        .current_dir(&wt_path)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .spawn()
        .map_err(|e| format!("Failed to launch QA app: {e}"))?;
    {
        // NOTE: if this lock fails, the spawned child is left running but
        // untracked; the error is still surfaced to the caller.
        let mut guard = ctx
            .qa_app_process
            .lock()
            .map_err(|e| format!("Lock error: {e}"))?;
        *guard = Some(child);
    }
    serde_json::to_string_pretty(&json!({
        "story_id": story_id,
        "port": port,
        "worktree_path": wt_path.to_string_lossy(),
        "message": format!("QA app launched on port {port} from worktree at {}", wt_path.display()),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// Find a free TCP port starting from `start`.
/// Find a free TCP port on 127.0.0.1, scanning a window of up to 100 ports
/// starting at `start`.
///
/// Returns `start` itself as a best-effort fallback when nothing in the
/// window is free. Fix: the scan previously computed `start + 100`, which
/// overflows `u16` (panicking in debug builds) for `start > 65435`; the
/// upper bound is now saturated at `u16::MAX`.
fn find_free_port(start: u16) -> u16 {
    let end = start.saturating_add(99);
    for port in start..=end {
        // A successful bind proves the port is free; the listener is dropped
        // (releasing the port) as soon as the check completes.
        if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() {
            return port;
        }
    }
    start // fallback: nothing free in the window
}
/// Run `git log <base>..HEAD --oneline` in the worktree and return the commit
/// summaries, or `None` if git is unavailable or there are no new commits.
async fn get_worktree_commits(worktree_path: &str, base_branch: &str) -> Option<Vec<String>> {
    // Clone owned copies so the blocking task can be 'static.
    let wt = worktree_path.to_owned();
    let base = base_branch.to_owned();
    let handle = tokio::task::spawn_blocking(move || {
        let range = format!("{base}..HEAD");
        let output = std::process::Command::new("git")
            .args(["log", &range, "--oneline"])
            .current_dir(&wt)
            .output()
            .ok()?;
        if !output.status.success() {
            return None;
        }
        let text = String::from_utf8(output.stdout).ok()?;
        let commits = text
            .lines()
            .filter(|line| !line.is_empty())
            .map(str::to_string)
            .collect();
        Some(commits)
    });
    handle.await.ok().flatten()
}
// ── Helpers ───────────────────────────────────────────────────────
/// Convert an optional JSON array of `{name, status, details?}` objects into
/// typed test-case results.
///
/// `None`/`null` yields an empty list; any other non-array value is an
/// error, as is a missing `name`/`status` or a status other than
/// "pass"/"fail".
fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResult>, String> {
    let items = match value {
        Some(Value::Array(items)) => items,
        Some(Value::Null) | None => return Ok(Vec::new()),
        _ => return Err("Expected array for test cases".to_string()),
    };
    let mut cases = Vec::with_capacity(items.len());
    for item in items {
        let name = item
            .get("name")
            .and_then(Value::as_str)
            .ok_or("Test case missing 'name'")?;
        let status = match item
            .get("status")
            .and_then(Value::as_str)
            .ok_or("Test case missing 'status'")?
        {
            "pass" => TestStatus::Pass,
            "fail" => TestStatus::Fail,
            other => {
                return Err(format!("Invalid test status '{other}'. Use 'pass' or 'fail'."))
            }
        };
        cases.push(TestCaseResult {
            name: name.to_string(),
            status,
            details: item.get("details").and_then(Value::as_str).map(String::from),
        });
    }
    Ok(cases)
}
fn tool_get_server_logs(args: &Value) -> Result<String, String> {
let lines_count = args
.get("lines")
.and_then(|v| v.as_u64())
.map(|n| n.min(1000) as usize)
.unwrap_or(100);
let filter = args.get("filter").and_then(|v| v.as_str());
let severity = args
.get("severity")
.and_then(|v| v.as_str())
.and_then(log_buffer::LogLevel::from_str_ci);
let recent = log_buffer::global().get_recent(lines_count, filter, severity.as_ref());
let joined = recent.join("\n");
// Clamp to lines_count actual lines in case any entry contains embedded newlines.
let all_lines: Vec<&str> = joined.lines().collect();
let start = all_lines.len().saturating_sub(lines_count);
Ok(all_lines[start..].join("\n"))
}
/// Generate a Claude Code permission rule string for the given tool name and input.
///
/// - `Edit` / `Write` / `Read` / `Grep` / `Glob` etc. → just the tool name
/// - `Bash` → `Bash(first_word *)` derived from the `command` field in `tool_input`
/// - `mcp__*` → the full tool name (e.g. `mcp__story-kit__create_story`)
fn generate_permission_rule(tool_name: &str, tool_input: &Value) -> String {
if tool_name == "Bash" {
// Extract command from tool_input.command and use first word as prefix
let command_str = tool_input
.get("command")
.and_then(|v| v.as_str())
.unwrap_or("");
let first_word = command_str.split_whitespace().next().unwrap_or("unknown");
format!("Bash({first_word} *)")
} else {
// For Edit, Write, Read, Glob, Grep, MCP tools, etc. — use the tool name directly
tool_name.to_string()
}
}
/// Add a permission rule to `.claude/settings.json` in the project root.
///
/// Creates the file (and the `.claude/` directory) if missing. Does nothing
/// when the exact rule already exists, or when an existing wildcard rule
/// (e.g. `mcp__story-kit__*`) already covers it.
///
/// Fix: the previous implementation `unwrap()`ed after `or_insert`-ing the
/// `"permissions"` key, which panicked when settings.json contained a
/// `permissions` object without an `allow` array (the `or_insert` left the
/// existing object untouched), or a non-object root, or a non-array `allow`.
/// Those malformed shapes now return a descriptive error instead.
fn add_permission_rule(project_root: &std::path::Path, rule: &str) -> Result<(), String> {
    let claude_dir = project_root.join(".claude");
    fs::create_dir_all(&claude_dir)
        .map_err(|e| format!("Failed to create .claude/ directory: {e}"))?;
    let settings_path = claude_dir.join("settings.json");
    // Load existing settings, or start from an empty allow-list skeleton.
    let mut settings: Value = if settings_path.exists() {
        let content = fs::read_to_string(&settings_path)
            .map_err(|e| format!("Failed to read settings.json: {e}"))?;
        serde_json::from_str(&content)
            .map_err(|e| format!("Failed to parse settings.json: {e}"))?
    } else {
        json!({ "permissions": { "allow": [] } })
    };
    // Ensure `/permissions/allow` exists, creating intermediate values as
    // needed, and fail gracefully (no panics) on unexpected shapes.
    let root_obj = settings
        .as_object_mut()
        .ok_or_else(|| "settings.json root is not a JSON object".to_string())?;
    let permissions = root_obj.entry("permissions").or_insert_with(|| json!({}));
    let allow = permissions
        .as_object_mut()
        .ok_or_else(|| "settings.json 'permissions' is not a JSON object".to_string())?
        .entry("allow")
        .or_insert_with(|| json!([]))
        .as_array_mut()
        .ok_or_else(|| "settings.json 'permissions.allow' is not an array".to_string())?;
    // Exact-duplicate check — string equality only.
    let rule_value = Value::String(rule.to_string());
    if allow.contains(&rule_value) {
        return Ok(());
    }
    // Wildcard coverage: if "mcp__story-kit__*" exists, don't add a more
    // specific "mcp__story-kit__create_story".
    let dominated = allow.iter().any(|existing| {
        existing
            .as_str()
            .and_then(|pat| pat.strip_suffix('*'))
            .is_some_and(|prefix| rule.starts_with(prefix))
    });
    if dominated {
        return Ok(());
    }
    allow.push(rule_value);
    let pretty =
        serde_json::to_string_pretty(&settings).map_err(|e| format!("Failed to serialize: {e}"))?;
    fs::write(&settings_path, pretty)
        .map_err(|e| format!("Failed to write settings.json: {e}"))?;
    Ok(())
}
/// Rebuild the server binary and re-exec.
///
/// 1. Gracefully stops all running agents (kills PTY children).
/// 2. Runs `cargo build [-p story-kit]` from the workspace root, matching
///    the current build profile (debug or release).
/// 3. If the build fails, returns the build error (server stays up).
/// 4. If the build succeeds, re-execs the process with the new binary via
///    `std::os::unix::process::CommandExt::exec()`.
///
/// Ordering matters throughout: agents are stopped before the build, port
/// files are removed before `exec()`, and `exec()` never returns on success.
async fn tool_rebuild_and_restart(ctx: &AppContext) -> Result<String, String> {
    slog!("[rebuild] Rebuild and restart requested via MCP tool");
    // 1. Gracefully stop all running agents.
    let running_agents = ctx.agents.list_agents().unwrap_or_default();
    let running_count = running_agents
        .iter()
        .filter(|a| a.status == AgentStatus::Running)
        .count();
    if running_count > 0 {
        slog!("[rebuild] Stopping {running_count} running agent(s) before rebuild");
    }
    // NOTE(review): kill_all_children appears fire-and-forget here — confirm
    // agent processes have fully exited before the build starts if that matters.
    ctx.agents.kill_all_children();
    // 2. Find the workspace root (parent of the server binary's source).
    //    CARGO_MANIFEST_DIR at compile time points to the `server/` crate;
    //    the workspace root is its parent.
    let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    let workspace_root = manifest_dir
        .parent()
        .ok_or_else(|| "Cannot determine workspace root from CARGO_MANIFEST_DIR".to_string())?;
    slog!(
        "[rebuild] Building server from workspace root: {}",
        workspace_root.display()
    );
    // 3. Build the server binary, matching the current build profile so the
    //    re-exec via current_exe() picks up the new binary.
    let build_args: Vec<&str> = if cfg!(debug_assertions) {
        vec!["build", "-p", "story-kit"]
    } else {
        vec!["build", "--release", "-p", "story-kit"]
    };
    slog!("[rebuild] cargo {}", build_args.join(" "));
    // Run the (potentially long) cargo build off the async executor so it
    // doesn't block other tasks.
    let output = tokio::task::spawn_blocking({
        let workspace_root = workspace_root.to_path_buf();
        move || {
            std::process::Command::new("cargo")
                .args(&build_args)
                .current_dir(&workspace_root)
                .output()
        }
    })
    .await
    .map_err(|e| format!("Build task panicked: {e}"))?
    .map_err(|e| format!("Failed to run cargo build: {e}"))?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        slog!("[rebuild] Build failed:\n{stderr}");
        // Build failure is non-fatal: the current server keeps running.
        return Err(format!("Build failed:\n{stderr}"));
    }
    slog!("[rebuild] Build succeeded, re-execing with new binary");
    // 4. Re-exec with the new binary.
    //    Collect current argv so we preserve any CLI arguments (e.g. project path).
    let current_exe = std::env::current_exe()
        .map_err(|e| format!("Cannot determine current executable: {e}"))?;
    let args: Vec<String> = std::env::args().collect();
    // Remove the port file before re-exec so the new process can write its own.
    if let Ok(root) = ctx.state.get_project_root() {
        let port_file = root.join(".story_kit_port");
        if port_file.exists() {
            let _ = std::fs::remove_file(&port_file);
        }
    }
    // Also check cwd for port file.
    let cwd_port_file = std::path::Path::new(".story_kit_port");
    if cwd_port_file.exists() {
        let _ = std::fs::remove_file(cwd_port_file);
    }
    // Use exec() to replace the current process.
    // This never returns on success.
    use std::os::unix::process::CommandExt;
    // args[0] is the old binary path; pass only the remaining CLI arguments.
    let err = std::process::Command::new(&current_exe)
        .args(&args[1..])
        .exec();
    // If we get here, exec() failed.
    Err(format!("Failed to exec new binary: {err}"))
}
/// MCP tool called by Claude Code via `--permission-prompt-tool`.
///
/// Forwards the permission request through the shared channel to the active
/// WebSocket session, which presents a dialog to the user. Blocks until the
/// user approves or denies (with a 5-minute timeout).
async fn tool_prompt_permission(args: &Value, ctx: &AppContext) -> Result<String, String> {
let tool_name = args
.get("tool_name")
.and_then(|v| v.as_str())
.unwrap_or("unknown")
.to_string();
let tool_input = args
.get("input")
.cloned()
.unwrap_or(json!({}));
let request_id = uuid::Uuid::new_v4().to_string();
let (response_tx, response_rx) = tokio::sync::oneshot::channel();
ctx.perm_tx
.send(crate::http::context::PermissionForward {
request_id: request_id.clone(),
tool_name: tool_name.clone(),
tool_input: tool_input.clone(),
response_tx,
})
.map_err(|_| "No active WebSocket session to receive permission request".to_string())?;
use crate::http::context::PermissionDecision;
let decision = tokio::time::timeout(
std::time::Duration::from_secs(300),
response_rx,
)
.await
.map_err(|_| {
let msg = format!("Permission request for '{tool_name}' timed out after 5 minutes");
slog_warn!("[permission] {msg}");
msg
})?
.map_err(|_| "Permission response channel closed unexpectedly".to_string())?;
if decision == PermissionDecision::AlwaysAllow {
// Persist the rule so Claude Code won't prompt again for this tool.
if let Some(root) = ctx.state.project_root.lock().unwrap().clone() {
let rule = generate_permission_rule(&tool_name, &tool_input);
if let Err(e) = add_permission_rule(&root, &rule) {
slog_warn!("[permission] Failed to write always-allow rule: {e}");
} else {
slog!("[permission] Added always-allow rule: {rule}");
}
}
}
if decision == PermissionDecision::Approve || decision == PermissionDecision::AlwaysAllow {
// Claude Code SDK expects:
// Allow: { behavior: "allow", updatedInput: <record> }
// Deny: { behavior: "deny", message: string }
Ok(json!({"behavior": "allow", "updatedInput": tool_input}).to_string())
} else {
slog_warn!("[permission] User denied permission for '{tool_name}'");
Ok(json!({
"behavior": "deny",
"message": format!("User denied permission for '{tool_name}'")
})
.to_string())
}
}
fn tool_get_token_usage(args: &Value, ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let filter_story = args.get("story_id").and_then(|v| v.as_str());
let all_records = crate::agents::token_usage::read_all(&root)?;
let records: Vec<_> = all_records
.into_iter()
.filter(|r| filter_story.is_none_or(|s| r.story_id == s))
.collect();
let total_cost: f64 = records.iter().map(|r| r.usage.total_cost_usd).sum();
let total_input: u64 = records.iter().map(|r| r.usage.input_tokens).sum();
let total_output: u64 = records.iter().map(|r| r.usage.output_tokens).sum();
let total_cache_create: u64 = records
.iter()
.map(|r| r.usage.cache_creation_input_tokens)
.sum();
let total_cache_read: u64 = records
.iter()
.map(|r| r.usage.cache_read_input_tokens)
.sum();
serde_json::to_string_pretty(&json!({
"records": records.iter().map(|r| json!({
"story_id": r.story_id,
"agent_name": r.agent_name,
"timestamp": r.timestamp,
"input_tokens": r.usage.input_tokens,
"output_tokens": r.usage.output_tokens,
"cache_creation_input_tokens": r.usage.cache_creation_input_tokens,
"cache_read_input_tokens": r.usage.cache_read_input_tokens,
"total_cost_usd": r.usage.total_cost_usd,
})).collect::<Vec<_>>(),
"totals": {
"records": records.len(),
"input_tokens": total_input,
"output_tokens": total_output,
"cache_creation_input_tokens": total_cache_create,
"cache_read_input_tokens": total_cache_read,
"total_cost_usd": total_cost,
}
}))
.map_err(|e| format!("Serialization error: {e}"))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::http::context::AppContext;
use crate::store::StoreOps;
// ── Unit tests ────────────────────────────────────────────────
// `None` input parses to an empty list rather than an error.
#[test]
fn parse_test_cases_empty() {
    let result = parse_test_cases(None).unwrap();
    assert!(result.is_empty());
}
// Valid pass/fail entries round-trip, including the optional `details` field.
#[test]
fn parse_test_cases_valid() {
    let input = json!([
        {"name": "test1", "status": "pass"},
        {"name": "test2", "status": "fail", "details": "assertion failed"}
    ]);
    let result = parse_test_cases(Some(&input)).unwrap();
    assert_eq!(result.len(), 2);
    assert_eq!(result[0].status, TestStatus::Pass);
    assert_eq!(result[1].status, TestStatus::Fail);
    assert_eq!(result[1].details, Some("assertion failed".to_string()));
}
// A status other than "pass"/"fail" is rejected with an error.
#[test]
fn parse_test_cases_invalid_status() {
    let input = json!([{"name": "t", "status": "maybe"}]);
    assert!(parse_test_cases(Some(&input)).is_err());
}
// Success responses serialize the `result` field and omit `error`.
#[test]
fn json_rpc_response_serializes_success() {
    let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
    let s = serde_json::to_string(&resp).unwrap();
    assert!(s.contains("\"result\""));
    assert!(!s.contains("\"error\""));
}
// Error responses serialize the `error` field and omit `result`.
#[test]
fn json_rpc_response_serializes_error() {
    let resp = JsonRpcResponse::error(Some(json!(1)), -32600, "bad".into());
    let s = serde_json::to_string(&resp).unwrap();
    assert!(s.contains("\"error\""));
    assert!(!s.contains("\"result\""));
}
// ── Protocol handler integration tests ────────────────────────
// `initialize` echoes the protocol version back and advertises tool support.
#[test]
fn initialize_returns_capabilities() {
    let resp = handle_initialize(
        Some(json!(1)),
        &json!({"protocolVersion": "2025-03-26", "capabilities": {}, "clientInfo": {"name": "test", "version": "1.0"}}),
    );
    let result = resp.result.unwrap();
    assert_eq!(result["protocolVersion"], "2025-03-26");
    assert!(result["capabilities"]["tools"].is_object());
    assert_eq!(result["serverInfo"]["name"], "story-kit");
}
// Exhaustive registry check: every expected tool is advertised, the retired
// `report_completion` is not, and the total count is pinned so additions or
// removals must update this test deliberately.
#[test]
fn tools_list_returns_all_tools() {
    let resp = handle_tools_list(Some(json!(2)));
    let result = resp.result.unwrap();
    let tools = result["tools"].as_array().unwrap();
    let names: Vec<&str> = tools.iter().map(|t| t["name"].as_str().unwrap()).collect();
    assert!(names.contains(&"create_story"));
    assert!(names.contains(&"validate_stories"));
    assert!(names.contains(&"list_upcoming"));
    assert!(names.contains(&"get_story_todos"));
    assert!(names.contains(&"record_tests"));
    assert!(names.contains(&"ensure_acceptance"));
    assert!(names.contains(&"start_agent"));
    assert!(names.contains(&"stop_agent"));
    assert!(names.contains(&"list_agents"));
    assert!(names.contains(&"get_agent_config"));
    assert!(names.contains(&"reload_agent_config"));
    assert!(names.contains(&"get_agent_output"));
    assert!(names.contains(&"wait_for_agent"));
    assert!(names.contains(&"create_worktree"));
    assert!(names.contains(&"list_worktrees"));
    assert!(names.contains(&"remove_worktree"));
    assert!(names.contains(&"get_editor_command"));
    assert!(!names.contains(&"report_completion"));
    assert!(names.contains(&"accept_story"));
    assert!(names.contains(&"check_criterion"));
    assert!(names.contains(&"add_criterion"));
    assert!(names.contains(&"update_story"));
    assert!(names.contains(&"create_spike"));
    assert!(names.contains(&"create_bug"));
    assert!(names.contains(&"list_bugs"));
    assert!(names.contains(&"close_bug"));
    assert!(names.contains(&"create_refactor"));
    assert!(names.contains(&"list_refactors"));
    assert!(names.contains(&"merge_agent_work"));
    assert!(names.contains(&"get_merge_status"));
    assert!(names.contains(&"move_story_to_merge"));
    assert!(names.contains(&"report_merge_failure"));
    assert!(names.contains(&"request_qa"));
    assert!(names.contains(&"approve_qa"));
    assert!(names.contains(&"reject_qa"));
    assert!(names.contains(&"launch_qa_app"));
    assert!(names.contains(&"get_server_logs"));
    assert!(names.contains(&"prompt_permission"));
    assert!(names.contains(&"get_pipeline_status"));
    assert!(names.contains(&"rebuild_and_restart"));
    assert!(names.contains(&"get_token_usage"));
    assert_eq!(tools.len(), 40);
}
// Every advertised tool must carry a name, a description, and an
// object-typed input schema.
#[test]
fn tools_list_schemas_have_required_fields() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    for tool in &tools {
        assert!(tool["name"].is_string(), "tool missing name");
        assert!(tool["description"].is_string(), "tool missing description");
        assert!(tool["inputSchema"].is_object(), "tool missing inputSchema");
        assert_eq!(tool["inputSchema"]["type"], "object");
    }
}
// Build a minimal AppContext rooted at `dir` for tool-level tests.
fn test_ctx(dir: &std::path::Path) -> AppContext {
    AppContext::new_test(dir.to_path_buf())
}
// Validating an empty project yields an empty issue list.
#[test]
fn tool_validate_stories_empty_project() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_validate_stories(&ctx).unwrap();
    let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
    assert!(parsed.is_empty());
}
// create_story writes a backlog file that list_upcoming then returns.
#[test]
fn tool_create_story_and_list_upcoming() {
    let tmp = tempfile::tempdir().unwrap();
    // No git repo needed: spike 61 — create_story just writes the file;
    // the filesystem watcher handles the commit asynchronously.
    let ctx = test_ctx(tmp.path());
    let result = tool_create_story(
        &json!({"name": "Test Story", "acceptance_criteria": ["AC1", "AC2"]}),
        &ctx,
    )
    .unwrap();
    assert!(result.contains("Created story:"));
    // List should return it
    let list = tool_list_upcoming(&ctx).unwrap();
    let parsed: Vec<Value> = serde_json::from_str(&list).unwrap();
    assert_eq!(parsed.len(), 1);
    assert_eq!(parsed[0]["name"], "Test Story");
}
// A name with no alphanumeric characters is rejected.
#[test]
fn tool_create_story_rejects_empty_name() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_create_story(&json!({"name": "!!!"}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("alphanumeric"));
}
// Omitting `name` entirely surfaces a "Missing required argument" error.
#[test]
fn tool_create_story_missing_name() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_create_story(&json!({}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("Missing required argument"));
}
// Pipeline status splits stories into `active` (current/qa/merge/done) and
// `backlog`, and reports a backlog count.
#[test]
fn tool_get_pipeline_status_returns_structured_response() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    for (stage, id, name) in &[
        ("1_backlog", "10_story_upcoming", "Upcoming Story"),
        ("2_current", "20_story_current", "Current Story"),
        ("3_qa", "30_story_qa", "QA Story"),
        ("4_merge", "40_story_merge", "Merge Story"),
        ("5_done", "50_story_done", "Done Story"),
    ] {
        let dir = root.join(".story_kit/work").join(stage);
        std::fs::create_dir_all(&dir).unwrap();
        std::fs::write(
            dir.join(format!("{id}.md")),
            format!("---\nname: \"{name}\"\n---\n"),
        )
        .unwrap();
    }
    let ctx = test_ctx(root);
    let result = tool_get_pipeline_status(&ctx).unwrap();
    let parsed: Value = serde_json::from_str(&result).unwrap();
    // Active stages include current, qa, merge, done
    let active = parsed["active"].as_array().unwrap();
    assert_eq!(active.len(), 4);
    let stages: Vec<&str> = active.iter().map(|i| i["stage"].as_str().unwrap()).collect();
    assert!(stages.contains(&"current"));
    assert!(stages.contains(&"qa"));
    assert!(stages.contains(&"merge"));
    assert!(stages.contains(&"done"));
    // Backlog
    let backlog = parsed["backlog"].as_array().unwrap();
    assert_eq!(backlog.len(), 1);
    assert_eq!(backlog[0]["story_id"], "10_story_upcoming");
    assert_eq!(parsed["backlog_count"], 1);
}
// A running agent injected for a current-stage story shows up on that
// story's pipeline entry.
#[test]
fn tool_get_pipeline_status_includes_agent_assignment() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".story_kit/work/2_current");
    std::fs::create_dir_all(&current).unwrap();
    std::fs::write(
        current.join("20_story_active.md"),
        "---\nname: \"Active Story\"\n---\n",
    )
    .unwrap();
    let ctx = test_ctx(root);
    ctx.agents.inject_test_agent(
        "20_story_active",
        "coder-1",
        crate::agents::AgentStatus::Running,
    );
    let result = tool_get_pipeline_status(&ctx).unwrap();
    let parsed: Value = serde_json::from_str(&result).unwrap();
    let active = parsed["active"].as_array().unwrap();
    assert_eq!(active.len(), 1);
    let item = &active[0];
    assert_eq!(item["story_id"], "20_story_active");
    assert_eq!(item["stage"], "current");
    assert!(!item["agent"].is_null(), "agent should be present");
    assert_eq!(item["agent"]["agent_name"], "coder-1");
    assert_eq!(item["agent"]["status"], "running");
}
// Asking for todos of a nonexistent story is a "not found" error.
#[test]
fn tool_get_story_todos_missing_file() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_get_story_todos(&json!({"story_id": "99_nonexistent"}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("not found"));
}
// Only unchecked (`- [ ]`) items are returned; checked ones are skipped.
#[test]
fn tool_get_story_todos_returns_unchecked() {
    let tmp = tempfile::tempdir().unwrap();
    let current_dir = tmp.path().join(".story_kit").join("work").join("2_current");
    fs::create_dir_all(&current_dir).unwrap();
    fs::write(
        current_dir.join("1_test.md"),
        "---\nname: Test\n---\n## AC\n- [ ] First\n- [x] Done\n- [ ] Second\n",
    )
    .unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_get_story_todos(&json!({"story_id": "1_test"}), &ctx).unwrap();
    let parsed: Value = serde_json::from_str(&result).unwrap();
    assert_eq!(parsed["todos"].as_array().unwrap().len(), 2);
    assert_eq!(parsed["story_name"], "Test");
}
// Recording passing unit + integration tests satisfies the acceptance gate.
#[test]
fn tool_record_tests_and_ensure_acceptance() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // Record passing tests
    let result = tool_record_tests(
        &json!({
            "story_id": "1_test",
            "unit": [{"name": "u1", "status": "pass"}],
            "integration": [{"name": "i1", "status": "pass"}]
        }),
        &ctx,
    )
    .unwrap();
    assert!(result.contains("recorded"));
    // Should be acceptable
    let result = tool_ensure_acceptance(&json!({"story_id": "1_test"}), &ctx).unwrap();
    assert!(result.contains("All gates pass"));
}
// A failing unit test blocks acceptance.
#[test]
fn tool_ensure_acceptance_blocks_on_failures() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    tool_record_tests(
        &json!({
            "story_id": "1_test",
            "unit": [{"name": "u1", "status": "fail"}],
            "integration": []
        }),
        &ctx,
    )
    .unwrap();
    let result = tool_ensure_acceptance(&json!({"story_id": "1_test"}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("blocked"));
}
// A fresh context reports no agents.
#[test]
fn tool_list_agents_empty() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_list_agents(&ctx).unwrap();
    let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
    assert!(parsed.is_empty());
}
// Calling an unregistered tool returns an MCP-style isError result, not a
// JSON-RPC protocol error.
#[test]
fn handle_tools_call_unknown_tool() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let rt = tokio::runtime::Runtime::new().unwrap();
    let resp = rt.block_on(handle_tools_call(
        Some(json!(1)),
        &json!({"name": "bogus_tool", "arguments": {}}),
        &ctx,
    ));
    let result = resp.result.unwrap();
    assert_eq!(result["isError"], true);
    assert!(result["content"][0]["text"].as_str().unwrap().contains("Unknown tool"));
}
// SSE responses carry the text/event-stream content type.
#[test]
fn to_sse_response_wraps_in_data_prefix() {
    let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
    let http_resp = to_sse_response(resp);
    assert_eq!(
        http_resp.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
}
// Plain JSON responses carry application/json.
#[test]
fn wants_sse_detects_accept_header() {
    // Can't easily construct a Request in tests without TestClient,
    // so test the logic indirectly via to_sse_response format
    let resp = JsonRpcResponse::success(Some(json!(1)), json!("ok"));
    let json_resp = to_json_response(resp);
    assert_eq!(
        json_resp.headers().get("content-type").unwrap(),
        "application/json"
    );
}
// wait_for_agent is advertised with a blocking description and requires
// both story_id and agent_name.
#[test]
fn wait_for_agent_tool_in_list() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    let wait_tool = tools.iter().find(|t| t["name"] == "wait_for_agent");
    assert!(wait_tool.is_some(), "wait_for_agent missing from tools list");
    let t = wait_tool.unwrap();
    assert!(t["description"].as_str().unwrap().contains("block") || t["description"].as_str().unwrap().contains("Block"));
    let required = t["inputSchema"]["required"].as_array().unwrap();
    let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
    assert!(req_names.contains(&"story_id"));
    assert!(req_names.contains(&"agent_name"));
}
// Missing story_id is rejected with a message naming the argument.
#[tokio::test]
async fn wait_for_agent_tool_missing_story_id() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_wait_for_agent(&json!({"agent_name": "bot"}), &ctx).await;
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("story_id"));
}
// Missing agent_name is rejected with a message naming the argument.
#[tokio::test]
async fn wait_for_agent_tool_missing_agent_name() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_wait_for_agent(&json!({"story_id": "1_test"}), &ctx).await;
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("agent_name"));
}
// Waiting on an agent that was never registered errors out (short timeout
// keeps the test fast).
#[tokio::test]
async fn wait_for_agent_tool_nonexistent_agent_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result =
        tool_wait_for_agent(&json!({"story_id": "99_nope", "agent_name": "bot", "timeout_ms": 50}), &ctx)
            .await;
    // No agent registered — should error
    assert!(result.is_err());
}
// An already-completed agent is returned immediately with its metadata,
// plus `commits` and `completion` keys (possibly null).
#[tokio::test]
async fn wait_for_agent_tool_returns_completed_agent() {
    use crate::agents::AgentStatus;
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    ctx.agents
        .inject_test_agent("41_story", "worker", AgentStatus::Completed);
    let result = tool_wait_for_agent(
        &json!({"story_id": "41_story", "agent_name": "worker"}),
        &ctx,
    )
    .await
    .unwrap();
    let parsed: Value = serde_json::from_str(&result).unwrap();
    assert_eq!(parsed["status"], "completed");
    assert_eq!(parsed["story_id"], "41_story");
    assert_eq!(parsed["agent_name"], "worker");
    // commits key present (may be null since no real worktree)
    assert!(parsed.get("commits").is_some());
    // completion key present (null for agents that didn't call report_completion)
    assert!(parsed.get("completion").is_some());
}
// ── Editor command tool tests ─────────────────────────────────
// Missing worktree_path argument is rejected by name.
#[test]
fn tool_get_editor_command_missing_worktree_path() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_get_editor_command(&json!({}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("worktree_path"));
}
// Without an editor_command setting the tool reports "No editor configured".
#[test]
fn tool_get_editor_command_no_editor_configured() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_get_editor_command(
        &json!({"worktree_path": "/some/path"}),
        &ctx,
    );
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("No editor configured"));
}
// The result is "<editor> <path>" for the configured editor.
#[test]
fn tool_get_editor_command_formats_correctly() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    ctx.store.set("editor_command", json!("zed"));
    let result = tool_get_editor_command(
        &json!({"worktree_path": "/home/user/worktrees/37_my_story"}),
        &ctx,
    )
    .unwrap();
    assert_eq!(result, "zed /home/user/worktrees/37_my_story");
}
// Same formatting works for other editors (VS Code's `code`).
#[test]
fn tool_get_editor_command_works_with_vscode() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    ctx.store.set("editor_command", json!("code"));
    let result = tool_get_editor_command(
        &json!({"worktree_path": "/path/to/worktree"}),
        &ctx,
    )
    .unwrap();
    assert_eq!(result, "code /path/to/worktree");
}
// get_editor_command is advertised and requires worktree_path.
#[test]
fn get_editor_command_in_tools_list() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    let tool = tools.iter().find(|t| t["name"] == "get_editor_command");
    assert!(tool.is_some(), "get_editor_command missing from tools list");
    let t = tool.unwrap();
    assert!(t["description"].is_string());
    let required = t["inputSchema"]["required"].as_array().unwrap();
    let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
    assert!(req_names.contains(&"worktree_path"));
}
// ── Bug lifecycle tool tests ──────────────────────────────────
/// Initialize a git repository in `dir` with a test identity and one empty
/// initial commit, so tools that commit have a valid repo to work against.
fn setup_git_repo_in(dir: &std::path::Path) {
    let steps: [&[&str]; 4] = [
        &["init"],
        &["config", "user.email", "test@test.com"],
        &["config", "user.name", "Test"],
        &["commit", "--allow-empty", "-m", "init"],
    ];
    for step in steps {
        std::process::Command::new("git")
            .args(step)
            .current_dir(dir)
            .output()
            .unwrap();
    }
}
// create_bug's advertised description must reference the real backlog path
// and list all five required arguments.
#[test]
fn create_bug_in_tools_list() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    let tool = tools.iter().find(|t| t["name"] == "create_bug");
    assert!(tool.is_some(), "create_bug missing from tools list");
    let t = tool.unwrap();
    let desc = t["description"].as_str().unwrap();
    assert!(
        desc.contains("work/1_backlog/"),
        "create_bug description should reference work/1_backlog/, got: {desc}"
    );
    assert!(
        !desc.contains(".story_kit/bugs"),
        "create_bug description should not reference nonexistent .story_kit/bugs/, got: {desc}"
    );
    let required = t["inputSchema"]["required"].as_array().unwrap();
    let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
    assert!(req_names.contains(&"name"));
    assert!(req_names.contains(&"description"));
    assert!(req_names.contains(&"steps_to_reproduce"));
    assert!(req_names.contains(&"actual_result"));
    assert!(req_names.contains(&"expected_result"));
}
// list_bugs must likewise point at work/1_backlog/.
#[test]
fn list_bugs_in_tools_list() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    let tool = tools.iter().find(|t| t["name"] == "list_bugs");
    assert!(tool.is_some(), "list_bugs missing from tools list");
    let t = tool.unwrap();
    let desc = t["description"].as_str().unwrap();
    assert!(
        desc.contains("work/1_backlog/"),
        "list_bugs description should reference work/1_backlog/, got: {desc}"
    );
    assert!(
        !desc.contains(".story_kit/bugs"),
        "list_bugs description should not reference nonexistent .story_kit/bugs/, got: {desc}"
    );
}
// close_bug must describe archiving to work/5_done/ and require bug_id.
#[test]
fn close_bug_in_tools_list() {
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    let tool = tools.iter().find(|t| t["name"] == "close_bug");
    assert!(tool.is_some(), "close_bug missing from tools list");
    let t = tool.unwrap();
    let desc = t["description"].as_str().unwrap();
    assert!(
        !desc.contains(".story_kit/bugs"),
        "close_bug description should not reference nonexistent .story_kit/bugs/, got: {desc}"
    );
    assert!(
        desc.contains("work/5_done/"),
        "close_bug description should reference work/5_done/, got: {desc}"
    );
    let required = t["inputSchema"]["required"].as_array().unwrap();
    let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
    assert!(req_names.contains(&"bug_id"));
}
// Omitting `name` is rejected by name.
#[test]
fn tool_create_bug_missing_name() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_create_bug(
        &json!({
            "description": "d",
            "steps_to_reproduce": "s",
            "actual_result": "a",
            "expected_result": "e"
        }),
        &ctx,
    );
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("name"));
}
// Omitting `description` is rejected by name.
#[test]
fn tool_create_bug_missing_description() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_create_bug(
        &json!({
            "name": "Bug",
            "steps_to_reproduce": "s",
            "actual_result": "a",
            "expected_result": "e"
        }),
        &ctx,
    );
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("description"));
}
// A valid create_bug call writes a slugged markdown file into the backlog.
#[test]
fn tool_create_bug_creates_file_and_commits() {
    let tmp = tempfile::tempdir().unwrap();
    setup_git_repo_in(tmp.path());
    let ctx = test_ctx(tmp.path());
    let result = tool_create_bug(
        &json!({
            "name": "Login Crash",
            "description": "The app crashes on login.",
            "steps_to_reproduce": "1. Open app\n2. Click login",
            "actual_result": "500 error",
            "expected_result": "Successful login"
        }),
        &ctx,
    )
    .unwrap();
    assert!(result.contains("1_bug_login_crash"));
    let bug_file = tmp
        .path()
        .join(".story_kit/work/1_backlog/1_bug_login_crash.md");
    assert!(bug_file.exists());
}
// No bug files → empty list.
#[test]
fn tool_list_bugs_empty() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_list_bugs(&ctx).unwrap();
    let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
    assert!(parsed.is_empty());
}
// Bug files in the backlog are listed with ids and names parsed from the
// `# Bug N: <name>` heading.
#[test]
fn tool_list_bugs_returns_open_bugs() {
    let tmp = tempfile::tempdir().unwrap();
    let backlog_dir = tmp.path().join(".story_kit/work/1_backlog");
    std::fs::create_dir_all(&backlog_dir).unwrap();
    std::fs::write(
        backlog_dir.join("1_bug_crash.md"),
        "# Bug 1: App Crash\n",
    )
    .unwrap();
    std::fs::write(
        backlog_dir.join("2_bug_typo.md"),
        "# Bug 2: Typo in Header\n",
    )
    .unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_list_bugs(&ctx).unwrap();
    let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
    assert_eq!(parsed.len(), 2);
    assert_eq!(parsed[0]["bug_id"], "1_bug_crash");
    assert_eq!(parsed[0]["name"], "App Crash");
    assert_eq!(parsed[1]["bug_id"], "2_bug_typo");
    assert_eq!(parsed[1]["name"], "Typo in Header");
}
// Missing bug_id argument is rejected by name.
#[test]
fn tool_close_bug_missing_bug_id() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_close_bug(&json!({}), &ctx);
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("bug_id"));
}
// Closing a tracked bug moves its file from 1_backlog to 5_done.
#[test]
fn tool_close_bug_moves_to_archive() {
    let tmp = tempfile::tempdir().unwrap();
    setup_git_repo_in(tmp.path());
    let backlog_dir = tmp.path().join(".story_kit/work/1_backlog");
    std::fs::create_dir_all(&backlog_dir).unwrap();
    let bug_file = backlog_dir.join("1_bug_crash.md");
    std::fs::write(&bug_file, "# Bug 1: Crash\n").unwrap();
    // Stage the file so it's tracked
    std::process::Command::new("git")
        .args(["add", "."])
        .current_dir(tmp.path())
        .output()
        .unwrap();
    std::process::Command::new("git")
        .args(["commit", "-m", "add bug"])
        .current_dir(tmp.path())
        .output()
        .unwrap();
    let ctx = test_ctx(tmp.path());
    let result = tool_close_bug(&json!({"bug_id": "1_bug_crash"}), &ctx).unwrap();
    assert!(result.contains("1_bug_crash"));
    assert!(!bug_file.exists());
    assert!(tmp.path().join(".story_kit/work/5_done/1_bug_crash.md").exists());
}
// ── Spike lifecycle tool tests ─────────────────────────────────────────
#[test]
fn create_spike_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "create_spike");
assert!(tool.is_some(), "create_spike missing from tools list");
let t = tool.unwrap();
assert!(t["description"].is_string());
let required = t["inputSchema"]["required"].as_array().unwrap();
let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
assert!(req_names.contains(&"name"));
// description is optional
assert!(!req_names.contains(&"description"));
}
/// create_spike requires a `name` argument; the error names the field.
#[test]
fn tool_create_spike_missing_name() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_create_spike(&json!({}), &context).unwrap_err();
    assert!(err.contains("name"));
}
/// A name with no alphanumeric characters must be rejected (the slug would
/// be empty), with an error mentioning "alphanumeric".
#[test]
fn tool_create_spike_rejects_empty_name() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_create_spike(&json!({"name": "!!!"}), &context).unwrap_err();
    assert!(err.contains("alphanumeric"));
}
/// create_spike writes a slugged markdown file into 1_backlog whose front
/// matter quotes the original name and whose body carries the description.
#[test]
fn tool_create_spike_creates_file() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let args = json!({"name": "Compare Encoders", "description": "Which encoder is fastest?"});
    let reply = tool_create_spike(&args, &context).unwrap();
    assert!(reply.contains("1_spike_compare_encoders"));
    let spike_path = dir
        .path()
        .join(".story_kit/work/1_backlog/1_spike_compare_encoders.md");
    assert!(spike_path.exists());
    let body = std::fs::read_to_string(&spike_path).unwrap();
    assert!(body.starts_with("---\nname: \"Compare Encoders\"\n---"));
    assert!(body.contains("Which encoder is fastest?"));
}
/// Without a description, the generated spike file still gets a placeholder
/// "## Question" section containing "- TBD".
#[test]
fn tool_create_spike_creates_file_without_description() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let reply = tool_create_spike(&json!({"name": "My Spike"}), &context).unwrap();
    assert!(reply.contains("1_spike_my_spike"));
    let spike_path = dir.path().join(".story_kit/work/1_backlog/1_spike_my_spike.md");
    assert!(spike_path.exists());
    let body = std::fs::read_to_string(&spike_path).unwrap();
    assert!(body.starts_with("---\nname: \"My Spike\"\n---"));
    assert!(body.contains("## Question\n\n- TBD\n"));
}
// ── Mergemaster tool tests ─────────────────────────────────────────────
/// tools/list must advertise `merge_agent_work` with `story_id` required and
/// `agent_name` optional.
#[test]
fn merge_agent_work_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "merge_agent_work")
        .expect("merge_agent_work missing from tools list");
    assert!(entry["description"].is_string());
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    // `agent_name` must not be listed as required.
    assert!(!required.contains(&"agent_name"));
}
/// tools/list must advertise `move_story_to_merge` with `story_id` required
/// and `agent_name` optional.
#[test]
fn move_story_to_merge_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "move_story_to_merge")
        .expect("move_story_to_merge missing from tools list");
    assert!(entry["description"].is_string());
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    // `agent_name` must not be listed as required.
    assert!(!required.contains(&"agent_name"));
}
/// merge_agent_work requires a `story_id` argument.
#[test]
fn tool_merge_agent_work_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_merge_agent_work(&json!({}), &context).unwrap_err();
    assert!(err.contains("story_id"));
}
/// move_story_to_merge requires a `story_id` argument.
#[tokio::test]
async fn tool_move_story_to_merge_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_move_story_to_merge(&json!({}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
// move_story_to_merge must relocate the story file from 2_current to 4_merge
// even when the follow-up merge agent cannot start (this fixture has no
// worktree or agent config, so the agent start is expected to fail).
#[tokio::test]
async fn tool_move_story_to_merge_moves_file() {
let tmp = tempfile::tempdir().unwrap();
setup_git_repo_in(tmp.path());
let current_dir = tmp.path().join(".story_kit/work/2_current");
std::fs::create_dir_all(&current_dir).unwrap();
let story_file = current_dir.join("24_story_test.md");
std::fs::write(&story_file, "---\nname: Test\n---\n").unwrap();
// Stage and commit so the story file is tracked by git.
std::process::Command::new("git")
.args(["add", "."])
.current_dir(tmp.path())
.output()
.unwrap();
std::process::Command::new("git")
.args(["commit", "-m", "add story"])
.current_dir(tmp.path())
.output()
.unwrap();
let ctx = test_ctx(tmp.path());
// The agent start will fail in test (no worktree/config), but the file move should succeed
let result = tool_move_story_to_merge(&json!({"story_id": "24_story_test"}), &ctx).await;
// File should have been moved regardless of agent start outcome
assert!(!story_file.exists(), "2_current file should be gone");
assert!(
tmp.path().join(".story_kit/work/4_merge/24_story_test.md").exists(),
"4_merge file should exist"
);
// Result is either Ok (agent started) or Err (agent failed - acceptable in tests)
let _ = result;
}
/// merge_agent_work replies immediately with a JSON payload echoing the
/// story id, a "started" status, and a human-readable message.
#[tokio::test]
async fn tool_merge_agent_work_returns_started() {
    let dir = tempfile::tempdir().unwrap();
    setup_git_repo_in(dir.path());
    let context = test_ctx(dir.path());
    let args = json!({"story_id": "99_nonexistent", "agent_name": "coder-1"});
    let reply = tool_merge_agent_work(&args, &context).unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    assert_eq!(payload["story_id"], "99_nonexistent");
    assert_eq!(payload["status"], "started");
    assert!(payload.get("message").is_some());
}
/// Querying merge status for a story with no merge job must fail with a
/// "No merge job" error.
#[test]
fn tool_get_merge_status_no_job() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_get_merge_status(&json!({"story_id": "99_nonexistent"}), &context)
        .unwrap_err();
    assert!(err.contains("No merge job"));
}
// After kicking off a background merge, status polling must report one of
// the known job states. The job may legitimately finish (or fail) before we
// poll, so all three states are accepted.
#[tokio::test]
async fn tool_get_merge_status_returns_running() {
let tmp = tempfile::tempdir().unwrap();
setup_git_repo_in(tmp.path());
let ctx = test_ctx(tmp.path());
// Start a merge (it will run in background)
tool_merge_agent_work(
&json!({"story_id": "99_nonexistent"}),
&ctx,
)
.unwrap();
// Immediately check — should be running (or already finished if very fast)
let result = tool_get_merge_status(&json!({"story_id": "99_nonexistent"}), &ctx).unwrap();
let parsed: Value = serde_json::from_str(&result).unwrap();
let status = parsed["status"].as_str().unwrap();
assert!(
status == "running" || status == "completed" || status == "failed",
"unexpected status: {status}"
);
}
// ── report_merge_failure tool tests ─────────────────────────────
/// tools/list must advertise `report_merge_failure` with both `story_id`
/// and `reason` required.
#[test]
fn report_merge_failure_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "report_merge_failure")
        .expect("report_merge_failure missing from tools list");
    assert!(entry["description"].is_string());
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    assert!(required.contains(&"reason"));
}
/// report_merge_failure requires a `story_id` argument.
#[test]
fn tool_report_merge_failure_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_report_merge_failure(&json!({"reason": "conflicts"}), &context).unwrap_err();
    assert!(err.contains("story_id"));
}
/// report_merge_failure requires a `reason` argument.
#[test]
fn tool_report_merge_failure_missing_reason() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err =
        tool_report_merge_failure(&json!({"story_id": "42_story_foo"}), &context).unwrap_err();
    assert!(err.contains("reason"));
}
/// A successful report echoes the story id, the 4_merge path, and the
/// caller-supplied failure reason in the confirmation message.
#[test]
fn tool_report_merge_failure_returns_confirmation() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let args = json!({
        "story_id": "42_story_foo",
        "reason": "Unresolvable merge conflicts in src/main.rs"
    });
    let msg = tool_report_merge_failure(&args, &context).unwrap();
    assert!(msg.contains("42_story_foo"));
    assert!(msg.contains("work/4_merge/"));
    assert!(msg.contains("Unresolvable merge conflicts"));
}
// ── HTTP handler tests (TestClient) ───────────────────────────
/// Builds a minimal poem app exposing only the /mcp endpoint (POST + GET
/// handlers) with the given context attached as shared data.
fn test_mcp_app(ctx: std::sync::Arc<AppContext>) -> impl poem::Endpoint {
    use poem::EndpointExt;
    let route = poem::Route::new()
        .at("/mcp", poem::post(mcp_post_handler).get(mcp_get_handler));
    route.data(ctx)
}
/// Consumes a test response body and parses it as JSON.
async fn read_body_json(resp: poem::test::TestResponse) -> Value {
    let text = resp.0.into_body().into_string().await.unwrap();
    serde_json::from_str(&text).unwrap()
}
/// POSTs a raw JSON payload to /mcp and returns the parsed response body.
async fn post_json_mcp<E: poem::Endpoint>(
    client: &poem::test::TestClient<E>,
    payload: &str,
) -> Value {
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .body(payload.to_string())
        .send()
        .await;
    read_body_json(response).await
}
/// GET on /mcp is not supported — expect 405 Method Not Allowed.
#[tokio::test]
async fn mcp_get_handler_returns_405() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client.get("/mcp").send().await;
    assert_eq!(response.0.status(), poem::http::StatusCode::METHOD_NOT_ALLOWED);
}
/// A POST whose content-type is not application/json yields a body carrying
/// a JSON-RPC `error` field.
#[tokio::test]
async fn mcp_post_invalid_content_type_returns_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "text/plain")
        .body("{}")
        .send()
        .await;
    let payload = read_body_json(response).await;
    assert!(payload.get("error").is_some(), "expected error field: {payload}");
}
/// Malformed JSON in the request body yields a JSON-RPC `error` response.
#[tokio::test]
async fn mcp_post_invalid_json_returns_parse_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .body("not-valid-json")
        .send()
        .await;
    let payload = read_body_json(response).await;
    assert!(payload.get("error").is_some(), "expected error field: {payload}");
}
/// The `jsonrpc` field must be exactly "2.0"; "1.0" is rejected with an
/// error message mentioning the version.
#[tokio::test]
async fn mcp_post_wrong_jsonrpc_version_returns_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let payload = post_json_mcp(
        &client,
        r#"{"jsonrpc":"1.0","id":1,"method":"initialize","params":{}}"#,
    )
    .await;
    let message = payload["error"]["message"].as_str().unwrap_or("");
    assert!(message.contains("version"), "expected version error: {payload}");
}
/// A JSON-RPC notification (no `id` field) is acknowledged with 202 Accepted.
#[tokio::test]
async fn mcp_post_notification_with_null_id_returns_accepted() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .body(r#"{"jsonrpc":"2.0","method":"notifications/initialized","params":{}}"#)
        .send()
        .await;
    assert_eq!(response.0.status(), poem::http::StatusCode::ACCEPTED);
}
/// An explicit `"id": null` is treated the same as a missing id — the
/// request counts as a notification and gets 202 Accepted.
#[tokio::test]
async fn mcp_post_notification_with_explicit_null_id_returns_accepted() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .body(
            r#"{"jsonrpc":"2.0","id":null,"method":"notifications/initialized","params":{}}"#,
        )
        .send()
        .await;
    assert_eq!(response.0.status(), poem::http::StatusCode::ACCEPTED);
}
/// A request method (not a notification) sent without an id must produce an
/// error response rather than being silently accepted.
#[tokio::test]
async fn mcp_post_missing_id_non_notification_returns_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let payload = post_json_mcp(
        &client,
        r#"{"jsonrpc":"2.0","method":"initialize","params":{}}"#,
    )
    .await;
    assert!(payload.get("error").is_some(), "expected error: {payload}");
}
/// An unrecognized method name yields an "Unknown method" error.
#[tokio::test]
async fn mcp_post_unknown_method_returns_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let payload = post_json_mcp(
        &client,
        r#"{"jsonrpc":"2.0","id":1,"method":"bogus/method","params":{}}"#,
    )
    .await;
    let message = payload["error"]["message"].as_str().unwrap_or("");
    assert!(
        message.contains("Unknown method"),
        "expected unknown method error: {payload}"
    );
}
/// initialize must echo the protocol version and identify the server as
/// "story-kit".
#[tokio::test]
async fn mcp_post_initialize_returns_capabilities() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let payload = post_json_mcp(
        &client,
        r#"{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"test","version":"1.0"}}}"#,
    )
    .await;
    assert_eq!(payload["result"]["protocolVersion"], "2025-03-26");
    assert_eq!(payload["result"]["serverInfo"]["name"], "story-kit");
}
/// tools/list over HTTP returns a JSON array under result.tools.
#[tokio::test]
async fn mcp_post_tools_list_returns_tools() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let payload = post_json_mcp(
        &client,
        r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#,
    )
    .await;
    assert!(payload["result"]["tools"].is_array());
}
/// When the client accepts text/event-stream, the response is served as SSE.
#[tokio::test]
async fn mcp_post_sse_returns_event_stream_content_type() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#)
        .send()
        .await;
    assert_eq!(
        response.0.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
}
/// get_agent_output over SSE still responds as an event stream even when the
/// required `story_id` argument is missing.
#[tokio::test]
async fn mcp_post_sse_get_agent_output_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{}}}"#)
        .send()
        .await;
    assert_eq!(
        response.0.headers().get("content-type").unwrap(),
        "text/event-stream",
        "expected SSE content-type"
    );
}
/// get_agent_output over SSE still responds as an event stream even when the
/// `agent_name` argument is missing.
#[tokio::test]
async fn mcp_post_sse_get_agent_output_missing_agent_name() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"1_test"}}}"#)
        .send()
        .await;
    assert_eq!(
        response.0.headers().get("content-type").unwrap(),
        "text/event-stream",
        "expected SSE content-type"
    );
}
/// With no matching agent, the SSE stream still carries at least one
/// `data:`-prefixed frame (the error payload).
#[tokio::test]
async fn mcp_post_sse_get_agent_output_no_agent_returns_sse_error() {
    let dir = tempfile::tempdir().unwrap();
    let context = std::sync::Arc::new(test_ctx(dir.path()));
    let client = poem::test::TestClient::new(test_mcp_app(context));
    let response = client
        .post("/mcp")
        .header("content-type", "application/json")
        .header("accept", "text/event-stream")
        .body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"99_nope","agent_name":"bot"}}}"#)
        .send()
        .await;
    assert_eq!(
        response.0.headers().get("content-type").unwrap(),
        "text/event-stream"
    );
    let stream_body = response.0.into_body().into_string().await.unwrap();
    assert!(stream_body.contains("data:"), "expected SSE data prefix: {stream_body}");
}
// ── tool_get_server_logs tests ────────────────────────────────
/// With no arguments the tool returns the recent log tail (possibly empty
/// in tests) without panicking.
#[test]
fn tool_get_server_logs_no_args_returns_string() {
    let output = tool_get_server_logs(&json!({})).unwrap();
    let _ = output;
}
/// A filter string that matches no log lines yields an empty result.
#[test]
fn tool_get_server_logs_with_filter_returns_matching_lines() {
    let output = tool_get_server_logs(&json!({"filter": "xyz_unlikely_match_999"})).unwrap();
    assert_eq!(output, "", "filter with no matches should return empty string");
}
/// The `lines` argument caps how many log lines are returned.
#[test]
fn tool_get_server_logs_with_line_limit() {
    let output = tool_get_server_logs(&json!({"lines": 5})).unwrap();
    assert!(output.lines().count() <= 5);
}
/// A `lines` request above the 1000-line cap is clamped rather than
/// returning an error.
#[test]
fn tool_get_server_logs_max_cap_is_1000() {
    let output = tool_get_server_logs(&json!({"lines": 9999})).unwrap();
    let _ = output;
}
// ── tool_get_token_usage tests ────────────────────────────────
/// With no usage records on disk the record list is empty and the totals
/// are zero.
#[test]
fn tool_get_token_usage_empty_returns_zero_totals() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let reply = tool_get_token_usage(&json!({}), &context).unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    assert_eq!(payload["records"].as_array().unwrap().len(), 0);
    assert_eq!(payload["totals"]["records"], 0);
    assert_eq!(payload["totals"]["total_cost_usd"], 0.0);
}
/// A record written via token_usage::append_record round-trips through the
/// tool: fields and totals match what was stored.
#[test]
fn tool_get_token_usage_returns_written_records() {
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let context = test_ctx(root);
    let usage = crate::agents::TokenUsage {
        input_tokens: 100,
        output_tokens: 200,
        cache_creation_input_tokens: 5000,
        cache_read_input_tokens: 10000,
        total_cost_usd: 1.57,
    };
    let record = crate::agents::token_usage::build_record("42_story_foo", "coder-1", usage);
    crate::agents::token_usage::append_record(root, &record).unwrap();
    let reply = tool_get_token_usage(&json!({}), &context).unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    let records = payload["records"].as_array().unwrap();
    assert_eq!(records.len(), 1);
    assert_eq!(records[0]["story_id"], "42_story_foo");
    assert_eq!(records[0]["agent_name"], "coder-1");
    assert_eq!(records[0]["input_tokens"], 100);
    assert_eq!(payload["totals"]["records"], 1);
    // 1.57 survives the JSON round-trip exactly, so a tight tolerance is fine.
    let cost = payload["totals"]["total_cost_usd"].as_f64().unwrap();
    assert!((cost - 1.57).abs() < f64::EPSILON);
}
/// The optional `story_id` argument restricts both the records and the
/// totals to the matching story.
#[test]
fn tool_get_token_usage_filters_by_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let root = dir.path();
    let context = test_ctx(root);
    let usage = crate::agents::TokenUsage {
        input_tokens: 50,
        output_tokens: 60,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
        total_cost_usd: 0.5,
    };
    let first = crate::agents::token_usage::build_record("10_story_a", "coder-1", usage.clone());
    let second = crate::agents::token_usage::build_record("20_story_b", "coder-2", usage);
    crate::agents::token_usage::append_record(root, &first).unwrap();
    crate::agents::token_usage::append_record(root, &second).unwrap();
    let reply = tool_get_token_usage(&json!({"story_id": "10_story_a"}), &context).unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    let records = payload["records"].as_array().unwrap();
    assert_eq!(records.len(), 1);
    assert_eq!(records[0]["story_id"], "10_story_a");
    assert_eq!(payload["totals"]["records"], 1);
}
// ── tool_list_worktrees tests ─────────────────────────────────
/// A project with no worktrees lists an empty array.
#[test]
fn tool_list_worktrees_empty_dir() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let reply = tool_list_worktrees(&context).unwrap();
    let entries: Vec<Value> = serde_json::from_str(&reply).unwrap();
    assert!(entries.is_empty());
}
// ── tool_accept_story tests ───────────────────────────────────
/// accept_story requires a `story_id` argument.
#[test]
fn tool_accept_story_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_accept_story(&json!({}), &context).unwrap_err();
    assert!(err.contains("story_id"));
}
/// Accepting a story that has no file in 2_current must fail.
#[test]
fn tool_accept_story_nonexistent_story_returns_error() {
    let dir = tempfile::tempdir().unwrap();
    setup_git_repo_in(dir.path());
    let context = test_ctx(dir.path());
    // The current/ directory holds no story for this id.
    let outcome = tool_accept_story(&json!({"story_id": "99_nonexistent"}), &context);
    assert!(outcome.is_err());
}
/// Bug 226: accept_story must refuse when the feature branch has unmerged code.
// Setup: commit a change on feature/story-50_story_test, switch back to
// master (so the branch is ahead of master), then place the story file in
// 2_current so only the unmerged-branch guard can cause the failure.
#[test]
fn tool_accept_story_refuses_when_feature_branch_has_unmerged_code() {
let tmp = tempfile::tempdir().unwrap();
setup_git_repo_in(tmp.path());
// Create a feature branch with code changes.
std::process::Command::new("git")
.args(["checkout", "-b", "feature/story-50_story_test"])
.current_dir(tmp.path())
.output()
.unwrap();
std::fs::write(tmp.path().join("feature.rs"), "fn main() {}").unwrap();
std::process::Command::new("git")
.args(["add", "."])
.current_dir(tmp.path())
.output()
.unwrap();
std::process::Command::new("git")
.args(["commit", "-m", "add feature"])
.current_dir(tmp.path())
.output()
.unwrap();
// Return to master, leaving the feature commit unmerged.
std::process::Command::new("git")
.args(["checkout", "master"])
.current_dir(tmp.path())
.output()
.unwrap();
// Create story file in current/ so move_story_to_archived would work.
let current_dir = tmp.path().join(".story_kit/work/2_current");
std::fs::create_dir_all(&current_dir).unwrap();
std::fs::write(
current_dir.join("50_story_test.md"),
"---\nname: Test\n---\n",
)
.unwrap();
let ctx = test_ctx(tmp.path());
let result =
tool_accept_story(&json!({"story_id": "50_story_test"}), &ctx);
assert!(result.is_err(), "should refuse when feature branch has unmerged code");
let err = result.unwrap_err();
assert!(
err.contains("unmerged"),
"error should mention unmerged changes: {err}"
);
}
/// Bug 226: accept_story succeeds when no feature branch exists (e.g. manual stories).
#[test]
fn tool_accept_story_succeeds_when_no_feature_branch() {
    let dir = tempfile::tempdir().unwrap();
    setup_git_repo_in(dir.path());
    // Only a story file in 2_current — deliberately no feature branch.
    let current = dir.path().join(".story_kit/work/2_current");
    std::fs::create_dir_all(&current).unwrap();
    std::fs::write(
        current.join("51_story_no_branch.md"),
        "---\nname: No Branch\n---\n",
    )
    .unwrap();
    let context = test_ctx(dir.path());
    let outcome = tool_accept_story(&json!({"story_id": "51_story_no_branch"}), &context);
    assert!(outcome.is_ok(), "should succeed when no feature branch: {outcome:?}");
}
// ── tool_check_criterion tests ────────────────────────────────
/// check_criterion requires a `story_id` argument.
#[test]
fn tool_check_criterion_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_check_criterion(&json!({"criterion_index": 0}), &context).unwrap_err();
    assert!(err.contains("story_id"));
}
/// check_criterion requires a `criterion_index` argument.
#[test]
fn tool_check_criterion_missing_criterion_index() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_check_criterion(&json!({"story_id": "1_test"}), &context).unwrap_err();
    assert!(err.contains("criterion_index"));
}
// check_criterion must tick the unchecked box at the given index in a
// committed story file and confirm which criterion was checked.
#[test]
fn tool_check_criterion_marks_unchecked_item() {
let tmp = tempfile::tempdir().unwrap();
setup_git_repo_in(tmp.path());
let current_dir = tmp.path().join(".story_kit").join("work").join("2_current");
fs::create_dir_all(&current_dir).unwrap();
// Index 0 is the unchecked item; index 1 is already done.
fs::write(
current_dir.join("1_test.md"),
"---\nname: Test\n---\n## AC\n- [ ] First criterion\n- [x] Already done\n",
)
.unwrap();
// Stage and commit so the story file is tracked.
std::process::Command::new("git")
.args(["add", "."])
.current_dir(tmp.path())
.output()
.unwrap();
std::process::Command::new("git")
.args(["commit", "-m", "add story"])
.current_dir(tmp.path())
.output()
.unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_check_criterion(
&json!({"story_id": "1_test", "criterion_index": 0}),
&ctx,
);
assert!(result.is_ok(), "Expected ok: {result:?}");
assert!(result.unwrap().contains("Criterion 0 checked"));
}
// ── tool_get_agent_config tests ───────────────────────────────
/// Without a project.toml the agent config falls back to a single default
/// agent entry carrying `name` and `role` fields.
#[test]
fn tool_get_agent_config_no_project_toml_returns_default_agent() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let reply = tool_get_agent_config(&context).unwrap();
    let agents: Vec<Value> = serde_json::from_str(&reply).unwrap();
    assert_eq!(agents.len(), 1, "default config should have one fallback agent");
    assert!(agents[0].get("name").is_some());
    assert!(agents[0].get("role").is_some());
}
// ── tool_get_agent_output_poll tests ─────────────────────────
/// get_agent_output (poll) requires a `story_id` argument.
#[tokio::test]
async fn tool_get_agent_output_poll_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_get_agent_output_poll(&json!({"agent_name": "bot"}), &context)
        .await
        .unwrap_err();
    assert!(err.contains("story_id"));
}
/// get_agent_output (poll) requires an `agent_name` argument.
#[tokio::test]
async fn tool_get_agent_output_poll_missing_agent_name() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_get_agent_output_poll(&json!({"story_id": "1_test"}), &context)
        .await
        .unwrap_err();
    assert!(err.contains("agent_name"));
}
/// With no registered agent and no log file, polling falls back to an
/// empty, done response carrying a "No agent" message.
#[tokio::test]
async fn tool_get_agent_output_poll_no_agent_falls_back_to_empty_log() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let reply = tool_get_agent_output_poll(
        &json!({"story_id": "99_nope", "agent_name": "bot"}),
        &context,
    )
    .await
    .unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    assert_eq!(payload["done"], true);
    assert_eq!(payload["event_count"], 0);
    let message = payload["message"].as_str().unwrap_or("");
    assert!(message.contains("No agent"), "expected 'No agent' message: {payload}");
}
/// An injected running agent with no broadcast events yields done=false,
/// zero events, and an (empty) events array.
#[tokio::test]
async fn tool_get_agent_output_poll_with_running_agent_returns_empty_events() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    context
        .agents
        .inject_test_agent("10_story", "worker", crate::agents::AgentStatus::Running);
    let reply = tool_get_agent_output_poll(
        &json!({"story_id": "10_story", "agent_name": "worker"}),
        &context,
    )
    .await
    .unwrap();
    let payload: Value = serde_json::from_str(&reply).unwrap();
    assert_eq!(payload["done"], false);
    assert_eq!(payload["event_count"], 0);
    assert!(payload["events"].is_array());
}
// ── Missing-arg tests for async tools ────────────────────────
/// stop_agent requires a `story_id` argument.
#[tokio::test]
async fn tool_stop_agent_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_stop_agent(&json!({"agent_name": "bot"}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
/// stop_agent requires an `agent_name` argument.
#[tokio::test]
async fn tool_stop_agent_missing_agent_name() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_stop_agent(&json!({"story_id": "1_test"}), &context).await.unwrap_err();
    assert!(err.contains("agent_name"));
}
/// start_agent requires a `story_id` argument.
#[tokio::test]
async fn tool_start_agent_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_start_agent(&json!({}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
#[tokio::test]
async fn tool_start_agent_no_agent_name_no_coder_returns_clear_error() {
// Config has only a supervisor — start_agent without agent_name should
// refuse rather than silently assigning supervisor.
let tmp = tempfile::tempdir().unwrap();
let sk = tmp.path().join(".story_kit");
std::fs::create_dir_all(&sk).unwrap();
// Write a project.toml whose only agent has a non-coder stage.
std::fs::write(
sk.join("project.toml"),
r#"
[[agent]]
name = "supervisor"
stage = "other"
"#,
)
.unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_start_agent(&json!({"story_id": "42_my_story"}), &ctx).await;
assert!(result.is_err());
let err = result.unwrap_err();
// The failure must point the user at the missing coder role.
assert!(
err.contains("coder"),
"error should mention 'coder', got: {err}"
);
}
#[tokio::test]
async fn tool_start_agent_no_agent_name_picks_coder_not_supervisor() {
// Config has supervisor first, then coder-1. Without agent_name the
// coder should be selected, not supervisor. The call will fail due to
// missing git repo / worktree, but the error must NOT be about
// "No coder agent configured".
let tmp = tempfile::tempdir().unwrap();
let sk = tmp.path().join(".story_kit");
std::fs::create_dir_all(&sk).unwrap();
// Two agents: supervisor listed first so ordering alone cannot pick the coder.
std::fs::write(
sk.join("project.toml"),
r#"
[[agent]]
name = "supervisor"
stage = "other"
[[agent]]
name = "coder-1"
stage = "coder"
"#,
)
.unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_start_agent(&json!({"story_id": "42_my_story"}), &ctx).await;
// May succeed or fail for infrastructure reasons (no git repo), but
// must NOT fail with "No coder agent configured".
if let Err(err) = result {
assert!(
!err.contains("No coder agent configured"),
"should not fail on agent selection, got: {err}"
);
// Should also not complain about supervisor being absent.
assert!(
!err.contains("supervisor"),
"should not select supervisor, got: {err}"
);
}
}
/// create_worktree requires a `story_id` argument.
#[tokio::test]
async fn tool_create_worktree_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_create_worktree(&json!({}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
/// remove_worktree requires a `story_id` argument.
#[tokio::test]
async fn tool_remove_worktree_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_remove_worktree(&json!({}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
/// request_qa requires a `story_id` argument.
#[tokio::test]
async fn tool_request_qa_missing_story_id() {
    let dir = tempfile::tempdir().unwrap();
    let context = test_ctx(dir.path());
    let err = tool_request_qa(&json!({}), &context).await.unwrap_err();
    assert!(err.contains("story_id"));
}
// ── parse_test_cases additional coverage ─────────────────────
/// JSON `null` counts as "no test cases" and yields an empty list.
#[test]
fn parse_test_cases_null_value_returns_empty() {
    let input = json!(null);
    let cases = parse_test_cases(Some(&input)).unwrap();
    assert!(cases.is_empty());
}
/// Non-array input is rejected with an "Expected array" error.
#[test]
fn parse_test_cases_non_array_returns_error() {
    let input = json!({"invalid": "input"});
    let err = parse_test_cases(Some(&input)).unwrap_err();
    assert!(err.contains("Expected array"));
}
/// A case object without a `name` field is rejected.
#[test]
fn parse_test_cases_missing_name_returns_error() {
    let input = json!([{"status": "pass"}]);
    let err = parse_test_cases(Some(&input)).unwrap_err();
    assert!(err.contains("name"));
}
/// A case object without a `status` field is rejected.
#[test]
fn parse_test_cases_missing_status_returns_error() {
    let input = json!([{"name": "test1"}]);
    let err = parse_test_cases(Some(&input)).unwrap_err();
    assert!(err.contains("status"));
}
// ── json_rpc_error_response direct test ──────────────────────
/// The error helper always answers HTTP 200 with a JSON content type; the
/// JSON-RPC error lives inside the body envelope, not in the HTTP status.
#[test]
fn json_rpc_error_response_builds_json_response() {
    let response = json_rpc_error_response(Some(json!(42)), -32600, "test error".into());
    assert_eq!(response.status(), poem::http::StatusCode::OK);
    assert_eq!(
        response.headers().get("content-type").unwrap(),
        "application/json"
    );
}
// ── request_qa in tools list ──────────────────────────────────
/// tools/list must advertise `request_qa` with `story_id` required and
/// `agent_name` optional.
#[test]
fn request_qa_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "request_qa")
        .expect("request_qa missing from tools list");
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    // `agent_name` must not be listed as required.
    assert!(!required.contains(&"agent_name"));
}
// ── approve_qa in tools list ──────────────────────────────────
/// tools/list must advertise `approve_qa` with `story_id` required.
#[test]
fn approve_qa_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "approve_qa")
        .expect("approve_qa missing from tools list");
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
}
// ── reject_qa in tools list ──────────────────────────────────
/// tools/list must advertise `reject_qa` with both `story_id` and `notes`
/// required.
#[test]
fn reject_qa_in_tools_list() {
    let listing = handle_tools_list(Some(json!(1)));
    let tools = listing.result.unwrap()["tools"].as_array().unwrap().clone();
    let entry = tools
        .iter()
        .find(|t| t["name"] == "reject_qa")
        .expect("reject_qa missing from tools list");
    let required: Vec<&str> = entry["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    assert!(required.contains(&"notes"));
}
// ── launch_qa_app in tools list ──────────────────────────────
#[test]
fn launch_qa_app_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "launch_qa_app");
assert!(tool.is_some(), "launch_qa_app missing from tools list");
let t = tool.unwrap();
let required = t["inputSchema"]["required"].as_array().unwrap();
let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
assert!(req_names.contains(&"story_id"));
}
// ── approve_qa missing story_id ──────────────────────────────
#[tokio::test]
async fn tool_approve_qa_missing_story_id() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_approve_qa(&json!({}), &ctx).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("story_id"));
}
// ── reject_qa missing arguments ──────────────────────────────
#[tokio::test]
async fn tool_reject_qa_missing_story_id() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_reject_qa(&json!({"notes": "broken"}), &ctx).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("story_id"));
}
#[tokio::test]
async fn tool_reject_qa_missing_notes() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_reject_qa(&json!({"story_id": "1_story_test"}), &ctx).await;
assert!(result.is_err());
assert!(result.unwrap_err().contains("notes"));
}
// ── tool_validate_stories with file content ───────────────────
#[test]
fn tool_validate_stories_with_valid_story() {
let tmp = tempfile::tempdir().unwrap();
let current_dir = tmp.path().join(".story_kit").join("work").join("2_current");
fs::create_dir_all(&current_dir).unwrap();
fs::write(
current_dir.join("1_test.md"),
"---\nname: \"Valid Story\"\n---\n## AC\n- [ ] First\n",
)
.unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_validate_stories(&ctx).unwrap();
let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
assert_eq!(parsed.len(), 1);
assert_eq!(parsed[0]["valid"], true);
}
#[test]
fn tool_validate_stories_with_invalid_front_matter() {
let tmp = tempfile::tempdir().unwrap();
let current_dir = tmp.path().join(".story_kit").join("work").join("2_current");
fs::create_dir_all(&current_dir).unwrap();
fs::write(
current_dir.join("1_test.md"),
"## No front matter at all\n",
)
.unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_validate_stories(&ctx).unwrap();
let parsed: Vec<Value> = serde_json::from_str(&result).unwrap();
assert!(!parsed.is_empty());
assert_eq!(parsed[0]["valid"], false);
}
// ── tool_record_tests and tool_ensure_acceptance edge cases ──
#[test]
fn tool_record_tests_missing_story_id() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_record_tests(
&json!({"unit": [], "integration": []}),
&ctx,
);
assert!(result.is_err());
assert!(result.unwrap_err().contains("story_id"));
}
#[test]
fn tool_record_tests_invalid_unit_type_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_record_tests(
&json!({
"story_id": "1_test",
"unit": "not_an_array",
"integration": []
}),
&ctx,
);
assert!(result.is_err());
}
#[test]
fn tool_ensure_acceptance_missing_story_id() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_ensure_acceptance(&json!({}), &ctx);
assert!(result.is_err());
assert!(result.unwrap_err().contains("story_id"));
}
#[tokio::test]
async fn tool_prompt_permission_approved_returns_updated_input() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
// Spawn a task that immediately sends approval through the channel.
let perm_rx = ctx.perm_rx.clone();
tokio::spawn(async move {
let mut rx = perm_rx.lock().await;
if let Some(forward) = rx.recv().await {
let _ = forward.response_tx.send(crate::http::context::PermissionDecision::Approve);
}
});
let result = tool_prompt_permission(
&json!({"tool_name": "Bash", "input": {"command": "echo hello"}}),
&ctx,
)
.await
.expect("should succeed on approval");
let parsed: Value = serde_json::from_str(&result).expect("result should be valid JSON");
assert_eq!(
parsed["behavior"], "allow",
"approved must return behavior:allow"
);
assert_eq!(
parsed["updatedInput"]["command"], "echo hello",
"approved must return updatedInput with original tool input for Claude Code SDK compatibility"
);
}
#[tokio::test]
async fn tool_prompt_permission_denied_returns_deny_json() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
// Spawn a task that immediately sends denial through the channel.
let perm_rx = ctx.perm_rx.clone();
tokio::spawn(async move {
let mut rx = perm_rx.lock().await;
if let Some(forward) = rx.recv().await {
let _ = forward.response_tx.send(crate::http::context::PermissionDecision::Deny);
}
});
let result = tool_prompt_permission(
&json!({"tool_name": "Write", "input": {}}),
&ctx,
)
.await
.expect("denial must return Ok, not Err");
let parsed: Value = serde_json::from_str(&result).expect("result should be valid JSON");
assert_eq!(parsed["behavior"], "deny", "denied must return behavior:deny");
assert!(parsed["message"].is_string(), "deny must include a message");
}
// ── record_tests / ensure_acceptance persistence tests ───────
#[test]
fn record_tests_persists_to_story_file() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".story_kit/work/2_current");
fs::create_dir_all(&current).unwrap();
fs::write(current.join("1_story_persist.md"), "---\nname: Persist\n---\n# Story\n").unwrap();
let ctx = test_ctx(tmp.path());
tool_record_tests(
&json!({
"story_id": "1_story_persist",
"unit": [{"name": "u1", "status": "pass"}],
"integration": []
}),
&ctx,
)
.unwrap();
let contents = fs::read_to_string(current.join("1_story_persist.md")).unwrap();
assert!(contents.contains("## Test Results"), "file should have Test Results section");
assert!(contents.contains("story-kit-test-results:"), "file should have JSON marker");
assert!(contents.contains("u1"), "file should contain test name");
}
#[test]
fn ensure_acceptance_reads_from_file_when_not_in_memory() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".story_kit/work/2_current");
fs::create_dir_all(&current).unwrap();
// Write a story file with a pre-populated Test Results section (simulating a restart)
let story_content = "---\nname: Persist\n---\n# Story\n\n## Test Results\n\n<!-- story-kit-test-results: {\"unit\":[{\"name\":\"u1\",\"status\":\"pass\",\"details\":null}],\"integration\":[{\"name\":\"i1\",\"status\":\"pass\",\"details\":null}]} -->\n";
fs::write(current.join("2_story_file_only.md"), story_content).unwrap();
// Use a fresh context (empty in-memory state, simulating a restart)
let ctx = test_ctx(tmp.path());
// ensure_acceptance should read from file and succeed
let result = tool_ensure_acceptance(&json!({"story_id": "2_story_file_only"}), &ctx);
assert!(result.is_ok(), "should accept based on file data, got: {:?}", result);
assert!(result.unwrap().contains("All gates pass"));
}
#[test]
fn ensure_acceptance_file_with_failures_still_blocks() {
let tmp = tempfile::tempdir().unwrap();
let current = tmp.path().join(".story_kit/work/2_current");
fs::create_dir_all(&current).unwrap();
let story_content = "---\nname: Fail\n---\n# Story\n\n## Test Results\n\n<!-- story-kit-test-results: {\"unit\":[{\"name\":\"u1\",\"status\":\"fail\",\"details\":\"error\"}],\"integration\":[]} -->\n";
fs::write(current.join("3_story_fail.md"), story_content).unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_ensure_acceptance(&json!({"story_id": "3_story_fail"}), &ctx);
assert!(result.is_err());
assert!(result.unwrap_err().contains("blocked"));
}
#[test]
fn read_coverage_percent_from_json_parses_llvm_cov_format() {
let tmp = tempfile::tempdir().unwrap();
let cov_dir = tmp.path().join(".story_kit/coverage");
fs::create_dir_all(&cov_dir).unwrap();
let json_content = r#"{"data":[{"totals":{"lines":{"count":100,"covered":78,"percent":78.0}}}]}"#;
fs::write(cov_dir.join("server.json"), json_content).unwrap();
let pct = read_coverage_percent_from_json(tmp.path());
assert_eq!(pct, Some(78.0));
}
#[test]
fn read_coverage_percent_from_json_returns_none_when_absent() {
let tmp = tempfile::tempdir().unwrap();
let pct = read_coverage_percent_from_json(tmp.path());
assert!(pct.is_none());
}
// ── Permission rule generation tests ─────────────────────────
#[test]
fn generate_rule_for_edit_tool() {
let rule = generate_permission_rule("Edit", &json!({}));
assert_eq!(rule, "Edit");
}
#[test]
fn generate_rule_for_write_tool() {
let rule = generate_permission_rule("Write", &json!({}));
assert_eq!(rule, "Write");
}
#[test]
fn generate_rule_for_bash_git() {
let rule =
generate_permission_rule("Bash", &json!({"command": "git status"}));
assert_eq!(rule, "Bash(git *)");
}
#[test]
fn generate_rule_for_bash_cargo() {
let rule =
generate_permission_rule("Bash", &json!({"command": "cargo test --all"}));
assert_eq!(rule, "Bash(cargo *)");
}
#[test]
fn generate_rule_for_bash_empty_command() {
let rule = generate_permission_rule("Bash", &json!({}));
assert_eq!(rule, "Bash(unknown *)");
}
#[test]
fn generate_rule_for_mcp_tool() {
let rule = generate_permission_rule(
"mcp__story-kit__create_story",
&json!({"name": "foo"}),
);
assert_eq!(rule, "mcp__story-kit__create_story");
}
// ── Settings.json writing tests ──────────────────────────────
#[test]
fn add_rule_creates_settings_file_when_missing() {
let tmp = tempfile::tempdir().unwrap();
add_permission_rule(tmp.path(), "Edit").unwrap();
let content = fs::read_to_string(tmp.path().join(".claude/settings.json")).unwrap();
let settings: Value = serde_json::from_str(&content).unwrap();
let allow = settings["permissions"]["allow"].as_array().unwrap();
assert!(allow.contains(&json!("Edit")));
}
#[test]
fn add_rule_does_not_duplicate_existing() {
let tmp = tempfile::tempdir().unwrap();
add_permission_rule(tmp.path(), "Edit").unwrap();
add_permission_rule(tmp.path(), "Edit").unwrap();
let content = fs::read_to_string(tmp.path().join(".claude/settings.json")).unwrap();
let settings: Value = serde_json::from_str(&content).unwrap();
let allow = settings["permissions"]["allow"].as_array().unwrap();
let count = allow.iter().filter(|v| v == &&json!("Edit")).count();
assert_eq!(count, 1);
}
#[test]
fn add_rule_skips_when_wildcard_already_covers() {
let tmp = tempfile::tempdir().unwrap();
let claude_dir = tmp.path().join(".claude");
fs::create_dir_all(&claude_dir).unwrap();
fs::write(
claude_dir.join("settings.json"),
r#"{"permissions":{"allow":["mcp__story-kit__*"]}}"#,
)
.unwrap();
add_permission_rule(tmp.path(), "mcp__story-kit__create_story").unwrap();
let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
let settings: Value = serde_json::from_str(&content).unwrap();
let allow = settings["permissions"]["allow"].as_array().unwrap();
assert_eq!(allow.len(), 1);
assert_eq!(allow[0], "mcp__story-kit__*");
}
#[test]
fn add_rule_appends_to_existing_rules() {
let tmp = tempfile::tempdir().unwrap();
let claude_dir = tmp.path().join(".claude");
fs::create_dir_all(&claude_dir).unwrap();
fs::write(
claude_dir.join("settings.json"),
r#"{"permissions":{"allow":["Edit"]}}"#,
)
.unwrap();
add_permission_rule(tmp.path(), "Write").unwrap();
let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
let settings: Value = serde_json::from_str(&content).unwrap();
let allow = settings["permissions"]["allow"].as_array().unwrap();
assert_eq!(allow.len(), 2);
assert!(allow.contains(&json!("Edit")));
assert!(allow.contains(&json!("Write")));
}
#[test]
fn add_rule_preserves_other_settings_fields() {
let tmp = tempfile::tempdir().unwrap();
let claude_dir = tmp.path().join(".claude");
fs::create_dir_all(&claude_dir).unwrap();
fs::write(
claude_dir.join("settings.json"),
r#"{"permissions":{"allow":["Edit"]},"enabledMcpjsonServers":["story-kit"]}"#,
)
.unwrap();
add_permission_rule(tmp.path(), "Write").unwrap();
let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
let settings: Value = serde_json::from_str(&content).unwrap();
let servers = settings["enabledMcpjsonServers"].as_array().unwrap();
assert_eq!(servers.len(), 1);
assert_eq!(servers[0], "story-kit");
}
// ── rebuild_and_restart ──────────────────────────────────────────
#[test]
fn rebuild_and_restart_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "rebuild_and_restart");
assert!(
tool.is_some(),
"rebuild_and_restart missing from tools list"
);
let t = tool.unwrap();
assert!(t["description"].as_str().unwrap().contains("Rebuild"));
assert!(t["inputSchema"].is_object());
}
#[tokio::test]
async fn rebuild_and_restart_kills_agents_before_build() {
// Verify that calling rebuild_and_restart on an empty pool doesn't
// panic and proceeds to the build step. We can't test exec() in a
// unit test, but we can verify the build attempt happens.
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
// The build will succeed (we're running in the real workspace) and
// then exec() will be called — which would replace our test process.
// So we only test that the function *runs* without panicking up to
// the agent-kill step. We do this by checking the pool is empty.
assert_eq!(ctx.agents.list_agents().unwrap().len(), 0);
ctx.agents.kill_all_children(); // should not panic on empty pool
}
#[test]
fn rebuild_uses_matching_build_profile() {
// The build must use the same profile (debug/release) as the running
// binary, otherwise cargo build outputs to a different target dir and
// current_exe() still points at the old binary.
let build_args: Vec<&str> = if cfg!(debug_assertions) {
vec!["build", "-p", "story-kit"]
} else {
vec!["build", "--release", "-p", "story-kit"]
};
// Tests always run in debug mode, so --release must NOT be present.
assert!(
!build_args.contains(&"--release"),
"In debug builds, rebuild must not pass --release (would put \
the binary in target/release/ while current_exe() points to \
target/debug/)"
);
}
}