Files
storkit/server/src/http/mcp/mod.rs
Dave 9581e5d51a rename .story_kit directory to .storkit and update all references
Renames the config directory and updates 514 references across 42 Rust
source files, plus CLAUDE.md, .gitignore, Makefile, script/release,
and .mcp.json files. All 1205 tests pass.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-20 11:34:53 +00:00

1526 lines
66 KiB
Rust

use crate::slog_warn;
use crate::http::context::AppContext;
use poem::handler;
use poem::http::StatusCode;
use poem::web::Data;
use poem::{Body, Request, Response};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::sync::Arc;
pub mod agent_tools;
pub mod diagnostics;
pub mod merge_tools;
pub mod qa_tools;
pub mod shell_tools;
pub mod story_tools;
/// Returns true when the Accept header includes text/event-stream.
///
/// Media types in `Accept` are case-insensitive (RFC 9110 §8.3), so the
/// header value is lowercased before the substring match; a client sending
/// `Accept: Text/Event-Stream` is treated the same as the lowercase form.
/// A missing header is treated as "no SSE requested".
fn wants_sse(req: &Request) -> bool {
    req.header("accept")
        .unwrap_or("")
        .to_ascii_lowercase()
        .contains("text/event-stream")
}
// ── JSON-RPC structs ──────────────────────────────────────────────
/// Incoming JSON-RPC 2.0 request envelope, deserialized from the POST body.
#[derive(Deserialize)]
struct JsonRpcRequest {
    /// Protocol version string; must be exactly "2.0" (checked in
    /// `mcp_post_handler`).
    jsonrpc: String,
    /// Request id. `None` (or an explicit JSON null) marks a notification.
    id: Option<Value>,
    /// Method name, e.g. "initialize", "tools/list", "tools/call".
    method: String,
    /// Method parameters; defaults to `Value::Null` when the field is absent.
    #[serde(default)]
    params: Value,
}
/// Outgoing JSON-RPC 2.0 response envelope.
///
/// Exactly one of `result` / `error` is populated (enforced by the
/// `success` / `error` constructors); the unset side is omitted from the
/// serialized output via `skip_serializing_if`.
#[derive(Serialize)]
pub(super) struct JsonRpcResponse {
    /// Always the literal "2.0".
    jsonrpc: &'static str,
    /// Echoed request id; omitted entirely when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    id: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    result: Option<Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    error: Option<JsonRpcError>,
}
/// JSON-RPC 2.0 error object (the `error` member of a response).
#[derive(Serialize)]
struct JsonRpcError {
    /// Standard JSON-RPC error code (e.g. -32700 parse error,
    /// -32600 invalid request, -32601 method not found, -32602 invalid params).
    code: i64,
    /// Human-readable error description.
    message: String,
    /// Optional structured detail; currently never set (always `None`).
    #[serde(skip_serializing_if = "Option::is_none")]
    data: Option<Value>,
}
impl JsonRpcResponse {
pub(super) fn success(id: Option<Value>, result: Value) -> Self {
Self {
jsonrpc: "2.0",
id,
result: Some(result),
error: None,
}
}
pub(super) fn error(id: Option<Value>, code: i64, message: String) -> Self {
Self {
jsonrpc: "2.0",
id,
result: None,
error: Some(JsonRpcError {
code,
message,
data: None,
}),
}
}
}
// ── Poem handlers ─────────────────────────────────────────────────
/// GET /mcp is not supported: this server offers no standalone SSE
/// subscription channel, so GET requests are rejected with 405 and an
/// empty body. MCP clients must POST JSON-RPC messages instead.
#[handler]
pub async fn mcp_get_handler() -> Response {
    Response::builder()
        .status(StatusCode::METHOD_NOT_ALLOWED)
        .body(Body::empty())
}
/// POST /mcp — main MCP entry point.
///
/// Pipeline: validate Content-Type → read and parse the JSON-RPC envelope →
/// reject bad versions → short-circuit notifications → optionally stream
/// `tools/call` over SSE for the two streaming-capable tools → dispatch to
/// the protocol handlers and render the reply as JSON or a single SSE frame
/// depending on the request's Accept header.
#[handler]
pub async fn mcp_post_handler(req: &Request, body: Body, ctx: Data<&Arc<AppContext>>) -> Response {
    // Validate Content-Type
    // An absent/empty Content-Type is accepted leniently; anything else must
    // mention application/json.
    // NOTE(review): -32700 is the JSON-RPC "parse error" code — reused here
    // for an unsupported media type; confirm that is intentional.
    let content_type = req.header("content-type").unwrap_or("");
    if !content_type.is_empty() && !content_type.contains("application/json") {
        return json_rpc_error_response(
            None,
            -32700,
            "Unsupported Content-Type; expected application/json".into(),
        );
    }
    // Buffer the whole body; both failures below map to a parse error with a
    // null id, as the request id is not yet known.
    let bytes = match body.into_bytes().await {
        Ok(b) => b,
        Err(_) => return json_rpc_error_response(None, -32700, "Parse error".into()),
    };
    let rpc: JsonRpcRequest = match serde_json::from_slice(&bytes) {
        Ok(r) => r,
        Err(_) => return json_rpc_error_response(None, -32700, "Parse error".into()),
    };
    if rpc.jsonrpc != "2.0" {
        return json_rpc_error_response(rpc.id, -32600, "Invalid JSON-RPC version".into());
    }
    // Notifications (no id) — accept silently
    // Only "notifications/*" methods may omit the id; anything else without
    // an id is an invalid request.
    if rpc.id.is_none() || rpc.id.as_ref() == Some(&Value::Null) {
        if rpc.method.starts_with("notifications/") {
            return Response::builder()
                .status(StatusCode::ACCEPTED)
                .body(Body::empty());
        }
        return json_rpc_error_response(None, -32600, "Missing id".into());
    }
    let sse = wants_sse(req);
    // Streaming agent output over SSE
    // Only these two tools have streaming implementations; all other
    // tools/call requests fall through to the buffered path below even when
    // the client asked for SSE (they get a single SSE frame instead).
    if sse && rpc.method == "tools/call" {
        let tool_name = rpc
            .params
            .get("name")
            .and_then(|v| v.as_str())
            .unwrap_or("");
        if tool_name == "get_agent_output" {
            return handle_agent_output_sse(rpc.id, &rpc.params, &ctx);
        }
        if tool_name == "run_command" {
            return shell_tools::handle_run_command_sse(rpc.id, &rpc.params, &ctx);
        }
    }
    let resp = match rpc.method.as_str() {
        "initialize" => handle_initialize(rpc.id, &rpc.params),
        "tools/list" => handle_tools_list(rpc.id),
        "tools/call" => handle_tools_call(rpc.id, &rpc.params, &ctx).await,
        _ => JsonRpcResponse::error(rpc.id, -32601, format!("Unknown method: {}", rpc.method)),
    };
    // Render in whichever framing the client negotiated.
    if sse {
        to_sse_response(resp)
    } else {
        to_json_response(resp)
    }
}
fn json_rpc_error_response(id: Option<Value>, code: i64, message: String) -> Response {
to_json_response(JsonRpcResponse::error(id, code, message))
}
/// Render a JSON-RPC response as an HTTP 200 with an application/json body.
/// Serialization failure degrades to an empty body rather than a panic.
fn to_json_response(resp: JsonRpcResponse) -> Response {
    let payload = serde_json::to_vec(&resp).unwrap_or_default();
    Response::builder()
        .header("Content-Type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(payload))
}
/// Render a JSON-RPC response as a single SSE frame (`data: <json>\n\n`)
/// with the text/event-stream content type. Serialization failure degrades
/// to an empty data payload.
pub(super) fn to_sse_response(resp: JsonRpcResponse) -> Response {
    let payload = serde_json::to_string(&resp).unwrap_or_default();
    let frame = format!("data: {payload}\n\n");
    Response::builder()
        .status(StatusCode::OK)
        .header("Cache-Control", "no-cache")
        .header("Content-Type", "text/event-stream")
        .body(Body::from_string(frame))
}
/// Stream agent events as SSE — each event is a separate JSON-RPC notification,
/// followed by a final JSON-RPC response with the matching request id.
///
/// Subscribes to the agent's broadcast channel and forwards events until a
/// terminal event (`Done`/`Error`), channel closure, or the deadline. Argument
/// validation failures are reported inside the SSE stream as JSON-RPC errors
/// so the client always gets a well-formed event-stream body.
///
/// NOTE(review): the SSE path defaults `timeout_ms` to 10 s capped at 30 s,
/// while the tools/list description of get_agent_output advertises
/// "default: 2000, max: 10000" — that text appears to describe the polling
/// variant; confirm the two are meant to differ.
fn handle_agent_output_sse(
    id: Option<Value>,
    params: &Value,
    ctx: &AppContext,
) -> Response {
    let args = params.get("arguments").cloned().unwrap_or(json!({}));
    // Both story_id and agent_name are required; missing either yields a
    // -32602 (invalid params) error frame.
    let story_id = match args.get("story_id").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return to_sse_response(JsonRpcResponse::error(
            id,
            -32602,
            "Missing required argument: story_id".into(),
        )),
    };
    let agent_name = match args.get("agent_name").and_then(|v| v.as_str()) {
        Some(s) => s.to_string(),
        None => return to_sse_response(JsonRpcResponse::error(
            id,
            -32602,
            "Missing required argument: agent_name".into(),
        )),
    };
    // Overall stream deadline: default 10 s, capped at 30 s.
    let timeout_ms = args
        .get("timeout_ms")
        .and_then(|v| v.as_u64())
        .unwrap_or(10000)
        .min(30000);
    // Subscription failure (e.g. unknown agent) is reported as a tool-level
    // error (isError: true) in a successful JSON-RPC response, matching the
    // MCP convention for tool execution failures.
    let mut rx = match ctx.agents.subscribe(&story_id, &agent_name) {
        Ok(rx) => rx,
        Err(e) => return to_sse_response(JsonRpcResponse::success(
            id,
            json!({ "content": [{"type": "text", "text": e}], "isError": true }),
        )),
    };
    // Moved into the stream closure so the final response can echo the id.
    let final_id = id;
    let stream = async_stream::stream! {
        let deadline = tokio::time::Instant::now()
            + std::time::Duration::from_millis(timeout_ms);
        // Tracks whether the stream ended because the agent finished (vs.
        // the deadline expiring); selects the final message text below.
        let mut done = false;
        loop {
            // Recompute the remaining budget each iteration so the overall
            // wall-clock cap holds regardless of how many events arrive.
            let remaining = deadline.saturating_duration_since(tokio::time::Instant::now());
            if remaining.is_zero() {
                break;
            }
            match tokio::time::timeout(remaining, rx.recv()).await {
                Ok(Ok(event)) => {
                    // Done/Error are terminal: forward the event, then stop.
                    let is_terminal = matches!(
                        &event,
                        crate::agents::AgentEvent::Done { .. }
                        | crate::agents::AgentEvent::Error { .. }
                    );
                    // Send each event as a JSON-RPC notification (no id)
                    if let Ok(event_json) = serde_json::to_value(&event) {
                        let notification = json!({
                            "jsonrpc": "2.0",
                            "method": "notifications/tools/progress",
                            "params": { "event": event_json }
                        });
                        if let Ok(s) = serde_json::to_string(&notification) {
                            yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
                        }
                    }
                    if is_terminal {
                        done = true;
                        break;
                    }
                }
                // Broadcast receiver fell behind; report how many events
                // were dropped and keep streaming.
                Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(n))) => {
                    let notification = json!({
                        "jsonrpc": "2.0",
                        "method": "notifications/tools/progress",
                        "params": { "event": {"type": "warning", "message": format!("Skipped {n} events")} }
                    });
                    if let Ok(s) = serde_json::to_string(&notification) {
                        yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
                    }
                }
                // Sender dropped: the agent is gone, treat as a clean end.
                Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => {
                    done = true;
                    break;
                }
                Err(_) => break, // timeout
            }
        }
        // Final response with the request id
        let final_resp = JsonRpcResponse::success(
            final_id,
            json!({
                "content": [{
                    "type": "text",
                    "text": if done { "Agent stream ended." } else { "Stream timed out; call again to continue." }
                }]
            }),
        );
        if let Ok(s) = serde_json::to_string(&final_resp) {
            yield Ok::<_, std::io::Error>(format!("data: {s}\n\n"));
        }
    };
    Response::builder()
        .status(StatusCode::OK)
        .header("Content-Type", "text/event-stream")
        .header("Cache-Control", "no-cache")
        .body(Body::from_bytes_stream(
            futures::StreamExt::map(stream, |r| r.map(bytes::Bytes::from)),
        ))
}
// ── MCP protocol handlers ─────────────────────────────────────────
/// Handle the MCP `initialize` request.
///
/// The client's advertised `protocolVersion` is not inspected: this server
/// always answers with the single protocol revision it implements
/// ("2025-03-26") and a tools-only capability set. Per MCP version
/// negotiation, a client that cannot work with the returned version is
/// expected to disconnect. (The previous code parsed the client's version
/// into a dead `_protocol_version` local; that binding is removed.)
fn handle_initialize(id: Option<Value>, params: &Value) -> JsonRpcResponse {
    // `params` (protocolVersion, capabilities, clientInfo) is intentionally
    // unused; the parameter is kept for signature parity with the other
    // protocol handlers.
    let _ = params;
    JsonRpcResponse::success(
        id,
        json!({
            "protocolVersion": "2025-03-26",
            "capabilities": {
                "tools": {}
            },
            "serverInfo": {
                "name": "storkit",
                "version": "1.0.0"
            }
        }),
    )
}
/// Handle MCP `tools/list`: return the static catalogue of every tool this
/// server exposes, each entry carrying a JSON-Schema `inputSchema` for its
/// arguments. The names listed here must stay in sync with the dispatch
/// `match` in `handle_tools_call`.
fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
    JsonRpcResponse::success(
        id,
        json!({
            "tools": [
                // ── Story workflow tools ──────────────────────────────
                {
                    "name": "create_story",
                    "description": "Create a new story file with front matter in upcoming/. Returns the story_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "Human-readable story name"
                            },
                            "user_story": {
                                "type": "string",
                                "description": "Optional user story text (As a..., I want..., so that...)"
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": { "type": "string" },
                                "description": "Optional list of acceptance criteria"
                            },
                            "commit": {
                                "type": "boolean",
                                "description": "If true, git-add and git-commit the new story file to the current branch"
                            }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "validate_stories",
                    "description": "Validate front matter on all current and upcoming story files.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "list_upcoming",
                    "description": "List all upcoming stories with their names and any parsing errors.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "get_story_todos",
                    "description": "Get unchecked acceptance criteria (todos) for a story file in current/.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "record_tests",
                    "description": "Record test results for a story. Only one failing test at a time is allowed.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            },
                            "unit": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": { "type": "string" },
                                        "status": { "type": "string", "enum": ["pass", "fail"] },
                                        "details": { "type": "string" }
                                    },
                                    "required": ["name", "status"]
                                },
                                "description": "Unit test results"
                            },
                            "integration": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": { "type": "string" },
                                        "status": { "type": "string", "enum": ["pass", "fail"] },
                                        "details": { "type": "string" }
                                    },
                                    "required": ["name", "status"]
                                },
                                "description": "Integration test results"
                            }
                        },
                        "required": ["story_id", "unit", "integration"]
                    }
                },
                {
                    "name": "ensure_acceptance",
                    "description": "Check whether a story can be accepted. Returns acceptance status with reasons if blocked.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Agent lifecycle tools ─────────────────────────────
                {
                    "name": "start_agent",
                    "description": "Start an agent for a story. Creates a worktree, runs setup, and spawns the agent process.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '28_my_story')"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name from project.toml config. If omitted, uses the first coder agent (stage = \"coder\"). Supervisor must be requested explicitly by name."
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "stop_agent",
                    "description": "Stop a running agent. Worktree is preserved for inspection.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name to stop"
                            }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                {
                    "name": "list_agents",
                    "description": "List all agents with their current status, story assignment, and worktree path.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "get_agent_config",
                    "description": "Get the configured agent roster from project.toml (names, roles, models, allowed tools, limits).",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "reload_agent_config",
                    "description": "Reload project.toml and return the updated agent roster.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "get_agent_output",
                    "description": "Poll recent output from a running agent. Subscribes to the agent's event stream and collects events for up to 2 seconds. Returns text output and status events. Call repeatedly to follow progress.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name"
                            },
                            "timeout_ms": {
                                "type": "integer",
                                "description": "How long to wait for events in milliseconds (default: 2000, max: 10000)"
                            }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                {
                    "name": "wait_for_agent",
                    "description": "Block until the agent reaches a terminal state (completed, failed, stopped). Returns final status and summary including session_id, worktree_path, and any commits made. Use this instead of polling get_agent_output when you want to fire-and-forget and be notified on completion.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name to wait for"
                            },
                            "timeout_ms": {
                                "type": "integer",
                                "description": "Maximum time to wait in milliseconds (default: 300000 = 5 minutes)"
                            }
                        },
                        "required": ["story_id", "agent_name"]
                    }
                },
                // ── Worktree tools ────────────────────────────────────
                {
                    "name": "create_worktree",
                    "description": "Create a git worktree for a story under .storkit/worktrees/{story_id} with deterministic naming. Writes .mcp.json and runs component setup. Returns the worktree path.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '42_my_story')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "list_worktrees",
                    "description": "List all worktrees under .storkit/worktrees/ for the current project.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "remove_worktree",
                    "description": "Remove a git worktree and its feature branch for a story.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "get_editor_command",
                    "description": "Get the open-in-editor command for a worktree. Returns a ready-to-paste shell command like 'zed /path/to/worktree'. Requires the editor preference to be configured via PUT /api/settings/editor.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "worktree_path": {
                                "type": "string",
                                "description": "Absolute path to the worktree directory"
                            }
                        },
                        "required": ["worktree_path"]
                    }
                },
                // ── Story mutation / lifecycle tools ──────────────────
                {
                    "name": "accept_story",
                    "description": "Accept a story: moves it from current/ to done/ and auto-commits to master.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "check_criterion",
                    "description": "Check off an acceptance criterion (- [ ] → - [x]) by 0-based index among unchecked items, then auto-commit to master. Use get_story_todos to see the current list of unchecked criteria.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            },
                            "criterion_index": {
                                "type": "integer",
                                "description": "0-based index of the unchecked criterion to check off"
                            }
                        },
                        "required": ["story_id", "criterion_index"]
                    }
                },
                {
                    "name": "add_criterion",
                    "description": "Add an acceptance criterion to an existing story file. Appends '- [ ] {criterion}' after the last existing criterion in the '## Acceptance Criteria' section. Auto-commits via the filesystem watcher.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            },
                            "criterion": {
                                "type": "string",
                                "description": "The acceptance criterion text to add (without the '- [ ] ' prefix)"
                            }
                        },
                        "required": ["story_id", "criterion"]
                    }
                },
                {
                    "name": "update_story",
                    "description": "Update an existing story file. Can replace the '## User Story' and/or '## Description' section content, and/or set YAML front matter fields (e.g. agent, qa). Auto-commits via the filesystem watcher.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            },
                            "user_story": {
                                "type": "string",
                                "description": "New user story text to replace the '## User Story' section content"
                            },
                            "description": {
                                "type": "string",
                                "description": "New description text to replace the '## Description' section content"
                            },
                            "agent": {
                                "type": "string",
                                "description": "Set or change the 'agent' YAML front matter field"
                            },
                            "front_matter": {
                                "type": "object",
                                "description": "Arbitrary YAML front matter key-value pairs to set or update",
                                "additionalProperties": {
                                    "type": "string"
                                }
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Spike / bug / refactor work items ─────────────────
                {
                    "name": "create_spike",
                    "description": "Create a spike file in .storkit/work/1_backlog/ with a deterministic filename and YAML front matter. Returns the spike_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "Human-readable spike name"
                            },
                            "description": {
                                "type": "string",
                                "description": "Optional description / question the spike aims to answer"
                            }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "create_bug",
                    "description": "Create a bug file in work/1_backlog/ with a deterministic filename and auto-commit to master. Returns the bug_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "Short human-readable bug name"
                            },
                            "description": {
                                "type": "string",
                                "description": "Description of the bug"
                            },
                            "steps_to_reproduce": {
                                "type": "string",
                                "description": "Steps to reproduce the bug"
                            },
                            "actual_result": {
                                "type": "string",
                                "description": "What actually happens"
                            },
                            "expected_result": {
                                "type": "string",
                                "description": "What should happen"
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": { "type": "string" },
                                "description": "Optional list of acceptance criteria for the fix"
                            }
                        },
                        "required": ["name", "description", "steps_to_reproduce", "actual_result", "expected_result"]
                    }
                },
                {
                    "name": "list_bugs",
                    "description": "List all open bugs in work/1_backlog/ matching the _bug_ naming convention.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "create_refactor",
                    "description": "Create a refactor work item in work/1_backlog/ with a deterministic filename and YAML front matter. Returns the refactor_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string",
                                "description": "Short human-readable refactor name"
                            },
                            "description": {
                                "type": "string",
                                "description": "Optional description of the desired state after refactoring"
                            },
                            "acceptance_criteria": {
                                "type": "array",
                                "items": { "type": "string" },
                                "description": "Optional list of acceptance criteria"
                            }
                        },
                        "required": ["name"]
                    }
                },
                {
                    "name": "list_refactors",
                    "description": "List all open refactors in work/1_backlog/ matching the _refactor_ naming convention.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "close_bug",
                    "description": "Archive a bug from work/2_current/ or work/1_backlog/ to work/5_done/ and auto-commit to master.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "bug_id": {
                                "type": "string",
                                "description": "Bug identifier (e.g. 'bug-3-login_crash')"
                            }
                        },
                        "required": ["bug_id"]
                    }
                },
                // ── Merge pipeline tools ──────────────────────────────
                {
                    "name": "merge_agent_work",
                    "description": "Start the mergemaster pipeline for a completed story as a background job. Returns immediately — poll get_merge_status(story_id) until the merge completes or fails. The pipeline squash-merges the feature branch into master, runs quality gates, moves the story to done, and cleans up.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '52_story_mergemaster_agent_role')"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Optional: name of the coder agent whose work is being merged (for logging)"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "get_merge_status",
                    "description": "Check the status of a merge_agent_work background job. Returns running/completed/failed. When completed, includes the full merge report with conflict details, gate output, and whether the story was archived.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (same as passed to merge_agent_work)"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "move_story_to_merge",
                    "description": "Move a story or bug from work/2_current/ to work/4_merge/ to queue it for the mergemaster pipeline and automatically spawn the mergemaster agent to squash-merge, run quality gates, and archive.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (filename stem, e.g. '28_my_story')"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name to use for merging (defaults to 'mergemaster')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "report_merge_failure",
                    "description": "Report that a merge failed for a story. Leaves the story in work/4_merge/ and logs the failure reason. Use this when merge_agent_work returns success=false instead of manually moving the story file.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '52_story_mergemaster_agent_role')"
                            },
                            "reason": {
                                "type": "string",
                                "description": "Human-readable explanation of why the merge failed"
                            }
                        },
                        "required": ["story_id", "reason"]
                    }
                },
                // ── QA tools ──────────────────────────────────────────
                {
                    "name": "request_qa",
                    "description": "Trigger QA review of a completed story worktree: moves the item from work/2_current/ to work/3_qa/ and starts the qa agent to run quality gates, tests, and generate a manual testing plan.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '53_story_qa_agent_role')"
                            },
                            "agent_name": {
                                "type": "string",
                                "description": "Agent name to use for QA (defaults to 'qa')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "approve_qa",
                    "description": "Approve a story that passed machine QA and is awaiting human review. Moves the story from work/3_qa/ to work/4_merge/ and starts the mergemaster agent.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '247_story_human_qa_gate')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "reject_qa",
                    "description": "Reject a story during human QA review. Moves the story from work/3_qa/ back to work/2_current/ with rejection notes so the coder agent can fix the issues.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier (e.g. '247_story_human_qa_gate')"
                            },
                            "notes": {
                                "type": "string",
                                "description": "Explanation of what is broken or needs fixing"
                            }
                        },
                        "required": ["story_id", "notes"]
                    }
                },
                {
                    "name": "launch_qa_app",
                    "description": "Launch the app from a story's worktree for manual QA testing. Automatically assigns a free port, writes it to .storkit_port, and starts the backend server. Only one QA app instance runs at a time.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Story identifier whose worktree app to launch"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                // ── Diagnostics / server lifecycle ────────────────────
                {
                    "name": "get_pipeline_status",
                    "description": "Return a structured snapshot of the full work item pipeline. Includes all active stages (current, qa, merge, done) with each item's stage, name, and assigned agent. Also includes upcoming backlog items.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "get_server_logs",
                    "description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "lines": {
                                "type": "integer",
                                "description": "Number of recent lines to return (default 100, max 1000)"
                            },
                            "filter": {
                                "type": "string",
                                "description": "Optional substring filter (e.g. 'watcher', 'mcp', 'permission')"
                            },
                            "severity": {
                                "type": "string",
                                "description": "Filter by severity level: ERROR, WARN, or INFO. Returns only entries at that level."
                            }
                        }
                    }
                },
                {
                    "name": "rebuild_and_restart",
                    "description": "Rebuild the server binary from source and re-exec with the new binary. Gracefully stops all running agents before restart. If the build fails, the server stays up and returns the build error.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {}
                    }
                },
                {
                    "name": "prompt_permission",
                    "description": "Present a permission request to the user via the web UI. Used by Claude Code's --permission-prompt-tool to delegate permission decisions to the frontend dialog. Returns on approval; returns an error on denial.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "tool_name": {
                                "type": "string",
                                "description": "The tool requesting permission (e.g. 'Bash', 'Write')"
                            },
                            "input": {
                                "type": "object",
                                "description": "The tool's input arguments"
                            }
                        },
                        "required": ["tool_name", "input"]
                    }
                },
                {
                    "name": "get_token_usage",
                    "description": "Return per-agent token usage records from the persistent log. Shows input tokens, output tokens, cache tokens, and cost in USD for each agent session. Optionally filter by story_id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Optional: filter records to a specific story (e.g. '42_my_story')"
                            }
                        }
                    }
                },
                // ── Destructive / escape-hatch tools ──────────────────
                {
                    "name": "delete_story",
                    "description": "Delete a work item from the pipeline entirely. Stops any running agent, removes the worktree, and deletes the story file. Use only for removing obsolete or duplicate items.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Work item identifier (filename stem, e.g. '28_story_my_feature')"
                            }
                        },
                        "required": ["story_id"]
                    }
                },
                {
                    "name": "move_story",
                    "description": "Move a work item (story, bug, spike, or refactor) to an arbitrary pipeline stage. Prefer dedicated tools when available: use accept_story to mark items done, move_story_to_merge to queue for merging, or request_qa to trigger QA review. Use move_story only for arbitrary moves that lack a dedicated tool — for example, moving a story back to backlog or recovering a ghost story by moving it back to current.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "story_id": {
                                "type": "string",
                                "description": "Work item identifier (filename stem, e.g. '28_story_my_feature')"
                            },
                            "target_stage": {
                                "type": "string",
                                "enum": ["backlog", "current", "qa", "merge", "done"],
                                "description": "Target pipeline stage: backlog (1_backlog), current (2_current), qa (3_qa), merge (4_merge), done (5_done)"
                            }
                        },
                        "required": ["story_id", "target_stage"]
                    }
                },
                {
                    "name": "run_command",
                    "description": "Execute a shell command in an agent's worktree directory. The working_dir must be inside .storkit/worktrees/. Returns stdout, stderr, exit_code, and timed_out. Supports SSE streaming (send Accept: text/event-stream) for long-running commands. Dangerous commands (rm -rf /, sudo, etc.) are blocked.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "command": {
                                "type": "string",
                                "description": "The bash command to execute (passed to bash -c)"
                            },
                            "working_dir": {
                                "type": "string",
                                "description": "Absolute path to the worktree directory to run the command in. Must be inside .storkit/worktrees/."
                            },
                            "timeout": {
                                "type": "integer",
                                "description": "Timeout in seconds (default: 120, max: 600)"
                            }
                        },
                        "required": ["command", "working_dir"]
                    }
                }
            ]
        }),
    )
}
// ── Tool dispatch ─────────────────────────────────────────────────
/// Handle MCP `tools/call`: dispatch to the tool implementation by name.
///
/// Tool functions return `Result<String, String>`. Both arms are rendered as
/// a JSON-RPC *success* carrying MCP tool-result content: failures are
/// signalled via `"isError": true` inside the result rather than a JSON-RPC
/// error object, matching the MCP convention for tool execution failures.
async fn handle_tools_call(
    id: Option<Value>,
    params: &Value,
    ctx: &AppContext,
) -> JsonRpcResponse {
    // MCP tools/call shape: params = { "name": "...", "arguments": {...} }.
    // A missing name falls through to the "Unknown tool" arm below.
    let tool_name = params
        .get("name")
        .and_then(|v| v.as_str())
        .unwrap_or("");
    let args = params.get("arguments").cloned().unwrap_or(json!({}));
    let result = match tool_name {
        // Workflow tools
        "create_story" => story_tools::tool_create_story(&args, ctx),
        "validate_stories" => story_tools::tool_validate_stories(ctx),
        "list_upcoming" => story_tools::tool_list_upcoming(ctx),
        "get_story_todos" => story_tools::tool_get_story_todos(&args, ctx),
        "record_tests" => story_tools::tool_record_tests(&args, ctx),
        "ensure_acceptance" => story_tools::tool_ensure_acceptance(&args, ctx),
        // Agent tools (async)
        "start_agent" => agent_tools::tool_start_agent(&args, ctx).await,
        "stop_agent" => agent_tools::tool_stop_agent(&args, ctx).await,
        "list_agents" => agent_tools::tool_list_agents(ctx),
        "get_agent_config" => agent_tools::tool_get_agent_config(ctx),
        // NOTE(review): reload_agent_config dispatches to the *get* handler.
        // Confirm tool_get_agent_config re-reads project.toml on every call;
        // otherwise this tool never actually reloads and needs its own
        // tool_reload_agent_config implementation.
        "reload_agent_config" => agent_tools::tool_get_agent_config(ctx),
        // Non-SSE fallback; the streaming variant is routed earlier in
        // mcp_post_handler when the client sends Accept: text/event-stream.
        "get_agent_output" => agent_tools::tool_get_agent_output_poll(&args, ctx).await,
        "wait_for_agent" => agent_tools::tool_wait_for_agent(&args, ctx).await,
        // Worktree tools
        "create_worktree" => agent_tools::tool_create_worktree(&args, ctx).await,
        "list_worktrees" => agent_tools::tool_list_worktrees(ctx),
        "remove_worktree" => agent_tools::tool_remove_worktree(&args, ctx).await,
        // Editor tools
        "get_editor_command" => agent_tools::tool_get_editor_command(&args, ctx),
        // Lifecycle tools
        "accept_story" => story_tools::tool_accept_story(&args, ctx),
        // Story mutation tools (auto-commit to master)
        "check_criterion" => story_tools::tool_check_criterion(&args, ctx),
        "add_criterion" => story_tools::tool_add_criterion(&args, ctx),
        "update_story" => story_tools::tool_update_story(&args, ctx),
        // Spike lifecycle tools
        "create_spike" => story_tools::tool_create_spike(&args, ctx),
        // Bug lifecycle tools
        "create_bug" => story_tools::tool_create_bug(&args, ctx),
        "list_bugs" => story_tools::tool_list_bugs(ctx),
        "close_bug" => story_tools::tool_close_bug(&args, ctx),
        // Refactor lifecycle tools
        "create_refactor" => story_tools::tool_create_refactor(&args, ctx),
        "list_refactors" => story_tools::tool_list_refactors(ctx),
        // Mergemaster tools
        "merge_agent_work" => merge_tools::tool_merge_agent_work(&args, ctx),
        "get_merge_status" => merge_tools::tool_get_merge_status(&args, ctx),
        "move_story_to_merge" => merge_tools::tool_move_story_to_merge(&args, ctx).await,
        "report_merge_failure" => merge_tools::tool_report_merge_failure(&args, ctx),
        // QA tools
        "request_qa" => qa_tools::tool_request_qa(&args, ctx).await,
        "approve_qa" => qa_tools::tool_approve_qa(&args, ctx).await,
        "reject_qa" => qa_tools::tool_reject_qa(&args, ctx).await,
        "launch_qa_app" => qa_tools::tool_launch_qa_app(&args, ctx).await,
        // Pipeline status
        "get_pipeline_status" => story_tools::tool_get_pipeline_status(ctx),
        // Diagnostics
        "get_server_logs" => diagnostics::tool_get_server_logs(&args),
        // Server lifecycle
        "rebuild_and_restart" => diagnostics::tool_rebuild_and_restart(ctx).await,
        // Permission bridge (Claude Code → frontend dialog)
        "prompt_permission" => diagnostics::tool_prompt_permission(&args, ctx).await,
        // Token usage
        "get_token_usage" => diagnostics::tool_get_token_usage(&args, ctx),
        // Delete story
        "delete_story" => story_tools::tool_delete_story(&args, ctx).await,
        // Arbitrary pipeline movement
        "move_story" => diagnostics::tool_move_story(&args, ctx),
        // Shell command execution
        "run_command" => shell_tools::tool_run_command(&args, ctx).await,
        _ => Err(format!("Unknown tool: {tool_name}")),
    };
    match result {
        Ok(content) => JsonRpcResponse::success(
            id,
            json!({
                "content": [{ "type": "text", "text": content }]
            }),
        ),
        Err(msg) => {
            // Failures are logged server-side and surfaced to the client as
            // tool-result content flagged with isError.
            slog_warn!("[mcp] Tool call failed: tool={tool_name} error={msg}");
            JsonRpcResponse::success(
                id,
                json!({
                    "content": [{ "type": "text", "text": msg }],
                    "isError": true
                }),
            )
        }
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::http::context::AppContext;
/// Build a minimal AppContext rooted at `dir` for handler tests.
/// NOTE(review): not referenced by the tests visible in this chunk —
/// presumably used by tests later in the module; confirm before removing.
fn test_ctx(dir: &std::path::Path) -> AppContext {
    AppContext::new_test(dir.to_path_buf())
}
/// A success response must serialize the `result` member and omit `error`
/// entirely (skip_serializing_if on the None side).
#[test]
fn json_rpc_response_serializes_success() {
    let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
    let s = serde_json::to_string(&resp).unwrap();
    assert!(s.contains("\"result\""));
    assert!(!s.contains("\"error\""));
}
#[test]
fn json_rpc_response_serializes_error() {
let resp = JsonRpcResponse::error(Some(json!(1)), -32600, "bad".into());
let s = serde_json::to_string(&resp).unwrap();
assert!(s.contains("\"error\""));
assert!(!s.contains("\"result\""));
}
#[test]
fn initialize_returns_capabilities() {
let resp = handle_initialize(
Some(json!(1)),
&json!({"protocolVersion": "2025-03-26", "capabilities": {}, "clientInfo": {"name": "test", "version": "1.0"}}),
);
let result = resp.result.unwrap();
assert_eq!(result["protocolVersion"], "2025-03-26");
assert!(result["capabilities"]["tools"].is_object());
assert_eq!(result["serverInfo"]["name"], "storkit");
}
#[test]
fn tools_list_returns_all_tools() {
let resp = handle_tools_list(Some(json!(2)));
let result = resp.result.unwrap();
let tools = result["tools"].as_array().unwrap();
let names: Vec<&str> = tools.iter().map(|t| t["name"].as_str().unwrap()).collect();
assert!(names.contains(&"create_story"));
assert!(names.contains(&"validate_stories"));
assert!(names.contains(&"list_upcoming"));
assert!(names.contains(&"get_story_todos"));
assert!(names.contains(&"record_tests"));
assert!(names.contains(&"ensure_acceptance"));
assert!(names.contains(&"start_agent"));
assert!(names.contains(&"stop_agent"));
assert!(names.contains(&"list_agents"));
assert!(names.contains(&"get_agent_config"));
assert!(names.contains(&"reload_agent_config"));
assert!(names.contains(&"get_agent_output"));
assert!(names.contains(&"wait_for_agent"));
assert!(names.contains(&"create_worktree"));
assert!(names.contains(&"list_worktrees"));
assert!(names.contains(&"remove_worktree"));
assert!(names.contains(&"get_editor_command"));
assert!(!names.contains(&"report_completion"));
assert!(names.contains(&"accept_story"));
assert!(names.contains(&"check_criterion"));
assert!(names.contains(&"add_criterion"));
assert!(names.contains(&"update_story"));
assert!(names.contains(&"create_spike"));
assert!(names.contains(&"create_bug"));
assert!(names.contains(&"list_bugs"));
assert!(names.contains(&"close_bug"));
assert!(names.contains(&"create_refactor"));
assert!(names.contains(&"list_refactors"));
assert!(names.contains(&"merge_agent_work"));
assert!(names.contains(&"get_merge_status"));
assert!(names.contains(&"move_story_to_merge"));
assert!(names.contains(&"report_merge_failure"));
assert!(names.contains(&"request_qa"));
assert!(names.contains(&"approve_qa"));
assert!(names.contains(&"reject_qa"));
assert!(names.contains(&"launch_qa_app"));
assert!(names.contains(&"get_server_logs"));
assert!(names.contains(&"prompt_permission"));
assert!(names.contains(&"get_pipeline_status"));
assert!(names.contains(&"rebuild_and_restart"));
assert!(names.contains(&"get_token_usage"));
assert!(names.contains(&"move_story"));
assert!(names.contains(&"delete_story"));
assert!(names.contains(&"run_command"));
assert_eq!(tools.len(), 43);
}
#[test]
fn tools_list_schemas_have_required_fields() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
for tool in &tools {
assert!(tool["name"].is_string(), "tool missing name");
assert!(tool["description"].is_string(), "tool missing description");
assert!(tool["inputSchema"].is_object(), "tool missing inputSchema");
assert_eq!(tool["inputSchema"]["type"], "object");
}
}
#[test]
fn handle_tools_call_unknown_tool() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let rt = tokio::runtime::Runtime::new().unwrap();
let resp = rt.block_on(handle_tools_call(
Some(json!(1)),
&json!({"name": "bogus_tool", "arguments": {}}),
&ctx,
));
let result = resp.result.unwrap();
assert_eq!(result["isError"], true);
assert!(result["content"][0]["text"].as_str().unwrap().contains("Unknown tool"));
}
#[test]
fn to_sse_response_wraps_in_data_prefix() {
let resp = JsonRpcResponse::success(Some(json!(1)), json!({"ok": true}));
let http_resp = to_sse_response(resp);
assert_eq!(
http_resp.headers().get("content-type").unwrap(),
"text/event-stream"
);
}
#[test]
fn wants_sse_detects_accept_header() {
// Can't easily construct a Request in tests without TestClient,
// so test the logic indirectly via to_sse_response format
let resp = JsonRpcResponse::success(Some(json!(1)), json!("ok"));
let json_resp = to_json_response(resp);
assert_eq!(
json_resp.headers().get("content-type").unwrap(),
"application/json"
);
}
#[test]
fn json_rpc_error_response_builds_json_response() {
let resp = json_rpc_error_response(Some(json!(42)), -32600, "test error".into());
assert_eq!(resp.status(), poem::http::StatusCode::OK);
assert_eq!(
resp.headers().get("content-type").unwrap(),
"application/json"
);
}
// ── HTTP handler tests (TestClient) ───────────────────────────
fn test_mcp_app(ctx: std::sync::Arc<AppContext>) -> impl poem::Endpoint {
use poem::EndpointExt;
poem::Route::new()
.at("/mcp", poem::post(mcp_post_handler).get(mcp_get_handler))
.data(ctx)
}
async fn read_body_json(resp: poem::test::TestResponse) -> Value {
let body = resp.0.into_body().into_string().await.unwrap();
serde_json::from_str(&body).unwrap()
}
async fn post_json_mcp<E: poem::Endpoint>(
cli: &poem::test::TestClient<E>,
payload: &str,
) -> Value {
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.body(payload.to_string())
.send()
.await;
read_body_json(resp).await
}
#[tokio::test]
async fn mcp_get_handler_returns_405() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli.get("/mcp").send().await;
assert_eq!(resp.0.status(), poem::http::StatusCode::METHOD_NOT_ALLOWED);
}
#[tokio::test]
async fn mcp_post_invalid_content_type_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "text/plain")
.body("{}")
.send()
.await;
let body = read_body_json(resp).await;
assert!(body.get("error").is_some(), "expected error field: {body}");
}
#[tokio::test]
async fn mcp_post_invalid_json_returns_parse_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.body("not-valid-json")
.send()
.await;
let body = read_body_json(resp).await;
assert!(body.get("error").is_some(), "expected error field: {body}");
}
#[tokio::test]
async fn mcp_post_wrong_jsonrpc_version_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let body = post_json_mcp(
&cli,
r#"{"jsonrpc":"1.0","id":1,"method":"initialize","params":{}}"#,
)
.await;
assert!(
body["error"]["message"].as_str().unwrap_or("").contains("version"),
"expected version error: {body}"
);
}
#[tokio::test]
async fn mcp_post_notification_with_null_id_returns_accepted() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.body(r#"{"jsonrpc":"2.0","method":"notifications/initialized","params":{}}"#)
.send()
.await;
assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
}
#[tokio::test]
async fn mcp_post_notification_with_explicit_null_id_returns_accepted() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.body(
r#"{"jsonrpc":"2.0","id":null,"method":"notifications/initialized","params":{}}"#,
)
.send()
.await;
assert_eq!(resp.0.status(), poem::http::StatusCode::ACCEPTED);
}
#[tokio::test]
async fn mcp_post_missing_id_non_notification_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let body = post_json_mcp(
&cli,
r#"{"jsonrpc":"2.0","method":"initialize","params":{}}"#,
)
.await;
assert!(body.get("error").is_some(), "expected error: {body}");
}
#[tokio::test]
async fn mcp_post_unknown_method_returns_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let body = post_json_mcp(
&cli,
r#"{"jsonrpc":"2.0","id":1,"method":"bogus/method","params":{}}"#,
)
.await;
assert!(
body["error"]["message"].as_str().unwrap_or("").contains("Unknown method"),
"expected unknown method error: {body}"
);
}
#[tokio::test]
async fn mcp_post_initialize_returns_capabilities() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let body = post_json_mcp(
&cli,
r#"{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"test","version":"1.0"}}}"#,
)
.await;
assert_eq!(body["result"]["protocolVersion"], "2025-03-26");
assert_eq!(body["result"]["serverInfo"]["name"], "storkit");
}
#[tokio::test]
async fn mcp_post_tools_list_returns_tools() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let body = post_json_mcp(
&cli,
r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#,
)
.await;
assert!(body["result"]["tools"].is_array());
}
#[tokio::test]
async fn mcp_post_sse_returns_event_stream_content_type() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}"#)
.send()
.await;
assert_eq!(
resp.0.headers().get("content-type").unwrap(),
"text/event-stream"
);
}
#[tokio::test]
async fn mcp_post_sse_get_agent_output_missing_story_id() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{}}}"#)
.send()
.await;
assert_eq!(
resp.0.headers().get("content-type").unwrap(),
"text/event-stream",
"expected SSE content-type"
);
}
#[tokio::test]
async fn mcp_post_sse_get_agent_output_missing_agent_name() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"1_test"}}}"#)
.send()
.await;
assert_eq!(
resp.0.headers().get("content-type").unwrap(),
"text/event-stream",
"expected SSE content-type"
);
}
#[tokio::test]
async fn mcp_post_sse_get_agent_output_no_agent_returns_sse_error() {
let tmp = tempfile::tempdir().unwrap();
let ctx = std::sync::Arc::new(test_ctx(tmp.path()));
let cli = poem::test::TestClient::new(test_mcp_app(ctx));
let resp = cli
.post("/mcp")
.header("content-type", "application/json")
.header("accept", "text/event-stream")
.body(r#"{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"get_agent_output","arguments":{"story_id":"99_nope","agent_name":"bot"}}}"#)
.send()
.await;
assert_eq!(
resp.0.headers().get("content-type").unwrap(),
"text/event-stream"
);
let body = resp.0.into_body().into_string().await.unwrap();
assert!(body.contains("data:"), "expected SSE data prefix: {body}");
}
}