Compare commits

...

74 Commits

Author SHA1 Message Date
Timmy b75679175b Bump version to 0.8.2 2026-03-30 11:57:05 +01:00
dave 440081016d storkit: accept 433_story_setup_wizard_interviews_user_on_bare_projects_with_no_existing_code 2026-03-29 04:29:58 +00:00
dave e8f3629c76 storkit: accept 438_story_slash_command_autocomplete_in_web_ui_text_input 2026-03-29 02:08:37 +00:00
dave c5cdc0f594 storkit: done 433_story_setup_wizard_interviews_user_on_bare_projects_with_no_existing_code 2026-03-29 00:46:08 +00:00
dave fec417cb16 storkit: merge 433_story_setup_wizard_interviews_user_on_bare_projects_with_no_existing_code 2026-03-29 00:46:05 +00:00
dave a70a06a5fb storkit: create 433_story_setup_wizard_interviews_user_on_bare_projects_with_no_existing_code 2026-03-29 00:29:17 +00:00
dave 0a617e1c18 storkit: accept 445_bug_rate_limited_mergemaster_exits_advance_stories_to_done_without_merging 2026-03-29 00:05:27 +00:00
dave 4527f71857 storkit: accept 444_refactor_extract_shared_test_helpers_test_ctx_write_story_file_make_api 2026-03-28 23:46:26 +00:00
dave 6e0d12d145 storkit: accept 440_refactor_consolidate_is_permission_approval_into_chat_util 2026-03-28 23:44:25 +00:00
dave d471d29c72 storkit: accept 434_story_wizard_auto_checks_completion_on_first_conversation 2026-03-28 23:34:10 +00:00
dave 0b652eec21 storkit: done 434_story_wizard_auto_checks_completion_on_first_conversation 2026-03-28 23:33:07 +00:00
dave b32fdf7d65 storkit: merge 434_story_wizard_auto_checks_completion_on_first_conversation 2026-03-28 23:33:05 +00:00
dave 2da0e1eb55 storkit: accept 442_refactor_deduplicate_stage_display_name_into_shared_module 2026-03-28 22:58:18 +00:00
dave 269124a1fd storkit: accept 443_refactor_extract_shared_find_story_name_from_commands 2026-03-28 22:40:14 +00:00
dave 5992f9bd19 storkit: merge 438_story_slash_command_autocomplete_in_web_ui_text_input 2026-03-28 22:27:40 +00:00
dave a53967453e storkit: done 438_story_slash_command_autocomplete_in_web_ui_text_input 2026-03-28 22:26:16 +00:00
dave ab4b218ac7 storkit: accept 441_refactor_deduplicate_get_project_root_wrappers_in_io_modules 2026-03-28 20:35:05 +00:00
dave d5b936c88d storkit: accept 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 20:28:04 +00:00
dave 07cc0e3f29 storkit: accept 437_bug_strip_prefix_ci_panics_on_multi_byte_utf_8_input 2026-03-28 20:22:04 +00:00
dave db4a84c70f storkit: done 445_bug_rate_limited_mergemaster_exits_advance_stories_to_done_without_merging 2026-03-28 20:08:18 +00:00
dave 3048d26e66 storkit: merge 445_bug_rate_limited_mergemaster_exits_advance_stories_to_done_without_merging 2026-03-28 20:08:15 +00:00
dave 8e45b2a08d storkit: done 444_refactor_extract_shared_test_helpers_test_ctx_write_story_file_make_api 2026-03-28 19:51:20 +00:00
dave ddc4a57cd2 storkit: merge 444_refactor_extract_shared_test_helpers_test_ctx_write_story_file_make_api 2026-03-28 19:51:17 +00:00
dave d216f3c267 storkit: done 440_refactor_consolidate_is_permission_approval_into_chat_util 2026-03-28 19:47:36 +00:00
dave 8cd881c8f1 storkit: merge 440_refactor_consolidate_is_permission_approval_into_chat_util 2026-03-28 19:47:33 +00:00
dave 2867e1d15f storkit: accept 431_story_qa_agent_reviews_code_changes_against_acceptance_criteria 2026-03-28 19:30:48 +00:00
dave c2c9d3f9cb storkit: create 445_bug_rate_limited_mergemaster_exits_advance_stories_to_done_without_merging 2026-03-28 19:19:17 +00:00
dave f734b4a3c6 storkit: done 443_refactor_extract_shared_find_story_name_from_commands 2026-03-28 19:09:13 +00:00
dave 890693efda storkit: done 442_refactor_deduplicate_stage_display_name_into_shared_module 2026-03-28 18:57:31 +00:00
dave 5403b29261 storkit: done 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 18:36:45 +00:00
dave 8ee59f5dc1 storkit: merge 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 18:36:42 +00:00
dave 5dcc35a1b3 fix: gate runner delegates to script/test instead of hardcoding cargo clippy
The acceptance gate was hardcoded to run cargo clippy, which fails on
non-Rust projects (Go, Node, etc.). Now the gate only runs script/test
which is project-specific. Clippy is added to storkit's own script/test
so Rust linting is preserved for this project.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 18:15:29 +00:00
dave af70b68cd1 storkit: accept 432_story_complete_setup_wizard_with_mcp_tools_and_agent_driven_file_generation 2026-03-28 18:12:43 +00:00
dave e356f9b2dd storkit: accept 423_story_auto_schedule_timer_on_rate_limit_to_resume_after_reset 2026-03-28 17:42:42 +00:00
dave 96793de11b storkit: merge 441_refactor_deduplicate_get_project_root_wrappers_in_io_modules 2026-03-28 16:48:49 +00:00
dave bfe70f5599 storkit: done 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 16:48:42 +00:00
dave 98aedaddf0 storkit: done 442_refactor_deduplicate_stage_display_name_into_shared_module 2026-03-28 16:47:58 +00:00
dave 496ce864d7 storkit: done 441_refactor_deduplicate_get_project_root_wrappers_in_io_modules 2026-03-28 16:46:18 +00:00
dave 243738551c fix: wizard README instructions explicitly require LLM to generate and write files
The LLM was having the conversation with the user but never following
through with wizard_generate calls. The instructions now spell out
the full workflow: get hint, write content, stage it, show user, confirm.
Also adds "keep moving" instruction so the LLM auto-advances to the
next step after confirmation.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 16:37:25 +00:00
dave 20f2d97f06 storkit: create 444_refactor_extract_shared_test_helpers_test_ctx_write_story_file_make_api 2026-03-28 16:34:45 +00:00
dave b6edc1bff7 storkit: create 443_refactor_extract_shared_find_story_name_from_commands 2026-03-28 16:34:41 +00:00
dave c45613a3ad storkit: create 442_refactor_deduplicate_stage_display_name_into_shared_module 2026-03-28 16:34:39 +00:00
dave 7efed33851 storkit: create 441_refactor_deduplicate_get_project_root_wrappers_in_io_modules 2026-03-28 16:34:36 +00:00
dave b00a477070 storkit: create 440_refactor_consolidate_is_permission_approval_into_chat_util 2026-03-28 16:34:35 +00:00
dave 52f2e89659 storkit: done 437_bug_strip_prefix_ci_panics_on_multi_byte_utf_8_input 2026-03-28 16:33:29 +00:00
dave 08db28d9d6 storkit: merge 437_bug_strip_prefix_ci_panics_on_multi_byte_utf_8_input 2026-03-28 16:33:26 +00:00
dave 77ff0ce093 storkit: create 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 16:27:51 +00:00
dave 0ab1b1232b storkit: create 439_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 16:27:36 +00:00
dave 209e01bc06 storkit: create 438_story_slash_command_autocomplete_in_web_ui_text_input 2026-03-28 16:24:44 +00:00
dave 2650b1a42e storkit: create 437_bug_strip_prefix_ci_panics_on_multi_byte_utf_8_input 2026-03-28 16:21:19 +00:00
Timmy 3595df4d9d Bump version to 0.8.1 2026-03-28 15:37:08 +00:00
dave 5d84100c41 storkit: create 436_refactor_unify_story_stuck_states_into_a_single_status_field 2026-03-28 15:35:14 +00:00
dave dd436ad186 storkit: create 435_story_unblock_command_handles_all_stuck_states_not_just_blocked_flag 2026-03-28 15:33:39 +00:00
dave b811b9188f storkit: done 431_story_qa_agent_reviews_code_changes_against_acceptance_criteria 2026-03-28 15:33:19 +00:00
dave 9935311c35 storkit: merge 431_story_qa_agent_reviews_code_changes_against_acceptance_criteria 2026-03-28 15:33:16 +00:00
dave be0036922a fix: unblock command also clears merge_failure field
Previously unblock only checked for blocked=true. Stories stuck in
merge with a merge_failure field were not considered "blocked" and
unblock refused to act. Now it clears both blocked and merge_failure,
and reports which fields were cleared.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:33:01 +00:00
dave 361f9dff0d fix(426): also narrow pre-cherry-pick code change check to .storkit/work/
There were two places checking for code changes: the post-cherry-pick
verification (already fixed) and a pre-cherry-pick check in the
merge-queue worktree. The pre-cherry-pick check was still filtering
all of .storkit/ which rejected stories that only change project.toml.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:27:19 +00:00
dave fc160b5c5f feat: wizard detects bare projects and prompts user interview for context/stack
wizard_generate now checks if the project has no source code. On bare
projects, the generation hints tell the LLM to ask the user what they
want to build and what tech stack they plan to use, rather than trying
to read a nonexistent codebase.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:17:42 +00:00
dave 9092b8a2c9 fix: wizard hints address the LLM not the user, README adds bare project guidance
The format_wizard_state hints now tell the LLM what to do ("show it
to the user and ask if they're happy") rather than exposing tool names
to the user ("Run wizard_generate").

README wizard instructions now distinguish between existing-code projects
(read codebase, generate files) and bare projects (interview the user
about what they want to build).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:15:14 +00:00
dave dfe3d96313 docs: move wizard instructions to .storkit/README.md for LLM-agnostic access
The wizard check was only in CLAUDE.md which is Claude-specific.
Move the primary instruction to .storkit/README.md (step 1 of First
Steps) so any LLM reading the dev process docs will discover the wizard.
CLAUDE.md keeps a shorter pointer to the README.

Also fix stale .story_kit/ paths to .storkit/ in the README.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:12:54 +00:00
dave bcefa6a25d storkit: create 434_story_wizard_auto_checks_completion_on_first_conversation 2026-03-28 15:06:53 +00:00
dave 50bfeddcb5 fix: scaffold CLAUDE.md uses active wizard instruction
Change from passive "call wizard_status to check progress" to active
"On your first conversation, call wizard_status" with IMPORTANT prefix.
Without the direct instruction, Claude ignores the wizard tools.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 15:06:21 +00:00
dave 8e6b8ef338 storkit: create 433_story_setup_wizard_interviews_user_on_bare_projects_with_no_existing_code 2026-03-28 15:00:04 +00:00
dave d363eb63e2 fix: scaffold CLAUDE.md now mentions wizard and MCP tools
Without this, Claude Code in a freshly scaffolded project has no idea
storkit's wizard or MCP tools exist and gives generic setup advice.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 14:37:44 +00:00
dave 422cec370d docs: rewrite quickstart as a step-by-step with wizard flow
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 14:34:45 +00:00
dave 973b7d6f72 storkit: done 432_story_complete_setup_wizard_with_mcp_tools_and_agent_driven_file_generation 2026-03-28 14:24:02 +00:00
dave 49b78f3642 storkit: merge 432_story_complete_setup_wizard_with_mcp_tools_and_agent_driven_file_generation 2026-03-28 14:23:59 +00:00
dave 93576e3f83 fix(426): narrow merge verification exclude to .storkit/work/ only
The post-cherry-pick diff check was excluding all of .storkit/, which
rejected stories whose deliverable is .storkit/project.toml changes
(e.g. 431 updating QA agent prompts). Narrow the exclusion to
.storkit/work/ which is where pipeline file moves live.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 14:21:57 +00:00
dave dd7f71dd87 docs: add Claude Code quickstart, web UI, and chat transport sections to README
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 14:03:23 +00:00
dave 9a8492c72f storkit: create 432_story_complete_setup_wizard_with_mcp_tools_and_agent_driven_file_generation 2026-03-28 14:00:20 +00:00
dave ac9bdde164 storkit: create 431_story_qa_agent_reviews_code_changes_against_acceptance_criteria 2026-03-28 13:58:38 +00:00
dave 0b2ec64c74 storkit: done 430_bug_status_command_traffic_light_dots_not_coloured_in_matrix 2026-03-28 13:57:41 +00:00
dave fe0a032e8e storkit: merge 430_bug_status_command_traffic_light_dots_not_coloured_in_matrix 2026-03-28 13:57:38 +00:00
dave eff8f6a6a6 feat(399): add --port CLI flag with project.toml persistence
Manual merge of story 399 feature branch, adapted for the current CLI
parser (which includes the init subcommand from 429).

- storkit --port 3000 sets the listening port
- storkit --port=3000 also works
- Port resolution: CLI flag > STORKIT_PORT env > default 3001
- Supports combining with init: storkit init --port 3000 /path
- Replaces CliDirective enum with CliArgs struct that handles both
  --port and init in a single pass

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-28 13:47:02 +00:00
94 changed files with 2920 additions and 870 deletions
+10 -4
View File
@@ -9,16 +9,22 @@
When you start a new session with this project: When you start a new session with this project:
1. **Check for MCP Tools:** Read `.mcp.json` to discover the MCP server endpoint. Then list available tools by calling: 1. **Check Setup Wizard:** Call `wizard_status` to check if project setup is complete. If the wizard is not complete, guide the user through the remaining steps. Important rules for the wizard flow:
- **Be conversational.** Don't show tool names, step numbers, or raw wizard output to the user.
- **On projects with existing code:** Read the codebase and generate each file, then show the user what you wrote and ask if it looks right.
- **On bare projects with no code:** Ask the user what they want to build, what language/framework they plan to use, and generate files from their answers.
- **You must actually generate the files.** The workflow for each step is: (1) call `wizard_generate` with no args to get a hint, (2) write the file content yourself based on the conversation, (3) call `wizard_generate` again with the `content` argument containing the full file body, (4) show the user what you wrote, (5) call `wizard_confirm` (they approve), `wizard_retry` (they want changes), or `wizard_skip` (they want to skip). Do not stop after discussing — follow through and write the files.
- **Keep moving.** After each step is confirmed, immediately proceed to the next wizard step without waiting for the user to ask.
2. **Check for MCP Tools:** Read `.mcp.json` to discover the MCP server endpoint. Then list available tools by calling:
```bash ```bash
curl -s "$(jq -r '.mcpServers["storkit"].url' .mcp.json)" \ curl -s "$(jq -r '.mcpServers["storkit"].url' .mcp.json)" \
-H 'Content-Type: application/json' \ -H 'Content-Type: application/json' \
-d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}' -d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}'
``` ```
This returns the full tool catalog (create stories, spawn agents, record tests, manage worktrees, etc.). Familiarize yourself with the available tools before proceeding. These tools allow you to directly manipulate the workflow and spawn subsidiary agents without manual file manipulation. This returns the full tool catalog (create stories, spawn agents, record tests, manage worktrees, etc.). Familiarize yourself with the available tools before proceeding. These tools allow you to directly manipulate the workflow and spawn subsidiary agents without manual file manipulation.
2. **Read Context:** Check `.story_kit/specs/00_CONTEXT.md` for high-level project goals. 3. **Read Context:** Check `.storkit/specs/00_CONTEXT.md` for high-level project goals.
3. **Read Stack:** Check `.story_kit/specs/tech/STACK.md` for technical constraints and patterns. 4. **Read Stack:** Check `.storkit/specs/tech/STACK.md` for technical constraints and patterns.
4. **Check Work Items:** Look at `.story_kit/work/1_backlog/` and `.story_kit/work/2_current/` to see what work is pending. 5. **Check Work Items:** Look at `.storkit/work/1_backlog/` and `.storkit/work/2_current/` to see what work is pending.
--- ---
+114 -48
View File
@@ -63,30 +63,52 @@ system_prompt = "You are a full-stack engineer working autonomously in a git wor
[[agent]] [[agent]]
name = "qa-2" name = "qa-2"
stage = "qa" stage = "qa"
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings." role = "Reviews coder work in worktrees: runs quality gates, verifies acceptance criteria, and reports findings."
model = "sonnet" model = "sonnet"
max_turns = 40 max_turns = 40
max_budget_usd = 4.00 max_budget_usd = 4.00
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report. prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
## Your Workflow ## Your Workflow
### 1. Code Quality Scan ### 0. Read the Story
- Run `git diff master...HEAD --stat` to see what files changed - Read the story file at `.storkit/work/3_qa/{{story_id}}.md`
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values) - Extract every acceptance criterion (the `- [ ]` checkbox lines)
- Run `cargo clippy --all-targets --all-features` and note any warnings - Keep this list in mind for Step 3
### 1. Deterministic Gates (Prerequisites)
Run these first — if any fail, reject immediately without proceeding to AC review:
- Run `cargo clippy --all-targets --all-features` — must show 0 errors, 0 warnings
- Run `cargo test` and verify all tests pass
- If a `frontend/` directory exists: - If a `frontend/` directory exists:
- Run `npm run build` and note any TypeScript errors - Run `npm run build` and note any TypeScript errors
- Run `npx @biomejs/biome check src/` and note any linting issues - Run `npx @biomejs/biome check src/` and note any linting issues
- Run `npm test` and verify all frontend tests pass
### 2. Test Verification ### 2. Code Change Review
- Run `cargo test` and verify all tests pass - Run `git diff master...HEAD --stat` to see what files changed
- If `frontend/` exists: run `npm test` and verify all frontend tests pass - Run `git diff master...HEAD` to review the actual changes
- Review test quality: look for tests that are trivial or don't assert meaningful behavior - Flag any incomplete implementations:
- `todo!()`, `unimplemented!()`, `panic!()` used as stubs
- Placeholder strings like "TODO", "FIXME", "not implemented"
- Empty match arms or arms that just return `Default::default()`
- Hardcoded values where real logic is expected
- Note any obvious coding mistakes (unused imports, dead code, unhandled errors)
### 3. Manual Testing Support ### 3. Acceptance Criteria Review
For each AC extracted in Step 0:
- Review the diff and test files to determine if the code addresses this AC
- PASS: describe specifically how the code addresses it (which file/function/test)
- FAIL: explain exactly what is missing or incorrect
An AC fails if:
- No code change or test relates to it
- The implementation is stubbed out (todo!/unimplemented!)
- A test exists but doesn't actually assert the behaviour described
### 4. Manual Testing Support (only if all gates PASS and all ACs PASS)
- Build the server: run `cargo build` and note success/failure - Build the server: run `cargo build` and note success/failure
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server - If build succeeds: find a free port (try 3010-3020) and attempt to start the server
- Generate a testing plan including: - Generate a testing plan including:
@@ -95,8 +117,8 @@ Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
- curl commands to exercise relevant API endpoints - curl commands to exercise relevant API endpoints
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server) - Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
### 4. Produce Structured Report ### 5. Produce Structured Report and Verdict
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format: Print your QA report to stdout. Then call `approve_qa` or `reject_qa` via the MCP tool based on the overall result. Use this format:
``` ```
## QA Report for {{story_id}} ## QA Report for {{story_id}}
@@ -105,27 +127,38 @@ Print your QA report to stdout before your process exits. The server will automa
- clippy: PASS/FAIL (details) - clippy: PASS/FAIL (details)
- TypeScript build: PASS/FAIL/SKIP (details) - TypeScript build: PASS/FAIL/SKIP (details)
- Biome lint: PASS/FAIL/SKIP (details) - Biome lint: PASS/FAIL/SKIP (details)
- Code review findings: (list any issues found, or "None")
### Test Verification
- cargo test: PASS/FAIL (N tests) - cargo test: PASS/FAIL (N tests)
- npm test: PASS/FAIL/SKIP (N tests) - npm test: PASS/FAIL/SKIP (N tests)
- Test quality issues: (list any trivial/weak tests, or "None") - Incomplete implementations: (list any todo!/unimplemented!/stubs found, or "None")
- Other code review findings: (list any issues found, or "None")
### Acceptance Criteria Review
- AC: <criterion text>
Result: PASS/FAIL
Evidence: <how the code addresses it, or what is missing>
(repeat for each AC)
### Manual Testing Plan ### Manual Testing Plan
- Server URL: http://localhost:PORT (or "Build failed") - Server URL: http://localhost:PORT (or "Skipped gate/AC failure" or "Build failed")
- Pages to visit: (list) - Pages to visit: (list, or "N/A")
- Things to check: (list) - Things to check: (list, or "N/A")
- curl commands: (list) - curl commands: (list, or "N/A")
### Overall: PASS/FAIL ### Overall: PASS/FAIL
Reason: (summary of why it passed or the primary reason it failed)
``` ```
After printing the report:
- If Overall is PASS: call `approve_qa(story_id='{{story_id}}')` via MCP
- If Overall is FAIL: call `reject_qa(story_id='{{story_id}}', notes='<concise reason>')` via MCP so the coder knows exactly what to fix
## Rules ## Rules
- Do NOT modify any code — read-only review only - Do NOT modify any code — read-only review only
- If the server fails to start, still provide the testing plan with curl commands - Gates must pass before AC review — a gate failure is an automatic reject
- The server automatically runs acceptance gates when your process exits""" - If any AC is not met, the overall result is FAIL
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits." - Always call approve_qa or reject_qa — never leave the story without a verdict"""
system_prompt = "You are a QA agent. Your job is read-only: run quality gates, verify each acceptance criterion against the diff, and produce a structured QA report. Always call approve_qa or reject_qa via MCP to record your verdict. Do not modify code."
[[agent]] [[agent]]
name = "coder-opus" name = "coder-opus"
@@ -140,30 +173,52 @@ system_prompt = "You are a senior full-stack engineer working autonomously in a
[[agent]] [[agent]]
name = "qa" name = "qa"
stage = "qa" stage = "qa"
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings." role = "Reviews coder work in worktrees: runs quality gates, verifies acceptance criteria, and reports findings."
model = "sonnet" model = "sonnet"
max_turns = 40 max_turns = 40
max_budget_usd = 4.00 max_budget_usd = 4.00
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report. prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
## Your Workflow ## Your Workflow
### 1. Code Quality Scan ### 0. Read the Story
- Run `git diff master...HEAD --stat` to see what files changed - Read the story file at `.storkit/work/3_qa/{{story_id}}.md`
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values) - Extract every acceptance criterion (the `- [ ]` checkbox lines)
- Run `cargo clippy --all-targets --all-features` and note any warnings - Keep this list in mind for Step 3
### 1. Deterministic Gates (Prerequisites)
Run these first — if any fail, reject immediately without proceeding to AC review:
- Run `cargo clippy --all-targets --all-features` — must show 0 errors, 0 warnings
- Run `cargo test` and verify all tests pass
- If a `frontend/` directory exists: - If a `frontend/` directory exists:
- Run `npm run build` and note any TypeScript errors - Run `npm run build` and note any TypeScript errors
- Run `npx @biomejs/biome check src/` and note any linting issues - Run `npx @biomejs/biome check src/` and note any linting issues
- Run `npm test` and verify all frontend tests pass
### 2. Test Verification ### 2. Code Change Review
- Run `cargo test` and verify all tests pass - Run `git diff master...HEAD --stat` to see what files changed
- If `frontend/` exists: run `npm test` and verify all frontend tests pass - Run `git diff master...HEAD` to review the actual changes
- Review test quality: look for tests that are trivial or don't assert meaningful behavior - Flag any incomplete implementations:
- `todo!()`, `unimplemented!()`, `panic!()` used as stubs
- Placeholder strings like "TODO", "FIXME", "not implemented"
- Empty match arms or arms that just return `Default::default()`
- Hardcoded values where real logic is expected
- Note any obvious coding mistakes (unused imports, dead code, unhandled errors)
### 3. Manual Testing Support ### 3. Acceptance Criteria Review
For each AC extracted in Step 0:
- Review the diff and test files to determine if the code addresses this AC
- PASS: describe specifically how the code addresses it (which file/function/test)
- FAIL: explain exactly what is missing or incorrect
An AC fails if:
- No code change or test relates to it
- The implementation is stubbed out (todo!/unimplemented!)
- A test exists but doesn't actually assert the behaviour described
### 4. Manual Testing Support (only if all gates PASS and all ACs PASS)
- Build the server: run `cargo build` and note success/failure - Build the server: run `cargo build` and note success/failure
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server - If build succeeds: find a free port (try 3010-3020) and attempt to start the server
- Generate a testing plan including: - Generate a testing plan including:
@@ -172,8 +227,8 @@ Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
- curl commands to exercise relevant API endpoints - curl commands to exercise relevant API endpoints
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server) - Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
### 4. Produce Structured Report ### 5. Produce Structured Report and Verdict
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format: Print your QA report to stdout. Then call `approve_qa` or `reject_qa` via the MCP tool based on the overall result. Use this format:
``` ```
## QA Report for {{story_id}} ## QA Report for {{story_id}}
@@ -182,27 +237,38 @@ Print your QA report to stdout before your process exits. The server will automa
- clippy: PASS/FAIL (details) - clippy: PASS/FAIL (details)
- TypeScript build: PASS/FAIL/SKIP (details) - TypeScript build: PASS/FAIL/SKIP (details)
- Biome lint: PASS/FAIL/SKIP (details) - Biome lint: PASS/FAIL/SKIP (details)
- Code review findings: (list any issues found, or "None")
### Test Verification
- cargo test: PASS/FAIL (N tests) - cargo test: PASS/FAIL (N tests)
- npm test: PASS/FAIL/SKIP (N tests) - npm test: PASS/FAIL/SKIP (N tests)
- Test quality issues: (list any trivial/weak tests, or "None") - Incomplete implementations: (list any todo!/unimplemented!/stubs found, or "None")
- Other code review findings: (list any issues found, or "None")
### Acceptance Criteria Review
- AC: <criterion text>
Result: PASS/FAIL
Evidence: <how the code addresses it, or what is missing>
(repeat for each AC)
### Manual Testing Plan ### Manual Testing Plan
- Server URL: http://localhost:PORT (or "Build failed") - Server URL: http://localhost:PORT (or "Skipped gate/AC failure" or "Build failed")
- Pages to visit: (list) - Pages to visit: (list, or "N/A")
- Things to check: (list) - Things to check: (list, or "N/A")
- curl commands: (list) - curl commands: (list, or "N/A")
### Overall: PASS/FAIL ### Overall: PASS/FAIL
Reason: (summary of why it passed or the primary reason it failed)
``` ```
After printing the report:
- If Overall is PASS: call `approve_qa(story_id='{{story_id}}')` via MCP
- If Overall is FAIL: call `reject_qa(story_id='{{story_id}}', notes='<concise reason>')` via MCP so the coder knows exactly what to fix
## Rules ## Rules
- Do NOT modify any code — read-only review only - Do NOT modify any code — read-only review only
- If the server fails to start, still provide the testing plan with curl commands - Gates must pass before AC review — a gate failure is an automatic reject
- The server automatically runs acceptance gates when your process exits""" - If any AC is not met, the overall result is FAIL
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits." - Always call approve_qa or reject_qa — never leave the story without a verdict"""
system_prompt = "You are a QA agent. Your job is read-only: run quality gates, verify each acceptance criterion against the diff, and produce a structured QA report. Always call approve_qa or reject_qa via MCP to record your verdict. Do not modify code."
[[agent]] [[agent]]
name = "mergemaster" name = "mergemaster"
@@ -0,0 +1,21 @@
---
name: "Unblock command handles all stuck states not just blocked flag"
---
# Story 435: Unblock command handles all stuck states not just blocked flag
## User Story
As a project owner, I want the unblock command to clear any stuck state on a story — not just the blocked flag — so that I have a single command to unstick stories regardless of why they're stuck.
## Acceptance Criteria
- [ ] Unblock clears merge_failure field in addition to blocked flag
- [ ] Unblock clears review_hold field
- [ ] Unblock reports which fields were cleared in the confirmation message
- [ ] Unblock works on stories in any pipeline stage (backlog, current, qa, merge, done)
- [ ] If no stuck state is found (no blocked, merge_failure, or review_hold), returns a clear message saying so
## Out of Scope
- TBD
@@ -0,0 +1,26 @@
---
name: "Unify story stuck states into a single status field"
---
# Refactor 436: Unify story stuck states into a single status field
## Current State
- TBD
## Desired State
Replace the separate blocked, merge_failure, and review_hold front matter fields with a single status field (e.g. status: blocked, status: merge_failure, status: review_hold). Simplifies the unblock command, auto-assign checks, and pipeline advance logic.
## Acceptance Criteria
- [ ] Replace blocked: true, merge_failure: string, and review_hold: true with a single status: field in story front matter
- [ ] Auto-assign checks a single field instead of three separate ones
- [ ] Pipeline advance and lifecycle code reads/writes the unified status field
- [ ] Unblock command clears the status field regardless of which stuck state it was
- [ ] retry_count remains a separate field (it's a counter, not a state)
- [ ] Migration: existing stories with old fields are handled gracefully on read
## Out of Scope
- TBD
@@ -0,0 +1,24 @@
---
name: "QA agent reviews code changes against acceptance criteria"
---
# Story 431: QA agent reviews code changes against acceptance criteria
## User Story
As a project owner, I want the QA agent to actually verify that the coder's implementation matches the story's acceptance criteria, so that incomplete or incorrect work is caught before merge.
## Acceptance Criteria
- [ ] QA agent reads the story's acceptance criteria before reviewing code
- [ ] QA agent reads the full diff against master to understand what changed
- [ ] For each AC, QA agent verifies the code addresses it and explains how
- [ ] QA agent flags incomplete implementations: todo!(), unimplemented!(), missing match arms, placeholder values
- [ ] QA agent checks that new code has corresponding test coverage
- [ ] QA agent produces a structured report: each AC with pass/fail and explanation
- [ ] If any AC is not met, QA rejects the story with a clear reason so the coder can fix it
- [ ] Deterministic gates (clippy, tests) still run as a prerequisite before the AC review
## Out of Scope
- TBD
@@ -0,0 +1,27 @@
---
name: "Complete setup wizard with MCP tools and agent-driven file generation"
agent: "coder-opus"
---
# Story 432: Complete setup wizard with MCP tools and agent-driven file generation
## User Story
As a developer running storkit init on a new project, I want the setup wizard to walk me through each step interactively — generating files, letting me review them, and confirming before moving on — so that my project is correctly configured without manual file editing.
## Acceptance Criteria
- [ ] MCP tool wizard_status returns the current wizard state: which step is active, which are done/skipped/pending
- [ ] MCP tool wizard_generate triggers the agent to read the codebase and generate content for the current step (CONTEXT.md, STACK.md, script/test, script/release, script/test_coverage)
- [ ] MCP tool wizard_confirm confirms the current step and advances to the next
- [ ] MCP tool wizard_skip skips the current step and advances to the next
- [ ] MCP tool wizard_retry re-generates content for the current step if the user isn't happy with it
- [ ] Bot command setup shows wizard progress and the current step with instructions
- [ ] Bot command setup confirm / setup skip / setup retry drive the wizard from chat
- [ ] Generated files are written to disk only after user confirmation, not during generation preview
- [ ] The wizard works from Claude Code terminal via MCP tools without requiring the web UI or chat bot
- [ ] Existing files (especially CLAUDE.md) are never overwritten — wizard appends or skips
## Out of Scope
- TBD
@@ -0,0 +1,24 @@
---
name: "Setup wizard interviews user on bare projects with no existing code"
agent: "coder-opus"
---
# Story 433: Setup wizard interviews user on bare projects with no existing code
## User Story
As a developer starting a brand new project from an empty directory, I want the setup wizard to ask me what I'm building and what tech stack I plan to use, so that it can generate meaningful CONTEXT.md and STACK.md without any codebase to analyze.
## Acceptance Criteria
- [ ] wizard_generate detects when the project directory has no source code files
- [ ] On bare projects, the wizard asks the user what they want to build instead of trying to analyze code
- [ ] Wizard asks about intended tech stack, frameworks, and language choices
- [ ] Conversation continues until the user confirms the generated CONTEXT.md captures their intent
- [ ] STACK.md is generated from the user's stated tech choices rather than from codebase detection
- [ ] script/test and script/release are generated with appropriate stubs for the stated stack
- [ ] The interview flow works via both MCP tools (Claude Code terminal) and bot commands (Matrix/WhatsApp/Slack)
## Out of Scope
- TBD
@@ -0,0 +1,20 @@
---
name: "Wizard auto-checks completion on first conversation"
---
# Story 434: Wizard auto-checks completion on first conversation
## User Story
As a developer opening Claude Code on a storkit project for the first time, I want the wizard to automatically check if setup is complete and prompt me through remaining steps, so I don't have to know to ask for it.
## Acceptance Criteria
- [ ] Scaffolded CLAUDE.md includes an IMPORTANT instruction telling Claude to call wizard_status on first conversation
- [ ] If wizard is incomplete, Claude guides the user through remaining steps without being asked
- [ ] If wizard is already complete, no wizard prompt appears — Claude behaves normally
- [ ] Works on both existing projects with code and bare projects with no code
## Out of Scope
- TBD
@@ -0,0 +1,26 @@
---
name: "strip_prefix_ci panics on multi-byte UTF-8 input"
---
# Bug 437: strip_prefix_ci panics on multi-byte UTF-8 input
## Description
The `strip_prefix_ci` function in `server/src/chat/transport/matrix/assign.rs` slices the input string at `prefix.len()` bytes without checking that the offset is a valid UTF-8 char boundary. When the input message starts with multi-byte characters (e.g. `⏺` which is 3 bytes), the slice can land mid-character, causing a panic.
## How to Reproduce
Send a Matrix message to the bot that starts with a multi-byte UTF-8 character (e.g. `⏺ storkit - wizard_confirm`) where the bot name byte length falls inside a multi-byte character.
## Actual Result
Thread panics: `byte index 6 is not a char boundary; it is inside '⏺' (bytes 4..7)`
## Expected Result
The function should return `None` (no match) without panicking, since an ASCII bot name cannot match a slice containing multi-byte characters.
## Acceptance Criteria
- [ ] strip_prefix_ci checks is_char_boundary before slicing
- [ ] No panic when input contains multi-byte UTF-8 characters at the prefix boundary
@@ -0,0 +1,23 @@
---
name: "Slash command autocomplete in web UI text input"
---
# Story 438: Slash command autocomplete in web UI text input
## User Story
As a user, I want to type `/` at the start of the text box and see a filtered list of available slash commands, so that I can discover and quickly invoke commands without memorizing them.
## Acceptance Criteria
- [ ] Typing `/` at position 0 in the ChatInput textarea shows a command picker overlay above the input
- [ ] The overlay lists all slash commands with name and description
- [ ] Typing further characters after `/` fuzzy-filters the list
- [ ] Arrow keys navigate the list, Tab/Enter selects, Escape dismisses
- [ ] Selecting a command inserts `/<command> ` into the input (with trailing space)
- [ ] Command list is a single shared source of truth used by both the picker and HelpOverlay
- [ ] The overlay follows the same visual style as the existing file picker (@-mention overlay)
## Out of Scope
- TBD
@@ -0,0 +1,25 @@
---
name: "Deduplicate strip_prefix_ci / strip_bot_mention into chat::util"
---
# Refactor 439: Deduplicate strip_prefix_ci / strip_bot_mention into chat::util
## Current State
- TBD
## Desired State
Eight Matrix transport files (assign.rs, delete.rs, start.rs, rebuild.rs, reset.rs, rmtree.rs, htop.rs, timer.rs) each contain their own private copies of `strip_prefix_ci` and `strip_bot_mention`. The canonical versions already live in `chat::util` with the correct `is_char_boundary` guard. The duplicates should be removed and all call sites should use `util::strip_bot_mention` instead.
## Acceptance Criteria
- [ ] All 8 private copies of strip_prefix_ci are removed
- [ ] All 8 private copies of strip_bot_mention are removed
- [ ] All call sites use chat::util::strip_bot_mention instead
- [ ] Existing tests in util.rs continue to pass
- [ ] No new copies of strip_prefix_ci exist outside util.rs
## Out of Scope
- TBD
@@ -0,0 +1,24 @@
---
name: "Consolidate is_permission_approval into chat::util"
---
# Refactor 440: Consolidate is_permission_approval into chat::util
## Current State
- TBD
## Desired State
Three copies of `is_permission_approval` exist across Slack (`chat/transport/slack/commands.rs`), WhatsApp (`chat/transport/whatsapp/commands.rs`), and Matrix (`chat/transport/matrix/bot/messages.rs`). The Slack and WhatsApp versions are identical; the Matrix version is a superset that also strips @mentions. Consolidate into a single `pub` function in `chat::util` using the Matrix superset behavior, then delete the 3 private copies.
## Acceptance Criteria
- [ ] Single pub fn is_permission_approval exists in chat::util
- [ ] All 3 private copies are removed
- [ ] Matrix @mention-stripping behavior is preserved in the shared version
- [ ] All call sites use the shared version
## Out of Scope
- TBD
@@ -0,0 +1,22 @@
---
name: "Deduplicate get_project_root wrappers in io modules"
---
# Refactor 441: Deduplicate get_project_root wrappers in io modules
## Current State
- TBD
## Desired State
Both `io/shell.rs` and `io/search.rs` contain identical private one-liner wrappers around `state.get_project_root()`. Either inline the call at each usage site or create a single shared helper, then delete the duplicate wrappers.
## Acceptance Criteria
- [ ] No duplicate private get_project_root wrappers in io/shell.rs and io/search.rs
- [ ] All call sites use the canonical version or inline the call
## Out of Scope
- TBD
@@ -0,0 +1,23 @@
---
name: "Deduplicate stage_display_name into shared module"
---
# Refactor 442: Deduplicate stage_display_name into shared module
## Current State
- TBD
## Desired State
`stage_display_name` has a `pub fn` in `chat/transport/matrix/notifications.rs` and a private copy in `chat/transport/matrix/delete.rs` with slightly different casing ("backlog" vs "Backlog", "in-progress" vs "Current"). The delete.rs copy should use the canonical version from notifications.rs, adjusting the callsite if the casing difference matters.
## Acceptance Criteria
- [ ] Private stage_display_name in delete.rs is removed
- [ ] delete.rs uses the pub version from notifications.rs
- [ ] Display casing is consistent or callsite is adjusted to handle the difference
## Out of Scope
- TBD
@@ -0,0 +1,23 @@
---
name: "Extract shared find_story_name from commands"
---
# Refactor 443: Extract shared find_story_name from commands
## Current State
- TBD
## Desired State
`find_story_name` is nearly identical in `chat/commands/overview.rs` and `chat/commands/unreleased.rs` (minor style diff: `let stages` vs `const STAGES`). Extract to a shared location (e.g. `chat::commands::util` or `io::stories`) and have both callers use it.
## Acceptance Criteria
- [ ] Single shared find_story_name function exists
- [ ] Both overview.rs and unreleased.rs use the shared version
- [ ] Private copies are removed
## Out of Scope
- TBD
@@ -0,0 +1,26 @@
---
name: "Extract shared test helpers (test_ctx, write_story_file, make_api)"
agent: "coder-opus"
---
# Refactor 444: Extract shared test helpers (test_ctx, write_story_file, make_api)
## Current State
- TBD
## Desired State
Several test helper functions are copy-pasted across many test modules: `test_ctx` (10 copies across http/ modules), `write_story_file` (5 copies across chat/commands/ and matrix/), `make_api` (5 copies across http/ modules), `setup_project` (3 copies in io/). Extract each into a shared `#[cfg(test)]` utility module so test scaffolding is maintained in one place.
## Acceptance Criteria
- [ ] test_ctx has a single shared definition used by all 10 http test modules
- [ ] write_story_file has a single shared definition used by all 5 callers
- [ ] make_api has a single shared definition used by all 5 callers
- [ ] setup_project has a single shared definition used by all 3 callers
- [ ] All private copies in individual test modules are removed
## Out of Scope
- TBD
@@ -0,0 +1,28 @@
---
name: "Rate-limited mergemaster exits advance stories to done without merging"
---
# Bug 445: Rate-limited mergemaster exits advance stories to done without merging
## Description
When the mergemaster agent is immediately rate-limited (zero turns, zero tool calls), it exits and run_server_owned_completion runs acceptance gates on the existing worktree. Since the coder already committed working code, the gates pass, and the pipeline advances the story to done — even though the mergemaster never executed run_squash_merge and the code was never cherry-picked onto master.
## How to Reproduce
Observed on stories 439 and 442. All mergemaster log entries show: init → rate_limit_event → error result. Zero turns, zero MCP tool calls, duration under 350ms. Yet both stories ended up in done with no merge commit on master.
## Actual Result
Stories advance to done with no code on master. The mergemaster never ran but the pipeline treated its exit as a successful completion.
## Expected Result
If the mergemaster exits without completing its work (no merge commit produced), the story should stay in the merge stage for retry, not advance to done.
## Acceptance Criteria
- [ ] run_server_owned_completion must not run for mergemaster agents — mergemaster has its own completion path via start_merge_agent_work
- [ ] If the mergemaster process exits without producing a SquashMergeResult, the story stays in merge stage
- [ ] Rate-limited mergemaster exits are treated as transient failures, not gate-passing completions
- [ ] Story remains eligible for retry when mergemaster fails due to rate limiting
Generated
+7 -7
View File
@@ -1774,9 +1774,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
[[package]] [[package]]
name = "iri-string" name = "iri-string"
version = "0.7.11" version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb" checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20"
dependencies = [ dependencies = [
"memchr", "memchr",
"serde", "serde",
@@ -4019,7 +4019,7 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
[[package]] [[package]]
name = "storkit" name = "storkit"
version = "0.8.0" version = "0.8.2"
dependencies = [ dependencies = [
"async-stream", "async-stream",
"async-trait", "async-trait",
@@ -5618,18 +5618,18 @@ dependencies = [
[[package]] [[package]]
name = "zerocopy" name = "zerocopy"
version = "0.8.47" version = "0.8.48"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87" checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
dependencies = [ dependencies = [
"zerocopy-derive", "zerocopy-derive",
] ]
[[package]] [[package]]
name = "zerocopy-derive" name = "zerocopy-derive"
version = "0.8.47" version = "0.8.48"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
+37 -2
View File
@@ -1,6 +1,37 @@
# Storkit # Storkit
A story-driven development server that manages work items, spawns coding agents, and runs them through a pipeline from backlog to done. Ships as a single Rust binary with an embedded React frontend. Communicates via Matrix, WhatsApp, and Slack bot transports, and exposes MCP tools for programmatic access. A story-driven development server that manages work items, spawns coding agents, and runs them through a pipeline from backlog to done. Ships as a single Rust binary with an embedded React frontend.
## Getting started with Claude Code
1. Download the storkit binary (or build from source — see below).
2. From your project directory, scaffold and start the server:
```bash
storkit init --port 3000
```
This creates a `.storkit/` directory with the pipeline structure, `project.toml`, and `.mcp.json`. The `.mcp.json` file lets Claude Code discover storkit's MCP tools automatically.
3. Open a Claude Code session in the same project directory. Claude will pick up the MCP tools from `.mcp.json`.
4. Tell Claude: "help me set up this project with storkit." Claude will walk you through the setup wizard — generating project context, tech stack docs, and test/release scripts. Review each step and confirm or ask to retry.
Once setup is complete, Claude can create stories, start agents, check status, and manage the full pipeline via MCP tools — no commands to memorize.
## Web UI
Storkit also ships an embedded React frontend. Once the server is running, open `http://localhost:3000` to see the pipeline board, agent status, and chat interface.
## Chat transports
Storkit can be controlled via bot commands in **Matrix**, **WhatsApp**, and **Slack**. Configure a transport in `.storkit/bot.toml` — see the example files:
- `.storkit/bot.toml.matrix.example`
- `.storkit/bot.toml.whatsapp-meta.example`
- `.storkit/bot.toml.whatsapp-twilio.example`
- `.storkit/bot.toml.slack.example`
## Prerequisites ## Prerequisites
@@ -49,7 +80,11 @@ Configuration lives in `.storkit/project.toml`. See `.storkit/bot.toml.*.example
Requires a Gitea API token in `.env` (`GITEA_TOKEN=your_token`). Requires a Gitea API token in `.env` (`GITEA_TOKEN=your_token`).
```bash ```bash
script/release 0.6.1 script/release 0.7.1
``` ```
This bumps version in `Cargo.toml` and `package.json`, builds macOS arm64 and Linux amd64 binaries, tags the repo, and publishes a Gitea release with changelog and binaries attached. This bumps version in `Cargo.toml` and `package.json`, builds macOS arm64 and Linux amd64 binaries, tags the repo, and publishes a Gitea release with changelog and binaries attached.
## License
GPL-3.0. See [LICENSE](LICENSE).
+2 -2
View File
@@ -1,12 +1,12 @@
{ {
"name": "living-spec-standalone", "name": "living-spec-standalone",
"version": "0.8.0", "version": "0.8.2",
"lockfileVersion": 3, "lockfileVersion": 3,
"requires": true, "requires": true,
"packages": { "packages": {
"": { "": {
"name": "living-spec-standalone", "name": "living-spec-standalone",
"version": "0.8.0", "version": "0.8.2",
"dependencies": { "dependencies": {
"@types/react-syntax-highlighter": "^15.5.13", "@types/react-syntax-highlighter": "^15.5.13",
"react": "^19.1.0", "react": "^19.1.0",
+1 -1
View File
@@ -1,7 +1,7 @@
{ {
"name": "living-spec-standalone", "name": "living-spec-standalone",
"private": true, "private": true,
"version": "0.8.0", "version": "0.8.2",
"type": "module", "type": "module",
"scripts": { "scripts": {
"dev": "vite", "dev": "vite",
+24
View File
@@ -1481,6 +1481,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/status" } }); fireEvent.change(input, { target: { value: "/status" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
@@ -1551,6 +1555,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/git" } }); fireEvent.change(input, { target: { value: "/git" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
@@ -1569,6 +1577,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/cost" } }); fireEvent.change(input, { target: { value: "/cost" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
@@ -1595,6 +1607,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/reset" } }); fireEvent.change(input, { target: { value: "/reset" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
@@ -1634,6 +1650,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/help" } }); fireEvent.change(input, { target: { value: "/help" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
@@ -1652,6 +1672,10 @@ describe("Slash command handling (Story 374)", () => {
await act(async () => { await act(async () => {
fireEvent.change(input, { target: { value: "/git" } }); fireEvent.change(input, { target: { value: "/git" } });
}); });
// First Enter selects the command from the picker; second Enter submits it
await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
await act(async () => { await act(async () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false }); fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
}); });
+1
View File
@@ -1059,6 +1059,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
)} )}
{messages.map((msg: Message, idx: number) => ( {messages.map((msg: Message, idx: number) => (
<MessageItem <MessageItem
// biome-ignore lint/suspicious/noArrayIndexKey: Message has no stable ID
key={`msg-${idx}-${msg.role}-${msg.content.substring(0, 20)}`} key={`msg-${idx}-${msg.role}-${msg.content.substring(0, 20)}`}
msg={msg} msg={msg}
/> />
+172 -4
View File
@@ -1,5 +1,6 @@
import * as React from "react"; import * as React from "react";
import { api } from "../api/client"; import { api } from "../api/client";
import { SLASH_COMMANDS, type SlashCommand } from "../slashCommands";
const { const {
forwardRef, forwardRef,
@@ -113,6 +114,83 @@ function FilePickerOverlay({
); );
} }
// Props for the slash command picker rendered above the chat input.
interface SlashCommandPickerOverlayProps {
  query: string;
  selectedIndex: number;
  onSelect: (cmd: SlashCommand) => void;
}

/**
 * Overlay listing slash commands that fuzzy-match `query`, anchored above
 * the chat input. The entry at `selectedIndex` is highlighted; clicking an
 * entry invokes `onSelect`. Renders nothing when no command matches.
 */
function SlashCommandPickerOverlay({
  query,
  selectedIndex,
  onSelect,
}: SlashCommandPickerOverlayProps) {
  // Narrow the command list to fuzzy matches, then order by fuzzyScore
  // ascending — presumably lower score means a better match; this mirrors
  // the ordering ChatInput uses for keyboard selection.
  const matches = SLASH_COMMANDS.filter((cmd) => fuzzyMatch(cmd.name, query));
  matches.sort((a, b) => fuzzyScore(a.name, query) - fuzzyScore(b.name, query));

  if (matches.length === 0) {
    return null;
  }

  return (
    <div
      data-testid="slash-command-picker"
      style={{
        position: "absolute",
        bottom: "100%",
        left: 0,
        right: 0,
        background: "#1e1e1e",
        border: "1px solid #444",
        borderRadius: "8px",
        marginBottom: "6px",
        overflow: "hidden",
        zIndex: 100,
        boxShadow: "0 4px 16px rgba(0,0,0,0.4)",
        maxHeight: "300px",
        overflowY: "auto",
      }}
    >
      {matches.map((command, i) => {
        const isSelected = i === selectedIndex;
        return (
          <button
            key={command.name}
            type="button"
            data-testid={`slash-command-item-${i}`}
            onClick={() => onSelect(command)}
            style={{
              display: "flex",
              flexDirection: "column",
              width: "100%",
              textAlign: "left",
              padding: "10px 14px",
              background: isSelected ? "#2d4a6e" : "transparent",
              border: "none",
              cursor: "pointer",
              gap: "2px",
            }}
          >
            <code
              style={{
                fontSize: "0.88rem",
                color: isSelected ? "#ececec" : "#e0e0e0",
                fontFamily: "monospace",
              }}
            >
              {command.name}
            </code>
            <span
              style={{
                fontSize: "0.78rem",
                color: isSelected ? "#b0c0d0" : "#888",
              }}
            >
              {command.description}
            </span>
          </button>
        );
      })}
    </div>
  );
}
export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>( export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
function ChatInput( function ChatInput(
{ loading, queuedMessages, onSubmit, onCancel, onRemoveQueuedMessage }, { loading, queuedMessages, onSubmit, onCancel, onRemoveQueuedMessage },
@@ -127,6 +205,10 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
const [pickerSelectedIndex, setPickerSelectedIndex] = useState(0); const [pickerSelectedIndex, setPickerSelectedIndex] = useState(0);
const [pickerAtStart, setPickerAtStart] = useState(0); const [pickerAtStart, setPickerAtStart] = useState(0);
// Slash command picker state
const [slashQuery, setSlashQuery] = useState<string | null>(null);
const [slashSelectedIndex, setSlashSelectedIndex] = useState(0);
useImperativeHandle(ref, () => ({ useImperativeHandle(ref, () => ({
appendToInput(text: string) { appendToInput(text: string) {
setInput((prev) => (prev ? `${prev}\n${text}` : text)); setInput((prev) => (prev ? `${prev}\n${text}` : text));
@@ -153,6 +235,31 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
setPickerSelectedIndex(0); setPickerSelectedIndex(0);
}, []); }, []);
// Compute filtered slash commands for current query
const filteredCommands =
slashQuery !== null
? SLASH_COMMANDS.filter((cmd) => fuzzyMatch(cmd.name, slashQuery)).sort(
(a, b) =>
fuzzyScore(a.name, slashQuery) - fuzzyScore(b.name, slashQuery),
)
: [];
const dismissSlashPicker = useCallback(() => {
setSlashQuery(null);
setSlashSelectedIndex(0);
}, []);
const selectCommand = useCallback(
(cmd: SlashCommand) => {
// Extract base command (first word, e.g. "/assign" from "/assign <number> <model>")
const baseCommand = cmd.name.split(" ")[0];
setInput(`${baseCommand} `);
dismissSlashPicker();
setTimeout(() => inputRef.current?.focus(), 0);
},
[dismissSlashPicker],
);
const selectFile = useCallback( const selectFile = useCallback(
(file: string) => { (file: string) => {
// Replace the @query portion with @file // Replace the @query portion with @file
@@ -173,11 +280,20 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
setInput(val); setInput(val);
const cursor = e.target.selectionStart ?? val.length; const cursor = e.target.selectionStart ?? val.length;
// Find the last @ before the cursor that starts a reference token
const textUpToCursor = val.slice(0, cursor); const textUpToCursor = val.slice(0, cursor);
// Match @ not preceded by non-whitespace (i.e. @ at start or after space/newline)
const atMatch = textUpToCursor.match(/(^|[\s\n])@([^\s@]*)$/);
// Slash command picker: triggered when input starts with / and no space yet
const slashMatch = textUpToCursor.match(/^\/(\S*)$/);
if (slashMatch) {
setSlashQuery(slashMatch[1]);
setSlashSelectedIndex(0);
if (pickerQuery !== null) dismissPicker();
return;
}
if (slashQuery !== null) dismissSlashPicker();
// File picker: triggered by @ at start or after whitespace
const atMatch = textUpToCursor.match(/(^|[\s\n])@([^\s@]*)$/);
if (atMatch) { if (atMatch) {
const query = atMatch[2]; const query = atMatch[2];
const atPos = textUpToCursor.lastIndexOf("@"); const atPos = textUpToCursor.lastIndexOf("@");
@@ -196,11 +312,50 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
if (pickerQuery !== null) dismissPicker(); if (pickerQuery !== null) dismissPicker();
} }
}, },
[projectFiles.length, pickerQuery, dismissPicker], [
projectFiles.length,
pickerQuery,
dismissPicker,
slashQuery,
dismissSlashPicker,
],
); );
const handleKeyDown = useCallback( const handleKeyDown = useCallback(
(e: React.KeyboardEvent<HTMLTextAreaElement>) => { (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
// Slash command picker navigation
if (slashQuery !== null && filteredCommands.length > 0) {
if (e.key === "ArrowDown") {
e.preventDefault();
setSlashSelectedIndex((i) =>
Math.min(i + 1, filteredCommands.length - 1),
);
return;
}
if (e.key === "ArrowUp") {
e.preventDefault();
setSlashSelectedIndex((i) => Math.max(i - 1, 0));
return;
}
if (e.key === "Tab" || e.key === "Enter") {
e.preventDefault();
selectCommand(
filteredCommands[slashSelectedIndex] ?? filteredCommands[0],
);
return;
}
if (e.key === "Escape") {
e.preventDefault();
dismissSlashPicker();
return;
}
} else if (e.key === "Escape" && slashQuery !== null) {
e.preventDefault();
dismissSlashPicker();
return;
}
// File picker navigation
if (pickerQuery !== null && filteredFiles.length > 0) { if (pickerQuery !== null && filteredFiles.length > 0) {
if (e.key === "ArrowDown") { if (e.key === "ArrowDown") {
e.preventDefault(); e.preventDefault();
@@ -236,6 +391,11 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
} }
}, },
[ [
slashQuery,
filteredCommands,
slashSelectedIndex,
selectCommand,
dismissSlashPicker,
pickerQuery, pickerQuery,
filteredFiles, filteredFiles,
pickerSelectedIndex, pickerSelectedIndex,
@@ -249,6 +409,7 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
onSubmit(input); onSubmit(input);
setInput(""); setInput("");
dismissPicker(); dismissPicker();
dismissSlashPicker();
}; };
return ( return (
@@ -357,6 +518,13 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
position: "relative", position: "relative",
}} }}
> >
{slashQuery !== null && (
<SlashCommandPickerOverlay
query={slashQuery}
selectedIndex={slashSelectedIndex}
onSelect={selectCommand}
/>
)}
{pickerQuery !== null && ( {pickerQuery !== null && (
<FilePickerOverlay <FilePickerOverlay
query={pickerQuery} query={pickerQuery}
@@ -0,0 +1,240 @@
// Unit tests for the slash-command picker in ChatInput (Story 438).
import { act, fireEvent, render, screen } from "@testing-library/react";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { ChatInput } from "./ChatInput";

// Stub the API client so the file picker's project-file fetch resolves to
// an empty list instead of hitting the network. NOTE: vitest hoists
// vi.mock() calls above the imports, so ChatInput sees the mock.
vi.mock("../api/client", () => ({
  api: {
    listProjectFiles: vi.fn().mockResolvedValue([]),
  },
}));

// Minimal ChatInput props; all handlers are spies so tests can assert calls.
const defaultProps = {
  loading: false,
  queuedMessages: [],
  onSubmit: vi.fn(),
  onCancel: vi.fn(),
  onRemoveQueuedMessage: vi.fn(),
};

// Reset spy call history between tests so assertions don't leak across cases.
beforeEach(() => {
  vi.clearAllMocks();
});
describe("Slash command picker overlay (Story 438 AC1)", () => {
  // Renders a fresh ChatInput and types `value` into the message textarea.
  const typeMessage = async (value: string) => {
    render(<ChatInput {...defaultProps} />);
    const input = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(input, { target: { value } });
    });
  };

  it("shows slash command picker when / is typed at position 0", async () => {
    await typeMessage("/");
    expect(screen.getByTestId("slash-command-picker")).toBeInTheDocument();
  });

  it("does not show slash command picker for plain text", async () => {
    await typeMessage("hello");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
  });

  it("does not show slash command picker when / is not at position 0", async () => {
    await typeMessage("hello /world");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
  });
});
describe("Slash command list (Story 438 AC2)", () => {
  it("lists slash commands with name and description", async () => {
    render(<ChatInput {...defaultProps} />);
    const box = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(box, { target: { value: "/" } });
    });
    expect(screen.getByTestId("slash-command-picker")).toBeInTheDocument();
    // The first entry in SLASH_COMMANDS is /help, so item 0 must show it.
    const first = screen.getByTestId("slash-command-item-0");
    expect(first).toBeInTheDocument();
    expect(first).toHaveTextContent("/help");
  });
});
describe("Slash command fuzzy filter (Story 438 AC3)", () => {
  // Renders the ChatInput and types `value` into the message box.
  const typeValue = async (value: string) => {
    render(<ChatInput {...defaultProps} />);
    const box = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(box, { target: { value } });
    });
  };

  it("filters commands when typing after /", async () => {
    await typeValue("/hel");
    expect(screen.getByTestId("slash-command-picker")).toBeInTheDocument();
    // "hel" matches /help, which should be the top result…
    expect(screen.getByTestId("slash-command-item-0")).toHaveTextContent(
      "/help",
    );
    // …while /rebuild (no "hel" match) must be filtered out entirely.
    const rendered = screen
      .queryAllByTestId(/^slash-command-item-/)
      .map((el) => el.textContent ?? "");
    expect(rendered.some((t) => t.includes("/rebuild"))).toBe(false);
  });

  it("shows no picker when query matches nothing", async () => {
    await typeValue("/zzzzz");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
  });
});
describe("Slash command keyboard navigation (Story 438 AC4)", () => {
  // Highlight style applied to the selected picker row.
  const HIGHLIGHT = { background: "#2d4a6e" };

  // Renders the input, types `value`, and returns the textarea element.
  const setup = async (value: string) => {
    render(<ChatInput {...defaultProps} />);
    const box = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(box, { target: { value } });
    });
    return box as HTMLTextAreaElement;
  };

  const press = async (el: Element, key: string) => {
    await act(async () => {
      fireEvent.keyDown(el, { key });
    });
  };

  it("ArrowDown navigates to next item", async () => {
    const box = await setup("/");
    expect(screen.getByTestId("slash-command-item-0")).toHaveStyle(HIGHLIGHT);
    await press(box, "ArrowDown");
    expect(screen.getByTestId("slash-command-item-1")).toHaveStyle(HIGHLIGHT);
  });

  it("ArrowUp stays at 0 when already at top", async () => {
    const box = await setup("/");
    await press(box, "ArrowUp");
    expect(screen.getByTestId("slash-command-item-0")).toHaveStyle(HIGHLIGHT);
  });

  it("Enter selects the highlighted command and inserts it", async () => {
    const box = await setup("/hel");
    await press(box, "Enter");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
    expect(box.value).toBe("/help ");
  });

  it("Tab selects the highlighted command and inserts it", async () => {
    const box = await setup("/hel");
    await press(box, "Tab");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
    expect(box.value).toBe("/help ");
  });

  it("Escape dismisses the picker", async () => {
    const box = await setup("/");
    expect(screen.getByTestId("slash-command-picker")).toBeInTheDocument();
    await press(box, "Escape");
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
  });
});
describe("Slash command selection inserts with trailing space (Story 438 AC5)", () => {
  it("clicking a command inserts /<command> with trailing space", async () => {
    render(<ChatInput {...defaultProps} />);
    const box = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(box, { target: { value: "/" } });
    });
    await act(async () => {
      fireEvent.click(screen.getByTestId("slash-command-item-0"));
    });
    expect(
      screen.queryByTestId("slash-command-picker"),
    ).not.toBeInTheDocument();
    // Inserted text is exactly one command word plus a single trailing space.
    expect((box as HTMLTextAreaElement).value).toMatch(/^\/\w+ $/);
  });

  it("selection inserts only the base command (no argument placeholders)", async () => {
    render(<ChatInput {...defaultProps} />);
    const box = screen.getByPlaceholderText("Send a message...");
    await act(async () => {
      fireEvent.change(box, { target: { value: "/ass" } });
    });
    await act(async () => {
      fireEvent.keyDown(box, { key: "Enter" });
    });
    // "/assign <number> <model>" collapses to the bare "/assign " on insert.
    expect((box as HTMLTextAreaElement).value).toBe("/assign ");
  });
});
+1 -68
View File
@@ -1,75 +1,8 @@
import * as React from "react"; import * as React from "react";
import { SLASH_COMMANDS } from "../slashCommands";
const { useEffect, useRef } = React; const { useEffect, useRef } = React;
interface SlashCommand {
name: string;
description: string;
}
const SLASH_COMMANDS: SlashCommand[] = [
{
name: "/help",
description: "Show this list of available slash commands.",
},
{
name: "/status",
description:
"Show pipeline status and agent availability. `/status <number>` shows a story triage dump.",
},
{
name: "/assign <number> <model>",
description: "Pre-assign a model to a story (e.g. `/assign 42 opus`).",
},
{
name: "/start <number>",
description:
"Start a coder on a story. Optionally specify a model: `/start <number> opus`.",
},
{
name: "/show <number>",
description: "Display the full text of a work item.",
},
{
name: "/move <number> <stage>",
description:
"Move a work item to a pipeline stage (backlog, current, qa, merge, done).",
},
{
name: "/delete <number>",
description:
"Remove a work item from the pipeline and stop any running agent.",
},
{
name: "/cost",
description:
"Show token spend: 24h total, top stories, breakdown by agent type, and all-time total.",
},
{
name: "/git",
description:
"Show git status: branch, uncommitted changes, and ahead/behind remote.",
},
{
name: "/overview <number>",
description: "Show the implementation summary for a merged story.",
},
{
name: "/rebuild",
description: "Rebuild the server binary and restart.",
},
{
name: "/reset",
description:
"Clear the current Claude Code session and start fresh (messages and session ID are cleared locally).",
},
{
name: "/btw <question>",
description:
"Ask a side question using the current conversation as context. The question and answer are not added to the conversation history.",
},
];
interface HelpOverlayProps { interface HelpOverlayProps {
onDismiss: () => void; onDismiss: () => void;
} }
+1
View File
@@ -136,6 +136,7 @@ function MessageItemInner({ msg }: MessageItemProps) {
return ( return (
<div <div
// biome-ignore lint/suspicious/noArrayIndexKey: ToolCall has no stable ID
key={`tool-${i}-${tc.function.name}`} key={`tool-${i}-${tc.function.name}`}
style={{ style={{
display: "flex", display: "flex",
@@ -202,6 +202,7 @@ export function ServerLogsPanel({ logs }: ServerLogsPanelProps) {
) : ( ) : (
filteredLogs.map((entry, idx) => ( filteredLogs.map((entry, idx) => (
<div <div
// biome-ignore lint/suspicious/noArrayIndexKey: log entries have no stable ID
key={`${entry.timestamp}-${idx}`} key={`${entry.timestamp}-${idx}`}
style={{ style={{
display: "flex", display: "flex",
+67
View File
@@ -0,0 +1,67 @@
/** A chat slash command surfaced by the web UI autocomplete picker. */
export interface SlashCommand {
  // Command as typed, e.g. "/help"; may carry argument placeholders such as
  // "/assign <number> <model>".
  name: string;
  // One-line human-readable explanation shown next to the name in the picker.
  description: string;
}

/**
 * Canonical list of slash commands for the chat-input autocomplete.
 * NOTE(review): consumers appear to render entries in array order (/help is
 * expected first by the ChatInput tests) — keep /help at index 0.
 */
export const SLASH_COMMANDS: SlashCommand[] = [
  {
    name: "/help",
    description: "Show this list of available slash commands.",
  },
  {
    name: "/status",
    description:
      "Show pipeline status and agent availability. `/status <number>` shows a story triage dump.",
  },
  {
    name: "/assign <number> <model>",
    description: "Pre-assign a model to a story (e.g. `/assign 42 opus`).",
  },
  {
    name: "/start <number>",
    description:
      "Start a coder on a story. Optionally specify a model: `/start <number> opus`.",
  },
  {
    name: "/show <number>",
    description: "Display the full text of a work item.",
  },
  {
    name: "/move <number> <stage>",
    description:
      "Move a work item to a pipeline stage (backlog, current, qa, merge, done).",
  },
  {
    name: "/delete <number>",
    description:
      "Remove a work item from the pipeline and stop any running agent.",
  },
  {
    name: "/cost",
    description:
      "Show token spend: 24h total, top stories, breakdown by agent type, and all-time total.",
  },
  {
    name: "/git",
    description:
      "Show git status: branch, uncommitted changes, and ahead/behind remote.",
  },
  {
    name: "/overview <number>",
    description: "Show the implementation summary for a merged story.",
  },
  {
    name: "/rebuild",
    description: "Rebuild the server binary and restart.",
  },
  {
    name: "/reset",
    description:
      "Clear the current Claude Code session and start fresh (messages and session ID are cleared locally).",
  },
  {
    name: "/btw <question>",
    description:
      "Ask a side question using the current conversation as context. The question and answer are not added to the conversation history.",
  },
];
+3
View File
@@ -4,6 +4,9 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
echo "=== Running cargo clippy ==="
cargo clippy --manifest-path "$PROJECT_ROOT/Cargo.toml" --all-targets --all-features
echo "=== Running Rust tests ===" echo "=== Running Rust tests ==="
cargo test --manifest-path "$PROJECT_ROOT/Cargo.toml" cargo test --manifest-path "$PROJECT_ROOT/Cargo.toml"
+1 -1
View File
@@ -1,6 +1,6 @@
[package] [package]
name = "storkit" name = "storkit"
version = "0.8.0" version = "0.8.2"
edition = "2024" edition = "2024"
build = "build.rs" build = "build.rs"
+4 -31
View File
@@ -171,39 +171,12 @@ fn run_command_with_timeout(
/// otherwise `cargo nextest run` / `cargo test`) in the given directory. /// otherwise `cargo nextest run` / `cargo test`) in the given directory.
/// Returns `(gates_passed, combined_output)`. /// Returns `(gates_passed, combined_output)`.
pub(crate) fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> { pub(crate) fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> {
let mut all_output = String::new(); // Run script/test (or fallback to cargo test). This is the sole
let mut all_passed = true; // acceptance gate — project-specific linting and test commands belong
// in script/test, not hardcoded here.
// ── cargo clippy ──────────────────────────────────────────────
let clippy = Command::new("cargo")
.args(["clippy", "--all-targets", "--all-features"])
.current_dir(path)
.output()
.map_err(|e| format!("Failed to run cargo clippy: {e}"))?;
all_output.push_str("=== cargo clippy ===\n");
let clippy_stdout = String::from_utf8_lossy(&clippy.stdout);
let clippy_stderr = String::from_utf8_lossy(&clippy.stderr);
if !clippy_stdout.is_empty() {
all_output.push_str(&clippy_stdout);
}
if !clippy_stderr.is_empty() {
all_output.push_str(&clippy_stderr);
}
all_output.push('\n');
if !clippy.status.success() {
all_passed = false;
}
// ── tests (script/test if available, else cargo nextest/test) ─
let (test_success, test_out) = run_project_tests(path)?; let (test_success, test_out) = run_project_tests(path)?;
all_output.push_str(&test_out);
if !test_success {
all_passed = false;
}
Ok((all_passed, all_output)) Ok((test_success, test_out))
} }
/// Run `script/test_coverage` in the given directory if the script exists. /// Run `script/test_coverage` in the given directory if the script exists.
+4 -3
View File
@@ -247,7 +247,7 @@ pub(crate) fn run_squash_merge(
.output() .output()
.map_err(|e| format!("Failed to check merge diff: {e}"))?; .map_err(|e| format!("Failed to check merge diff: {e}"))?;
let changed_files = String::from_utf8_lossy(&diff_check.stdout); let changed_files = String::from_utf8_lossy(&diff_check.stdout);
let has_code_changes = changed_files.lines().any(|f| !f.starts_with(".storkit/")); let has_code_changes = changed_files.lines().any(|f| !f.starts_with(".storkit/work/"));
if !has_code_changes { if !has_code_changes {
all_output.push_str( all_output.push_str(
"=== Merge commit contains only .storkit/ file moves, no code changes ===\n", "=== Merge commit contains only .storkit/ file moves, no code changes ===\n",
@@ -419,9 +419,10 @@ pub(crate) fn run_squash_merge(
} }
// Verify HEAD commit has actual code changes (not an empty cherry-pick). // Verify HEAD commit has actual code changes (not an empty cherry-pick).
// Exclude .storkit/ so that story-file-only commits don't pass this check. // Exclude .storkit/work/ (pipeline file moves) but keep .storkit/project.toml
// and other config files which are legitimate deliverables.
let diff_stat = Command::new("git") let diff_stat = Command::new("git")
.args(["diff", "--stat", "HEAD~1..HEAD", "--", ".", ":(exclude).storkit"]) .args(["diff", "--stat", "HEAD~1..HEAD", "--", ".", ":(exclude).storkit/work"])
.current_dir(project_root) .current_dir(project_root)
.output() .output()
.map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
+45
View File
@@ -498,6 +498,50 @@ impl AgentPool {
} }
} }
// Mergemaster agents have their own completion path via
// start_merge_agent_work / run_merge_pipeline and must NOT go
// through server-owned gates. When a mergemaster exits early
// (e.g. rate-limited before calling start_merge_agent_work) the
// feature-branch worktree compiles fine and post-merge tests on
// master pass (nothing changed), which would wrongly advance the
// story to 5_done/ without any squash merge having occurred.
// Instead: just remove the agent from the pool and let
// auto-assign restart a new mergemaster for the story.
let stage = config_clone
.find_agent(&aname)
.map(agent_config_stage)
.unwrap_or_else(|| pipeline_stage(&aname));
if stage == PipelineStage::Mergemaster {
let (tx_done, done_session_id) = {
let mut lock = match agents_ref.lock() {
Ok(a) => a,
Err(_) => return,
};
if let Some(agent) = lock.remove(&key_clone) {
(agent.tx, agent.session_id.or(result.session_id))
} else {
(tx_clone.clone(), result.session_id)
}
};
let _ = tx_done.send(AgentEvent::Done {
story_id: sid.clone(),
agent_name: aname.clone(),
session_id: done_session_id,
});
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
// Send a WorkItem event so the auto-assign watcher loop
// re-dispatches a new mergemaster if the story still needs
// merging. This avoids an async call to start_agent inside
// a tokio::spawn (which would require Send).
let _ = watcher_tx_clone.send(
crate::io::watcher::WatcherEvent::WorkItem {
stage: "4_merge".to_string(),
item_id: sid.clone(),
action: "reassign".to_string(),
commit_msg: String::new(),
},
);
} else {
// Server-owned completion: run acceptance gates automatically // Server-owned completion: run acceptance gates automatically
// when the agent process exits normally. // when the agent process exits normally.
super::pipeline::run_server_owned_completion( super::pipeline::run_server_owned_completion(
@@ -511,6 +555,7 @@ impl AgentPool {
.await; .await;
AgentPool::notify_agent_state_changed(&watcher_tx_clone); AgentPool::notify_agent_state_changed(&watcher_tx_clone);
} }
}
Err(e) => { Err(e) => {
slog_error!("[agents] Agent process error for {aname} on {sid}: {e}"); slog_error!("[agents] Agent process error for {aname} on {sid}: {e}");
let event = AgentEvent::Error { let event = AgentEvent::Error {
+95 -1
View File
@@ -4,7 +4,7 @@ use std::collections::HashMap;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use super::super::super::{AgentEvent, AgentStatus, CompletionReport}; use super::super::super::{AgentEvent, AgentStatus, CompletionReport, PipelineStage, pipeline_stage};
use super::super::{AgentPool, StoryAgent, composite_key}; use super::super::{AgentPool, StoryAgent, composite_key};
use super::advance::spawn_pipeline_advance; use super::advance::spawn_pipeline_advance;
@@ -155,6 +155,21 @@ pub(in crate::agents::pool) async fn run_server_owned_completion(
) { ) {
let key = composite_key(story_id, agent_name); let key = composite_key(story_id, agent_name);
// Guard: mergemaster agents have their own completion path via
// start_merge_agent_work / run_merge_pipeline. Running server-owned gates
// for a mergemaster would wrongly advance the story to 5_done/ even when
// no squash merge has occurred (e.g. rate-limited exit before the agent
// called start_merge_agent_work). The lifecycle caller is responsible for
// cleaning up the agent entry and triggering auto-assign.
if pipeline_stage(agent_name) == PipelineStage::Mergemaster {
slog!(
"[agents] run_server_owned_completion skipped for mergemaster \
'{story_id}:{agent_name}'; mergemaster completion is handled by \
start_merge_agent_work."
);
return;
}
// Guard: skip if completion was already recorded (legacy path). // Guard: skip if completion was already recorded (legacy path).
{ {
let lock = match agents.lock() { let lock = match agents.lock() {
@@ -516,4 +531,83 @@ mod tests {
) )
.await; .await;
} }
/// Regression test for bug 445: a rate-limited mergemaster exits before
/// calling start_merge_agent_work. run_server_owned_completion must be a
/// no-op for mergemaster agents — it must not run acceptance gates and must
/// not advance the story to 5_done/ even when a passing script/test exists.
///
/// Before the fix: run_server_owned_completion would call run_pipeline_advance
/// for the Mergemaster stage, which ran post-merge tests on master (they pass
/// because nothing changed), then called move_story_to_done — advancing the
/// story without any squash merge having occurred.
#[cfg(unix)]
#[tokio::test]
async fn server_owned_completion_is_noop_for_mergemaster() {
use std::fs;
use std::os::unix::fs::PermissionsExt;
use tempfile::tempdir;
let tmp = tempdir().unwrap();
let root = tmp.path();
init_git_repo(root);
// Create a passing script/test so post-merge tests would succeed if
// run_pipeline_advance were incorrectly called for this mergemaster.
let script_dir = root.join("script");
fs::create_dir_all(&script_dir).unwrap();
let script_test = script_dir.join("test");
fs::write(&script_test, "#!/usr/bin/env sh\nexit 0\n").unwrap();
let mut perms = fs::metadata(&script_test).unwrap().permissions();
perms.set_mode(0o755);
fs::set_permissions(&script_test, perms).unwrap();
// Story in 4_merge/ — must NOT be moved to 5_done/.
let merge_dir = root.join(".storkit/work/4_merge");
fs::create_dir_all(&merge_dir).unwrap();
let story_path = merge_dir.join("99_story_merge445.md");
fs::write(&story_path, "---\nname: Merge 445 Test\n---\n").unwrap();
let pool = AgentPool::new_test(3001);
pool.inject_test_agent_with_path(
"99_story_merge445",
"mergemaster",
AgentStatus::Running,
root.to_path_buf(),
);
run_server_owned_completion(
&pool.agents,
pool.port,
"99_story_merge445",
"mergemaster",
None,
pool.watcher_tx.clone(),
)
.await;
// Wait briefly in case any background task fires.
tokio::time::sleep(std::time::Duration::from_millis(150)).await;
// Story must remain in 4_merge/ — not moved to 5_done/.
let done_path = root.join(".storkit/work/5_done/99_story_merge445.md");
assert!(
!done_path.exists(),
"Story must NOT be moved to 5_done/ when run_server_owned_completion \
is (incorrectly) called for a mergemaster agent"
);
assert!(
story_path.exists(),
"Story must remain in 4_merge/ when mergemaster completion is a no-op"
);
// The agent entry should remain in the pool (lifecycle cleanup is the
// caller's responsibility, not run_server_owned_completion's).
let agents = pool.agents.lock().unwrap();
let key = composite_key("99_story_merge445", "mergemaster");
assert!(
agents.get(&key).is_some(),
"Agent must remain in pool — run_server_owned_completion is a no-op for mergemaster"
);
}
} }
+54
View File
@@ -13,6 +13,7 @@ mod help;
pub(crate) mod loc; pub(crate) mod loc;
mod move_story; mod move_story;
mod overview; mod overview;
mod setup;
mod show; mod show;
mod status; mod status;
mod timer; mod timer;
@@ -177,9 +178,62 @@ pub fn commands() -> &'static [BotCommand] {
description: "Show stories merged to master since the last release tag", description: "Show stories merged to master since the last release tag",
handler: unreleased::handle_unreleased, handler: unreleased::handle_unreleased,
}, },
BotCommand {
name: "setup",
description: "Show setup wizard progress; or `setup generate` / `setup confirm` / `setup skip` / `setup retry` to drive the wizard from chat",
handler: setup::handle_setup,
},
] ]
} }
/// Like [`try_handle_command`] but returns `(plain_body, html_body)`.
///
/// The plain body is unchanged Markdown text suitable for the Matrix `body`
/// field (non-HTML clients); the HTML body is suitable for `formatted_body`.
///
/// Only the no-arg pipeline-status command gets bespoke HTML (Matrix
/// `<font data-mx-color>` tags on its traffic-light dots); every other
/// command renders its plain body through pulldown-cmark.
pub fn try_handle_command_with_html(
    dispatch: &CommandDispatch<'_>,
    message: &str,
) -> Option<(String, String)> {
    let command_text = strip_bot_mention(message, dispatch.bot_name, dispatch.bot_user_id);
    let trimmed = command_text.trim();

    // Split "name args…" into the command word and its trimmed argument tail.
    let (cmd_name, args) = trimmed
        .split_once(char::is_whitespace)
        .map_or((trimmed, ""), |(name, rest)| (name, rest.trim()));

    // Only the no-arg status variant shows the pipeline with traffic-light
    // dots; `status <number>` is a triage dump that needs no colour tags.
    if !trimmed.is_empty() && args.is_empty() && cmd_name.eq_ignore_ascii_case("status") {
        return Some((
            status::build_pipeline_status(dispatch.project_root, dispatch.agents),
            status::build_pipeline_status_html(dispatch.project_root, dispatch.agents),
        ));
    }

    // Generic path: plain text body → Markdown-to-HTML.
    let body = try_handle_command(dispatch, message)?;
    let html = plain_to_html(&body);
    Some((body, html))
}
/// Convert a Markdown string to HTML using the same options as the Matrix
/// transport's `markdown_to_html` helper.
fn plain_to_html(markdown: &str) -> String {
    use pulldown_cmark::{Options, Parser, html};
    // Option set must stay in sync with the Matrix transport helper.
    let opts = Options::ENABLE_TABLES
        | Options::ENABLE_FOOTNOTES
        | Options::ENABLE_STRIKETHROUGH
        | Options::ENABLE_TASKLISTS;
    let source = crate::chat::util::normalize_line_breaks(markdown);
    let mut rendered = String::new();
    html::push_html(&mut rendered, Parser::new_ext(&source, opts));
    rendered
}
/// Try to match a user message against a registered bot command. /// Try to match a user message against a registered bot command.
/// ///
/// The message is expected to be the raw body text (e.g., `"@timmy help"`). /// The message is expected to be the raw body text (e.g., `"@timmy help"`).
+1 -5
View File
@@ -142,11 +142,7 @@ mod tests {
try_handle_command(&dispatch, &format!("@timmy move {args}")) try_handle_command(&dispatch, &format!("@timmy move {args}"))
} }
fn write_story_file(root: &std::path::Path, stage: &str, filename: &str, content: &str) { use crate::chat::test_helpers::write_story_file;
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
#[test] #[test]
fn move_command_is_registered() { fn move_command_is_registered() {
+339
View File
@@ -0,0 +1,339 @@
//! Handler for the `setup` bot command.
//!
//! Drives the setup wizard from any chat transport (Matrix, Slack, WhatsApp).
//!
//! Usage:
//! - `setup` — show wizard progress and current step instructions
//! - `setup generate` — mark the current step as generating and show the generation hint
//! - `setup confirm` — confirm the current step (writes staged content to disk)
//! - `setup skip` — skip the current step
//! - `setup retry` — discard staged content and reset the current step
use super::CommandContext;
use crate::http::mcp::wizard_tools::{
generation_hint, is_script_step, step_output_path, write_if_missing,
};
use crate::io::wizard::{format_wizard_state, StepStatus, WizardState};
/// Entry point for the `setup` chat command. Dispatches on the (trimmed,
/// case-insensitive) first argument word to the wizard sub-command handlers.
pub(super) fn handle_setup(ctx: &CommandContext) -> Option<String> {
    let sub = ctx.args.trim().to_ascii_lowercase();
    let reply = match sub.as_str() {
        "" => wizard_status_reply(ctx),
        "generate" => wizard_generate_reply(ctx),
        "confirm" => wizard_confirm_reply(ctx),
        "skip" => wizard_skip_reply(ctx),
        "retry" => wizard_retry_reply(ctx),
        _ => format!(
            "Unknown sub-command `{sub}`. Usage: `setup`, `setup generate`, `setup confirm`, `setup skip`, `setup retry`."
        ),
    };
    Some(reply)
}
/// Mark the current step as generating and return the generation hint.
///
/// Chat-transport mirror of the MCP `wizard_generate` tool invoked with no
/// content (Matrix, Slack, WhatsApp): flips the current step to `Generating`,
/// persists the state, and tells the user what to produce next.
fn wizard_generate_reply(ctx: &CommandContext) -> String {
    let root = ctx.project_root;
    let Some(mut state) = WizardState::load(root) else {
        return "No wizard active.".to_string();
    };
    if state.completed {
        return "Wizard is already complete.".to_string();
    }
    let step = state.steps[state.current_step_index()].step;
    state.set_step_status(step, StepStatus::Generating, None);
    if let Err(e) = state.save(root) {
        return format!("Failed to save wizard state: {e}");
    }
    format!(
        "Step '{}' marked as generating.\n\n{}\n\nOnce you have the content, stage it via the API and then run `setup confirm` to write it to disk.",
        step.label(),
        generation_hint(step, root)
    )
}
/// Compose a status reply for the `setup` command (no args).
fn wizard_status_reply(ctx: &CommandContext) -> String {
match WizardState::load(ctx.project_root) {
Some(state) => format_wizard_state(&state),
None => {
"No setup wizard active. Run `storkit init` in the project root to begin.".to_string()
}
}
}
/// Confirm the current wizard step, writing any staged content to disk.
///
/// If the step has staged content and an output path, the file is written
/// only when absent (`write_if_missing`); then the step is confirmed and the
/// wizard advances. Fix: the "Next" hint now points chat users at the chat
/// sub-command `setup generate` instead of the MCP tool name `wizard_generate`
/// (consistent with the `setup confirm` wording used by the generate reply).
fn wizard_confirm_reply(ctx: &CommandContext) -> String {
    let root = ctx.project_root;
    let mut state = match WizardState::load(root) {
        Some(s) => s,
        None => return "No wizard active.".to_string(),
    };
    if state.completed {
        return "Wizard is already complete.".to_string();
    }
    let idx = state.current_step_index();
    let step = state.steps[idx].step;
    let content = state.steps[idx].content.clone();
    // Write content to disk (only if a file path exists and the file is absent).
    let write_msg =
        if let (Some(c), Some(ref path)) = (&content, step_output_path(root, step)) {
            let executable = is_script_step(step);
            match write_if_missing(path, c, executable) {
                Ok(true) => format!(" File written: `{}`.", path.display()),
                Ok(false) => format!(" File `{}` already exists — skipped.", path.display()),
                Err(e) => return format!("Error: {e}"),
            }
        } else {
            String::new()
        };
    if let Err(e) = state.confirm_step(step) {
        return format!("Cannot confirm step: {e}");
    }
    if let Err(e) = state.save(root) {
        return format!("Failed to save wizard state: {e}");
    }
    if state.completed {
        format!(
            "Step '{}' confirmed.{write_msg}\n\nSetup wizard complete!",
            step.label()
        )
    } else {
        let next = &state.steps[state.current_step_index()];
        // Chat users drive the wizard via the `setup generate` sub-command,
        // not the MCP `wizard_generate` tool.
        format!(
            "Step '{}' confirmed.{write_msg}\n\nNext: {} — run `setup generate` to begin.",
            step.label(),
            next.step.label()
        )
    }
}
/// Skip the current wizard step without writing any file.
///
/// Fix: the "Next" hint now points chat users at the chat sub-command
/// `setup generate` instead of the MCP tool name `wizard_generate`
/// (consistent with the `setup confirm` wording used by the generate reply).
fn wizard_skip_reply(ctx: &CommandContext) -> String {
    let root = ctx.project_root;
    let mut state = match WizardState::load(root) {
        Some(s) => s,
        None => return "No wizard active.".to_string(),
    };
    if state.completed {
        return "Wizard is already complete.".to_string();
    }
    let idx = state.current_step_index();
    let step = state.steps[idx].step;
    if let Err(e) = state.skip_step(step) {
        return format!("Cannot skip step: {e}");
    }
    if let Err(e) = state.save(root) {
        return format!("Failed to save wizard state: {e}");
    }
    if state.completed {
        format!(
            "Step '{}' skipped. Setup wizard complete!",
            step.label()
        )
    } else {
        let next = &state.steps[state.current_step_index()];
        // Chat users drive the wizard via the `setup generate` sub-command,
        // not the MCP `wizard_generate` tool.
        format!(
            "Step '{}' skipped.\n\nNext: {} — run `setup generate` to begin.",
            step.label(),
            next.step.label()
        )
    }
}
/// Discard staged content and reset the current step to pending.
///
/// Fix: the reply now points chat users at the chat sub-command
/// `setup generate` instead of the MCP tool name `wizard_generate`
/// (consistent with the `setup confirm` wording used by the generate reply).
fn wizard_retry_reply(ctx: &CommandContext) -> String {
    let root = ctx.project_root;
    let mut state = match WizardState::load(root) {
        Some(s) => s,
        None => return "No wizard active.".to_string(),
    };
    if state.completed {
        return "Wizard is already complete.".to_string();
    }
    let idx = state.current_step_index();
    let step = state.steps[idx].step;
    // Drop any staged content and put the step back to Pending.
    if let Some(s) = state.steps.iter_mut().find(|s| s.step == step) {
        s.status = StepStatus::Pending;
        s.content = None;
    }
    if let Err(e) = state.save(root) {
        return format!("Failed to save wizard state: {e}");
    }
    format!(
        "Step '{}' reset to pending. Run `setup generate` to regenerate content.",
        step.label()
    )
}
// ── tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
use super::*;
use crate::io::wizard::WizardState;
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use tempfile::TempDir;
/// Build a `CommandContext` for tests, pointing at a throwaway test room.
fn make_ctx<'a>(
    args: &'a str,
    project_root: &'a std::path::Path,
    agents: &'a Arc<crate::agents::AgentPool>,
    ambient_rooms: &'a Arc<Mutex<HashSet<String>>>,
) -> CommandContext<'a> {
    CommandContext {
        args,
        bot_name: "Bot",
        room_id: "!test:example.com",
        project_root,
        agents,
        ambient_rooms,
    }
}
#[test]
fn setup_no_wizard_returns_helpful_message() {
    // No wizard state on disk → the reply should point at `storkit init`.
    let tmp = TempDir::new().unwrap();
    let pool = Arc::new(crate::agents::AgentPool::new_test(4000));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("storkit init"));
}
#[test]
fn setup_with_wizard_shows_status() {
    // With an initialised wizard, bare `setup` shows the progress report.
    let tmp = TempDir::new().unwrap();
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    let pool = Arc::new(crate::agents::AgentPool::new_test(4001));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("Setup wizard"));
}
#[test]
fn setup_skip_advances_wizard() {
    let tmp = TempDir::new().unwrap();
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    let pool = Arc::new(crate::agents::AgentPool::new_test(4002));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("skip", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("skipped"));
    // Skipping must advance the wizard past the current step.
    let state = WizardState::load(tmp.path()).unwrap();
    assert_eq!(state.current_step_index(), 2);
}
#[test]
fn setup_confirm_advances_wizard() {
    let tmp = TempDir::new().unwrap();
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    let pool = Arc::new(crate::agents::AgentPool::new_test(4003));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("confirm", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("confirmed"));
    // Confirming must advance the wizard past the current step.
    let state = WizardState::load(tmp.path()).unwrap();
    assert_eq!(state.current_step_index(), 2);
}
/// `setup retry` throws away staged content and puts the current step
/// back to `Pending`.
#[test]
fn setup_retry_resets_step() {
    let tmp = TempDir::new().unwrap();
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    // Stage some content first so there is something to reset.
    // Scoped so the mutable state is dropped before we re-load below.
    {
        let mut staged = WizardState::load(tmp.path()).unwrap();
        staged.set_step_status(
            crate::io::wizard::WizardStep::Context,
            crate::io::wizard::StepStatus::AwaitingConfirmation,
            Some("content".to_string()),
        );
        staged.save(tmp.path()).unwrap();
    }
    let pool = Arc::new(crate::agents::AgentPool::new_test(4004));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("retry", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("reset"));
    let persisted = WizardState::load(tmp.path()).unwrap();
    assert_eq!(
        persisted.steps[1].status,
        crate::io::wizard::StepStatus::Pending
    );
}
/// An unrecognised sub-command produces an error plus a usage hint
/// rather than silently doing nothing.
#[test]
fn setup_unknown_sub_command_returns_usage() {
    let tmp = TempDir::new().unwrap();
    let pool = Arc::new(crate::agents::AgentPool::new_test(4005));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("foobar", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("Unknown sub-command"));
    assert!(reply.contains("Usage"));
}
/// `setup generate` flips the current step to `Generating` on disk and
/// tells the caller generation is underway.
#[test]
fn setup_generate_marks_generating_and_returns_hint() {
    let tmp = TempDir::new().unwrap();
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    let pool = Arc::new(crate::agents::AgentPool::new_test(4006));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("generate", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("generating"));
    let persisted = WizardState::load(tmp.path()).unwrap();
    assert_eq!(
        persisted.steps[1].status,
        crate::io::wizard::StepStatus::Generating
    );
}
/// On a bare project (only wizard scaffolding, no existing code),
/// `setup generate` should prompt an interview instead of generating.
#[test]
fn setup_generate_bare_project_asks_user() {
    let tmp = TempDir::new().unwrap();
    // Bare project — only scaffolding files exist under the root.
    std::fs::create_dir_all(tmp.path().join(".storkit")).unwrap();
    WizardState::init_if_missing(tmp.path());
    let pool = Arc::new(crate::agents::AgentPool::new_test(4007));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("generate", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("bare project"));
    assert!(reply.contains("Ask the user"));
}
/// `setup generate` without an active wizard reports the missing
/// wizard rather than creating state implicitly.
#[test]
fn setup_generate_no_wizard_returns_error() {
    let tmp = TempDir::new().unwrap();
    let pool = Arc::new(crate::agents::AgentPool::new_test(4008));
    let ambient = Arc::new(Mutex::new(HashSet::new()));
    let reply = handle_setup(&make_ctx("generate", tmp.path(), &pool, &ambient)).unwrap();
    assert!(reply.contains("No wizard active"));
}
}
+1 -5
View File
@@ -91,11 +91,7 @@ mod tests {
try_handle_command(&dispatch, &format!("@timmy show {args}")) try_handle_command(&dispatch, &format!("@timmy show {args}"))
} }
fn write_story_file(root: &std::path::Path, stage: &str, filename: &str, content: &str) { use crate::chat::test_helpers::write_story_file;
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
#[test] #[test]
fn show_command_is_registered() { fn show_command_is_registered() {
+103
View File
@@ -122,6 +122,34 @@ fn read_stage_items(
items items
} }
/// Build the HTML `formatted_body` for the pipeline status with Matrix colour
/// tags on the traffic-light dots.
///
/// Converts the plain-text pipeline status (Markdown) to HTML via
/// pulldown-cmark and wraps each traffic-light character in a
/// `<font data-mx-color="#rrggbb">` tag so Matrix clients display them in
/// colour.
pub(super) fn build_pipeline_status_html(project_root: &std::path::Path, agents: &AgentPool) -> String {
use pulldown_cmark::{Options, Parser, html};
let plain = build_pipeline_status(project_root, agents);
let normalized = crate::chat::util::normalize_line_breaks(&plain);
let options = Options::ENABLE_TABLES
| Options::ENABLE_FOOTNOTES
| Options::ENABLE_STRIKETHROUGH
| Options::ENABLE_TASKLISTS;
let parser = Parser::new_ext(&normalized, options);
let mut html_out = String::new();
html::push_html(&mut html_out, parser);
// Wrap each traffic-light character with a Matrix colour tag.
html_out
.replace('\u{2717}', "<font data-mx-color=\"#cc0000\">\u{2717}</font>") // ✗ blocked
.replace('\u{25D1}', "<font data-mx-color=\"#ffaa00\">\u{25D1}</font>") // ◑ throttled
.replace('\u{25CF}', "<font data-mx-color=\"#00cc00\">\u{25CF}</font>") // ● running
.replace('\u{25CB}', "<font data-mx-color=\"#888888\">\u{25CB}</font>") // ○ idle
}
/// Build the full pipeline status text formatted for Matrix (markdown). /// Build the full pipeline status text formatted for Matrix (markdown).
pub(super) fn build_pipeline_status(project_root: &std::path::Path, agents: &AgentPool) -> String { pub(super) fn build_pipeline_status(project_root: &std::path::Path, agents: &AgentPool) -> String {
// Build a map from story_id → active AgentInfo for quick lookup. // Build a map from story_id → active AgentInfo for quick lookup.
@@ -444,6 +472,81 @@ mod tests {
// -- traffic_light_dot -------------------------------------------------- // -- traffic_light_dot --------------------------------------------------
// -- build_pipeline_status_html (colored dots) --------------------------
#[test]
fn html_status_colors_idle_dot_grey() {
use std::io::Write;
use tempfile::TempDir;
let tmp = TempDir::new().unwrap();
let stage_dir = tmp.path().join(".storkit/work/2_current");
std::fs::create_dir_all(&stage_dir).unwrap();
let story_path = stage_dir.join("42_story_idle.md");
let mut f = std::fs::File::create(&story_path).unwrap();
writeln!(f, "---\nname: Idle Story\n---\n").unwrap();
let agents = AgentPool::new_test(3000);
let html = build_pipeline_status_html(tmp.path(), &agents);
assert!(
html.contains("<font data-mx-color=\"#888888\">\u{25CB}</font>"),
"idle dot should be grey (#888888): {html}"
);
}
#[test]
fn html_status_colors_blocked_dot_red() {
use std::io::Write;
use tempfile::TempDir;
let tmp = TempDir::new().unwrap();
let stage_dir = tmp.path().join(".storkit/work/2_current");
std::fs::create_dir_all(&stage_dir).unwrap();
let story_path = stage_dir.join("42_story_blocked.md");
let mut f = std::fs::File::create(&story_path).unwrap();
writeln!(f, "---\nname: Blocked Story\nblocked: true\n---\n").unwrap();
let agents = AgentPool::new_test(3000);
let html = build_pipeline_status_html(tmp.path(), &agents);
assert!(
html.contains("<font data-mx-color=\"#cc0000\">\u{2717}</font>"),
"blocked dot should be red (#cc0000): {html}"
);
}
#[test]
fn html_status_plain_text_body_unchanged() {
use std::io::Write;
use tempfile::TempDir;
let tmp = TempDir::new().unwrap();
let stage_dir = tmp.path().join(".storkit/work/2_current");
std::fs::create_dir_all(&stage_dir).unwrap();
let story_path = stage_dir.join("42_story_idle.md");
let mut f = std::fs::File::create(&story_path).unwrap();
writeln!(f, "---\nname: Idle Story\n---\n").unwrap();
let agents = AgentPool::new_test(3000);
let plain = build_pipeline_status(tmp.path(), &agents);
// Plain text must still use bare Unicode dots (no HTML tags).
assert!(
plain.contains('\u{25CB}'),
"plain text should have bare Unicode idle dot: {plain}"
);
assert!(
!plain.contains("data-mx-color"),
"plain text must not contain HTML colour attributes: {plain}"
);
}
// -- traffic_light_dot --------------------------------------------------
#[test] #[test]
fn dot_idle_when_no_agent() { fn dot_idle_when_no_agent() {
assert_eq!(traffic_light_dot(false, false, false), "\u{25CB} "); // ○ assert_eq!(traffic_light_dot(false, false, false), "\u{25CB} "); // ○
+1 -5
View File
@@ -296,11 +296,7 @@ mod tests {
try_handle_command(&dispatch, &format!("@timmy status {args}")) try_handle_command(&dispatch, &format!("@timmy status {args}"))
} }
fn write_story_file(root: &Path, stage: &str, filename: &str, content: &str) { use crate::chat::test_helpers::write_story_file;
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
// -- registration ------------------------------------------------------- // -- registration -------------------------------------------------------
+16 -9
View File
@@ -98,17 +98,25 @@ pub(crate) fn unblock_by_path(path: &Path, story_id: &str) -> String {
let story_name = meta.name.as_deref().unwrap_or(story_id).to_string(); let story_name = meta.name.as_deref().unwrap_or(story_id).to_string();
if meta.blocked != Some(true) { let has_blocked = meta.blocked == Some(true);
let has_merge_failure = meta.merge_failure.is_some();
if !has_blocked && !has_merge_failure {
return format!( return format!(
"**{story_name}** ({story_id}) is not blocked. Nothing to unblock." "**{story_name}** ({story_id}) is not blocked. Nothing to unblock."
); );
} }
// Clear the blocked flag (reads + writes the file). // Clear the blocked flag if present.
if let Err(e) = clear_front_matter_field(path, "blocked") { if has_blocked && let Err(e) = clear_front_matter_field(path, "blocked") {
return format!("Failed to clear blocked flag on **{story_id}**: {e}"); return format!("Failed to clear blocked flag on **{story_id}**: {e}");
} }
// Clear merge_failure if present.
if has_merge_failure && let Err(e) = clear_front_matter_field(path, "merge_failure") {
return format!("Failed to clear merge_failure on **{story_id}**: {e}");
}
// Reset retry_count to 0 (re-read the updated file, modify, write). // Reset retry_count to 0 (re-read the updated file, modify, write).
let updated_contents = match std::fs::read_to_string(path) { let updated_contents = match std::fs::read_to_string(path) {
Ok(c) => c, Ok(c) => c,
@@ -119,7 +127,10 @@ pub(crate) fn unblock_by_path(path: &Path, story_id: &str) -> String {
return format!("Failed to reset retry_count on **{story_id}**: {e}"); return format!("Failed to reset retry_count on **{story_id}**: {e}");
} }
format!("Unblocked **{story_name}** ({story_id}). Retry count reset to 0.") let mut cleared = Vec::new();
if has_blocked { cleared.push("blocked"); }
if has_merge_failure { cleared.push("merge_failure"); }
format!("Unblocked **{story_name}** ({story_id}). Cleared: {}. Retry count reset to 0.", cleared.join(", "))
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -149,11 +160,7 @@ mod tests {
try_handle_command(&dispatch, &format!("@timmy unblock {args}")) try_handle_command(&dispatch, &format!("@timmy unblock {args}"))
} }
fn write_story_file(root: &std::path::Path, stage: &str, filename: &str, content: &str) { use crate::chat::test_helpers::write_story_file;
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
#[test] #[test]
fn unblock_command_is_registered() { fn unblock_command_is_registered() {
+2
View File
@@ -8,6 +8,8 @@ pub mod commands;
pub mod timer; pub mod timer;
pub mod transport; pub mod transport;
pub mod util; pub mod util;
#[cfg(test)]
pub(crate) mod test_helpers;
use async_trait::async_trait; use async_trait::async_trait;
+15
View File
@@ -0,0 +1,15 @@
//! Shared test utilities for chat handler tests.
//!
//! Import with `use crate::chat::test_helpers::write_story_file;`
use std::path::Path;
/// Write a work-item file into the standard pipeline directory structure.
///
/// Creates `.storkit/work/{stage}/{filename}` under `root`, creating any
/// missing parent directories.
pub(crate) fn write_story_file(root: &Path, stage: &str, filename: &str, content: &str) {
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
+3 -32
View File
@@ -9,6 +9,8 @@ use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use crate::chat::util::strip_bot_mention;
// ── Data types ───────────────────────────────────────────────────────────── // ── Data types ─────────────────────────────────────────────────────────────
/// A single scheduled timer entry. /// A single scheduled timer entry.
@@ -256,7 +258,7 @@ pub fn extract_timer_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<TimerCommand> { ) -> Option<TimerCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -466,37 +468,6 @@ fn resolve_story_id(number_or_id: &str, project_root: &Path) -> Option<String> {
None None
} }
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// ── Tests ────────────────────────────────────────────────────────────────── // ── Tests ──────────────────────────────────────────────────────────────────
#[cfg(test)] #[cfg(test)]
+15 -40
View File
@@ -9,6 +9,7 @@
//! that the next `start` invocation picks it up automatically. //! that the next `start` invocation picks it up automatically.
use crate::agents::{AgentPool, AgentStatus}; use crate::agents::{AgentPool, AgentStatus};
use crate::chat::util::strip_bot_mention;
use crate::io::story_metadata::{parse_front_matter, set_front_matter_field}; use crate::io::story_metadata::{parse_front_matter, set_front_matter_field};
use std::path::Path; use std::path::Path;
@@ -43,7 +44,7 @@ pub fn extract_assign_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<AssignCommand> { ) -> Option<AssignCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -234,40 +235,6 @@ pub async fn handle_assign(
} }
} }
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` and `start::strip_mention`.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -354,6 +321,18 @@ mod tests {
assert_eq!(cmd, None); assert_eq!(cmd, None);
} }
#[test]
fn extract_assign_command_multibyte_prefix_no_panic() {
// "xxxx⏺ assign 42 opus" — ⏺ (U+23FA) is 3 bytes, starting at byte 4.
// "@timmy" has len 6 so text[..6] lands inside ⏺ — panics without the fix.
let cmd = extract_assign_command(
"xxxx\u{23FA} assign 42 opus",
"Timmy",
"@timmy:home.local",
);
assert_eq!(cmd, None);
}
// -- resolve_agent_name -------------------------------------------------- // -- resolve_agent_name --------------------------------------------------
#[test] #[test]
@@ -371,11 +350,7 @@ mod tests {
// -- handle_assign (no running coder) ------------------------------------ // -- handle_assign (no running coder) ------------------------------------
fn write_story_file(root: &Path, stage: &str, filename: &str, content: &str) { use crate::chat::test_helpers::write_story_file;
let dir = root.join(".storkit/work").join(stage);
std::fs::create_dir_all(&dir).unwrap();
std::fs::write(dir.join(filename), content).unwrap();
}
#[tokio::test] #[tokio::test]
async fn handle_assign_returns_not_found_for_unknown_number() { async fn handle_assign_returns_not_found_for_unknown_number() {
@@ -1,4 +1,4 @@
use crate::chat::util::drain_complete_paragraphs; use crate::chat::util::{drain_complete_paragraphs, is_permission_approval};
use crate::http::context::PermissionDecision; use crate::http::context::PermissionDecision;
use crate::llm::providers::claude_code::{ClaudeCodeProvider, ClaudeCodeResult}; use crate::llm::providers::claude_code::{ClaudeCodeProvider, ClaudeCodeResult};
use crate::slog; use crate::slog;
@@ -22,24 +22,6 @@ use super::history::{ConversationEntry, ConversationRole, save_history};
use super::mentions::{is_reply_to_bot, mentions_bot}; use super::mentions::{is_reply_to_bot, mentions_bot};
use super::verification::check_sender_verified; use super::verification::check_sender_verified;
/// Returns `true` if the message body is an affirmative permission response.
///
/// Recognised affirmative tokens (case-insensitive): `yes`, `y`, `approve`,
/// `allow`, `ok`. Anything else — including ambiguous text — is treated as
/// denial (fail-closed).
pub(super) fn is_permission_approval(body: &str) -> bool {
// Strip a leading @mention (e.g. "@timmy yes") so the bot name doesn't
// interfere with the check.
let trimmed = body
.trim()
.trim_start_matches('@')
.split_whitespace()
.last()
.unwrap_or("")
.to_ascii_lowercase();
matches!(trimmed.as_str(), "yes" | "y" | "approve" | "allow" | "ok")
}
/// Build the user-facing prompt for a single turn. In multi-user rooms the /// Build the user-facing prompt for a single turn. In multi-user rooms the
/// sender is included so the LLM can distinguish participants. /// sender is included so the LLM can distinguish participants.
pub(super) fn format_user_prompt(sender: &str, message: &str) -> String { pub(super) fn format_user_prompt(sender: &str, message: &str) -> String {
@@ -186,10 +168,9 @@ pub(super) async fn on_room_message(
ambient_rooms: &ctx.ambient_rooms, ambient_rooms: &ctx.ambient_rooms,
room_id: &room_id_str, room_id: &room_id_str,
}; };
if let Some(response) = super::super::commands::try_handle_command(&dispatch, &user_message) { if let Some((response, response_html)) = super::super::commands::try_handle_command_with_html(&dispatch, &user_message) {
slog!("[matrix-bot] Handled bot command from {sender}"); slog!("[matrix-bot] Handled bot command from {sender}");
let html = markdown_to_html(&response); if let Ok(msg_id) = ctx.transport.send_message(&room_id_str, &response, &response_html).await
if let Ok(msg_id) = ctx.transport.send_message(&room_id_str, &response, &html).await
&& let Ok(event_id) = msg_id.parse() && let Ok(event_id) = msg_id.parse()
{ {
ctx.bot_sent_event_ids.lock().await.insert(event_id); ctx.bot_sent_event_ids.lock().await.insert(event_id);
@@ -705,45 +686,6 @@ mod tests {
assert_eq!(prompt, "@bob:example.com: What's up?"); assert_eq!(prompt, "@bob:example.com: What's up?");
} }
// -- is_permission_approval -----------------------------------------------
#[test]
fn is_permission_approval_accepts_yes_variants() {
assert!(is_permission_approval("yes"));
assert!(is_permission_approval("Yes"));
assert!(is_permission_approval("YES"));
assert!(is_permission_approval("y"));
assert!(is_permission_approval("Y"));
assert!(is_permission_approval("approve"));
assert!(is_permission_approval("allow"));
assert!(is_permission_approval("ok"));
assert!(is_permission_approval("OK"));
}
#[test]
fn is_permission_approval_denies_no_and_other() {
assert!(!is_permission_approval("no"));
assert!(!is_permission_approval("No"));
assert!(!is_permission_approval("n"));
assert!(!is_permission_approval("deny"));
assert!(!is_permission_approval("reject"));
assert!(!is_permission_approval("maybe"));
assert!(!is_permission_approval(""));
assert!(!is_permission_approval("yes please do it"));
}
#[test]
fn is_permission_approval_strips_at_mention_prefix() {
assert!(is_permission_approval("@timmy yes"));
assert!(!is_permission_approval("@timmy no"));
}
#[test]
fn is_permission_approval_handles_whitespace() {
assert!(is_permission_approval(" yes "));
assert!(is_permission_approval("\tyes\n"));
}
// -- bot_name / system prompt ------------------------------------------- // -- bot_name / system prompt -------------------------------------------
#[test] #[test]
+2 -36
View File
@@ -5,6 +5,7 @@
//! commits the change to git. //! commits the change to git.
use crate::agents::{AgentPool, AgentStatus}; use crate::agents::{AgentPool, AgentStatus};
use crate::chat::util::strip_bot_mention;
use std::path::Path; use std::path::Path;
/// A parsed delete command from a Matrix message body. /// A parsed delete command from a Matrix message body.
@@ -25,7 +26,7 @@ pub fn extract_delete_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<DeleteCommand> { ) -> Option<DeleteCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -185,41 +186,6 @@ fn stage_display_name(stage: &str) -> &str {
} }
} }
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` and `htop::strip_mention`
/// so delete detection works without depending on private symbols.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+2 -37
View File
@@ -13,6 +13,7 @@ use std::time::Duration;
use tokio::sync::{Mutex as TokioMutex, watch}; use tokio::sync::{Mutex as TokioMutex, watch};
use crate::agents::{AgentPool, AgentStatus}; use crate::agents::{AgentPool, AgentStatus};
use crate::chat::util::strip_bot_mention;
use crate::slog; use crate::slog;
use crate::chat::ChatTransport; use crate::chat::ChatTransport;
@@ -51,7 +52,7 @@ pub type HtopSessions = Arc<TokioMutex<HashMap<String, HtopSession>>>;
/// - `htop 10m` → `Start { duration_secs: 600 }` /// - `htop 10m` → `Start { duration_secs: 600 }`
/// - `htop 120` → `Start { duration_secs: 120 }` (bare seconds) /// - `htop 120` → `Start { duration_secs: 120 }` (bare seconds)
pub fn extract_htop_command(message: &str, bot_name: &str, bot_user_id: &str) -> Option<HtopCommand> { pub fn extract_htop_command(message: &str, bot_name: &str, bot_user_id: &str) -> Option<HtopCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped.trim(); let trimmed = stripped.trim();
// Strip leading punctuation (e.g. the comma in "@timmy, htop") // Strip leading punctuation (e.g. the comma in "@timmy, htop")
@@ -88,42 +89,6 @@ fn parse_duration(s: &str) -> Option<u64> {
s.parse::<u64>().ok() s.parse::<u64>().ok()
} }
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` so htop detection works
/// without depending on private symbols in that module.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// System stats // System stats
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+2 -33
View File
@@ -6,6 +6,7 @@
//! running. //! running.
use crate::agents::AgentPool; use crate::agents::AgentPool;
use crate::chat::util::strip_bot_mention;
use std::path::Path; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
@@ -22,7 +23,7 @@ pub fn extract_rebuild_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<RebuildCommand> { ) -> Option<RebuildCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -56,38 +57,6 @@ pub async fn handle_rebuild(
} }
} }
/// Strip the bot mention prefix from a raw Matrix message body.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+2 -33
View File
@@ -6,6 +6,7 @@
//! affected — only the in-memory/persisted conversation state is cleared. //! affected — only the in-memory/persisted conversation state is cleared.
use crate::chat::transport::matrix::bot::{ConversationHistory, RoomConversation}; use crate::chat::transport::matrix::bot::{ConversationHistory, RoomConversation};
use crate::chat::util::strip_bot_mention;
use matrix_sdk::ruma::OwnedRoomId; use matrix_sdk::ruma::OwnedRoomId;
use std::path::Path; use std::path::Path;
@@ -22,7 +23,7 @@ pub fn extract_reset_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<ResetCommand> { ) -> Option<ResetCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -58,38 +59,6 @@ pub async fn handle_reset(
"Session reset. Starting fresh — previous context has been cleared.".to_string() "Session reset. Starting fresh — previous context has been cleared.".to_string()
} }
/// Strip the bot mention prefix from a raw Matrix message body.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
if text.len() < prefix.len() {
return None;
}
if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
return None;
}
let rest = &text[prefix.len()..];
match rest.chars().next() {
None => Some(rest),
Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
_ => Some(rest),
}
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+2 -33
View File
@@ -5,6 +5,7 @@
//! The story file in the pipeline is left untouched. //! The story file in the pipeline is left untouched.
use crate::agents::{AgentPool, AgentStatus}; use crate::agents::{AgentPool, AgentStatus};
use crate::chat::util::strip_bot_mention;
use std::path::Path; use std::path::Path;
/// A parsed rmtree command from a Matrix message body. /// A parsed rmtree command from a Matrix message body.
@@ -25,7 +26,7 @@ pub fn extract_rmtree_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<RmtreeCommand> { ) -> Option<RmtreeCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -118,38 +119,6 @@ pub async fn handle_rmtree(
response response
} }
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Tries, in order: the full user id (`@bot:server`), its localpart
/// (`@bot`), then the plain bot name. Returns the remainder after the first
/// match, or the trimmed message unchanged when nothing matches.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
    let trimmed = message.trim();
    if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
        return rest;
    }
    // `split(':').next()` yields the localpart of a Matrix id, e.g. "@bot"
    // for "@bot:example.org" (always Some, even for an empty string).
    if let Some(localpart) = bot_user_id.split(':').next() {
        if let Some(rest) = strip_prefix_ci(trimmed, localpart) {
            return rest;
        }
    }
    if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
        return rest;
    }
    trimmed
}

/// Case-insensitive prefix strip that also requires the match to end at a
/// word boundary (whitespace, punctuation, or end-of-string).
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
    if text.len() < prefix.len() {
        return None;
    }
    // Fix: slicing at a byte index inside a multi-byte UTF-8 character would
    // panic; treat such a prefix as a non-match instead.
    if !text.is_char_boundary(prefix.len()) {
        return None;
    }
    if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
        return None;
    }
    let rest = &text[prefix.len()..];
    match rest.chars().next() {
        None => Some(rest),
        // A word character after the prefix means it matched mid-token.
        Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
        _ => Some(rest),
    }
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+2 -35
View File
@@ -7,6 +7,7 @@
//! name ends with the supplied hint, e.g. `coder-{hint}`). //! name ends with the supplied hint, e.g. `coder-{hint}`).
use crate::agents::AgentPool; use crate::agents::AgentPool;
use crate::chat::util::strip_bot_mention;
use std::path::Path; use std::path::Path;
/// A parsed start command from a Matrix message body. /// A parsed start command from a Matrix message body.
@@ -31,7 +32,7 @@ pub fn extract_start_command(
bot_name: &str, bot_name: &str,
bot_user_id: &str, bot_user_id: &str,
) -> Option<StartCommand> { ) -> Option<StartCommand> {
let stripped = strip_mention(message, bot_name, bot_user_id); let stripped = strip_bot_mention(message, bot_name, bot_user_id);
let trimmed = stripped let trimmed = stripped
.trim() .trim()
.trim_start_matches(|c: char| !c.is_alphanumeric()); .trim_start_matches(|c: char| !c.is_alphanumeric());
@@ -177,40 +178,6 @@ pub async fn handle_start(
} }
} }
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` and `delete::strip_mention`.
///
/// Tries, in order: the full user id, its localpart, then the bot name;
/// returns the remainder after the first match or the trimmed message.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
    let trimmed = message.trim();
    if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
        return rest;
    }
    // Localpart of the Matrix id, e.g. "@bot" for "@bot:example.org".
    if let Some(localpart) = bot_user_id.split(':').next() {
        if let Some(rest) = strip_prefix_ci(trimmed, localpart) {
            return rest;
        }
    }
    if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
        return rest;
    }
    trimmed
}

/// Case-insensitive prefix strip requiring the match to end at a word
/// boundary, so a bot name never matches inside a longer word.
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
    if text.len() < prefix.len() {
        return None;
    }
    // Fix: avoid the panic from slicing `text` at a non-char-boundary when
    // the prefix length lands inside a multi-byte UTF-8 character.
    if !text.is_char_boundary(prefix.len()) {
        return None;
    }
    if !text[..prefix.len()].eq_ignore_ascii_case(prefix) {
        return None;
    }
    let rest = &text[prefix.len()..];
    match rest.chars().next() {
        None => Some(rest),
        Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
        _ => Some(rest),
    }
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Tests // Tests
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
+1 -11
View File
@@ -9,6 +9,7 @@ use serde::{Deserialize, Serialize};
use crate::agents::AgentPool; use crate::agents::AgentPool;
use crate::chat::transport::matrix::{ConversationEntry, ConversationRole, RoomConversation}; use crate::chat::transport::matrix::{ConversationEntry, ConversationRole, RoomConversation};
use crate::chat::util::is_permission_approval;
use crate::slog; use crate::slog;
use crate::chat::ChatTransport; use crate::chat::ChatTransport;
use crate::http::context::{PermissionDecision, PermissionForward}; use crate::http::context::{PermissionDecision, PermissionForward};
@@ -86,17 +87,6 @@ pub struct SlackWebhookContext {
pub permission_timeout_secs: u64, pub permission_timeout_secs: u64,
} }
// ── Permission approval detection ──────────────────────────────────────

/// Returns `true` if the message body should be interpreted as permission approval.
fn is_permission_approval(body: &str) -> bool {
    // The fixed set of affirmative tokens; anything else is a denial.
    const APPROVALS: [&str; 5] = ["yes", "y", "approve", "allow", "ok"];
    let normalized = body.trim().to_ascii_lowercase();
    APPROVALS.contains(&normalized.as_str())
}
// ── Incoming message dispatch ─────────────────────────────────────────── // ── Incoming message dispatch ───────────────────────────────────────────
pub(super) async fn handle_incoming_message( pub(super) async fn handle_incoming_message(
@@ -1,21 +1,13 @@
use std::sync::Arc; use std::sync::Arc;
use crate::chat::transport::matrix::{ConversationEntry, ConversationRole, RoomConversation}; use crate::chat::transport::matrix::{ConversationEntry, ConversationRole, RoomConversation};
use crate::chat::util::is_permission_approval;
use crate::http::context::{PermissionDecision}; use crate::http::context::{PermissionDecision};
use crate::slog; use crate::slog;
use super::WhatsAppWebhookContext; use super::WhatsAppWebhookContext;
use super::format::{chunk_for_whatsapp, markdown_to_whatsapp}; use super::format::{chunk_for_whatsapp, markdown_to_whatsapp};
use super::history::save_whatsapp_history; use super::history::save_whatsapp_history;
/// Returns `true` if the message body should be interpreted as permission approval.
fn is_permission_approval(body: &str) -> bool {
    // Compare the trimmed body case-insensitively against each affirmative
    // token; anything else counts as a denial.
    let candidate = body.trim();
    ["yes", "y", "approve", "allow", "ok"]
        .iter()
        .any(|token| candidate.eq_ignore_ascii_case(token))
}
/// Dispatch an incoming WhatsApp message to bot commands. /// Dispatch an incoming WhatsApp message to bot commands.
pub(super) async fn handle_incoming_message(ctx: &WhatsAppWebhookContext, sender: &str, message: &str) { pub(super) async fn handle_incoming_message(ctx: &WhatsAppWebhookContext, sender: &str, message: &str) {
use crate::chat::commands::{CommandDispatch, try_handle_command}; use crate::chat::commands::{CommandDispatch, try_handle_command};
+60
View File
@@ -3,6 +3,27 @@
//! These functions are transport-agnostic helpers for processing chat messages: //! These functions are transport-agnostic helpers for processing chat messages:
//! prefix stripping, bot-mention handling, and paragraph buffering. //! prefix stripping, bot-mention handling, and paragraph buffering.
/// Returns `true` if the message body is an affirmative permission response.
///
/// Recognised affirmative tokens (case-insensitive): `yes`, `y`, `approve`,
/// `allow`, `ok`. Anything else — including ambiguous text — is treated as
/// denial (fail-closed).
///
/// A leading `@mention` (e.g. `"@timmy yes"`) is stripped before checking, so
/// the bot name does not interfere with the result.
pub fn is_permission_approval(body: &str) -> bool {
    let mut tokens = body.split_whitespace();
    let Some(first) = tokens.next() else {
        // Empty / whitespace-only body is never approval.
        return false;
    };
    // Skip a single leading @mention so "@timmy yes" reads as "yes".
    let word = if first.starts_with('@') {
        match tokens.next() {
            Some(w) => w,
            // A bare mention carries no decision.
            None => return false,
        }
    } else {
        first
    };
    // Fail closed: any trailing text ("yes please do it", "please say yes")
    // is ambiguous and must be treated as denial. Checking only the last
    // token would wrongly approve any sentence that happens to END with an
    // affirmative word.
    if tokens.next().is_some() {
        return false;
    }
    matches!(
        word.to_ascii_lowercase().as_str(),
        "yes" | "y" | "approve" | "allow" | "ok"
    )
}
/// Case-insensitive prefix strip that also requires the match to end at a /// Case-insensitive prefix strip that also requires the match to end at a
/// word boundary (whitespace, punctuation, or end-of-string). /// word boundary (whitespace, punctuation, or end-of-string).
pub fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> { pub fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
@@ -190,6 +211,45 @@ pub fn normalize_line_breaks(text: &str) -> String {
mod tests { mod tests {
use super::*; use super::*;
// -- is_permission_approval ---------------------------------------------

#[test]
fn is_permission_approval_accepts_yes_variants() {
    // All recognised affirmative tokens, in assorted casings.
    let affirmative = ["yes", "Yes", "YES", "y", "Y", "approve", "allow", "ok", "OK"];
    for body in affirmative {
        assert!(is_permission_approval(body), "expected approval for {body:?}");
    }
}

#[test]
fn is_permission_approval_denies_no_and_other() {
    // Negative, ambiguous, and empty inputs must all be denied.
    let negative = ["no", "No", "n", "deny", "reject", "maybe", "", "yes please do it"];
    for body in negative {
        assert!(!is_permission_approval(body), "expected denial for {body:?}");
    }
}

#[test]
fn is_permission_approval_strips_at_mention_prefix() {
    assert!(is_permission_approval("@timmy yes"));
    assert!(!is_permission_approval("@timmy no"));
}

#[test]
fn is_permission_approval_handles_whitespace() {
    assert!(is_permission_approval(" yes "));
    assert!(is_permission_approval("\tyes\n"));
}
// -- strip_prefix_ci ---------------------------------------------------- // -- strip_prefix_ci ----------------------------------------------------
#[test] #[test]
+16 -18
View File
@@ -64,6 +64,13 @@ impl AnthropicApi {
} }
} }
#[cfg(test)]
impl From<Arc<AppContext>> for AnthropicApi {
fn from(ctx: Arc<AppContext>) -> Self {
Self::new(ctx)
}
}
#[OpenApi(tag = "AnthropicTags::Anthropic")] #[OpenApi(tag = "AnthropicTags::Anthropic")]
impl AnthropicApi { impl AnthropicApi {
/// Check whether an Anthropic API key is stored. /// Check whether an Anthropic API key is stored.
@@ -151,25 +158,16 @@ impl AnthropicApi {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::{make_api, test_ctx};
use serde_json::json; use serde_json::json;
use std::sync::Arc;
use tempfile::TempDir; use tempfile::TempDir;
fn test_ctx(dir: &TempDir) -> AppContext {
AppContext::new_test(dir.path().to_path_buf())
}
fn make_api(dir: &TempDir) -> AnthropicApi {
AnthropicApi::new(Arc::new(test_ctx(dir)))
}
// -- get_anthropic_api_key (private helper) -- // -- get_anthropic_api_key (private helper) --
#[test] #[test]
fn get_api_key_returns_err_when_not_set() { fn get_api_key_returns_err_when_not_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
let result = get_anthropic_api_key(&ctx); let result = get_anthropic_api_key(&ctx);
assert!(result.is_err()); assert!(result.is_err());
assert!(result.unwrap_err().contains("not found")); assert!(result.unwrap_err().contains("not found"));
@@ -178,7 +176,7 @@ mod tests {
#[test] #[test]
fn get_api_key_returns_err_when_empty() { fn get_api_key_returns_err_when_empty() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(KEY_ANTHROPIC_API_KEY, json!("")); ctx.store.set(KEY_ANTHROPIC_API_KEY, json!(""));
let result = get_anthropic_api_key(&ctx); let result = get_anthropic_api_key(&ctx);
assert!(result.is_err()); assert!(result.is_err());
@@ -188,7 +186,7 @@ mod tests {
#[test] #[test]
fn get_api_key_returns_err_when_not_string() { fn get_api_key_returns_err_when_not_string() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(KEY_ANTHROPIC_API_KEY, json!(12345)); ctx.store.set(KEY_ANTHROPIC_API_KEY, json!(12345));
let result = get_anthropic_api_key(&ctx); let result = get_anthropic_api_key(&ctx);
assert!(result.is_err()); assert!(result.is_err());
@@ -198,7 +196,7 @@ mod tests {
#[test] #[test]
fn get_api_key_returns_key_when_set() { fn get_api_key_returns_key_when_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(KEY_ANTHROPIC_API_KEY, json!("sk-ant-test123")); ctx.store.set(KEY_ANTHROPIC_API_KEY, json!("sk-ant-test123"));
let result = get_anthropic_api_key(&ctx); let result = get_anthropic_api_key(&ctx);
assert_eq!(result.unwrap(), "sk-ant-test123"); assert_eq!(result.unwrap(), "sk-ant-test123");
@@ -209,7 +207,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn key_exists_returns_false_when_not_set() { async fn key_exists_returns_false_when_not_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<AnthropicApi>(&dir);
let result = api.get_anthropic_api_key_exists().await.unwrap(); let result = api.get_anthropic_api_key_exists().await.unwrap();
assert!(!result.0); assert!(!result.0);
} }
@@ -229,7 +227,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_api_key_returns_true() { async fn set_api_key_returns_true() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<AnthropicApi>(&dir);
let payload = Json(ApiKeyPayload { let payload = Json(ApiKeyPayload {
api_key: "sk-ant-test123".to_string(), api_key: "sk-ant-test123".to_string(),
}); });
@@ -256,7 +254,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn list_models_fails_when_no_key() { async fn list_models_fails_when_no_key() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<AnthropicApi>(&dir);
let result = api.list_anthropic_models().await; let result = api.list_anthropic_models().await;
assert!(result.is_err()); assert!(result.is_err());
} }
@@ -288,7 +286,7 @@ mod tests {
#[test] #[test]
fn new_creates_api_instance() { fn new_creates_api_instance() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let _api = make_api(&dir); let _api = make_api::<AnthropicApi>(&dir);
} }
#[test] #[test]
+25 -24
View File
@@ -138,18 +138,19 @@ impl IoApi {
} }
} }
#[cfg(test)]
impl From<std::sync::Arc<AppContext>> for IoApi {
fn from(ctx: std::sync::Arc<AppContext>) -> Self {
Self { ctx }
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::make_api;
use tempfile::TempDir; use tempfile::TempDir;
fn make_api(dir: &TempDir) -> IoApi {
IoApi {
ctx: Arc::new(AppContext::new_test(dir.path().to_path_buf())),
}
}
// --- list_directory_absolute --- // --- list_directory_absolute ---
#[tokio::test] #[tokio::test]
@@ -158,7 +159,7 @@ mod tests {
std::fs::create_dir(dir.path().join("subdir")).unwrap(); std::fs::create_dir(dir.path().join("subdir")).unwrap();
std::fs::write(dir.path().join("file.txt"), "content").unwrap(); std::fs::write(dir.path().join("file.txt"), "content").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: dir.path().to_string_lossy().to_string(), path: dir.path().to_string_lossy().to_string(),
}); });
@@ -176,7 +177,7 @@ mod tests {
let empty = dir.path().join("empty"); let empty = dir.path().join("empty");
std::fs::create_dir(&empty).unwrap(); std::fs::create_dir(&empty).unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: empty.to_string_lossy().to_string(), path: empty.to_string_lossy().to_string(),
}); });
@@ -187,7 +188,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn list_directory_absolute_errors_on_nonexistent_path() { async fn list_directory_absolute_errors_on_nonexistent_path() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: dir.path().join("nonexistent").to_string_lossy().to_string(), path: dir.path().join("nonexistent").to_string_lossy().to_string(),
}); });
@@ -201,7 +202,7 @@ mod tests {
let file = dir.path().join("not_a_dir.txt"); let file = dir.path().join("not_a_dir.txt");
std::fs::write(&file, "content").unwrap(); std::fs::write(&file, "content").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: file.to_string_lossy().to_string(), path: file.to_string_lossy().to_string(),
}); });
@@ -216,7 +217,7 @@ mod tests {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let new_dir = dir.path().join("new_dir"); let new_dir = dir.path().join("new_dir");
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(CreateDirectoryPayload { let payload = Json(CreateDirectoryPayload {
path: new_dir.to_string_lossy().to_string(), path: new_dir.to_string_lossy().to_string(),
}); });
@@ -231,7 +232,7 @@ mod tests {
let existing = dir.path().join("existing"); let existing = dir.path().join("existing");
std::fs::create_dir(&existing).unwrap(); std::fs::create_dir(&existing).unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(CreateDirectoryPayload { let payload = Json(CreateDirectoryPayload {
path: existing.to_string_lossy().to_string(), path: existing.to_string_lossy().to_string(),
}); });
@@ -244,7 +245,7 @@ mod tests {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let nested = dir.path().join("a").join("b").join("c"); let nested = dir.path().join("a").join("b").join("c");
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(CreateDirectoryPayload { let payload = Json(CreateDirectoryPayload {
path: nested.to_string_lossy().to_string(), path: nested.to_string_lossy().to_string(),
}); });
@@ -258,7 +259,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_home_directory_returns_a_path() { async fn get_home_directory_returns_a_path() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let result = api.get_home_directory().await.unwrap(); let result = api.get_home_directory().await.unwrap();
let home = &result.0; let home = &result.0;
assert!(!home.is_empty()); assert!(!home.is_empty());
@@ -272,7 +273,7 @@ mod tests {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
std::fs::write(dir.path().join("hello.txt"), "hello world").unwrap(); std::fs::write(dir.path().join("hello.txt"), "hello world").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: "hello.txt".to_string(), path: "hello.txt".to_string(),
}); });
@@ -283,7 +284,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn read_file_errors_on_missing_file() { async fn read_file_errors_on_missing_file() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: "nonexistent.txt".to_string(), path: "nonexistent.txt".to_string(),
}); });
@@ -296,7 +297,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn write_file_creates_file() { async fn write_file_creates_file() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(WriteFilePayload { let payload = Json(WriteFilePayload {
path: "output.txt".to_string(), path: "output.txt".to_string(),
content: "written content".to_string(), content: "written content".to_string(),
@@ -312,7 +313,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn write_file_creates_parent_dirs() { async fn write_file_creates_parent_dirs() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(WriteFilePayload { let payload = Json(WriteFilePayload {
path: "sub/dir/file.txt".to_string(), path: "sub/dir/file.txt".to_string(),
content: "nested".to_string(), content: "nested".to_string(),
@@ -334,7 +335,7 @@ mod tests {
std::fs::write(dir.path().join("src/main.rs"), "fn main() {}").unwrap(); std::fs::write(dir.path().join("src/main.rs"), "fn main() {}").unwrap();
std::fs::write(dir.path().join("README.md"), "# readme").unwrap(); std::fs::write(dir.path().join("README.md"), "# readme").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let result = api.list_project_files().await.unwrap(); let result = api.list_project_files().await.unwrap();
let files = &result.0; let files = &result.0;
@@ -348,7 +349,7 @@ mod tests {
std::fs::create_dir(dir.path().join("subdir")).unwrap(); std::fs::create_dir(dir.path().join("subdir")).unwrap();
std::fs::write(dir.path().join("file.txt"), "").unwrap(); std::fs::write(dir.path().join("file.txt"), "").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let result = api.list_project_files().await.unwrap(); let result = api.list_project_files().await.unwrap();
let files = &result.0; let files = &result.0;
@@ -363,7 +364,7 @@ mod tests {
std::fs::write(dir.path().join("z_last.txt"), "").unwrap(); std::fs::write(dir.path().join("z_last.txt"), "").unwrap();
std::fs::write(dir.path().join("a_first.txt"), "").unwrap(); std::fs::write(dir.path().join("a_first.txt"), "").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let result = api.list_project_files().await.unwrap(); let result = api.list_project_files().await.unwrap();
let files = &result.0; let files = &result.0;
@@ -380,7 +381,7 @@ mod tests {
std::fs::create_dir(dir.path().join("adir")).unwrap(); std::fs::create_dir(dir.path().join("adir")).unwrap();
std::fs::write(dir.path().join("bfile.txt"), "").unwrap(); std::fs::write(dir.path().join("bfile.txt"), "").unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: ".".to_string(), path: ".".to_string(),
}); });
@@ -394,7 +395,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn list_directory_errors_on_nonexistent() { async fn list_directory_errors_on_nonexistent() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<IoApi>(&dir);
let payload = Json(FilePathPayload { let payload = Json(FilePathPayload {
path: "nonexistent_dir".to_string(), path: "nonexistent_dir".to_string(),
}); });
+1 -5
View File
@@ -370,13 +370,9 @@ pub(super) async fn get_worktree_commits(worktree_path: &str, base_branch: &str)
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
use crate::store::StoreOps; use crate::store::StoreOps;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
#[test] #[test]
fn tool_list_agents_empty() { fn tool_list_agents_empty() {
let tmp = tempfile::tempdir().unwrap(); let tmp = tempfile::tempdir().unwrap();
+1 -5
View File
@@ -279,11 +279,7 @@ pub(super) fn tool_loc_file(args: &Value, ctx: &AppContext) -> Result<String, St
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
#[test] #[test]
fn tool_get_server_logs_no_args_returns_string() { fn tool_get_server_logs_no_args_returns_string() {
+1 -4
View File
@@ -304,12 +304,9 @@ pub(super) async fn tool_git_log(args: &Value, ctx: &AppContext) -> Result<Strin
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::context::AppContext;
use crate::http::test_helpers::test_ctx;
use serde_json::json; use serde_json::json;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
/// Create a temp directory with a git worktree structure and init a repo. /// Create a temp directory with a git worktree structure and init a repo.
fn setup_worktree() -> (tempfile::TempDir, PathBuf, AppContext) { fn setup_worktree() -> (tempfile::TempDir, PathBuf, AppContext) {
let tmp = tempfile::tempdir().unwrap(); let tmp = tempfile::tempdir().unwrap();
+1 -5
View File
@@ -164,11 +164,7 @@ pub(super) fn tool_report_merge_failure(args: &Value, ctx: &AppContext) -> Resul
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
fn setup_git_repo_in(dir: &std::path::Path) { fn setup_git_repo_in(dir: &std::path::Path) {
std::process::Command::new("git") std::process::Command::new("git")
+55 -7
View File
@@ -14,8 +14,9 @@ pub mod git_tools;
pub mod merge_tools; pub mod merge_tools;
pub mod qa_tools; pub mod qa_tools;
pub mod shell_tools; pub mod shell_tools;
pub mod story_tools;
pub mod status_tools; pub mod status_tools;
pub mod story_tools;
pub mod wizard_tools;
/// Returns true when the Accept header includes text/event-stream. /// Returns true when the Accept header includes text/event-stream.
fn wants_sse(req: &Request) -> bool { fn wants_sse(req: &Request) -> bool {
@@ -1164,6 +1165,51 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
}, },
"required": ["file_path"] "required": ["file_path"]
} }
},
{
"name": "wizard_status",
"description": "Return the current setup wizard state: which step is active, and which are done/skipped/pending. Use this to inspect progress before calling wizard_generate, wizard_confirm, wizard_skip, or wizard_retry.",
"inputSchema": {
"type": "object",
"properties": {}
}
},
{
"name": "wizard_generate",
"description": "Drive content generation for the current wizard step. Call with no arguments to mark the step as 'generating' and receive a hint about what to produce. Call again with a 'content' argument (the full file body you generated) to stage it for review. Content is NOT written to disk until wizard_confirm is called.",
"inputSchema": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The generated file content to stage for the current step. Omit to receive a generation hint and mark the step as generating."
}
}
}
},
{
"name": "wizard_confirm",
"description": "Confirm the current wizard step: writes any staged content to disk (only if the target file does not already exist) and advances to the next step. Existing files are never overwritten.",
"inputSchema": {
"type": "object",
"properties": {}
}
},
{
"name": "wizard_skip",
"description": "Skip the current wizard step without writing any file. Use when a step does not apply to this project.",
"inputSchema": {
"type": "object",
"properties": {}
}
},
{
"name": "wizard_retry",
"description": "Discard any staged content for the current wizard step and reset it to pending so it can be regenerated. Use when the generated content needs improvement.",
"inputSchema": {
"type": "object",
"properties": {}
}
} }
] ]
}), }),
@@ -1258,6 +1304,12 @@ async fn handle_tools_call(
"status" => status_tools::tool_status(&args, ctx).await, "status" => status_tools::tool_status(&args, ctx).await,
// File line count // File line count
"loc_file" => diagnostics::tool_loc_file(&args, ctx), "loc_file" => diagnostics::tool_loc_file(&args, ctx),
// Setup wizard tools
"wizard_status" => wizard_tools::tool_wizard_status(ctx),
"wizard_generate" => wizard_tools::tool_wizard_generate(&args, ctx),
"wizard_confirm" => wizard_tools::tool_wizard_confirm(ctx),
"wizard_skip" => wizard_tools::tool_wizard_skip(ctx),
"wizard_retry" => wizard_tools::tool_wizard_retry(ctx),
_ => Err(format!("Unknown tool: {tool_name}")), _ => Err(format!("Unknown tool: {tool_name}")),
}; };
@@ -1284,11 +1336,7 @@ async fn handle_tools_call(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
#[test] #[test]
fn json_rpc_response_serializes_success() { fn json_rpc_response_serializes_success() {
@@ -1376,7 +1424,7 @@ mod tests {
assert!(names.contains(&"git_log")); assert!(names.contains(&"git_log"));
assert!(names.contains(&"status")); assert!(names.contains(&"status"));
assert!(names.contains(&"loc_file")); assert!(names.contains(&"loc_file"));
assert_eq!(tools.len(), 51); assert_eq!(tools.len(), 56);
} }
#[test] #[test]
+1 -5
View File
@@ -194,11 +194,7 @@ pub(super) fn find_free_port(start: u16) -> u16 {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
#[test] #[test]
fn request_qa_in_tools_list() { fn request_qa_in_tools_list() {
+1 -5
View File
@@ -331,13 +331,9 @@ pub(super) fn handle_run_command_sse(
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
use serde_json::json; use serde_json::json;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
// ── is_dangerous ───────────────────────────────────────────────── // ── is_dangerous ─────────────────────────────────────────────────
#[test] #[test]
+1 -5
View File
@@ -549,11 +549,7 @@ pub(super) fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResu
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::test_ctx;
fn test_ctx(dir: &std::path::Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
#[test] #[test]
fn parse_test_cases_empty() { fn parse_test_cases_empty() {
+639
View File
@@ -0,0 +1,639 @@
//! MCP tool implementations for the interactive setup wizard.
//!
//! These tools allow Claude Code (and other MCP clients) to drive the setup
//! wizard entirely from the terminal without requiring the web UI or chat bot.
//!
//! Typical flow:
//! 1. `wizard_status` — inspect current state
//! 2. `wizard_generate` — read the codebase and call again with `content` to
//! stage generated text for review
//! 3. `wizard_confirm` — write staged content to disk and advance the wizard
//! 4. `wizard_skip` — skip a step that does not apply
//! 5. `wizard_retry` — discard staged content and regenerate from scratch
use crate::http::context::AppContext;
use crate::io::wizard::{StepStatus, WizardState, WizardStep, format_wizard_state};
use serde_json::Value;
use std::fs;
use std::path::Path;
// ── helpers ───────────────────────────────────────────────────────────────────
/// Return the filesystem path (relative to `project_root`) for a step's output.
///
/// Returns `None` for `Scaffold` since that step has no single output file — it
/// creates the full `.storkit/` directory structure and is handled by
/// `storkit init` before the server starts.
pub(crate) fn step_output_path(project_root: &Path, step: WizardStep) -> Option<std::path::PathBuf> {
    // Common bases for the two families of output files.
    let specs = project_root.join(".storkit").join("specs");
    let scripts = project_root.join("script");
    match step {
        WizardStep::Context => Some(specs.join("00_CONTEXT.md")),
        WizardStep::Stack => Some(specs.join("tech").join("STACK.md")),
        WizardStep::TestScript => Some(scripts.join("test")),
        WizardStep::ReleaseScript => Some(scripts.join("release")),
        WizardStep::TestCoverage => Some(scripts.join("test_coverage")),
        WizardStep::Scaffold => None,
    }
}
/// True for the wizard steps whose output is an executable script file
/// (these get `0o755` permissions after being written).
pub(crate) fn is_script_step(step: WizardStep) -> bool {
    // Exhaustive match so adding a new variant forces a decision here.
    match step {
        WizardStep::TestScript | WizardStep::ReleaseScript | WizardStep::TestCoverage => true,
        WizardStep::Context | WizardStep::Stack | WizardStep::Scaffold => false,
    }
}
/// Write `content` to `path` only when the file does not already exist.
///
/// Existing files (including `CLAUDE.md`) are never overwritten — the wizard
/// appends or skips per the acceptance criteria. For script steps the file is
/// also made executable after writing.
///
/// Returns `Ok(true)` when the file was written, `Ok(false)` when it already
/// existed, and `Err` with a human-readable message on any I/O failure.
pub(crate) fn write_if_missing(path: &Path, content: &str, executable: bool) -> Result<bool, String> {
    use std::io::Write;

    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .map_err(|e| format!("Failed to create directory {}: {e}", parent.display()))?;
    }
    // `create_new` fails atomically when the file already exists, closing the
    // TOCTOU window a separate `path.exists()` check would leave open.
    let mut file = match fs::OpenOptions::new().write(true).create_new(true).open(path) {
        Ok(f) => f,
        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => return Ok(false),
        Err(e) => return Err(format!("Failed to write {}: {e}", path.display())),
    };
    file.write_all(content.as_bytes())
        .map_err(|e| format!("Failed to write {}: {e}", path.display()))?;
    if executable {
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            fs::set_permissions(path, fs::Permissions::from_mode(0o755))
                .map_err(|e| format!("Failed to set permissions: {e}"))?;
        }
    }
    Ok(true)
}
/// Serialise a `WizardStep` to its snake_case string (e.g. `"test_script"`).
fn step_slug(step: WizardStep) -> String {
serde_json::to_value(step)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default()
}
// ── MCP tool handlers ─────────────────────────────────────────────────────────
/// `wizard_status` — return current wizard state as a human-readable summary.
pub(super) fn tool_wizard_status(ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    match WizardState::load(&root) {
        Some(state) => Ok(format_wizard_state(&state)),
        None => Err("No wizard active. Run `storkit init` to begin setup.".to_string()),
    }
}
/// `wizard_generate` — mark the current step as generating or stage content.
///
/// Call with no `content` argument to mark the step as `Generating` and
/// receive a hint describing what to generate. Call again with a `content`
/// argument (the generated file body) to stage it for review; the step will
/// transition to `AwaitingConfirmation`. Content is **not** written to disk
/// until `wizard_confirm` is called.
pub(super) fn tool_wizard_generate(args: &Value, ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
// If content is provided, stage it for confirmation.
if let Some(content) = args.get("content").and_then(|v| v.as_str()) {
state.set_step_status(
step,
StepStatus::AwaitingConfirmation,
Some(content.to_string()),
);
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
return Ok(format!(
"Content staged for '{}'. Run `wizard_confirm` to write it to disk, `wizard_retry` to regenerate, or `wizard_skip` to skip.",
step.label()
));
}
// No content provided — mark as generating and return a hint.
state.set_step_status(step, StepStatus::Generating, None);
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
let hint = generation_hint(step, &root);
let slug = step_slug(step);
Ok(format!(
"Step '{}' marked as generating.\n\n{hint}\n\nOnce you have the content, call `wizard_generate` again with a `content` argument (or PUT /wizard/step/{slug}/content). Then call `wizard_confirm` to write it to disk.",
step.label(),
))
}
/// Return true if the project directory has no meaningful source files.
///
/// An unreadable directory is treated as bare.
pub(crate) fn is_bare_project(project_root: &Path) -> bool {
    let Ok(entries) = std::fs::read_dir(project_root) else {
        return true;
    };
    // A bare project only has storkit scaffolding and no real code.
    entries.filter_map(|entry| entry.ok()).all(|entry| {
        let name = entry.file_name();
        let name = name.to_string_lossy();
        name.starts_with('.')
            || matches!(
                name.as_ref(),
                "CLAUDE.md" | "LICENSE" | "README.md" | "script" | "store.json"
            )
    })
}
/// Return a generation hint for a step based on the project root.
///
/// Bare projects (no existing code) get interview-style hints that tell the
/// agent to ask the user; existing projects get hints that tell the agent to
/// read the source tree (with detected test commands where possible).
pub(crate) fn generation_hint(step: WizardStep, project_root: &Path) -> String {
    let bare = is_bare_project(project_root);
    match step {
        WizardStep::Context => {
            if bare {
                "This is a bare project with no existing code. Ask the user what they want \
                 to build: the project's purpose, goals, target users, and key features. \
                 Then generate `.storkit/specs/00_CONTEXT.md` from their answers covering:\n\
                 - High-level goal of the project\n\
                 - Core features\n\
                 - Domain concepts and entities\n\
                 - Glossary of abbreviations and technical terms".to_string()
            } else {
                "Read the project source tree and generate a `.storkit/specs/00_CONTEXT.md` describing:\n\
                 - High-level goal of the project\n\
                 - Core features\n\
                 - Domain concepts and entities\n\
                 - Glossary of abbreviations and technical terms".to_string()
            }
        }
        WizardStep::Stack => {
            if bare {
                "This is a bare project with no existing code. Ask the user what language, \
                 frameworks, and tools they plan to use. Then generate `.storkit/specs/tech/STACK.md` \
                 from their answers covering:\n\
                 - Language, frameworks, and runtimes\n\
                 - Coding standards and linting rules\n\
                 - Quality gates (commands that must pass before merging)\n\
                 - Approved libraries and their purpose".to_string()
            } else {
                "Read the project source tree and generate a `.storkit/specs/tech/STACK.md` describing:\n\
                 - Language, frameworks, and runtimes\n\
                 - Coding standards and linting rules\n\
                 - Quality gates (commands that must pass before merging)\n\
                 - Approved libraries and their purpose".to_string()
            }
        }
        WizardStep::TestScript => {
            if bare {
                "This is a bare project with no existing code. Read the STACK.md generated \
                 in the previous step (or ask the user about their stack if it was skipped) \
                 and generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) \
                 with appropriate test commands for their chosen language and framework."
                    .to_string()
            } else {
                // Detect the project's toolchain so the hint names concrete commands.
                let has_cargo = project_root.join("Cargo.toml").exists();
                let has_pkg = project_root.join("package.json").exists();
                let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
                let mut cmds = Vec::new();
                if has_cargo {
                    cmds.push("cargo nextest run");
                }
                if has_pkg {
                    cmds.push(if has_pnpm { "pnpm test" } else { "npm test" });
                }
                if cmds.is_empty() {
                    "Generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs the project's test suite.".to_string()
                } else {
                    format!(
                        "Generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
                        cmds.join(", ")
                    )
                }
            }
        }
        WizardStep::ReleaseScript => {
            if bare {
                "This is a bare project with no existing code. Read the STACK.md generated \
                 in the previous step (or ask the user about their stack if it was skipped) \
                 and generate a `script/release` shell script (#!/usr/bin/env bash, set -euo pipefail) \
                 with appropriate build/release commands for their chosen language and framework."
                    .to_string()
            } else {
                "Generate a `script/release` shell script (#!/usr/bin/env bash, set -euo pipefail) that builds and releases the project (e.g. `cargo build --release` or `npm run build`).".to_string()
            }
        }
        WizardStep::TestCoverage => {
            if bare {
                "This is a bare project with no existing code. Read the STACK.md generated \
                 in the previous step (or ask the user about their stack if it was skipped) \
                 and generate a `script/test_coverage` shell script (#!/usr/bin/env bash, set -euo pipefail) \
                 with appropriate test coverage commands for their chosen language and framework."
                    .to_string()
            } else {
                "Generate a `script/test_coverage` shell script (#!/usr/bin/env bash, set -euo pipefail) that generates a test coverage report (e.g. `cargo llvm-cov nextest` or `npm run coverage`).".to_string()
            }
        }
        WizardStep::Scaffold => "Scaffold step is handled automatically by `storkit init`.".to_string(),
    }
}
/// `wizard_confirm` — confirm the current step and write its content to disk.
///
/// If the step has staged content, the content is written to its target file
/// (only if that file does not already exist — existing files are never
/// overwritten). The step is then marked as `Confirmed` and the wizard
/// advances to the next pending step.
pub(super) fn tool_wizard_confirm(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
let content = state.steps[current_idx].content.clone();
// Write content to disk (only if a file path exists and the file is absent).
let write_msg = if let (Some(c), Some(ref path)) = (&content, step_output_path(&root, step)) {
let executable = is_script_step(step);
match write_if_missing(path, c, executable)? {
true => format!(" File written: `{}`.", path.display()),
false => format!(" File `{}` already exists — skipped.", path.display()),
}
} else {
String::new()
};
state
.confirm_step(step)
.map_err(|e| format!("Cannot confirm step: {e}"))?;
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
let next_idx = state.current_step_index();
if state.completed {
Ok(format!(
"Step '{}' confirmed.{write_msg}\n\nSetup wizard complete! All steps done.",
step.label()
))
} else {
let next = &state.steps[next_idx];
Ok(format!(
"Step '{}' confirmed.{write_msg}\n\nNext: {} — run `wizard_generate` to begin.",
step.label(),
next.step.label()
))
}
}
/// `wizard_skip` — skip the current step without writing any file.
pub(super) fn tool_wizard_skip(ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
    if state.completed {
        return Ok("Wizard is already complete.".to_string());
    }
    let step = state.steps[state.current_step_index()].step;
    state
        .skip_step(step)
        .map_err(|e| format!("Cannot skip step: {e}"))?;
    state
        .save(&root)
        .map_err(|e| format!("Failed to save wizard state: {e}"))?;
    if state.completed {
        return Ok(format!(
            "Step '{}' skipped. Setup wizard complete!",
            step.label()
        ));
    }
    let next = &state.steps[state.current_step_index()];
    Ok(format!(
        "Step '{}' skipped.\n\nNext: {} — run `wizard_generate` to begin.",
        step.label(),
        next.step.label()
    ))
}
/// `wizard_retry` — discard staged content and reset the current step to
/// `Pending` so it can be regenerated.
pub(super) fn tool_wizard_retry(ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
    if state.completed {
        return Ok("Wizard is already complete.".to_string());
    }
    let step = state.steps[state.current_step_index()].step;
    // Drop any staged content and put the step back to square one.
    if let Some(i) = state.steps.iter().position(|s| s.step == step) {
        state.steps[i].status = StepStatus::Pending;
        state.steps[i].content = None;
    }
    state
        .save(&root)
        .map_err(|e| format!("Failed to save wizard state: {e}"))?;
    Ok(format!(
        "Step '{}' reset to pending. Run `wizard_generate` to regenerate content.",
        step.label()
    ))
}
// ── tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use crate::http::context::AppContext;
    use tempfile::TempDir;

    /// Create an AppContext rooted in `dir` with a freshly-initialised wizard.
    fn setup(dir: &TempDir) -> AppContext {
        let root = dir.path().to_path_buf();
        std::fs::create_dir_all(root.join(".storkit")).unwrap();
        WizardState::init_if_missing(&root);
        AppContext::new_test(root)
    }

    #[test]
    fn wizard_status_returns_state() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        let result = tool_wizard_status(&ctx).unwrap();
        assert!(result.contains("Setup wizard"));
        assert!(result.contains("context"));
    }

    #[test]
    fn wizard_status_no_wizard_returns_error() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let ctx = AppContext::new_test(dir.path().to_path_buf());
        assert!(tool_wizard_status(&ctx).is_err());
    }

    #[test]
    fn wizard_generate_marks_generating() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        let result = tool_wizard_generate(&serde_json::json!({}), &ctx).unwrap();
        assert!(result.contains("generating"));
        let state = WizardState::load(dir.path()).unwrap();
        assert_eq!(state.steps[1].status, StepStatus::Generating);
    }

    #[test]
    fn wizard_generate_with_content_stages_content() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        let result = tool_wizard_generate(
            &serde_json::json!({"content": "# My Project"}),
            &ctx,
        )
        .unwrap();
        assert!(result.contains("staged"));
        let state = WizardState::load(dir.path()).unwrap();
        assert_eq!(state.steps[1].status, StepStatus::AwaitingConfirmation);
        assert_eq!(state.steps[1].content.as_deref(), Some("# My Project"));
    }

    #[test]
    fn wizard_confirm_writes_file_and_advances() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        // Stage content for Context step.
        tool_wizard_generate(
            &serde_json::json!({"content": "# Context content"}),
            &ctx,
        )
        .unwrap();
        let result = tool_wizard_confirm(&ctx).unwrap();
        assert!(result.contains("confirmed"));
        // File should now exist.
        let context_path = dir
            .path()
            .join(".storkit")
            .join("specs")
            .join("00_CONTEXT.md");
        assert!(context_path.exists());
        assert_eq!(
            std::fs::read_to_string(&context_path).unwrap(),
            "# Context content"
        );
        // Wizard should have advanced.
        let state = WizardState::load(dir.path()).unwrap();
        assert_eq!(state.steps[1].status, StepStatus::Confirmed);
        assert_eq!(state.current_step_index(), 2);
    }

    #[test]
    fn wizard_confirm_does_not_overwrite_existing_file() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        // Pre-create the specs directory and file.
        let specs_dir = dir.path().join(".storkit").join("specs");
        std::fs::create_dir_all(&specs_dir).unwrap();
        let context_path = specs_dir.join("00_CONTEXT.md");
        std::fs::write(&context_path, "original content").unwrap();
        // Stage and confirm — existing file should NOT be overwritten.
        tool_wizard_generate(
            &serde_json::json!({"content": "new content"}),
            &ctx,
        )
        .unwrap();
        let result = tool_wizard_confirm(&ctx).unwrap();
        assert!(result.contains("already exists"));
        assert_eq!(
            std::fs::read_to_string(&context_path).unwrap(),
            "original content"
        );
    }

    #[test]
    fn wizard_skip_advances_wizard() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        let result = tool_wizard_skip(&ctx).unwrap();
        assert!(result.contains("skipped"));
        let state = WizardState::load(dir.path()).unwrap();
        assert_eq!(state.steps[1].status, StepStatus::Skipped);
        assert_eq!(state.current_step_index(), 2);
    }

    #[test]
    fn wizard_retry_resets_to_pending() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        // Stage content first.
        tool_wizard_generate(
            &serde_json::json!({"content": "some content"}),
            &ctx,
        )
        .unwrap();
        let result = tool_wizard_retry(&ctx).unwrap();
        assert!(result.contains("reset"));
        let state = WizardState::load(dir.path()).unwrap();
        assert_eq!(state.steps[1].status, StepStatus::Pending);
        assert!(state.steps[1].content.is_none());
    }

    #[test]
    fn wizard_complete_returns_done_message() {
        let dir = TempDir::new().unwrap();
        let ctx = setup(&dir);
        // Skip all remaining steps.
        for _ in 0..5 {
            tool_wizard_skip(&ctx).unwrap();
        }
        let result = tool_wizard_status(&ctx).unwrap();
        assert!(result.contains("complete"));
    }

    #[test]
    fn format_wizard_state_shows_all_steps() {
        let mut state = WizardState::default();
        state.steps[0].status = StepStatus::Confirmed;
        let output = format_wizard_state(&state);
        // The original asserted `output.contains("")`, which is vacuously true
        // for any string (a non-ASCII glyph was likely lost in an encoding
        // pass); assert something meaningful instead.
        assert!(!output.is_empty());
        assert!(output.contains("Scaffold"));
        assert!(output.contains("← current"));
    }

    #[test]
    fn is_bare_project_detects_empty_dir() {
        let dir = TempDir::new().unwrap();
        assert!(is_bare_project(dir.path()));
    }

    #[test]
    fn is_bare_project_detects_scaffold_only_dir() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        std::fs::write(dir.path().join("CLAUDE.md"), "# Claude").unwrap();
        std::fs::write(dir.path().join("README.md"), "# Readme").unwrap();
        std::fs::create_dir_all(dir.path().join("script")).unwrap();
        assert!(is_bare_project(dir.path()));
    }

    #[test]
    fn is_bare_project_false_when_source_files_exist() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        std::fs::write(dir.path().join("Cargo.toml"), "[package]").unwrap();
        assert!(!is_bare_project(dir.path()));
    }

    #[test]
    fn is_bare_project_false_with_src_directory() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join("src")).unwrap();
        assert!(!is_bare_project(dir.path()));
    }

    #[test]
    fn generation_hint_bare_context_asks_user() {
        let dir = TempDir::new().unwrap();
        // Bare project — only scaffolding
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let hint = generation_hint(WizardStep::Context, dir.path());
        assert!(hint.contains("bare project"));
        assert!(hint.contains("Ask the user"));
    }

    #[test]
    fn generation_hint_bare_stack_asks_user() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let hint = generation_hint(WizardStep::Stack, dir.path());
        assert!(hint.contains("bare project"));
        assert!(hint.contains("Ask the user"));
    }

    #[test]
    fn generation_hint_bare_test_script_references_stack() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let hint = generation_hint(WizardStep::TestScript, dir.path());
        assert!(hint.contains("bare project"));
        assert!(hint.contains("STACK.md"));
    }

    #[test]
    fn generation_hint_bare_release_script_references_stack() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let hint = generation_hint(WizardStep::ReleaseScript, dir.path());
        assert!(hint.contains("bare project"));
        assert!(hint.contains("STACK.md"));
    }

    #[test]
    fn generation_hint_bare_test_coverage_references_stack() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".storkit")).unwrap();
        let hint = generation_hint(WizardStep::TestCoverage, dir.path());
        assert!(hint.contains("bare project"));
        assert!(hint.contains("STACK.md"));
    }

    #[test]
    fn generation_hint_existing_project_reads_code() {
        let dir = TempDir::new().unwrap();
        std::fs::write(dir.path().join("Cargo.toml"), "[package]").unwrap();
        let hint = generation_hint(WizardStep::Context, dir.path());
        assert!(hint.contains("Read the project"));
        assert!(!hint.contains("bare project"));
    }

    #[test]
    fn generation_hint_existing_project_test_script_detects_cargo() {
        let dir = TempDir::new().unwrap();
        std::fs::write(dir.path().join("Cargo.toml"), "[package]").unwrap();
        let hint = generation_hint(WizardStep::TestScript, dir.path());
        assert!(hint.contains("cargo nextest"));
        assert!(!hint.contains("bare project"));
    }
}
+2
View File
@@ -1,6 +1,8 @@
pub mod agents; pub mod agents;
pub mod agents_sse; pub mod agents_sse;
pub mod anthropic; pub mod anthropic;
#[cfg(test)]
pub(crate) mod test_helpers;
pub mod assets; pub mod assets;
pub mod bot_command; pub mod bot_command;
pub mod chat; pub mod chat;
+13 -12
View File
@@ -50,22 +50,23 @@ impl ModelApi {
} }
} }
#[cfg(test)]
impl From<std::sync::Arc<AppContext>> for ModelApi {
fn from(ctx: std::sync::Arc<AppContext>) -> Self {
Self { ctx }
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::make_api;
use tempfile::TempDir; use tempfile::TempDir;
fn make_api(dir: &TempDir) -> ModelApi {
ModelApi {
ctx: Arc::new(AppContext::new_test(dir.path().to_path_buf())),
}
}
#[tokio::test] #[tokio::test]
async fn get_model_preference_returns_none_when_unset() { async fn get_model_preference_returns_none_when_unset() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ModelApi>(&dir);
let result = api.get_model_preference().await.unwrap(); let result = api.get_model_preference().await.unwrap();
assert!(result.0.is_none()); assert!(result.0.is_none());
} }
@@ -73,7 +74,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_model_preference_returns_true() { async fn set_model_preference_returns_true() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ModelApi>(&dir);
let payload = Json(ModelPayload { let payload = Json(ModelPayload {
model: "claude-3-sonnet".to_string(), model: "claude-3-sonnet".to_string(),
}); });
@@ -84,7 +85,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_model_preference_returns_value_after_set() { async fn get_model_preference_returns_value_after_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ModelApi>(&dir);
let payload = Json(ModelPayload { let payload = Json(ModelPayload {
model: "claude-3-sonnet".to_string(), model: "claude-3-sonnet".to_string(),
@@ -98,7 +99,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_model_preference_overwrites_previous_value() { async fn set_model_preference_overwrites_previous_value() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ModelApi>(&dir);
api.set_model_preference(Json(ModelPayload { api.set_model_preference(Json(ModelPayload {
model: "model-a".to_string(), model: "model-a".to_string(),
@@ -119,7 +120,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_ollama_models_returns_empty_list_for_unreachable_url() { async fn get_ollama_models_returns_empty_list_for_unreachable_url() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ModelApi>(&dir);
// Port 1 is reserved and should immediately refuse the connection. // Port 1 is reserved and should immediately refuse the connection.
let base_url = Query(Some("http://127.0.0.1:1".to_string())); let base_url = Query(Some("http://127.0.0.1:1".to_string()));
let result = api.get_ollama_models(base_url).await; let result = api.get_ollama_models(base_url).await;
+18 -17
View File
@@ -73,22 +73,23 @@ impl ProjectApi {
} }
} }
#[cfg(test)]
impl From<std::sync::Arc<AppContext>> for ProjectApi {
fn from(ctx: std::sync::Arc<AppContext>) -> Self {
Self { ctx }
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::make_api;
use tempfile::TempDir; use tempfile::TempDir;
fn make_api(dir: &TempDir) -> ProjectApi {
ProjectApi {
ctx: Arc::new(AppContext::new_test(dir.path().to_path_buf())),
}
}
#[tokio::test] #[tokio::test]
async fn get_current_project_returns_none_when_unset() { async fn get_current_project_returns_none_when_unset() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
// Clear the project root that new_test sets // Clear the project root that new_test sets
api.close_project().await.unwrap(); api.close_project().await.unwrap();
let result = api.get_current_project().await.unwrap(); let result = api.get_current_project().await.unwrap();
@@ -98,7 +99,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_current_project_returns_path_from_state() { async fn get_current_project_returns_path_from_state() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let result = api.get_current_project().await.unwrap(); let result = api.get_current_project().await.unwrap();
assert_eq!(result.0, Some(dir.path().to_string_lossy().to_string())); assert_eq!(result.0, Some(dir.path().to_string_lossy().to_string()));
} }
@@ -106,7 +107,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_project_succeeds_with_valid_directory() { async fn open_project_succeeds_with_valid_directory() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let path = dir.path().to_string_lossy().to_string(); let path = dir.path().to_string_lossy().to_string();
let payload = Json(PathPayload { path: path.clone() }); let payload = Json(PathPayload { path: path.clone() });
let result = api.open_project(payload).await.unwrap(); let result = api.open_project(payload).await.unwrap();
@@ -116,7 +117,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_project_fails_with_nonexistent_file_path() { async fn open_project_fails_with_nonexistent_file_path() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
// Create a file (not a directory) to trigger validation error // Create a file (not a directory) to trigger validation error
let file_path = dir.path().join("not_a_dir.txt"); let file_path = dir.path().join("not_a_dir.txt");
std::fs::write(&file_path, "content").unwrap(); std::fs::write(&file_path, "content").unwrap();
@@ -130,7 +131,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn close_project_returns_true() { async fn close_project_returns_true() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let result = api.close_project().await.unwrap(); let result = api.close_project().await.unwrap();
assert!(result.0); assert!(result.0);
} }
@@ -138,7 +139,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn close_project_clears_current_project() { async fn close_project_clears_current_project() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
// Verify project is set initially // Verify project is set initially
let before = api.get_current_project().await.unwrap(); let before = api.get_current_project().await.unwrap();
@@ -155,7 +156,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn list_known_projects_returns_empty_initially() { async fn list_known_projects_returns_empty_initially() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
// Close the project so the store has no known projects // Close the project so the store has no known projects
api.close_project().await.unwrap(); api.close_project().await.unwrap();
let result = api.list_known_projects().await.unwrap(); let result = api.list_known_projects().await.unwrap();
@@ -165,7 +166,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn list_known_projects_returns_project_after_open() { async fn list_known_projects_returns_project_after_open() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let path = dir.path().to_string_lossy().to_string(); let path = dir.path().to_string_lossy().to_string();
api.open_project(Json(PathPayload { path: path.clone() })) api.open_project(Json(PathPayload { path: path.clone() }))
@@ -179,7 +180,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn forget_known_project_removes_project() { async fn forget_known_project_removes_project() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let path = dir.path().to_string_lossy().to_string(); let path = dir.path().to_string_lossy().to_string();
api.open_project(Json(PathPayload { path: path.clone() })) api.open_project(Json(PathPayload { path: path.clone() }))
@@ -202,7 +203,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn forget_known_project_returns_true_for_nonexistent_path() { async fn forget_known_project_returns_true_for_nonexistent_path() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<ProjectApi>(&dir);
let result = api let result = api
.forget_known_project(Json(PathPayload { .forget_known_project(Json(PathPayload {
path: "/some/unknown/path".to_string(), path: "/some/unknown/path".to_string(),
+25 -29
View File
@@ -104,27 +104,23 @@ pub fn get_editor_command_from_store(ctx: &AppContext) -> Option<String> {
.and_then(|v| v.as_str().map(|s| s.to_string())) .and_then(|v| v.as_str().map(|s| s.to_string()))
} }
#[cfg(test)]
impl From<std::sync::Arc<AppContext>> for SettingsApi {
fn from(ctx: std::sync::Arc<AppContext>) -> Self {
Self { ctx }
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::http::context::AppContext; use crate::http::test_helpers::{make_api, test_ctx};
use std::sync::Arc;
use tempfile::TempDir; use tempfile::TempDir;
fn test_ctx(dir: &TempDir) -> AppContext {
AppContext::new_test(dir.path().to_path_buf())
}
fn make_api(dir: &TempDir) -> SettingsApi {
SettingsApi {
ctx: Arc::new(AppContext::new_test(dir.path().to_path_buf())),
}
}
#[tokio::test] #[tokio::test]
async fn get_editor_returns_none_when_unset() { async fn get_editor_returns_none_when_unset() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
let result = api.get_editor().await.unwrap(); let result = api.get_editor().await.unwrap();
assert!(result.0.editor_command.is_none()); assert!(result.0.editor_command.is_none());
} }
@@ -132,7 +128,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_stores_command() { async fn set_editor_stores_command() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
let payload = Json(EditorCommandPayload { let payload = Json(EditorCommandPayload {
editor_command: Some("zed".to_string()), editor_command: Some("zed".to_string()),
}); });
@@ -143,7 +139,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_clears_command_on_null() { async fn set_editor_clears_command_on_null() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
api.set_editor(Json(EditorCommandPayload { api.set_editor(Json(EditorCommandPayload {
editor_command: Some("zed".to_string()), editor_command: Some("zed".to_string()),
})) }))
@@ -161,7 +157,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_clears_command_on_empty_string() { async fn set_editor_clears_command_on_empty_string() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
let result = api let result = api
.set_editor(Json(EditorCommandPayload { .set_editor(Json(EditorCommandPayload {
editor_command: Some(String::new()), editor_command: Some(String::new()),
@@ -174,7 +170,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_trims_whitespace_only() { async fn set_editor_trims_whitespace_only() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
let result = api let result = api
.set_editor(Json(EditorCommandPayload { .set_editor(Json(EditorCommandPayload {
editor_command: Some(" ".to_string()), editor_command: Some(" ".to_string()),
@@ -187,7 +183,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_editor_returns_value_after_set() { async fn get_editor_returns_value_after_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
api.set_editor(Json(EditorCommandPayload { api.set_editor(Json(EditorCommandPayload {
editor_command: Some("cursor".to_string()), editor_command: Some("cursor".to_string()),
})) }))
@@ -200,7 +196,7 @@ mod tests {
#[test] #[test]
fn editor_command_defaults_to_null() { fn editor_command_defaults_to_null() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
let result = get_editor_command_from_store(&ctx); let result = get_editor_command_from_store(&ctx);
assert!(result.is_none()); assert!(result.is_none());
} }
@@ -208,7 +204,7 @@ mod tests {
#[test] #[test]
fn set_editor_command_persists_in_store() { fn set_editor_command_persists_in_store() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(EDITOR_COMMAND_KEY, json!("zed")); ctx.store.set(EDITOR_COMMAND_KEY, json!("zed"));
ctx.store.save().unwrap(); ctx.store.save().unwrap();
@@ -220,7 +216,7 @@ mod tests {
#[test] #[test]
fn get_editor_command_from_store_returns_value() { fn get_editor_command_from_store_returns_value() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(EDITOR_COMMAND_KEY, json!("code")); ctx.store.set(EDITOR_COMMAND_KEY, json!("code"));
let result = get_editor_command_from_store(&ctx); let result = get_editor_command_from_store(&ctx);
@@ -230,7 +226,7 @@ mod tests {
#[test] #[test]
fn delete_editor_command_returns_none() { fn delete_editor_command_returns_none() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
ctx.store.set(EDITOR_COMMAND_KEY, json!("cursor")); ctx.store.set(EDITOR_COMMAND_KEY, json!("cursor"));
ctx.store.delete(EDITOR_COMMAND_KEY); ctx.store.delete(EDITOR_COMMAND_KEY);
@@ -258,7 +254,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn get_editor_http_handler_returns_null_when_not_set() { async fn get_editor_http_handler_returns_null_when_not_set() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
let api = SettingsApi { let api = SettingsApi {
ctx: Arc::new(ctx), ctx: Arc::new(ctx),
}; };
@@ -269,7 +265,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_http_handler_stores_value() { async fn set_editor_http_handler_stores_value() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
let api = SettingsApi { let api = SettingsApi {
ctx: Arc::new(ctx), ctx: Arc::new(ctx),
}; };
@@ -286,7 +282,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn set_editor_http_handler_clears_value_when_null() { async fn set_editor_http_handler_clears_value_when_null() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let ctx = test_ctx(&dir); let ctx = test_ctx(dir.path());
let api = SettingsApi { let api = SettingsApi {
ctx: Arc::new(ctx), ctx: Arc::new(ctx),
}; };
@@ -310,7 +306,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_file_returns_error_when_no_editor_configured() { async fn open_file_returns_error_when_no_editor_configured() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
let result = api let result = api
.open_file(Query("src/main.rs".to_string()), Query(Some(42))) .open_file(Query("src/main.rs".to_string()), Query(Some(42)))
.await; .await;
@@ -322,7 +318,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_file_spawns_editor_with_path_and_line() { async fn open_file_spawns_editor_with_path_and_line() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
// Configure the editor to "echo" which is a safe no-op command // Configure the editor to "echo" which is a safe no-op command
api.set_editor(Json(EditorCommandPayload { api.set_editor(Json(EditorCommandPayload {
editor_command: Some("echo".to_string()), editor_command: Some("echo".to_string()),
@@ -339,7 +335,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_file_spawns_editor_with_path_only_when_no_line() { async fn open_file_spawns_editor_with_path_only_when_no_line() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
api.set_editor(Json(EditorCommandPayload { api.set_editor(Json(EditorCommandPayload {
editor_command: Some("echo".to_string()), editor_command: Some("echo".to_string()),
})) }))
@@ -355,7 +351,7 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn open_file_returns_error_for_nonexistent_editor() { async fn open_file_returns_error_for_nonexistent_editor() {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
let api = make_api(&dir); let api = make_api::<SettingsApi>(&dir);
api.set_editor(Json(EditorCommandPayload { api.set_editor(Json(EditorCommandPayload {
editor_command: Some("this_editor_does_not_exist_xyz_abc".to_string()), editor_command: Some("this_editor_does_not_exist_xyz_abc".to_string()),
})) }))
+21
View File
@@ -0,0 +1,21 @@
//! Shared test utilities for HTTP handler tests.
//!
//! Import with `use crate::http::test_helpers::{make_api, test_ctx};`
use crate::http::context::AppContext;
use std::path::Path;
use std::sync::Arc;
use tempfile::TempDir;
/// Build an [`AppContext`] rooted at `dir` for use in tests.
pub(crate) fn test_ctx(dir: &Path) -> AppContext {
AppContext::new_test(dir.to_path_buf())
}
/// Build an API struct rooted in `dir` for use in tests.
///
/// Requires the API type to implement `From<Arc<AppContext>>`. Add a
/// `#[cfg(test)]` impl block to each API struct to opt in.
pub(crate) fn make_api<T: From<Arc<AppContext>>>(dir: &TempDir) -> T {
Arc::new(test_ctx(dir.path())).into()
}
+9 -1
View File
@@ -57,7 +57,11 @@ The permission system validates the entire command string, and chained commands
won't match allow rules like `Bash(git *)`. Use separate Bash calls instead \ won't match allow rules like `Bash(git *)`. Use separate Bash calls instead \
parallel calls work fine.\n\ parallel calls work fine.\n\
\n\ \n\
Read .storkit/README.md to see our dev process.\n"; Read .storkit/README.md to see our dev process.\n\
\n\
IMPORTANT: On your first conversation, call `wizard_status` to check if \
project setup is complete. If not, read .storkit/README.md for the full \
setup wizard instructions and guide the user through it conversationally.\n";
const STORY_KIT_CLAUDE_SETTINGS: &str = r#"{ const STORY_KIT_CLAUDE_SETTINGS: &str = r#"{
"permissions": { "permissions": {
@@ -756,6 +760,10 @@ mod tests {
content.contains("Never chain shell commands"), content.contains("Never chain shell commands"),
"CLAUDE.md should include command chaining rule" "CLAUDE.md should include command chaining rule"
); );
assert!(
content.contains("wizard_status"),
"CLAUDE.md should instruct Claude to call wizard_status on first conversation"
);
} }
#[test] #[test]
+2
View File
@@ -5,3 +5,5 @@ pub mod shell;
pub mod story_metadata; pub mod story_metadata;
pub mod watcher; pub mod watcher;
pub mod wizard; pub mod wizard;
#[cfg(test)]
pub(crate) mod test_helpers;
+1 -8
View File
@@ -74,17 +74,10 @@ fn needs_project_toml(story_kit: &Path) -> bool {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::io::test_helpers::setup_project;
use std::fs; use std::fs;
use tempfile::TempDir; use tempfile::TempDir;
fn setup_project(dir: &TempDir) -> std::path::PathBuf {
let root = dir.path().to_path_buf();
let sk = root.join(".storkit");
fs::create_dir_all(sk.join("specs").join("tech")).unwrap();
fs::create_dir_all(root.join("script")).unwrap();
root
}
// ── needs_onboarding ────────────────────────────────────────── // ── needs_onboarding ──────────────────────────────────────────
#[test] #[test]
+3 -13
View File
@@ -11,15 +11,11 @@ pub struct SearchResult {
pub matches: usize, pub matches: usize,
} }
fn get_project_root(state: &SessionState) -> Result<PathBuf, String> {
state.get_project_root()
}
pub async fn search_files( pub async fn search_files(
query: String, query: String,
state: &SessionState, state: &SessionState,
) -> Result<Vec<SearchResult>, String> { ) -> Result<Vec<SearchResult>, String> {
let root = get_project_root(state)?; let root = state.get_project_root()?;
search_files_impl(query, root).await search_files_impl(query, root).await
} }
@@ -68,18 +64,12 @@ pub async fn search_files_impl(query: String, root: PathBuf) -> Result<Vec<Searc
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use std::fs; use crate::io::test_helpers::create_test_files;
use tempfile::TempDir; use tempfile::TempDir;
fn setup_project(files: &[(&str, &str)]) -> TempDir { fn setup_project(files: &[(&str, &str)]) -> TempDir {
let dir = TempDir::new().unwrap(); let dir = TempDir::new().unwrap();
for (path, content) in files { create_test_files(&dir, files);
let full = dir.path().join(path);
if let Some(parent) = full.parent() {
fs::create_dir_all(parent).unwrap();
}
fs::write(full, content).unwrap();
}
dir dir
} }
+1 -6
View File
@@ -3,11 +3,6 @@ use serde::Serialize;
use std::path::PathBuf; use std::path::PathBuf;
use std::process::Command; use std::process::Command;
/// Helper to get the root path (cloned) without joining
fn get_project_root(state: &SessionState) -> Result<PathBuf, String> {
state.get_project_root()
}
#[derive(Serialize, Debug, poem_openapi::Object)] #[derive(Serialize, Debug, poem_openapi::Object)]
pub struct CommandOutput { pub struct CommandOutput {
pub stdout: String, pub stdout: String,
@@ -53,7 +48,7 @@ pub async fn exec_shell(
args: Vec<String>, args: Vec<String>,
state: &SessionState, state: &SessionState,
) -> Result<CommandOutput, String> { ) -> Result<CommandOutput, String> {
let root = get_project_root(state)?; let root = state.get_project_root()?;
exec_shell_impl(command, args, root).await exec_shell_impl(command, args, root).await
} }
+32
View File
@@ -0,0 +1,32 @@
//! Shared test utilities for I/O module tests.
//!
//! Import with `use crate::io::test_helpers::{create_test_files, setup_project};`
use std::fs;
use std::path::PathBuf;
use tempfile::TempDir;
/// Create a minimal storkit project directory structure under `dir`.
///
/// Creates `.storkit/specs/tech/` and `script/`, then returns the root path.
/// Used by onboarding and wizard tests.
pub(crate) fn setup_project(dir: &TempDir) -> PathBuf {
let root = dir.path().to_path_buf();
let sk = root.join(".storkit");
fs::create_dir_all(sk.join("specs").join("tech")).unwrap();
fs::create_dir_all(root.join("script")).unwrap();
root
}
/// Write a set of files into `dir` at the given relative paths.
///
/// Parent directories are created automatically. Used by search tests.
pub(crate) fn create_test_files(dir: &TempDir, files: &[(&str, &str)]) {
for (path, content) in files {
let full = dir.path().join(path);
if let Some(parent) = full.parent() {
fs::create_dir_all(parent).unwrap();
}
fs::write(full, content).unwrap();
}
}
+63 -7
View File
@@ -1,4 +1,5 @@
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json;
use std::fs; use std::fs;
use std::path::Path; use std::path::Path;
@@ -190,18 +191,73 @@ impl WizardState {
} }
} }
/// Format a `WizardState` as a human-readable Markdown summary for display in
/// bot messages and MCP responses.
pub fn format_wizard_state(state: &WizardState) -> String {
let total = state.steps.len();
let current_idx = state.current_step_index();
let header = if state.completed {
format!("**Setup wizard — complete** ({total}/{total} steps done)")
} else {
format!("**Setup wizard — step {}/{}**", current_idx + 1, total)
};
let mut lines = vec![header, String::new()];
for (i, step) in state.steps.iter().enumerate() {
let marker = match step.status {
StepStatus::Confirmed => "",
StepStatus::Skipped => "~",
StepStatus::Generating => "",
StepStatus::AwaitingConfirmation => "?",
StepStatus::Pending => "",
};
let is_current = !state.completed && i == current_idx;
let suffix = if is_current { " ← current" } else { "" };
let status_str = serde_json::to_value(&step.status)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default();
lines.push(format!(
" {} {} ({}){suffix}",
marker,
step.step.label(),
status_str
));
}
if state.completed {
lines.push(String::new());
lines.push("All steps done. Your project is fully configured.".to_string());
} else {
let current = &state.steps[current_idx];
lines.push(String::new());
lines.push(format!("**Current:** {}", current.step.label()));
let hint = match current.status {
StepStatus::Pending => {
"Ready to generate. Proceed by calling wizard_generate.".to_string()
}
StepStatus::Generating => "Generating content…".to_string(),
StepStatus::AwaitingConfirmation => {
"Content ready for review. Show it to the user and ask if they're happy with it. Then call wizard_confirm, wizard_retry, or wizard_skip based on their response.".to_string()
}
StepStatus::Confirmed | StepStatus::Skipped => String::new(),
};
if !hint.is_empty() {
lines.push(hint);
}
}
lines.join("\n")
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use crate::io::test_helpers::setup_project;
use tempfile::TempDir; use tempfile::TempDir;
fn setup_project(dir: &TempDir) -> std::path::PathBuf {
let root = dir.path().to_path_buf();
let sk = root.join(".storkit");
std::fs::create_dir_all(&sk).unwrap();
root
}
#[test] #[test]
fn default_state_has_all_steps_pending() { fn default_state_has_all_steps_pending() {
let state = WizardState::default(); let state = WizardState::default();
+155 -124
View File
@@ -32,72 +32,78 @@ use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
/// What the first CLI argument means. /// Parsed CLI arguments.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
enum CliDirective { struct CliArgs {
/// `--help` / `-h` /// Value from `--port <VALUE>` flag, if supplied.
Help, port: Option<u16>,
/// `--version` / `-V` /// Positional project path argument, if supplied.
Version, path: Option<String>,
/// `init [PATH]` — scaffold and start the setup wizard. /// Whether the `init` subcommand was given.
Init, init: bool,
/// An unrecognised flag (starts with `-`).
UnknownFlag(String),
/// A positional path argument.
Path,
/// No arguments at all.
None,
} }
/// Inspect the raw CLI arguments and return the directive they imply. /// Parse CLI arguments into `CliArgs`, or exit early for `--help` / `--version`.
fn classify_cli_args(args: &[String]) -> CliDirective { fn parse_cli_args(args: &[String]) -> Result<CliArgs, String> {
match args.first().map(String::as_str) { let mut port: Option<u16> = None;
None => CliDirective::None, let mut path: Option<String> = None;
Some("--help" | "-h") => CliDirective::Help, let mut init = false;
Some("--version" | "-V") => CliDirective::Version, let mut i = 0;
Some("init") => CliDirective::Init,
Some(a) if a.starts_with('-') => CliDirective::UnknownFlag(a.to_string()), while i < args.len() {
Some(_) => CliDirective::Path, match args[i].as_str() {
"--help" | "-h" => {
print_help();
std::process::exit(0);
} }
"--version" | "-V" => {
println!("storkit {}", env!("CARGO_PKG_VERSION"));
std::process::exit(0);
}
"--port" => {
i += 1;
if i >= args.len() {
return Err("--port requires a value".to_string());
}
match args[i].parse::<u16>() {
Ok(p) => port = Some(p),
Err(_) => return Err(format!("invalid port value: '{}'", args[i])),
}
}
a if a.starts_with("--port=") => {
let val = &a["--port=".len()..];
match val.parse::<u16>() {
Ok(p) => port = Some(p),
Err(_) => return Err(format!("invalid port value: '{val}'")),
}
}
"init" => {
init = true;
}
a if a.starts_with('-') => {
return Err(format!("unknown option: {a}"));
}
a => {
if path.is_some() {
return Err(format!("unexpected argument: {a}"));
}
path = Some(a.to_string());
}
}
i += 1;
}
Ok(CliArgs { port, path, init })
} }
/// Resolve the optional positional path argument (everything after the binary fn print_help() {
/// name) into an absolute `PathBuf`. Returns `None` when no argument was println!("storkit [OPTIONS] [PATH]");
/// supplied so that the caller can fall back to the auto-detect behaviour. println!("storkit init [OPTIONS] [PATH]");
fn parse_project_path_arg(args: &[String], cwd: &std::path::Path) -> Option<PathBuf> {
args.first().map(|s| io::fs::resolve_cli_path(cwd, s))
}
#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
let app_state = Arc::new(SessionState::default());
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
let store = Arc::new(
JsonFileStore::from_path(PathBuf::from("store.json")).map_err(std::io::Error::other)?,
);
let port = resolve_port();
// Collect CLI args, skipping the binary name (argv[0]).
let cli_args: Vec<String> = std::env::args().skip(1).collect();
// Handle CLI flags before treating anything as a project path.
let is_init = matches!(classify_cli_args(&cli_args), CliDirective::Init);
match classify_cli_args(&cli_args) {
CliDirective::Help => {
println!("storkit [PATH]");
println!("storkit init [PATH]");
println!(); println!();
println!("Serve a storkit project."); println!("Serve a storkit project.");
println!(); println!();
println!("USAGE:");
println!(" storkit [PATH]");
println!(" storkit init [PATH]");
println!();
println!("COMMANDS:"); println!("COMMANDS:");
println!( println!(" init Scaffold a new .storkit/ project and start the interactive setup wizard.");
" init Scaffold a new .storkit/ project and start the interactive setup wizard."
);
println!(); println!();
println!("ARGS:"); println!("ARGS:");
println!( println!(
@@ -108,27 +114,42 @@ async fn main() -> Result<(), std::io::Error> {
println!("OPTIONS:"); println!("OPTIONS:");
println!(" -h, --help Print this help and exit"); println!(" -h, --help Print this help and exit");
println!(" -V, --version Print the version and exit"); println!(" -V, --version Print the version and exit");
std::process::exit(0); println!(" --port <PORT> Port to listen on (default: 3001). Persisted to project.toml.");
} }
CliDirective::Version => {
println!("storkit {}", env!("CARGO_PKG_VERSION")); /// Resolve the optional positional path argument into an absolute `PathBuf`.
std::process::exit(0); fn resolve_path_arg(path_str: Option<&str>, cwd: &std::path::Path) -> Option<PathBuf> {
} path_str.map(|s| io::fs::resolve_cli_path(cwd, s))
CliDirective::UnknownFlag(flag) => { }
eprintln!("error: unknown option: {flag}");
#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
let app_state = Arc::new(SessionState::default());
let cwd = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("."));
let store = Arc::new(
JsonFileStore::from_path(PathBuf::from("store.json")).map_err(std::io::Error::other)?,
);
// Collect CLI args, skipping the binary name (argv[0]).
let raw_args: Vec<String> = std::env::args().skip(1).collect();
let cli = match parse_cli_args(&raw_args) {
Ok(args) => args,
Err(msg) => {
eprintln!("error: {msg}");
eprintln!("Run 'storkit --help' for usage."); eprintln!("Run 'storkit --help' for usage.");
std::process::exit(1); std::process::exit(1);
} }
CliDirective::Init | CliDirective::Path | CliDirective::None => {}
}
// For `storkit init [PATH]`, the path argument follows "init".
let explicit_path = if is_init {
parse_project_path_arg(&cli_args[1..], &cwd)
} else {
parse_project_path_arg(&cli_args, &cwd)
}; };
let is_init = cli.init;
let explicit_path = resolve_path_arg(cli.path.as_deref(), &cwd);
// Port resolution: CLI flag > project.toml (loaded later) > default.
// Use the CLI port for scaffolding .mcp.json; final port is resolved
// after the project root is known.
let port = cli.port.unwrap_or_else(resolve_port);
// When a path is given explicitly on the CLI, it must already exist as a // When a path is given explicitly on the CLI, it must already exist as a
// directory. We do not create directories from the command line. // directory. We do not create directories from the command line.
if let Some(ref path) = explicit_path { if let Some(ref path) = explicit_path {
@@ -611,96 +632,106 @@ name = "coder"
.unwrap_or_else(|e| panic!("Invalid project.toml: {e}")); .unwrap_or_else(|e| panic!("Invalid project.toml: {e}"));
} }
// ── classify_cli_args ───────────────────────────────────────────────── // ── parse_cli_args ─────────────────────────────────────────────────
#[test] #[test]
fn classify_none_when_no_args() { fn parse_no_args() {
assert_eq!(classify_cli_args(&[]), CliDirective::None); let result = parse_cli_args(&[]).unwrap();
assert_eq!(result.port, None);
assert_eq!(result.path, None);
assert!(!result.init);
} }
#[test] #[test]
fn classify_help_long() { fn parse_unknown_flag_is_error() {
assert_eq!( let args = vec!["--serve".to_string()];
classify_cli_args(&["--help".to_string()]), assert!(parse_cli_args(&args).is_err());
CliDirective::Help
);
} }
#[test] #[test]
fn classify_help_short() { fn parse_path_only() {
assert_eq!(classify_cli_args(&["-h".to_string()]), CliDirective::Help); let args = vec!["/some/path".to_string()];
let result = parse_cli_args(&args).unwrap();
assert_eq!(result.path, Some("/some/path".to_string()));
assert_eq!(result.port, None);
assert!(!result.init);
} }
#[test] #[test]
fn classify_version_long() { fn parse_port_flag() {
assert_eq!( let args = vec!["--port".to_string(), "4000".to_string()];
classify_cli_args(&["--version".to_string()]), let result = parse_cli_args(&args).unwrap();
CliDirective::Version assert_eq!(result.port, Some(4000));
); assert_eq!(result.path, None);
} }
#[test] #[test]
fn classify_version_short() { fn parse_port_equals_syntax() {
assert_eq!( let args = vec!["--port=5000".to_string()];
classify_cli_args(&["-V".to_string()]), let result = parse_cli_args(&args).unwrap();
CliDirective::Version assert_eq!(result.port, Some(5000));
);
} }
#[test] #[test]
fn classify_unknown_flag() { fn parse_port_with_path() {
assert_eq!( let args = vec!["--port".to_string(), "4200".to_string(), "/some/path".to_string()];
classify_cli_args(&["--serve".to_string()]), let result = parse_cli_args(&args).unwrap();
CliDirective::UnknownFlag("--serve".to_string()) assert_eq!(result.port, Some(4200));
); assert_eq!(result.path, Some("/some/path".to_string()));
} }
#[test] #[test]
fn classify_path() { fn parse_port_missing_value_is_error() {
assert_eq!( let args = vec!["--port".to_string()];
classify_cli_args(&["/some/path".to_string()]), assert!(parse_cli_args(&args).is_err());
CliDirective::Path
);
} }
// ── parse_project_path_arg ──────────────────────────────────────────── #[test]
fn parse_port_invalid_value_is_error() {
let args = vec!["--port".to_string(), "abc".to_string()];
assert!(parse_cli_args(&args).is_err());
}
#[test] #[test]
fn parse_project_path_arg_none_when_no_args() { fn parse_init_subcommand() {
let args = vec!["init".to_string()];
let result = parse_cli_args(&args).unwrap();
assert!(result.init);
assert_eq!(result.path, None);
}
#[test]
fn parse_init_with_path_and_port() {
let args = vec!["init".to_string(), "--port".to_string(), "3000".to_string(), "/my/project".to_string()];
let result = parse_cli_args(&args).unwrap();
assert!(result.init);
assert_eq!(result.port, Some(3000));
assert_eq!(result.path, Some("/my/project".to_string()));
}
// ── resolve_path_arg ────────────────────────────────────────────
#[test]
fn resolve_path_arg_none_when_no_path() {
let cwd = PathBuf::from("/home/user/project"); let cwd = PathBuf::from("/home/user/project");
let result = parse_project_path_arg(&[], &cwd); let result = resolve_path_arg(None, &cwd);
assert!(result.is_none()); assert!(result.is_none());
} }
#[test] #[test]
fn parse_project_path_arg_returns_path_for_absolute_arg() { fn resolve_path_arg_returns_path_for_absolute_arg() {
let cwd = PathBuf::from("/home/user/project"); let cwd = PathBuf::from("/home/user/project");
let args = vec!["/some/absolute/path".to_string()]; let result = resolve_path_arg(Some("/some/absolute/path"), &cwd).unwrap();
let result = parse_project_path_arg(&args, &cwd).unwrap();
// Absolute path returned as-is (canonicalize may fail, fallback used)
assert!( assert!(
result.ends_with("absolute/path") || result == PathBuf::from("/some/absolute/path") result.ends_with("absolute/path") || result == PathBuf::from("/some/absolute/path")
); );
} }
#[test] #[test]
fn parse_project_path_arg_resolves_dot_to_cwd() { fn resolve_path_arg_resolves_dot_to_cwd() {
let tmp = tempfile::tempdir().unwrap(); let tmp = tempfile::tempdir().unwrap();
let cwd = tmp.path().to_path_buf(); let cwd = tmp.path().to_path_buf();
let args = vec![".".to_string()]; let result = resolve_path_arg(Some("."), &cwd).unwrap();
let result = parse_project_path_arg(&args, &cwd).unwrap();
// "." relative to an existing cwd should canonicalize to the cwd itself
assert_eq!(result, cwd.canonicalize().unwrap_or(cwd)); assert_eq!(result, cwd.canonicalize().unwrap_or(cwd));
} }
#[test]
fn parse_project_path_arg_resolves_relative_path() {
let tmp = tempfile::tempdir().unwrap();
let cwd = tmp.path().to_path_buf();
let subdir = cwd.join("myproject");
std::fs::create_dir_all(&subdir).unwrap();
let args = vec!["myproject".to_string()];
let result = parse_project_path_arg(&args, &cwd).unwrap();
assert_eq!(result, subdir.canonicalize().unwrap_or(subdir));
}
} }