96 Commits

Author SHA1 Message Date
Dave
8074e3b420 fix: suppress unused variable TS errors that block release build
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 16:43:42 +00:00
Dave
8a6eeacb5e fix: suppress unused variable TS errors that block release build
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 16:41:04 +00:00
Dave
eb9707d8b6 story-kit: done 311_story_server_enforced_retry_limits_for_failed_merge_and_empty_diff_stories 2026-03-19 16:36:21 +00:00
Dave
3b887e3085 story-kit: merge 311_story_server_enforced_retry_limits_for_failed_merge_and_empty_diff_stories 2026-03-19 16:36:18 +00:00
Dave
662e00f94a fix: biome formatting and lint fixes in App.tsx and TokenUsagePage.tsx
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 16:14:44 +00:00
Dave
932325744c fix: return arrays for list endpoints in test fetch mock
Prevents "agentList is not iterable" warnings in test output.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 16:09:02 +00:00
Dave
3ced187aaa fix: mock fetch in test setup to suppress URL parse errors in jsdom
Also set jsdom base URL to http://localhost:3000 in vitest config.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 16:07:56 +00:00
Dave
64f73e24bf story-kit: done 310_story_bot_delete_command_removes_a_story_from_the_pipeline 2026-03-19 16:05:03 +00:00
Dave
c4282ab2fa story-kit: merge 310_story_bot_delete_command_removes_a_story_from_the_pipeline 2026-03-19 16:05:00 +00:00
Dave
a23fe71232 story-kit: accept 307_story_configurable_coder_pool_size_and_default_model_in_project_toml 2026-03-19 16:01:38 +00:00
Dave
84b1c24073 story-kit: done 307_story_configurable_coder_pool_size_and_default_model_in_project_toml 2026-03-19 16:00:37 +00:00
Dave
429597cbce story-kit: merge 307_story_configurable_coder_pool_size_and_default_model_in_project_toml 2026-03-19 16:00:35 +00:00
Dave
101b365354 story-kit: accept 309_story_show_token_cost_breakdown_in_expanded_work_item_detail_panel 2026-03-19 15:55:39 +00:00
Dave
ca3d5ee7a6 story-kit: create 312_bug_auto_assign_assigns_mergemaster_to_coding_stage_stories 2026-03-19 15:49:38 +00:00
Dave
4af9507764 story-kit: create 312_bug_auto_assign_assigns_mergemaster_to_coding_stage_stories 2026-03-19 15:48:41 +00:00
Dave
b71e8dd2be story-kit: create 310_story_bot_delete_command_removes_a_story_from_the_pipeline 2026-03-19 15:43:11 +00:00
Dave
a6621a7095 story-kit: done 309_story_show_token_cost_breakdown_in_expanded_work_item_detail_panel 2026-03-19 15:42:50 +00:00
Dave
ce380ffb52 story-kit: merge 309_story_show_token_cost_breakdown_in_expanded_work_item_detail_panel 2026-03-19 15:42:47 +00:00
Dave
be9c15efe0 story-kit: create 311_story_server_enforced_retry_limits_for_failed_merge_and_empty_diff_stories 2026-03-19 15:42:23 +00:00
Dave
76e3bf952e story-kit: accept 298_story_bot_htop_command_with_live_updating_process_dashboard 2026-03-19 15:33:32 +00:00
Dave
d6858b690b story-kit: done 306_story_replace_manual_qa_boolean_with_configurable_qa_mode_field 2026-03-19 11:58:50 +00:00
Dave
2067abb2e5 story-kit: merge 306_story_replace_manual_qa_boolean_with_configurable_qa_mode_field 2026-03-19 11:58:47 +00:00
Dave
a058fa5f19 story-kit: create 310_story_bot_delete_command_removes_a_story_from_the_pipeline 2026-03-19 11:57:28 +00:00
Dave
62c2c531e6 story-kit: create 309_story_show_token_cost_breakdown_in_expanded_work_item_detail_panel 2026-03-19 11:55:12 +00:00
Dave
f266bb1d03 story-kit: create 308_bug_token_cost_breakdown_missing_from_expanded_work_item_detail_panel 2026-03-19 11:54:05 +00:00
Dave
7c9261da41 story-kit: create 307_story_configurable_coder_pool_size_and_default_model_in_project_toml 2026-03-19 11:42:35 +00:00
Dave
0eac4ca966 story-kit: done 303_story_bot_cost_command_with_story_filter_for_detailed_breakdown 2026-03-19 11:40:57 +00:00
Dave
a70f6b01e0 story-kit: create 307_story_configurable_coder_pool_size_and_default_model_in_project_toml 2026-03-19 11:39:57 +00:00
Dave
4545b57160 story-kit: done 301_story_dedicated_token_usage_page_in_web_ui 2026-03-19 11:36:16 +00:00
Dave
a6ac6497e9 story-kit: merge 301_story_dedicated_token_usage_page_in_web_ui 2026-03-19 11:36:12 +00:00
Dave
586d06b840 story-kit: create 306_story_replace_manual_qa_boolean_with_configurable_qa_mode_field 2026-03-19 11:30:19 +00:00
Dave
c67f148383 story-kit: done 305_story_bot_show_command_displays_story_text_in_chat 2026-03-19 11:12:37 +00:00
Dave
f88114edbf story-kit: merge 305_story_bot_show_command_displays_story_text_in_chat 2026-03-19 11:12:35 +00:00
Dave
08c7a92d74 story-kit: done 300_story_show_token_cost_badge_on_pipeline_board_work_items 2026-03-19 11:02:14 +00:00
Dave
36535b639f story-kit: merge 300_story_show_token_cost_badge_on_pipeline_board_work_items 2026-03-19 11:02:12 +00:00
Dave
b6f99ce7a2 story-kit: done 304_story_mcp_tool_to_move_stories_between_pipeline_stages 2026-03-19 10:59:50 +00:00
Dave
f4376b01e1 story-kit: merge 304_story_mcp_tool_to_move_stories_between_pipeline_stages 2026-03-19 10:59:47 +00:00
Dave
e7aa4e028e story-kit: create 305_story_bot_show_command_displays_story_text_in_chat 2026-03-19 10:56:25 +00:00
Dave
52c5cc9b72 story-kit: done 302_story_bot_cost_command_shows_total_and_per_story_token_spend 2026-03-19 10:54:09 +00:00
Dave
c327263254 story-kit: merge 302_story_bot_cost_command_shows_total_and_per_story_token_spend 2026-03-19 10:54:04 +00:00
Dave
7c9b86c31b story-kit: create 304_story_mcp_tool_to_move_stories_between_pipeline_stages 2026-03-19 10:44:14 +00:00
Dave
a2c893420b story-kit: create 304_story_mcp_tool_to_move_stories_between_pipeline_stages 2026-03-19 10:43:10 +00:00
Dave
a1fe5356cf story-kit: create 301_story_dedicated_token_usage_page_in_web_ui 2026-03-19 10:41:49 +00:00
Dave
1477fbc02b story-kit: create 303_story_bot_cost_command_with_story_filter_for_detailed_breakdown 2026-03-19 10:40:42 +00:00
Dave
6a74eefd07 chore: gitignore token_usage.jsonl
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 10:35:37 +00:00
Dave
981fd3fd81 story-kit: merge 298_story_bot_htop_command_with_live_updating_process_dashboard
Adds htop bot command with live-updating Matrix message showing system
load and per-agent CPU/memory usage. Supports timeout override and
htop stop. Resolved conflict with git command in commands.rs registry.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 10:33:21 +00:00
Dave
99d301b467 story-kit: done 298_story_bot_htop_command_with_live_updating_process_dashboard 2026-03-19 10:29:35 +00:00
Dave
9ed80384d5 story-kit: create 302_story_bot_cost_command_shows_total_and_per_story_token_spend 2026-03-19 10:14:01 +00:00
Dave
c2cda92337 story-kit: done 299_story_bot_git_status_command_shows_working_tree_and_branch_info 2026-03-19 10:11:30 +00:00
Dave
b25ae42737 story-kit: merge 299_story_bot_git_status_command_shows_working_tree_and_branch_info 2026-03-19 10:11:28 +00:00
Dave
7811130a8b story-kit: create 300_story_show_token_cost_badge_on_pipeline_board_work_items 2026-03-19 10:10:27 +00:00
Dave
ec212cb5a2 story-kit: create 303_story_bot_cost_command_with_story_filter_for_detailed_breakdown 2026-03-19 10:08:57 +00:00
Dave
d174bb41e7 story-kit: create 302_story_bot_cost_command_shows_total_and_per_story_token_spend 2026-03-19 10:08:48 +00:00
Dave
40570888ff story-kit: create 301_story_dedicated_token_usage_page_in_web_ui 2026-03-19 10:08:35 +00:00
Dave
dd75e9e0fa story-kit: create 300_story_show_token_cost_badge_on_pipeline_board_work_items 2026-03-19 10:08:26 +00:00
Dave
c2aa9ef134 story-kit: accept 295_bug_stories_stuck_in_qa_when_qa_agent_is_busy 2026-03-19 09:58:55 +00:00
Dave
501d6d31ff story-kit: create 299_story_bot_git_status_command_shows_working_tree_and_branch_info 2026-03-19 09:58:06 +00:00
Dave
db2f8fcfc5 story-kit: done 295_bug_stories_stuck_in_qa_when_qa_agent_is_busy 2026-03-19 09:57:32 +00:00
Dave
f325ddf9fe story-kit: done 296_story_track_per_agent_token_usage_for_cost_visibility_and_optimisation 2026-03-19 09:55:28 +00:00
Dave
9cdb0d4ea8 story-kit: merge 296_story_track_per_agent_token_usage_for_cost_visibility_and_optimisation 2026-03-19 09:55:25 +00:00
Dave
6c413e1fc7 fix: call auto_assign_available_work after every pipeline advance (bug 295)
Stories got stuck in QA/merge when agents were busy at assignment time.
Consolidates auto_assign into a single unconditional call at the end of
run_pipeline_advance, so whenever any agent completes, the system
immediately scans for pending work and assigns free agents.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 09:53:41 +00:00
Dave
28b29b55a8 story-kit: accept 294_story_rename_app_title_from_story_kit_to_storkit 2026-03-19 09:47:31 +00:00
Dave
376de57252 story-kit: done 294_story_rename_app_title_from_story_kit_to_storkit 2026-03-19 09:46:31 +00:00
Dave
63f46751ac story-kit: merge 294_story_rename_app_title_from_story_kit_to_storkit 2026-03-19 09:46:28 +00:00
Dave
dc9df6d497 story-kit: create 298_story_bot_htop_command_with_live_updating_process_dashboard 2026-03-19 09:41:26 +00:00
Dave
ae7b04fac5 story-kit: create 298_story_bot_htop_command_with_live_updating_process_dashboard 2026-03-19 09:39:38 +00:00
Dave
50959e6b67 story-kit: done 297_story_improve_bot_status_command_formatting 2026-03-19 09:39:08 +00:00
Dave
6353b12c1d story-kit: merge 297_story_improve_bot_status_command_formatting 2026-03-19 09:39:05 +00:00
Dave
170fd53808 story-kit: create 296_story_track_per_agent_token_usage_for_cost_visibility_and_optimisation 2026-03-19 09:37:21 +00:00
Dave
597e6bf1c3 story-kit: create 298_story_bot_htop_command_with_live_updating_process_dashboard 2026-03-19 09:36:14 +00:00
Dave
7a5a56f211 story-kit: create 297_story_improve_bot_status_command_formatting 2026-03-19 09:26:32 +00:00
Dave
73c86b6946 story-kit: merge 293_story_register_all_bot_commands_in_the_command_registry
Moves status, ambient, and help commands into a unified command registry
in commands.rs. Help output now automatically lists all registered
commands. Resolved merge conflict with 1_backlog rename.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 09:14:04 +00:00
Dave
11afd21f17 story-kit: create 296_story_track_per_agent_token_usage_for_cost_visibility_and_optimisation 2026-03-19 09:03:46 +00:00
Dave
1d20cfc679 story-kit: create 296_story_track_per_agent_token_usage_for_cost_visibility_and_optimisation 2026-03-19 08:59:46 +00:00
Dave
959c680e10 story-kit: accept 293_story_register_all_bot_commands_in_the_command_registry 2026-03-19 08:37:52 +00:00
Dave
dd377de7db story-kit: done 293_story_register_all_bot_commands_in_the_command_registry 2026-03-19 08:37:47 +00:00
Dave
9fee4d9478 story-kit: accept 292_story_show_server_logs_in_web_ui 2026-03-19 01:32:28 +00:00
Dave
40c04fcb28 story-kit: done 292_story_show_server_logs_in_web_ui 2026-03-19 01:31:28 +00:00
Dave
2f0d796b38 story-kit: merge 292_story_show_server_logs_in_web_ui 2026-03-19 01:31:25 +00:00
Dave
2346602b30 fix: default manual_qa to false so stories advance automatically
Bug 283 was implemented with manual_qa defaulting to true, causing all
stories to hold in QA for human review. Changed to default false as
originally specified — stories advance automatically unless explicitly
opted in with manual_qa: true.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-19 00:53:00 +00:00
Dave
13c0ee4c08 story-kit: create 295_bug_stories_stuck_in_qa_when_qa_agent_is_busy 2026-03-18 21:24:11 +00:00
Dave
483dca5b95 story-kit: create 295_bug_stories_stuck_in_qa_when_qa_agent_is_busy 2026-03-18 21:23:04 +00:00
Dave
dc7d070101 story-kit: create 294_story_rename_app_title_from_story_kit_to_storkit 2026-03-18 20:43:09 +00:00
Dave
875d1f88aa story-kit: accept 291_story_show_test_results_in_work_item_detail_panel 2026-03-18 20:39:16 +00:00
Dave
f550018987 Updated toml to 1.0.7 2026-03-18 16:38:16 +00:00
Dave
52ec989c3a Fixed some bot tests. 2026-03-18 16:37:23 +00:00
Dave
d080e8b12d story-kit: accept 273_story_matrix_bot_sends_typing_indicator_while_waiting_for_claude_response 2026-03-18 16:11:51 +00:00
Dave
cfd85d3a0e story-kit: accept 284_story_matrix_bot_status_command_shows_pipeline_and_agent_availability 2026-03-18 16:06:45 +00:00
Dave
070d53068e story-kit: accept 283_bug_pipeline_does_not_check_manual_qa_flag_before_advancing_from_qa_to_merge 2026-03-18 16:00:44 +00:00
Dave
fa8e0f39f6 story-kit: create 293_story_register_all_bot_commands_in_the_command_registry 2026-03-18 15:57:14 +00:00
Dave
503fa6b7bf fix: rename remaining 1_upcoming references to 1_backlog in bot.rs
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 15:53:36 +00:00
Dave
51a0fb8297 story-kit: accept 282_story_matrix_bot_ambient_mode_toggle_via_chat_command 2026-03-18 15:52:35 +00:00
Dave
8ac85a0b67 chore: commit pending changes from session
- Add permission rules to .claude/settings.json
- Document empty merge and direct-to-master problems in problems.md
- Fix agent stream URL to use vite proxy instead of hardcoded host
- Add /agents proxy config to vite.config.ts

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-18 15:50:20 +00:00
Dave
aa4e042e32 story-kit: done 247_story_human_qa_gate_with_rejection_flow 2026-03-18 15:47:51 +00:00
Dave
9352443555 story-kit: merge 247_story_human_qa_gate_with_rejection_flow 2026-03-18 15:47:48 +00:00
Dave
1faacd7812 story-kit: done 291_story_show_test_results_in_work_item_detail_panel 2026-03-18 15:43:41 +00:00
79 changed files with 6973 additions and 933 deletions

View File

@@ -60,7 +60,16 @@
"Edit", "Edit",
"Write", "Write",
"Bash(find *)", "Bash(find *)",
"Bash(sqlite3 *)" "Bash(sqlite3 *)",
"Bash(cat <<:*)",
"Bash(cat <<'ENDJSON:*)",
"Bash(make release:*)",
"Bash(npm test:*)",
"Bash(head *)",
"Bash(tail *)",
"Bash(wc *)",
"Bash(npx vite:*)",
"Bash(npm run dev:*)"
] ]
} }
} }

View File

@@ -17,3 +17,6 @@ work/4_merge/
# Coverage reports (generated by cargo-llvm-cov, not tracked in git) # Coverage reports (generated by cargo-llvm-cov, not tracked in git)
coverage/ coverage/
# Token usage log (generated at runtime, contains cost data)
token_usage.jsonl

View File

@@ -2,6 +2,15 @@
Recurring issues observed during pipeline operation. Review periodically and create stories for systemic problems. Recurring issues observed during pipeline operation. Review periodically and create stories for systemic problems.
## 2026-03-18: Stories graduating to "done" with empty merges (7 of 10)
Pipeline allows stories to move through coding → QA → merge → done without any actual code changes landing on master. The squash-merge produces an empty diff but the pipeline still marks the story as done. Affected stories: 247, 273, 274, 278, 279, 280, 92. Only 266, 271, 277, and 281 actually shipped code. Root cause: no check that the merge commit contains a non-empty diff. Filed bug 283 for the manual_qa gate issue specifically, but the empty-merge-to-done problem is broader and needs its own fix.
## 2026-03-18: Agent committed directly to master instead of worktree ## 2026-03-18: Agent committed directly to master instead of worktree
Commit `5f4591f` ("fix: update should_commit_stage test to match 5_done") was made directly on master by an agent (likely mergemaster). Agents should only commit to their feature branch or merge-queue branch, never to master directly. The commit content was correct but the target branch was wrong. Suspect the agent ran `git commit` in the project root instead of the merge worktree directory. Multiple agents have committed directly to master instead of their worktree/feature branch:
- Commit `5f4591f` ("fix: update should_commit_stage test to match 5_done") — likely mergemaster
- Commit `a32cfbd` ("Add bot-level command registry with help command") — story 285 coder committed code + Cargo.lock directly to master
Agents should only commit to their feature branch or merge-queue branch, never to master directly. Suspect agents are running `git commit` in the project root instead of the worktree directory. This can also revert uncommitted fixes on master (e.g. project.toml pkill fix was overwritten). Frequency: at least 2 confirmed cases. This is a recurring and serious problem — needs a guard in the server or agent prompts.

View File

@@ -1,3 +1,18 @@
# Project-wide default QA mode: "server", "agent", or "human".
# Per-story `qa` front matter overrides this setting.
default_qa = "server"
# Default model for coder agents. Only agents with this model are auto-assigned.
# Opus coders are reserved for explicit per-story `agent:` front matter requests.
default_coder_model = "sonnet"
# Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full.
max_coders = 3
# Maximum retries per story per pipeline stage before marking as blocked.
# Set to 0 to disable retry limits.
max_retries = 2
[[component]] [[component]]
name = "frontend" name = "frontend"
path = "frontend" path = "frontend"
@@ -69,6 +84,16 @@ max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix." prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. 
If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing." system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
[[agent]]
name = "coder-3"
stage = "coder"
role = "Full-stack engineer. Implements features across all components."
model = "sonnet"
max_turns = 50
max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
[[agent]] [[agent]]
name = "qa-2" name = "qa-2"
stage = "qa" stage = "qa"

View File

@@ -0,0 +1,37 @@
---
name: "Auto-assign assigns mergemaster to coding-stage stories"
---
# Bug 312: Auto-assign assigns mergemaster to coding-stage stories
## Description
Auto-assign picks agents whose configured stage doesn't match the pipeline stage of the story. Observed multiple mismatch types:
- **Mergemaster assigned to coding-stage stories**: Story 310 was in `2_current/` but got mergemaster instead of a coder (2026-03-19)
- **Coders assigned to QA-stage stories**: Coders (stage=coder) have been observed running on stories in `3_qa/`
- **QA agents assigned to merge-stage stories**: QA agents (stage=qa) have been observed running on stories in `4_merge/`
The `auto_assign_available_work` function doesn't enforce that the agent's configured stage matches the pipeline stage of the story it's being assigned to. Story 279 (auto-assign respects agent stage from front matter) was supposed to fix stage matching, but the check may only apply to front-matter preferences, not the fallback assignment path.
## How to Reproduce
1. Move a story to any pipeline stage with no agent front matter preference
2. Wait for auto_assign_available_work to run
3. Observe that agents from the wrong stage get assigned (e.g. mergemaster to coding, coder to QA)
## Actual Result
Agents are assigned to stories in pipeline stages that don't match their configured stage. Mergemaster codes, coders do QA, QA agents attempt merges.
## Expected Result
Only coder-stage agents should be assigned to stories in 2_current/. Mergemaster should only be assigned to stories in 4_merge/.
## Acceptance Criteria
- [ ] auto_assign_available_work checks that the agent's configured stage matches the pipeline stage of the story before assigning
- [ ] Coder agents only assigned to stories in 2_current/
- [ ] QA agents only assigned to stories in 3_qa/
- [ ] Mergemaster only assigned to stories in 4_merge/
- [ ] Fallback assignment path respects stage matching (not just front-matter preference path)

View File

@@ -0,0 +1,24 @@
---
name: "Bot delete command removes a story from the pipeline"
---
# Story 310: Bot delete command removes a story from the pipeline
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} delete {story_number}" to remove a story/bug/spike from the pipeline, so that I can clean up obsolete or duplicate work items from chat.
## Acceptance Criteria
- [ ] '{bot_name} delete {number}' finds the story/bug/spike by number across all pipeline stages and deletes the file
- [ ] Confirms deletion with the story name and stage it was in
- [ ] Returns a friendly message if no story with that number exists
- [ ] Stops any running agent on the story before deleting
- [ ] Removes the worktree if one exists for the story
- [ ] Registered in the command registry so it appears in help output
- [ ] Handled at bot level without LLM invocation
- [ ] Commits the deletion to git
## Out of Scope
- TBD

View File

@@ -0,0 +1,22 @@
---
name: "Server-enforced retry limits for failed merge and empty-diff stories"
---
# Story 311: Server-enforced retry limits for failed merge and empty-diff stories
## User Story
As a project owner, I want the server to enforce retry limits on stories that fail merge or produce empty diffs, so that agents don't loop infinitely on broken stories and waste tokens.
## Acceptance Criteria
- [ ] auto_assign_available_work checks the merge_failure front matter flag and skips stories in 4_merge that already have a reported failure
- [ ] Server tracks retry count per story per stage in front matter (e.g. retry_count: 2)
- [ ] After N retries (configurable in project.toml, default 2), story is flagged as blocked and auto-assign stops trying
- [ ] Blocked stories show a clear indicator in pipeline status (MCP and bot status command)
- [ ] Server detects 'coder finished with no commits on feature branch' at gate-check stage and fails the gates early instead of advancing to QA
- [ ] Empty-diff merge failures are detected and reported without needing the mergemaster agent to discover them
## Out of Scope
- TBD

View File

@@ -0,0 +1,25 @@
---
name: "Human QA gate with rejection flow"
---
# Story 247: Human QA gate with rejection flow
## User Story
As the project owner, I want stories to require my manual approval after machine QA before they can be merged, so that features that compile and pass tests but do not actually work correctly are caught before reaching master.
## Acceptance Criteria
- [ ] Story files support a manual_qa front matter field (defaults to true)
- [ ] After machine QA passes in 3_qa, stories with manual_qa: true wait for human approval before moving to 4_merge
- [ ] The UI shows a clear way to launch the app from the worktree for manual testing (single button click), with automatic port conflict handling via .story_kit_port
- [ ] Frontend and backend are pre-compiled during machine QA so the app is ready to run instantly for manual testing
- [ ] Only one QA app instance runs at a time — do not automatically spin up multiple instances
- [ ] Human can approve a story from 3_qa to move it to 4_merge
- [ ] Human can reject a story from 3_qa back to 2_current with notes about what is broken
- [ ] Rejection notes are written into the story file so the coder can see what needs fixing
- [ ] Stories with manual_qa: false skip the human gate and proceed directly from machine QA to 4_merge
## Out of Scope
- TBD

View File

@@ -0,0 +1,21 @@
---
name: "Show test results in work item detail panel"
---
# Story 291: Show test results in work item detail panel
## User Story
As a project owner viewing a work item in the web UI, I want to see the most recent test run results in the expanded detail panel, so that I can quickly see pass/fail status without digging through agent logs.
## Acceptance Criteria
- [ ] Expanded work item detail panel shows the most recent test results for that story
- [ ] Test results display pass/fail counts for unit and integration tests
- [ ] Failed tests are listed by name so you can see what broke
- [ ] Test results are read from the story file's ## Test Results section (already written by record_tests MCP tool)
- [ ] Panel shows a clear empty state when no test results exist yet
## Out of Scope
- TBD

View File

@@ -1,5 +1,6 @@
--- ---
name: "Show server logs in web UI" name: "Show server logs in web UI"
review_hold: true
--- ---
# Story 292: Show server logs in web UI # Story 292: Show server logs in web UI

View File

@@ -0,0 +1,23 @@
---
name: "Register all bot commands in the command registry"
review_hold: true
---
# Story 293: Register all bot commands in the command registry
## User Story
As a user, I want all bot commands (help, status, ambient on/off) to be registered in the command registry in commands.rs, so that the help command lists everything the bot can do and there's one consistent mechanism for handling bot-level commands.
## Acceptance Criteria
- [ ] Status command is moved from bot.rs into the command registry in commands.rs
- [ ] Ambient on/off command is moved from bot.rs into the command registry in commands.rs
- [ ] Help command output lists all registered commands including status and ambient
- [ ] No bot-level commands are handled outside of the registry (single mechanism)
- [ ] Existing behavior of all commands is preserved
- [ ] Registry handler functions receive enough context to perform their work (e.g. project_root for status, ambient_rooms for ambient)
## Out of Scope
- TBD

View File

@@ -0,0 +1,19 @@
---
name: "Rename app title from Story Kit to Storkit"
review_hold: true
---
# Story 294: Rename app title from Story Kit to Storkit
## User Story
As a user, I want the application title to say "Storkit" instead of "Story Kit", so that the branding reflects the new name.
## Acceptance Criteria
- [ ] The top title in the web UI header displays "Storkit" instead of "Story Kit"
- [ ] Any other visible references to "Story Kit" in the UI are updated to "Storkit"
## Out of Scope
- TBD

View File

@@ -0,0 +1,64 @@
---
name: "Stories stuck in QA when QA agent is busy"
review_hold: true
---
# Bug 295: Stories stuck in QA when QA agent is busy
## Description
When multiple stories pass coding gates simultaneously and move to QA, only the first one gets a QA agent assigned. The others fail with "Agent 'qa' is already running" and are never retried when the QA agent becomes free. Stories get stuck in QA with no agent indefinitely.
The root cause is in the server-owned agent completion handler in `server/src/agents/pool.rs`. When a coder finishes and gates pass, the server calls the pipeline advance logic which tries to start a QA agent. If the QA agent is already busy on another story, the start fails with an error and the story is left in `3_qa/` with no agent. There is no retry mechanism — the `auto_assign_available_work` function is only called on startup (via `reconcile_on_startup`) and when agents are manually started, not when agents complete.
## How to Reproduce
1. Have 3 stories in current with coders running (e.g. coder-1, coder-2, coder-opus)
2. All 3 coders finish within seconds of each other and pass gates
3. Server tries to start QA agent on all 3:
- Story 292: `qa` agent starts successfully
- Story 293: fails — `"Agent 'qa' is already running on story '292'"`
- Story 294: fails — same error
4. QA on 292 completes (gates pass after retry)
5. Stories 293 and 294 remain stuck in QA with no agent — nobody retries them
## Server Log Evidence (2026-03-18)
```
21:00:35 [agent:292:coder-1] Done.
21:00:42 [agents] Server-owned completion for '292:coder-1': gates_passed=true
21:00:47 [agent:292:qa] Spawning claude...
21:01:32 [agent:293:coder-2] Done.
21:01:34 [agent:294:coder-opus] Done.
21:01:41 [agents] Server-owned completion for '293:coder-2': gates_passed=true
21:01:41 [ERROR] Failed to start qa agent for '293': Agent 'qa' is already running on story '292'
21:01:48 [agents] Server-owned completion for '294:coder-opus': gates_passed=true
21:01:48 [ERROR] Failed to start qa agent for '294': Agent 'qa' is already running on story '292'
21:08:18 [agents] Server-owned completion for '292:qa': gates_passed=true
(293 and 294 are never picked up)
```
## Actual Result
Stories 293 and 294 stuck in QA with no agent after 292's QA agent was busy. The pipeline status shows them in `3_qa` with `agent: null` indefinitely.
## Expected Result
When a QA agent finishes a story, `auto_assign_available_work` should be called to scan for unassigned stories in all active stages and assign free agents. Stories 293 and 294 should get QA agents as soon as the QA agent finishes with 292.
## Suggested Fix
In the server-owned completion handler (the code path that runs after an agent's process exits), call `auto_assign_available_work()` after processing the completed story. This ensures that when any agent becomes free, the server immediately looks for pending work to assign it to.
The relevant code is in `server/src/agents/pool.rs` — the `handle_agent_completion` path (around line 804-950) and `auto_assign_available_work` (around line 1437-1559).
## Acceptance Criteria
- [ ] When an agent completes (any stage), `auto_assign_available_work` is called to pick up pending stories
- [ ] Stories that failed agent assignment due to busy agents are picked up when agents become available
- [ ] Server logs when a story is queued for retry vs permanently failed
- [ ] Multiple stories completing QA sequentially works correctly (story A finishes QA → story B gets QA agent)

View File

@@ -0,0 +1,54 @@
---
name: "Track per-agent token usage for cost visibility and optimisation"
---
# Story 296: Track per-agent token usage for cost visibility and optimisation
## User Story
As a project owner, I want to see how many tokens each agent consumes per story, so that I can identify expensive operations and optimise token usage across the pipeline.
## Acceptance Criteria
- [ ] Implement per-agent token tracking that captures input tokens, output tokens, and cache tokens for each agent run
- [ ] Token usage is recorded per story and per agent (e.g. coder-1 on story 293 used X tokens)
- [ ] Running totals are visible — either via MCP tool, web UI, or both
- [ ] Historical token usage is persisted so it survives server restarts (e.g. in story files or a separate log)
- [ ] Data is structured to support later analysis (e.g. which agent types are most expensive, which stories cost the most)
## Research Notes
Claude Code's JSON stream already emits all the data we need. No external library required.
**Data available in the `result` event at end of each agent session:**
```json
{
"type": "result",
"total_cost_usd": 1.57,
"usage": {
"input_tokens": 7,
"output_tokens": 475,
"cache_creation_input_tokens": 185020,
"cache_read_input_tokens": 810585
},
"modelUsage": {
"claude-opus-4-6[1m]": {
"inputTokens": 7,
"outputTokens": 475,
"cacheReadInputTokens": 810585,
"cacheCreationInputTokens": 185020,
"costUSD": 1.57
}
}
}
```
**Where to hook in:**
- `server/src/llm/providers/claude_code.rs` — `process_json_event()` already parses the JSON stream but currently ignores usage data from the `result` event
- Parse `usage` + `total_cost_usd` from the `result` event and pipe it to the agent completion handler in `server/src/agents/pool.rs`
**No external libraries needed** — Anthropic SDK, LiteLLM, Helicone, Langfuse etc. are all overkill since we have direct access to Claude Code's output stream.
## Out of Scope
- TBD

View File

@@ -0,0 +1,20 @@
---
name: "Improve bot status command formatting"
---
# Story 297: Improve bot status command formatting
## User Story
As a user reading the bot's status output in Matrix, I want to see clean story numbers and titles (not filenames), with agent assignments shown inline, so that the output is easy to scan at a glance.
## Acceptance Criteria
- [ ] Status output shows story number and title (e.g. '293 — Register all bot commands') not the full filename stem
- [ ] Each story shows which agent is working on it if one is assigned (e.g. 'coder-1 (sonnet)')
- [ ] Stories with no agent assigned show no agent info rather than cluttering the output
- [ ] Output is compact and scannable in a Matrix chat window
## Out of Scope
- TBD

View File

@@ -0,0 +1,25 @@
---
name: "Bot htop command with live-updating process dashboard"
---
# Story 298: Bot htop command with live-updating process dashboard
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} htop" and see a live-updating dashboard of system load and agent processes, so that I can monitor resource usage without needing terminal access.
## Acceptance Criteria
- [ ] '{bot_name} htop' sends an initial status message showing load average and per-agent process info (CPU, memory, story assignment)
- [ ] Message is edited every 5 seconds with updated stats
- [ ] Only shows processes related to the project (agent PIDs and their child process trees)
- [ ] '{bot_name} htop stop' stops the live updating and sends a final 'monitoring stopped' edit
- [ ] Works regardless of what language/toolchain the agents are using (monitors by PID tree, not by process name)
- [ ] Uses Matrix message editing (replacement events) to update in place
- [ ] Only one htop session per room at a time — a second '{bot_name} htop' stops the existing session and starts a fresh one
- [ ] Auto-stops after 5 minutes by default to prevent runaway editing
- [ ] Optional timeout override: '{bot_name} htop 10m' to set a custom duration
## Out of Scope
- TBD

View File

@@ -0,0 +1,22 @@
---
name: "Bot git status command shows working tree and branch info"
---
# Story 299: Bot git status command shows working tree and branch info
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} git" and see the current git status (branch, uncommitted changes, how far ahead/behind remote), so that I can check the repo state without terminal access.
## Acceptance Criteria
- [ ] '{bot_name} git' displays current branch name
- [ ] Shows count of uncommitted changes (staged and unstaged) with filenames
- [ ] Shows how many commits ahead/behind the remote branch
- [ ] Output is formatted compactly for Matrix chat
- [ ] Registered in the command registry in commands.rs so it appears in help output
- [ ] Handled at bot level without LLM invocation
## Out of Scope
- TBD

View File

@@ -0,0 +1,21 @@
---
name: "Show token cost badge on pipeline board work items"
---
# Story 300: Show token cost badge on pipeline board work items
## User Story
As a project owner viewing the pipeline board, I want to see the total token cost for each work item displayed as a badge, so that I can quickly spot expensive stories at a glance.
## Acceptance Criteria
- [ ] Each work item on the pipeline board shows its total cost in USD as a small badge
- [ ] Cost is fetched from the token_usage.jsonl data via a new API endpoint
- [ ] Items with no recorded usage show no badge (not $0.00)
- [ ] Cost updates when the pipeline refreshes (e.g. after an agent completes)
- [ ] Expanded work item detail panel shows per-agent cost breakdown (coder, QA, mergemaster) with token counts
## Out of Scope
- TBD

View File

@@ -0,0 +1,21 @@
---
name: "Dedicated token usage page in web UI"
---
# Story 301: Dedicated token usage page in web UI
## User Story
As a project owner, I want a dedicated token usage page in the web UI that shows per-story and per-agent cost breakdowns with totals, so that I can analyse where tokens are being spent and identify optimisation opportunities.
## Acceptance Criteria
- [ ] New page/panel accessible from the main navigation
- [ ] Shows a table of all recorded agent sessions with story, agent name, model, token counts, and cost
- [ ] Sortable by cost, story, agent, or date
- [ ] Shows summary totals: total cost, cost by agent type (coder vs QA vs mergemaster), cost by model (opus vs sonnet)
- [ ] Data loads from the token_usage.jsonl log via API endpoint
## Out of Scope
- TBD

View File

@@ -0,0 +1,22 @@
---
name: "Bot cost command shows total and per-story token spend"
---
# Story 302: Bot cost command shows total and per-story token spend
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} cost" to see total token spend and the top most expensive stories, so that I can check burn rate from my phone.
## Acceptance Criteria
- [ ] '{bot_name} cost' shows total spend for the last 24 hours
- [ ] Shows top 5 most expensive stories from the last 24 hours with their costs
- [ ] Shows cost breakdown by agent type (coder, QA, mergemaster) for the last 24 hours
- [ ] Also shows an all-time total for context
- [ ] Registered in the command registry so it appears in help output
- [ ] Handled at bot level without LLM invocation
## Out of Scope
- TBD

View File

@@ -0,0 +1,21 @@
---
name: "Bot cost command with story filter for detailed breakdown"
---
# Story 303: Bot cost command with story filter for detailed breakdown
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} cost 293" to see a detailed token breakdown for a specific story, so that I can understand where the tokens went on an expensive item.
## Acceptance Criteria
- [ ] '{bot_name} cost {story_number}' shows all agent sessions for that story
- [ ] Each session shows agent name, model, input/output/cache tokens, and cost in USD
- [ ] Shows total cost for the story at the bottom
- [ ] Registered in the command registry (can share the 'cost' command with args parsing)
- [ ] Returns a friendly message if no usage data exists for the story
## Out of Scope
- TBD

View File

@@ -0,0 +1,23 @@
---
name: "MCP tool to move stories between pipeline stages"
---
# Story 304: MCP tool to move stories between pipeline stages
## User Story
As a bot operator (Timmy), I want an MCP tool that moves stories between pipeline stages, so that I don't need shell mv permissions to manage the pipeline.
## Acceptance Criteria
- [ ] New MCP tool 'move_story' accepts story_id and target_stage (e.g. 'backlog', 'current', 'qa', 'merge', 'done')
- [ ] Validates the story exists before moving
- [ ] Handles the file move between stage directories
- [ ] Returns a confirmation message with the old and new stage
- [ ] Works for stories, bugs, spikes, and refactors
- [ ] Replaces the need for shell mv commands to move story files
- [ ] Tool description tells bots to prefer specific tools (accept_story, move_story_to_merge, request_qa) when available, and use move_story only for arbitrary moves that lack a dedicated tool (e.g. moving to backlog, moving ghost stories back to current)
## Out of Scope
- TBD

View File

@@ -0,0 +1,21 @@
---
name: "Bot show command displays story text in chat"
---
# Story 305: Bot show command displays story text in chat
## User Story
As a project owner in a Matrix room, I want to type "{bot_name} show {story_number}" and see the full story text displayed in chat, so that I can review story details without accessing the file system.
## Acceptance Criteria
- [ ] '{bot_name} show {number}' finds the story/bug/spike by number across all pipeline stages and displays its full markdown content
- [ ] Output is formatted for readability in Matrix
- [ ] Returns a friendly message if no story with that number exists
- [ ] Registered in the command registry so it appears in help output
- [ ] Handled at bot level without LLM invocation
## Out of Scope
- TBD

View File

@@ -0,0 +1,26 @@
---
name: "Replace manual_qa boolean with configurable qa mode field"
---
# Story 306: Replace manual_qa boolean with configurable qa mode field
## User Story
As a project owner, I want to configure QA mode per-story and set a project-wide default, so that I can choose between human review, server-only gate checks, or full agent QA on a per-story basis.
## Acceptance Criteria
- [ ] Replace manual_qa: true/false front matter field with qa: human|server|agent
- [ ] qa: server — skip the QA agent entirely, rely on server's automated gate checks (clippy + tests + coverage). If gates pass, advance straight to merge
- [ ] qa: agent — current behavior, spin up a QA agent (Claude session) to review code and run gates
- [ ] qa: human — hold in QA for human approval after server gates pass (current manual_qa: true behavior)
- [ ] Default qa mode is configurable in project.toml (e.g. default_qa = "server")
- [ ] Set the initial default in project.toml to "server"
- [ ] Per-story front matter qa field overrides the project default
- [ ] Backwards compatible: existing stories without a qa field use the project default
- [ ] Remove the old manual_qa field handling and replace with the new qa field throughout pool.rs, story_metadata.rs, and any other references
- [ ] Update bot.toml.example and project.toml documentation to reflect the new field
## Out of Scope
- TBD

View File

@@ -0,0 +1,25 @@
---
name: "Configurable coder pool size and default model in project.toml"
agent: coder-opus
---
# Story 307: Configurable coder pool size and default model in project.toml
## User Story
As a project owner, I want to configure the number of concurrent coder agents and their default model in project.toml, so that I can control resource usage and cost while still being able to override per-story when needed.
## Acceptance Criteria
- [ ] New project.toml setting: default_coder_model (e.g. 'sonnet') determines which model is used for coder agents by default
- [ ] New project.toml setting: max_coders (e.g. 3) limits concurrent coder agent slots
- [ ] Add one more sonnet coder to the agent config (coder-3) for a total of 3 sonnet coders
- [ ] When all coder slots are full, new stories wait in current until a slot frees up
- [ ] Per-story front matter agent field still overrides the default (e.g. agent: coder-opus assigns opus)
- [ ] Opus coders are only used when explicitly requested via front matter
- [ ] QA and mergemaster limits are unchanged (not configurable via this story)
- [ ] auto_assign_available_work respects the max_coders limit
## Out of Scope
- TBD

View File

@@ -0,0 +1,20 @@
---
name: "Show token cost breakdown in expanded work item detail panel"
---
# Story 309: Show token cost breakdown in expanded work item detail panel
## User Story
As a project owner viewing a work item in the web UI, I want to see a per-agent token cost breakdown in the expanded detail panel, so that I can understand where tokens were spent on that story.
## Acceptance Criteria
- [ ] WorkItemDetailPanel fetches token cost data using the existing /work-items/:story_id/token-cost endpoint
- [ ] Shows per-agent session breakdown: agent name, model, token counts (input/output/cache), cost in USD
- [ ] Shows total cost for the story
- [ ] Shows empty state when no token data exists for the story
## Out of Scope
- TBD

34
Cargo.lock generated
View File

@@ -4026,7 +4026,7 @@ dependencies = [
"tempfile", "tempfile",
"tokio", "tokio",
"tokio-tungstenite 0.29.0", "tokio-tungstenite 0.29.0",
"toml 1.0.6+spec-1.1.0", "toml 1.0.7+spec-1.1.0",
"uuid", "uuid",
"wait-timeout", "wait-timeout",
"walkdir", "walkdir",
@@ -4367,22 +4367,22 @@ dependencies = [
"serde_spanned", "serde_spanned",
"toml_datetime 0.7.5+spec-1.1.0", "toml_datetime 0.7.5+spec-1.1.0",
"toml_parser", "toml_parser",
"winnow", "winnow 0.7.14",
] ]
[[package]] [[package]]
name = "toml" name = "toml"
version = "1.0.6+spec-1.1.0" version = "1.0.7+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "399b1124a3c9e16766831c6bba21e50192572cdd98706ea114f9502509686ffc" checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"serde_core", "serde_core",
"serde_spanned", "serde_spanned",
"toml_datetime 1.0.0+spec-1.1.0", "toml_datetime 1.0.1+spec-1.1.0",
"toml_parser", "toml_parser",
"toml_writer", "toml_writer",
"winnow", "winnow 1.0.0",
] ]
[[package]] [[package]]
@@ -4396,9 +4396,9 @@ dependencies = [
[[package]] [[package]]
name = "toml_datetime" name = "toml_datetime"
version = "1.0.0+spec-1.1.0" version = "1.0.1+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e" checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
dependencies = [ dependencies = [
"serde_core", "serde_core",
] ]
@@ -4412,23 +4412,23 @@ dependencies = [
"indexmap", "indexmap",
"toml_datetime 0.7.5+spec-1.1.0", "toml_datetime 0.7.5+spec-1.1.0",
"toml_parser", "toml_parser",
"winnow", "winnow 0.7.14",
] ]
[[package]] [[package]]
name = "toml_parser" name = "toml_parser"
version = "1.0.9+spec-1.1.0" version = "1.0.10+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
dependencies = [ dependencies = [
"winnow", "winnow 1.0.0",
] ]
[[package]] [[package]]
name = "toml_writer" name = "toml_writer"
version = "1.0.6+spec-1.1.0" version = "1.0.7+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
[[package]] [[package]]
name = "tower" name = "tower"
@@ -5444,6 +5444,12 @@ dependencies = [
"memchr", "memchr",
] ]
[[package]]
name = "winnow"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
[[package]] [[package]]
name = "winreg" name = "winreg"
version = "0.10.1" version = "0.10.1"

View File

@@ -24,7 +24,7 @@ serde_yaml = "0.9"
strip-ansi-escapes = "0.2" strip-ansi-escapes = "0.2"
tempfile = "3" tempfile = "3"
tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] } tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] }
toml = "1.0.6" toml = "1.0.7"
uuid = { version = "1.22.0", features = ["v4", "serde"] } uuid = { version = "1.22.0", features = ["v4", "serde"] }
tokio-tungstenite = "0.29.0" tokio-tungstenite = "0.29.0"
walkdir = "2.5.0" walkdir = "2.5.0"

View File

@@ -4,7 +4,7 @@
<meta charset="UTF-8" /> <meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" /> <link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Story Kit</title> <title>Storkit</title>
</head> </head>
<body> <body>

View File

@@ -7,6 +7,7 @@ import "./App.css";
function App() { function App() {
const [projectPath, setProjectPath] = React.useState<string | null>(null); const [projectPath, setProjectPath] = React.useState<string | null>(null);
const [_view, setView] = React.useState<"chat" | "token-usage">("chat");
const [isCheckingProject, setIsCheckingProject] = React.useState(true); const [isCheckingProject, setIsCheckingProject] = React.useState(true);
const [errorMsg, setErrorMsg] = React.useState<string | null>(null); const [errorMsg, setErrorMsg] = React.useState<string | null>(null);
const [pathInput, setPathInput] = React.useState(""); const [pathInput, setPathInput] = React.useState("");
@@ -120,6 +121,7 @@ function App() {
try { try {
await api.closeProject(); await api.closeProject();
setProjectPath(null); setProjectPath(null);
setView("chat");
} catch (e) { } catch (e) {
console.error(e); console.error(e);
} }

View File

@@ -128,8 +128,7 @@ export function subscribeAgentStream(
onEvent: (event: AgentEvent) => void, onEvent: (event: AgentEvent) => void,
onError?: (error: Event) => void, onError?: (error: Event) => void,
): () => void { ): () => void {
const host = import.meta.env.DEV ? "http://127.0.0.1:3001" : ""; const url = `/agents/${encodeURIComponent(storyId)}/${encodeURIComponent(agentName)}/stream`;
const url = `${host}/agents/${encodeURIComponent(storyId)}/${encodeURIComponent(agentName)}/stream`;
const eventSource = new EventSource(url); const eventSource = new EventSource(url);

View File

@@ -33,6 +33,8 @@ export interface PipelineStageItem {
error: string | null; error: string | null;
merge_failure: string | null; merge_failure: string | null;
agent: AgentAssignment | null; agent: AgentAssignment | null;
review_hold: boolean | null;
qa: string | null;
} }
export interface PipelineState { export interface PipelineState {
@@ -83,7 +85,9 @@ export type WsResponse =
/** Streaming token from a /btw side question response. */ /** Streaming token from a /btw side question response. */
| { type: "side_question_token"; content: string } | { type: "side_question_token"; content: string }
/** Final signal that the /btw side question has been fully answered. */ /** Final signal that the /btw side question has been fully answered. */
| { type: "side_question_done"; response: string }; | { type: "side_question_done"; response: string }
/** A single server log entry (bulk on connect, then live). */
| { type: "log_entry"; timestamp: string; level: string; message: string };
export interface ProviderConfig { export interface ProviderConfig {
provider: string; provider: string;
@@ -139,6 +143,37 @@ export interface SearchResult {
matches: number; matches: number;
} }
export interface AgentCostEntry {
agent_name: string;
model: string | null;
input_tokens: number;
output_tokens: number;
cache_creation_input_tokens: number;
cache_read_input_tokens: number;
total_cost_usd: number;
}
export interface TokenCostResponse {
total_cost_usd: number;
agents: AgentCostEntry[];
}
export interface TokenUsageRecord {
story_id: string;
agent_name: string;
model: string | null;
timestamp: string;
input_tokens: number;
output_tokens: number;
cache_creation_input_tokens: number;
cache_read_input_tokens: number;
total_cost_usd: number;
}
export interface AllTokenUsageResponse {
records: TokenUsageRecord[];
}
export interface CommandOutput { export interface CommandOutput {
stdout: string; stdout: string;
stderr: string; stderr: string;
@@ -312,8 +347,52 @@ export const api = {
baseUrl, baseUrl,
); );
}, },
getTokenCost(storyId: string, baseUrl?: string) {
return requestJson<TokenCostResponse>(
`/work-items/${encodeURIComponent(storyId)}/token-cost`,
{},
baseUrl,
);
},
getAllTokenUsage(baseUrl?: string) {
return requestJson<AllTokenUsageResponse>("/token-usage", {}, baseUrl);
},
/** Approve a story in QA, moving it to merge. */
approveQa(storyId: string) {
return callMcpTool("approve_qa", { story_id: storyId });
},
/** Reject a story in QA, moving it back to current with notes. */
rejectQa(storyId: string, notes: string) {
return callMcpTool("reject_qa", { story_id: storyId, notes });
},
/** Launch the QA app for a story's worktree. */
launchQaApp(storyId: string) {
return callMcpTool("launch_qa_app", { story_id: storyId });
},
}; };
async function callMcpTool(
toolName: string,
args: Record<string, unknown>,
): Promise<string> {
const res = await fetch("/mcp", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
jsonrpc: "2.0",
id: 1,
method: "tools/call",
params: { name: toolName, arguments: args },
}),
});
const json = await res.json();
if (json.error) {
throw new Error(json.error.message);
}
const text = json.result?.content?.[0]?.text ?? "";
return text;
}
export class ChatWebSocket { export class ChatWebSocket {
private static sharedSocket: WebSocket | null = null; private static sharedSocket: WebSocket | null = null;
private static refCount = 0; private static refCount = 0;
@@ -340,6 +419,11 @@ export class ChatWebSocket {
private onOnboardingStatus?: (needsOnboarding: boolean) => void; private onOnboardingStatus?: (needsOnboarding: boolean) => void;
private onSideQuestionToken?: (content: string) => void; private onSideQuestionToken?: (content: string) => void;
private onSideQuestionDone?: (response: string) => void; private onSideQuestionDone?: (response: string) => void;
private onLogEntry?: (
timestamp: string,
level: string,
message: string,
) => void;
private connected = false; private connected = false;
private closeTimer?: number; private closeTimer?: number;
private wsPath = DEFAULT_WS_PATH; private wsPath = DEFAULT_WS_PATH;
@@ -425,6 +509,8 @@ export class ChatWebSocket {
this.onSideQuestionToken?.(data.content); this.onSideQuestionToken?.(data.content);
if (data.type === "side_question_done") if (data.type === "side_question_done")
this.onSideQuestionDone?.(data.response); this.onSideQuestionDone?.(data.response);
if (data.type === "log_entry")
this.onLogEntry?.(data.timestamp, data.level, data.message);
if (data.type === "pong") { if (data.type === "pong") {
window.clearTimeout(this.heartbeatTimeout); window.clearTimeout(this.heartbeatTimeout);
this.heartbeatTimeout = undefined; this.heartbeatTimeout = undefined;
@@ -480,6 +566,7 @@ export class ChatWebSocket {
onOnboardingStatus?: (needsOnboarding: boolean) => void; onOnboardingStatus?: (needsOnboarding: boolean) => void;
onSideQuestionToken?: (content: string) => void; onSideQuestionToken?: (content: string) => void;
onSideQuestionDone?: (response: string) => void; onSideQuestionDone?: (response: string) => void;
onLogEntry?: (timestamp: string, level: string, message: string) => void;
}, },
wsPath = DEFAULT_WS_PATH, wsPath = DEFAULT_WS_PATH,
) { ) {
@@ -497,6 +584,7 @@ export class ChatWebSocket {
this.onOnboardingStatus = handlers.onOnboardingStatus; this.onOnboardingStatus = handlers.onOnboardingStatus;
this.onSideQuestionToken = handlers.onSideQuestionToken; this.onSideQuestionToken = handlers.onSideQuestionToken;
this.onSideQuestionDone = handlers.onSideQuestionDone; this.onSideQuestionDone = handlers.onSideQuestionDone;
this.onLogEntry = handlers.onLogEntry;
this.wsPath = wsPath; this.wsPath = wsPath;
this.shouldReconnect = true; this.shouldReconnect = true;

View File

@@ -13,6 +13,8 @@ import { ChatInput } from "./ChatInput";
import { HelpOverlay } from "./HelpOverlay"; import { HelpOverlay } from "./HelpOverlay";
import { LozengeFlyProvider } from "./LozengeFlyContext"; import { LozengeFlyProvider } from "./LozengeFlyContext";
import { MessageItem } from "./MessageItem"; import { MessageItem } from "./MessageItem";
import type { LogEntry } from "./ServerLogsPanel";
import { ServerLogsPanel } from "./ServerLogsPanel";
import { SideQuestionOverlay } from "./SideQuestionOverlay"; import { SideQuestionOverlay } from "./SideQuestionOverlay";
import { StagePanel } from "./StagePanel"; import { StagePanel } from "./StagePanel";
import { WorkItemDetailPanel } from "./WorkItemDetailPanel"; import { WorkItemDetailPanel } from "./WorkItemDetailPanel";
@@ -200,6 +202,9 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
const [agentConfigVersion, setAgentConfigVersion] = useState(0); const [agentConfigVersion, setAgentConfigVersion] = useState(0);
const [agentStateVersion, setAgentStateVersion] = useState(0); const [agentStateVersion, setAgentStateVersion] = useState(0);
const [pipelineVersion, setPipelineVersion] = useState(0); const [pipelineVersion, setPipelineVersion] = useState(0);
const [storyTokenCosts, setStoryTokenCosts] = useState<Map<string, number>>(
new Map(),
);
const [needsOnboarding, setNeedsOnboarding] = useState(false); const [needsOnboarding, setNeedsOnboarding] = useState(false);
const onboardingTriggeredRef = useRef(false); const onboardingTriggeredRef = useRef(false);
const [selectedWorkItemId, setSelectedWorkItemId] = useState<string | null>( const [selectedWorkItemId, setSelectedWorkItemId] = useState<string | null>(
@@ -214,6 +219,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
loading: boolean; loading: boolean;
} | null>(null); } | null>(null);
const [showHelp, setShowHelp] = useState(false); const [showHelp, setShowHelp] = useState(false);
const [serverLogs, setServerLogs] = useState<LogEntry[]>([]);
// Ref so stale WebSocket callbacks can read the current queued messages // Ref so stale WebSocket callbacks can read the current queued messages
const queuedMessagesRef = useRef<{ id: string; text: string }[]>([]); const queuedMessagesRef = useRef<{ id: string; text: string }[]>([]);
const queueIdCounterRef = useRef(0); const queueIdCounterRef = useRef(0);
@@ -360,6 +366,29 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
onPipelineState: (state) => { onPipelineState: (state) => {
setPipeline(state); setPipeline(state);
setPipelineVersion((v) => v + 1); setPipelineVersion((v) => v + 1);
const allItems = [
...state.backlog,
...state.current,
...state.qa,
...state.merge,
...state.done,
];
for (const item of allItems) {
api
.getTokenCost(item.story_id)
.then((cost) => {
if (cost.total_cost_usd > 0) {
setStoryTokenCosts((prev) => {
const next = new Map(prev);
next.set(item.story_id, cost.total_cost_usd);
return next;
});
}
})
.catch(() => {
// Silently ignore — cost data may not exist yet.
});
}
}, },
onPermissionRequest: (requestId, toolName, toolInput) => { onPermissionRequest: (requestId, toolName, toolInput) => {
setPermissionQueue((prev) => [ setPermissionQueue((prev) => [
@@ -402,6 +431,9 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
prev ? { ...prev, response, loading: false } : prev, prev ? { ...prev, response, loading: false } : prev,
); );
}, },
onLogEntry: (timestamp, level, message) => {
setServerLogs((prev) => [...prev, { timestamp, level, message }]);
},
}); });
return () => { return () => {
@@ -813,7 +845,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
fontSize: "1.1rem", fontSize: "1.1rem",
}} }}
> >
Welcome to Story Kit Welcome to Storkit
</h3> </h3>
<p <p
style={{ style={{
@@ -999,28 +1031,34 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
<StagePanel <StagePanel
title="Done" title="Done"
items={pipeline.done ?? []} items={pipeline.done ?? []}
costs={storyTokenCosts}
onItemClick={(item) => setSelectedWorkItemId(item.story_id)} onItemClick={(item) => setSelectedWorkItemId(item.story_id)}
/> />
<StagePanel <StagePanel
title="To Merge" title="To Merge"
items={pipeline.merge} items={pipeline.merge}
costs={storyTokenCosts}
onItemClick={(item) => setSelectedWorkItemId(item.story_id)} onItemClick={(item) => setSelectedWorkItemId(item.story_id)}
/> />
<StagePanel <StagePanel
title="QA" title="QA"
items={pipeline.qa} items={pipeline.qa}
costs={storyTokenCosts}
onItemClick={(item) => setSelectedWorkItemId(item.story_id)} onItemClick={(item) => setSelectedWorkItemId(item.story_id)}
/> />
<StagePanel <StagePanel
title="Current" title="Current"
items={pipeline.current} items={pipeline.current}
costs={storyTokenCosts}
onItemClick={(item) => setSelectedWorkItemId(item.story_id)} onItemClick={(item) => setSelectedWorkItemId(item.story_id)}
/> />
<StagePanel <StagePanel
title="Backlog" title="Backlog"
items={pipeline.backlog} items={pipeline.backlog}
costs={storyTokenCosts}
onItemClick={(item) => setSelectedWorkItemId(item.story_id)} onItemClick={(item) => setSelectedWorkItemId(item.story_id)}
/> />
<ServerLogsPanel logs={serverLogs} />
</> </>
)} )}
</LozengeFlyProvider> </LozengeFlyProvider>

View File

@@ -136,9 +136,9 @@ describe("ChatHeader", () => {
expect(screen.getByText("Built: 2026-01-01 00:00")).toBeInTheDocument(); expect(screen.getByText("Built: 2026-01-01 00:00")).toBeInTheDocument();
}); });
it("displays Story Kit branding in the header", () => { it("displays Storkit branding in the header", () => {
render(<ChatHeader {...makeProps()} />); render(<ChatHeader {...makeProps()} />);
expect(screen.getByText("Story Kit")).toBeInTheDocument(); expect(screen.getByText("Storkit")).toBeInTheDocument();
}); });
it("labels the claude-pty optgroup as 'Claude Code'", () => { it("labels the claude-pty optgroup as 'Claude Code'", () => {

View File

@@ -82,7 +82,7 @@ export function ChatHeader({
letterSpacing: "0.02em", letterSpacing: "0.02em",
}} }}
> >
Story Kit Storkit
</span> </span>
<div <div
title={projectPath} title={projectPath}

View File

@@ -59,6 +59,8 @@ describe("AgentLozenge fixed intrinsic width", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: "sonnet", status: "running" }, agent: { agent_name: "coder-1", model: "sonnet", status: "running" },
review_hold: null,
qa: null,
}, },
]; ];
const pipeline = makePipeline({ current: items }); const pipeline = makePipeline({ current: items });
@@ -111,6 +113,8 @@ describe("LozengeFlyProvider fly-in visibility", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -151,6 +155,8 @@ describe("LozengeFlyProvider fly-in visibility", () => {
model: null, model: null,
status: "running", status: "running",
}, },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -213,6 +219,8 @@ describe("LozengeFlyProvider fly-in clone", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: "sonnet", status: "running" }, agent: { agent_name: "coder-1", model: "sonnet", status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -254,6 +262,8 @@ describe("LozengeFlyProvider fly-in clone", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -301,6 +311,8 @@ describe("LozengeFlyProvider fly-in clone", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -370,6 +382,8 @@ describe("LozengeFlyProvider fly-out", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: "haiku", status: "completed" }, agent: { agent_name: "coder-1", model: "haiku", status: "completed" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -395,6 +409,8 @@ describe("LozengeFlyProvider fly-out", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -427,6 +443,8 @@ describe("AgentLozenge idle vs active appearance", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
]; ];
const { container } = render( const { container } = render(
@@ -451,6 +469,8 @@ describe("AgentLozenge idle vs active appearance", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "pending" }, agent: { agent_name: "coder-1", model: null, status: "pending" },
review_hold: null,
qa: null,
}, },
]; ];
const { container } = render( const { container } = render(
@@ -475,6 +495,8 @@ describe("AgentLozenge idle vs active appearance", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
]; ];
const { container } = render( const { container } = render(
@@ -526,6 +548,8 @@ describe("hiddenRosterAgents: assigned agents are absent from roster", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -547,6 +571,8 @@ describe("hiddenRosterAgents: assigned agents are absent from roster", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -569,6 +595,8 @@ describe("hiddenRosterAgents: assigned agents are absent from roster", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -629,6 +657,8 @@ describe("hiddenRosterAgents: fly-out keeps agent hidden until clone lands", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "completed" }, agent: { agent_name: "coder-1", model: null, status: "completed" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -640,6 +670,8 @@ describe("hiddenRosterAgents: fly-out keeps agent hidden until clone lands", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -682,6 +714,8 @@ describe("hiddenRosterAgents: fly-out keeps agent hidden until clone lands", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "completed" }, agent: { agent_name: "coder-1", model: null, status: "completed" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -693,6 +727,8 @@ describe("hiddenRosterAgents: fly-out keeps agent hidden until clone lands", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -766,6 +802,8 @@ describe("LozengeFlyProvider agent swap (name change)", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: "sonnet", status: "running" }, agent: { agent_name: "coder-1", model: "sonnet", status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -777,6 +815,8 @@ describe("LozengeFlyProvider agent swap (name change)", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-2", model: "haiku", status: "running" }, agent: { agent_name: "coder-2", model: "haiku", status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -861,6 +901,8 @@ describe("LozengeFlyProvider fly-out without roster element", () => {
model: null, model: null,
status: "completed", status: "completed",
}, },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -872,6 +914,8 @@ describe("LozengeFlyProvider fly-out without roster element", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -943,6 +987,8 @@ describe("FlyingLozengeClone initial non-flying render", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -1018,6 +1064,8 @@ describe("Bug 137: no animation actions lost during rapid pipeline updates", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: "sonnet", status: "running" }, agent: { agent_name: "coder-1", model: "sonnet", status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -1029,6 +1077,8 @@ describe("Bug 137: no animation actions lost during rapid pipeline updates", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-2", model: "haiku", status: "running" }, agent: { agent_name: "coder-2", model: "haiku", status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -1095,6 +1145,8 @@ describe("Bug 137: no animation actions lost during rapid pipeline updates", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-1", model: null, status: "running" }, agent: { agent_name: "coder-1", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -1106,6 +1158,8 @@ describe("Bug 137: no animation actions lost during rapid pipeline updates", ()
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: "coder-2", model: null, status: "running" }, agent: { agent_name: "coder-2", model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });
@@ -1191,6 +1245,8 @@ describe("Bug 137: animations remain functional through sustained agent activity
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: { agent_name: agentName, model: null, status: "running" }, agent: { agent_name: agentName, model: null, status: "running" },
review_hold: null,
qa: null,
}, },
], ],
}); });

View File

@@ -0,0 +1,246 @@
import * as React from "react";
const { useCallback, useEffect, useRef, useState } = React;
/** A single server log line as delivered over the WebSocket `onLogEntry` callback. */
export interface LogEntry {
  // ISO-8601 timestamp string; rendered with "T"/"Z" stripped.
  timestamp: string;
  // Severity label, e.g. "INFO" | "WARN" | "ERROR" (compared case-insensitively).
  level: string;
  message: string;
}
/** Props for {@link ServerLogsPanel}. */
interface ServerLogsPanelProps {
  // Full, append-only log list; filtering happens inside the panel.
  logs: LogEntry[];
}
/**
 * Map a log severity to its display colour:
 * ERROR → red, WARN → yellow, anything else → green.
 * The comparison is case-insensitive.
 */
function levelColor(level: string): string {
  const normalized = level.toUpperCase();
  if (normalized === "ERROR") return "#e06c75";
  if (normalized === "WARN") return "#e5c07b";
  return "#98c379";
}
/**
 * Collapsible "Server Logs" panel.
 *
 * Renders a toggleable header with an entry count, then (when open) a
 * free-text filter input, severity toggle buttons, and a scrollable list
 * of log entries. New entries keep the list pinned to the bottom unless
 * the user has scrolled up; scrolling back to the bottom re-enables the
 * auto-scroll pinning.
 */
export function ServerLogsPanel({ logs }: ServerLogsPanelProps) {
  const [isOpen, setIsOpen] = useState(false);
  // Free-text filter, matched case-insensitively against the message and
  // literally against the timestamp.
  const [filter, setFilter] = useState("");
  // Active severity toggle: "ALL" | "INFO" | "WARN" | "ERROR".
  const [severityFilter, setSeverityFilter] = useState<string>("ALL");
  const scrollRef = useRef<HTMLDivElement>(null);
  // True while the user has scrolled away from the bottom (suppresses auto-scroll).
  const userScrolledUpRef = useRef(false);
  // Last observed scrollTop; used by handleScroll to detect upward scrolling.
  const lastScrollTopRef = useRef(0);
  // Entries matching both the severity toggle and the free-text filter.
  const filteredLogs = logs.filter((entry) => {
    const matchesSeverity =
      severityFilter === "ALL" || entry.level.toUpperCase() === severityFilter;
    const matchesFilter =
      filter === "" ||
      entry.message.toLowerCase().includes(filter.toLowerCase()) ||
      entry.timestamp.includes(filter);
    return matchesSeverity && matchesFilter;
  });
  // Pin the list to the newest entry and remember the resulting position.
  const scrollToBottom = useCallback(() => {
    const el = scrollRef.current;
    if (el) {
      el.scrollTop = el.scrollHeight;
      lastScrollTopRef.current = el.scrollTop;
    }
  }, []);
  // Auto-scroll when new entries arrive (unless user scrolled up).
  useEffect(() => {
    if (!isOpen) return;
    if (!userScrolledUpRef.current) {
      scrollToBottom();
    }
  }, [filteredLogs.length, isOpen, scrollToBottom]);
  // Scrolling upward disables auto-scroll; reaching the bottom (within 5px)
  // re-enables it.
  const handleScroll = () => {
    const el = scrollRef.current;
    if (!el) return;
    const isAtBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 5;
    if (el.scrollTop < lastScrollTopRef.current) {
      userScrolledUpRef.current = true;
    }
    if (isAtBottom) {
      userScrolledUpRef.current = false;
    }
    lastScrollTopRef.current = el.scrollTop;
  };
  const severityButtons = ["ALL", "INFO", "WARN", "ERROR"] as const;
  return (
    <div
      data-testid="server-logs-panel"
      style={{
        borderRadius: "8px",
        border: "1px solid #333",
        overflow: "hidden",
      }}
    >
      {/* Header / toggle */}
      <button
        type="button"
        data-testid="server-logs-panel-toggle"
        onClick={() => setIsOpen((v) => !v)}
        style={{
          width: "100%",
          display: "flex",
          alignItems: "center",
          justifyContent: "space-between",
          padding: "8px 12px",
          background: "#1e1e1e",
          border: "none",
          cursor: "pointer",
          color: "#ccc",
          fontSize: "0.85em",
          fontWeight: 600,
          textAlign: "left",
        }}
      >
        <span>Server Logs</span>
        <span style={{ color: "#666", fontSize: "0.85em" }}>
          {logs.length > 0 && (
            <span style={{ marginRight: "8px", color: "#555" }}>
              {logs.length}
            </span>
          )}
          {isOpen ? "▲" : "▼"}
        </span>
      </button>
      {isOpen && (
        <div style={{ background: "#0d1117" }}>
          {/* Filter controls */}
          <div
            style={{
              display: "flex",
              gap: "6px",
              padding: "8px",
              borderBottom: "1px solid #1e1e1e",
              flexWrap: "wrap",
              alignItems: "center",
            }}
          >
            <input
              type="text"
              data-testid="server-logs-filter-input"
              value={filter}
              onChange={(e) => setFilter(e.target.value)}
              placeholder="Filter logs..."
              style={{
                flex: 1,
                minWidth: "80px",
                padding: "4px 8px",
                borderRadius: "4px",
                border: "1px solid #333",
                background: "#161b22",
                color: "#ccc",
                fontSize: "0.8em",
                outline: "none",
              }}
            />
            {severityButtons.map((sev) => (
              <button
                key={sev}
                type="button"
                data-testid={`server-logs-severity-${sev.toLowerCase()}`}
                onClick={() => setSeverityFilter(sev)}
                style={{
                  padding: "3px 8px",
                  borderRadius: "4px",
                  border: "1px solid",
                  borderColor:
                    severityFilter === sev ? levelColor(sev) : "#333",
                  background:
                    severityFilter === sev
                      ? "rgba(255,255,255,0.06)"
                      : "transparent",
                  color:
                    sev === "ALL"
                      ? severityFilter === "ALL"
                        ? "#ccc"
                        : "#555"
                      : levelColor(sev),
                  fontSize: "0.75em",
                  cursor: "pointer",
                  fontWeight: severityFilter === sev ? 700 : 400,
                }}
              >
                {sev}
              </button>
            ))}
          </div>
          {/* Log entries */}
          <div
            ref={scrollRef}
            onScroll={handleScroll}
            data-testid="server-logs-entries"
            style={{
              maxHeight: "240px",
              overflowY: "auto",
              padding: "4px 0",
              fontFamily: "monospace",
              fontSize: "0.75em",
            }}
          >
            {filteredLogs.length === 0 ? (
              <div
                style={{
                  padding: "16px",
                  color: "#444",
                  textAlign: "center",
                  fontSize: "0.9em",
                }}
              >
                No log entries
              </div>
            ) : (
              filteredLogs.map((entry, idx) => (
                <div
                  key={`${entry.timestamp}-${idx}`}
                  style={{
                    display: "flex",
                    gap: "6px",
                    padding: "1px 8px",
                    lineHeight: "1.5",
                    borderBottom: "1px solid #111",
                  }}
                >
                  <span
                    style={{ color: "#444", flexShrink: 0, minWidth: "70px" }}
                  >
                    {entry.timestamp.replace("T", " ").replace("Z", "")}
                  </span>
                  <span
                    style={{
                      color: levelColor(entry.level),
                      flexShrink: 0,
                      minWidth: "38px",
                      fontWeight: 700,
                    }}
                  >
                    {entry.level}
                  </span>
                  <span
                    style={{
                      color: "#c9d1d9",
                      wordBreak: "break-word",
                      whiteSpace: "pre-wrap",
                    }}
                  >
                    {entry.message}
                  </span>
                </div>
              ))
            )}
          </div>
        </div>
      )}
    </div>
  );
}

View File

@@ -17,6 +17,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -37,6 +39,8 @@ describe("StagePanel", () => {
model: "sonnet", model: "sonnet",
status: "running", status: "running",
}, },
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -56,6 +60,8 @@ describe("StagePanel", () => {
model: null, model: null,
status: "running", status: "running",
}, },
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -74,6 +80,8 @@ describe("StagePanel", () => {
model: "haiku", model: "haiku",
status: "pending", status: "pending",
}, },
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="QA" items={items} />); render(<StagePanel title="QA" items={items} />);
@@ -88,6 +96,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -102,6 +112,8 @@ describe("StagePanel", () => {
error: "Missing front matter", error: "Missing front matter",
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Upcoming" items={items} />); render(<StagePanel title="Upcoming" items={items} />);
@@ -116,6 +128,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Upcoming" items={items} />); render(<StagePanel title="Upcoming" items={items} />);
@@ -132,6 +146,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -148,6 +164,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="QA" items={items} />); render(<StagePanel title="QA" items={items} />);
@@ -164,6 +182,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Done" items={items} />); render(<StagePanel title="Done" items={items} />);
@@ -180,6 +200,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Upcoming" items={items} />); render(<StagePanel title="Upcoming" items={items} />);
@@ -199,6 +221,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Current" items={items} />); render(<StagePanel title="Current" items={items} />);
@@ -215,6 +239,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="QA" items={items} />); render(<StagePanel title="QA" items={items} />);
@@ -231,6 +257,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Done" items={items} />); render(<StagePanel title="Done" items={items} />);
@@ -247,6 +275,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: "Squash merge failed: conflicts in Cargo.lock", merge_failure: "Squash merge failed: conflicts in Cargo.lock",
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Merge" items={items} />); render(<StagePanel title="Merge" items={items} />);
@@ -266,6 +296,8 @@ describe("StagePanel", () => {
error: null, error: null,
merge_failure: null, merge_failure: null,
agent: null, agent: null,
review_hold: null,
qa: null,
}, },
]; ];
render(<StagePanel title="Merge" items={items} />); render(<StagePanel title="Merge" items={items} />);

View File

@@ -42,6 +42,8 @@ interface StagePanelProps {
items: PipelineStageItem[]; items: PipelineStageItem[];
emptyMessage?: string; emptyMessage?: string;
onItemClick?: (item: PipelineStageItem) => void; onItemClick?: (item: PipelineStageItem) => void;
/** Map of story_id → total_cost_usd for displaying cost badges. */
costs?: Map<string, number>;
} }
function AgentLozenge({ function AgentLozenge({
@@ -128,6 +130,7 @@ export function StagePanel({
items, items,
emptyMessage = "Empty.", emptyMessage = "Empty.",
onItemClick, onItemClick,
costs,
}: StagePanelProps) { }: StagePanelProps) {
return ( return (
<div <div
@@ -240,6 +243,19 @@ export function StagePanel({
{typeLabel} {typeLabel}
</span> </span>
)} )}
{costs?.has(item.story_id) && (
<span
data-testid={`cost-badge-${item.story_id}`}
style={{
fontSize: "0.65em",
fontWeight: 600,
color: "#e3b341",
marginRight: "8px",
}}
>
${costs.get(item.story_id)?.toFixed(2)}
</span>
)}
{item.name ?? item.story_id} {item.name ?? item.story_id}
</div> </div>
{item.error && ( {item.error && (

View File

@@ -0,0 +1,440 @@
import * as React from "react";
import type { TokenUsageRecord } from "../api/client";
import { api } from "../api/client";
/** Columns of the usage table that support click-to-sort. */
type SortKey =
  | "timestamp"
  | "story_id"
  | "agent_name"
  | "model"
  | "total_cost_usd";
/** Sort direction for the active column. */
type SortDir = "asc" | "desc";
/**
 * Format a USD amount, scaling decimal precision to magnitude:
 * exactly 0 → "$0.00", below $0.001 → 6 decimals, below $0.01 → 4,
 * otherwise 3.
 */
function formatCost(usd: number): string {
  if (usd === 0) return "$0.00";
  const decimals = usd < 0.001 ? 6 : usd < 0.01 ? 4 : 3;
  return `$${usd.toFixed(decimals)}`;
}
/**
 * Abbreviate a token count: millions → "x.xM", thousands → "x.xK",
 * smaller counts verbatim.
 */
function formatTokens(n: number): string {
  const scales = [
    [1_000_000, "M"],
    [1_000, "K"],
  ] as const;
  for (const [threshold, suffix] of scales) {
    if (n >= threshold) {
      return `${(n / threshold).toFixed(1)}${suffix}`;
    }
  }
  return String(n);
}
/**
 * Render an ISO timestamp string as local "YYYY-MM-DD HH:MM"
 * (zero-padded, minute precision).
 */
function formatTimestamp(iso: string): string {
  const d = new Date(iso);
  const pad = (v: number) => String(v).padStart(2, "0");
  const datePart = `${d.getFullYear()}-${pad(d.getMonth() + 1)}-${pad(d.getDate())}`;
  const timePart = `${pad(d.getHours())}:${pad(d.getMinutes())}`;
  return `${datePart} ${timePart}`;
}
/**
 * Infer an agent type from its name prefix (case-insensitive):
 * "coder*" → "coder", "qa*" → "qa", "mergemaster*"/"merge*" →
 * "mergemaster", anything else → "other".
 */
function agentType(agentName: string): string {
  const lower = agentName.toLowerCase();
  const prefixRules: [string, string][] = [
    ["coder", "coder"],
    ["qa", "qa"],
    ["mergemaster", "mergemaster"],
    ["merge", "mergemaster"],
  ];
  const match = prefixRules.find(([prefix]) => lower.startsWith(prefix));
  return match ? match[1] : "other";
}
/** Props for {@link SortHeader}. */
interface SortHeaderProps {
  // Visible column label.
  label: string;
  // The sort key this header activates when clicked.
  sortKey: SortKey;
  // Currently active sort key (for highlight/arrow state).
  current: SortKey;
  // Current sort direction; only shown on the active column.
  dir: SortDir;
  onSort: (key: SortKey) => void;
  align?: "left" | "right";
}
/**
 * Clickable table-header cell. The active column is emphasised and
 * suffixed with an ascending/descending arrow; clicking reports this
 * column's key via `onSort`.
 */
function SortHeader({
  label,
  sortKey,
  current,
  dir,
  onSort,
  align = "left",
}: SortHeaderProps) {
  const active = current === sortKey;
  // Arrow suffix rendered only when this column drives the sort.
  let indicator = "";
  if (active) {
    indicator = dir === "asc" ? " ↑" : " ↓";
  }
  const headerStyle: React.CSSProperties = {
    padding: "8px 12px",
    textAlign: align,
    cursor: "pointer",
    userSelect: "none",
    borderBottom: "1px solid #333",
    color: active ? "#ececec" : "#aaa",
    fontWeight: active ? "700" : "500",
    whiteSpace: "nowrap",
    fontSize: "0.8em",
    letterSpacing: "0.05em",
    textTransform: "uppercase",
  };
  return (
    <th style={headerStyle} onClick={() => onSort(sortKey)}>
      {label}
      {indicator}
    </th>
  );
}
/** Props for {@link TokenUsagePage}. */
interface TokenUsagePageProps {
  // Currently unused by the page (data is fetched globally), but kept in
  // the interface for parity with sibling pages.
  projectPath: string;
}
/**
 * Full-page token usage report: summary cards (total cost, cost grouped
 * by inferred agent type, cost grouped by model) above a client-side
 * sortable table of raw usage records.
 *
 * Data is fetched once on mount via `api.getAllTokenUsage()`.
 */
export function TokenUsagePage({
  projectPath: _projectPath,
}: TokenUsagePageProps) {
  const [records, setRecords] = React.useState<TokenUsageRecord[]>([]);
  const [loading, setLoading] = React.useState(true);
  const [error, setError] = React.useState<string | null>(null);
  const [sortKey, setSortKey] = React.useState<SortKey>("timestamp");
  const [sortDir, setSortDir] = React.useState<SortDir>("desc");
  // Load all usage records once on mount.
  React.useEffect(() => {
    setLoading(true);
    setError(null);
    api
      .getAllTokenUsage()
      .then((resp) => setRecords(resp.records))
      .catch((e) =>
        setError(e instanceof Error ? e.message : "Failed to load token usage"),
      )
      .finally(() => setLoading(false));
  }, []);
  // Clicking the active column flips direction; a new column starts
  // ascending, except timestamp which starts descending (newest first).
  function handleSort(key: SortKey) {
    if (key === sortKey) {
      setSortDir((d) => (d === "asc" ? "desc" : "asc"));
    } else {
      setSortKey(key);
      setSortDir(key === "timestamp" ? "desc" : "asc");
    }
  }
  // Records ordered by the active column/direction (copy; never mutates state).
  const sorted = React.useMemo(() => {
    return [...records].sort((a, b) => {
      let cmp = 0;
      switch (sortKey) {
        case "timestamp":
          cmp = a.timestamp.localeCompare(b.timestamp);
          break;
        case "story_id":
          cmp = a.story_id.localeCompare(b.story_id);
          break;
        case "agent_name":
          cmp = a.agent_name.localeCompare(b.agent_name);
          break;
        case "model":
          cmp = (a.model ?? "").localeCompare(b.model ?? "");
          break;
        case "total_cost_usd":
          cmp = a.total_cost_usd - b.total_cost_usd;
          break;
      }
      return sortDir === "asc" ? cmp : -cmp;
    });
  }, [records, sortKey, sortDir]);
  // Compute summary totals
  const totalCost = records.reduce((s, r) => s + r.total_cost_usd, 0);
  // Cost totals grouped by inferred agent type (see agentType()).
  const byAgentType = React.useMemo(() => {
    const map: Record<string, number> = {};
    for (const r of records) {
      const t = agentType(r.agent_name);
      map[t] = (map[t] ?? 0) + r.total_cost_usd;
    }
    return map;
  }, [records]);
  // Cost totals grouped by model; records without a model fall under "unknown".
  const byModel = React.useMemo(() => {
    const map: Record<string, number> = {};
    for (const r of records) {
      const m = r.model ?? "unknown";
      map[m] = (map[m] ?? 0) + r.total_cost_usd;
    }
    return map;
  }, [records]);
  // Shared <td> styling for the records table.
  const cellStyle: React.CSSProperties = {
    padding: "7px 12px",
    borderBottom: "1px solid #222",
    fontSize: "0.85em",
    color: "#ccc",
    whiteSpace: "nowrap",
  };
  return (
    <div
      style={{
        height: "100%",
        overflowY: "auto",
        background: "#111",
        padding: "24px",
        fontFamily: "monospace",
      }}
    >
      <h2
        style={{
          color: "#ececec",
          margin: "0 0 20px",
          fontSize: "1.1em",
          fontWeight: "700",
          letterSpacing: "0.04em",
        }}
      >
        Token Usage
      </h2>
      {/* Summary totals */}
      <div
        style={{
          display: "flex",
          gap: "16px",
          flexWrap: "wrap",
          marginBottom: "24px",
        }}
      >
        <SummaryCard
          label="Total Cost"
          value={formatCost(totalCost)}
          highlight
        />
        {/* Agent-type cards alphabetically; model cards by descending cost. */}
        {Object.entries(byAgentType)
          .sort(([a], [b]) => a.localeCompare(b))
          .map(([type, cost]) => (
            <SummaryCard
              key={type}
              label={`${type.charAt(0).toUpperCase()}${type.slice(1)}`}
              value={formatCost(cost)}
            />
          ))}
        {Object.entries(byModel)
          .sort(([, a], [, b]) => b - a)
          .map(([model, cost]) => (
            <SummaryCard key={model} label={model} value={formatCost(cost)} />
          ))}
      </div>
      {loading && (
        <p style={{ color: "#555", fontSize: "0.9em" }}>Loading...</p>
      )}
      {error && <p style={{ color: "#e05c5c", fontSize: "0.9em" }}>{error}</p>}
      {!loading && !error && records.length === 0 && (
        <p style={{ color: "#555", fontSize: "0.9em" }}>
          No token usage records found.
        </p>
      )}
      {!loading && !error && records.length > 0 && (
        <div style={{ overflowX: "auto" }}>
          <table
            style={{
              width: "100%",
              borderCollapse: "collapse",
              fontSize: "0.9em",
            }}
          >
            <thead>
              <tr style={{ background: "#1a1a1a" }}>
                <SortHeader
                  label="Date"
                  sortKey="timestamp"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Story"
                  sortKey="story_id"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Agent"
                  sortKey="agent_name"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Model"
                  sortKey="model"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                {/* Token-count columns are display-only (not sortable). */}
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Input
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Cache+
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Cache
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Output
                </th>
                <SortHeader
                  label="Cost"
                  sortKey="total_cost_usd"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                  align="right"
                />
              </tr>
            </thead>
            <tbody>
              {sorted.map((r, i) => (
                <tr
                  key={`${r.story_id}-${r.agent_name}-${r.timestamp}`}
                  style={{ background: i % 2 === 0 ? "#111" : "#161616" }}
                >
                  <td style={cellStyle}>{formatTimestamp(r.timestamp)}</td>
                  <td
                    style={{
                      ...cellStyle,
                      color: "#8b9cf7",
                      maxWidth: "220px",
                      overflow: "hidden",
                      textOverflow: "ellipsis",
                    }}
                  >
                    {r.story_id}
                  </td>
                  <td style={{ ...cellStyle, color: "#7ec8a4" }}>
                    {r.agent_name}
                  </td>
                  <td style={{ ...cellStyle, color: "#c9a96e" }}>
                    {r.model ?? "—"}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.cache_creation_input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.cache_read_input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.output_tokens)}
                  </td>
                  <td
                    style={{
                      ...cellStyle,
                      textAlign: "right",
                      color: "#e08c5c",
                      fontWeight: "600",
                    }}
                  >
                    {formatCost(r.total_cost_usd)}
                  </td>
                </tr>
              ))}
            </tbody>
          </table>
        </div>
      )}
    </div>
  );
}
/**
 * Small labelled stat card used in the token-usage summary row.
 * `highlight` switches to the emphasised (bluish/gold) colour scheme
 * used for the grand total.
 */
function SummaryCard({
  label,
  value,
  highlight = false,
}: {
  label: string;
  value: string;
  highlight?: boolean;
}) {
  const cardStyle: React.CSSProperties = {
    background: highlight ? "#1e1e2e" : "#1a1a1a",
    border: `1px solid ${highlight ? "#3a3a5a" : "#2a2a2a"}`,
    borderRadius: "8px",
    padding: "12px 16px",
    minWidth: "120px",
  };
  const labelStyle: React.CSSProperties = {
    fontSize: "0.7em",
    color: "#666",
    textTransform: "uppercase",
    letterSpacing: "0.07em",
    marginBottom: "4px",
  };
  const valueStyle: React.CSSProperties = {
    fontSize: "1.1em",
    fontWeight: "700",
    color: highlight ? "#c9a96e" : "#ececec",
    fontFamily: "monospace",
  };
  return (
    <div style={cardStyle}>
      <div style={labelStyle}>{label}</div>
      <div style={valueStyle}>{value}</div>
    </div>
  );
}

View File

@@ -1,7 +1,7 @@
import { act, render, screen, waitFor } from "@testing-library/react"; import { act, render, screen, waitFor } from "@testing-library/react";
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
import type { AgentEvent, AgentInfo } from "../api/agents"; import type { AgentEvent, AgentInfo } from "../api/agents";
import type { TestResultsResponse } from "../api/client"; import type { TestResultsResponse, TokenCostResponse } from "../api/client";
vi.mock("../api/client", async () => { vi.mock("../api/client", async () => {
const actual = const actual =
@@ -12,6 +12,7 @@ vi.mock("../api/client", async () => {
...actual.api, ...actual.api,
getWorkItemContent: vi.fn(), getWorkItemContent: vi.fn(),
getTestResults: vi.fn(), getTestResults: vi.fn(),
getTokenCost: vi.fn(),
}, },
}; };
}); });
@@ -30,6 +31,7 @@ const { WorkItemDetailPanel } = await import("./WorkItemDetailPanel");
const mockedGetWorkItemContent = vi.mocked(api.getWorkItemContent); const mockedGetWorkItemContent = vi.mocked(api.getWorkItemContent);
const mockedGetTestResults = vi.mocked(api.getTestResults); const mockedGetTestResults = vi.mocked(api.getTestResults);
const mockedGetTokenCost = vi.mocked(api.getTokenCost);
const mockedListAgents = vi.mocked(agentsApi.listAgents); const mockedListAgents = vi.mocked(agentsApi.listAgents);
const mockedSubscribeAgentStream = vi.mocked(subscribeAgentStream); const mockedSubscribeAgentStream = vi.mocked(subscribeAgentStream);
@@ -52,6 +54,7 @@ beforeEach(() => {
vi.clearAllMocks(); vi.clearAllMocks();
mockedGetWorkItemContent.mockResolvedValue(DEFAULT_CONTENT); mockedGetWorkItemContent.mockResolvedValue(DEFAULT_CONTENT);
mockedGetTestResults.mockResolvedValue(null); mockedGetTestResults.mockResolvedValue(null);
mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
mockedListAgents.mockResolvedValue([]); mockedListAgents.mockResolvedValue([]);
mockedSubscribeAgentStream.mockReturnValue(() => {}); mockedSubscribeAgentStream.mockReturnValue(() => {});
}); });
@@ -608,3 +611,146 @@ describe("WorkItemDetailPanel - Test Results", () => {
}); });
}); });
}); });
// Covers the Token Cost section of the detail panel: empty state, per-agent
// breakdown with total, model display (present and null), and re-fetch
// behavior when pipelineVersion changes.
// NOTE(review): relies on mockedGetTokenCost and the api.getTokenCost mock
// declared earlier in this file — confirm beforeEach resets them as expected.
describe("WorkItemDetailPanel - Token Cost", () => {
  // Two agents: one with a model string and cache counters, one with model
  // null and zero cache use, so both rendering branches are exercised.
  const sampleTokenCost: TokenCostResponse = {
    total_cost_usd: 0.012345,
    agents: [
      {
        agent_name: "coder-1",
        model: "claude-sonnet-4-6",
        input_tokens: 1000,
        output_tokens: 500,
        cache_creation_input_tokens: 200,
        cache_read_input_tokens: 100,
        total_cost_usd: 0.009,
      },
      {
        agent_name: "coder-2",
        model: null,
        input_tokens: 800,
        output_tokens: 300,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
        total_cost_usd: 0.003345,
      },
    ],
  };

  it("shows empty state when no token data exists", async () => {
    // Zero-cost response with no agents should render the empty placeholder.
    mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
    render(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(screen.getByTestId("token-cost-empty")).toBeInTheDocument();
    });
    expect(screen.getByText("No token data recorded")).toBeInTheDocument();
  });

  it("shows per-agent breakdown and total cost when data exists", async () => {
    mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
    render(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(screen.getByTestId("token-cost-content")).toBeInTheDocument();
    });
    // Total is formatted with six decimal places by the component.
    expect(screen.getByTestId("token-cost-total")).toHaveTextContent(
      "$0.012345",
    );
    expect(screen.getByTestId("token-cost-agent-coder-1")).toBeInTheDocument();
    expect(screen.getByTestId("token-cost-agent-coder-2")).toBeInTheDocument();
  });

  it("shows agent name and model when model is present", async () => {
    mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
    render(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(
        screen.getByTestId("token-cost-agent-coder-1"),
      ).toBeInTheDocument();
    });
    const agentRow = screen.getByTestId("token-cost-agent-coder-1");
    expect(agentRow).toHaveTextContent("coder-1");
    expect(agentRow).toHaveTextContent("claude-sonnet-4-6");
  });

  it("shows agent name without model when model is null", async () => {
    mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
    render(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(
        screen.getByTestId("token-cost-agent-coder-2"),
      ).toBeInTheDocument();
    });
    // A null model must not leak the literal string "null" into the UI.
    const agentRow = screen.getByTestId("token-cost-agent-coder-2");
    expect(agentRow).toHaveTextContent("coder-2");
    expect(agentRow).not.toHaveTextContent("null");
  });

  it("re-fetches token cost when pipelineVersion changes", async () => {
    mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
    const { rerender } = render(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(mockedGetTokenCost).toHaveBeenCalledTimes(1);
    });
    // Bump pipelineVersion: the effect should fire again and pick up new data.
    mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
    rerender(
      <WorkItemDetailPanel
        storyId="42_story_foo"
        pipelineVersion={1}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      expect(mockedGetTokenCost).toHaveBeenCalledTimes(2);
    });
    await waitFor(() => {
      expect(screen.getByTestId("token-cost-content")).toBeInTheDocument();
    });
  });
});

View File

@@ -2,7 +2,12 @@ import * as React from "react";
import Markdown from "react-markdown"; import Markdown from "react-markdown";
import type { AgentEvent, AgentInfo, AgentStatusValue } from "../api/agents"; import type { AgentEvent, AgentInfo, AgentStatusValue } from "../api/agents";
import { agentsApi, subscribeAgentStream } from "../api/agents"; import { agentsApi, subscribeAgentStream } from "../api/agents";
import type { TestCaseResult, TestResultsResponse } from "../api/client"; import type {
AgentCostEntry,
TestCaseResult,
TestResultsResponse,
TokenCostResponse,
} from "../api/client";
import { api } from "../api/client"; import { api } from "../api/client";
const { useEffect, useRef, useState } = React; const { useEffect, useRef, useState } = React;
@@ -27,6 +32,8 @@ interface WorkItemDetailPanelProps {
storyId: string; storyId: string;
pipelineVersion: number; pipelineVersion: number;
onClose: () => void; onClose: () => void;
/** True when the item is in QA and awaiting human review. */
reviewHold?: boolean;
} }
function TestCaseRow({ tc }: { tc: TestCaseResult }) { function TestCaseRow({ tc }: { tc: TestCaseResult }) {
@@ -109,6 +116,7 @@ export function WorkItemDetailPanel({
storyId, storyId,
pipelineVersion, pipelineVersion,
onClose, onClose,
reviewHold: _reviewHold,
}: WorkItemDetailPanelProps) { }: WorkItemDetailPanelProps) {
const [content, setContent] = useState<string | null>(null); const [content, setContent] = useState<string | null>(null);
const [stage, setStage] = useState<string>(""); const [stage, setStage] = useState<string>("");
@@ -122,6 +130,7 @@ export function WorkItemDetailPanel({
const [testResults, setTestResults] = useState<TestResultsResponse | null>( const [testResults, setTestResults] = useState<TestResultsResponse | null>(
null, null,
); );
const [tokenCost, setTokenCost] = useState<TokenCostResponse | null>(null);
const panelRef = useRef<HTMLDivElement>(null); const panelRef = useRef<HTMLDivElement>(null);
const cleanupRef = useRef<(() => void) | null>(null); const cleanupRef = useRef<(() => void) | null>(null);
@@ -156,6 +165,18 @@ export function WorkItemDetailPanel({
}); });
}, [storyId, pipelineVersion]); }, [storyId, pipelineVersion]);
// Fetch token cost on mount and when pipeline updates arrive.
useEffect(() => {
api
.getTokenCost(storyId)
.then((data) => {
setTokenCost(data);
})
.catch(() => {
// Silently ignore — token cost may not exist yet.
});
}, [storyId, pipelineVersion]);
useEffect(() => { useEffect(() => {
cleanupRef.current?.(); cleanupRef.current?.();
cleanupRef.current = null; cleanupRef.current = null;
@@ -362,6 +383,96 @@ export function WorkItemDetailPanel({
</div> </div>
)} )}
{/* Token Cost section */}
<div
data-testid="token-cost-section"
style={{
border: "1px solid #2a2a2a",
borderRadius: "8px",
padding: "10px 12px",
background: "#161616",
}}
>
<div
style={{
fontWeight: 600,
fontSize: "0.8em",
color: "#555",
marginBottom: "8px",
}}
>
Token Cost
</div>
{tokenCost && tokenCost.agents.length > 0 ? (
<div data-testid="token-cost-content">
<div
style={{
fontSize: "0.75em",
color: "#888",
marginBottom: "8px",
}}
>
Total:{" "}
<span data-testid="token-cost-total" style={{ color: "#ccc" }}>
${tokenCost.total_cost_usd.toFixed(6)}
</span>
</div>
{tokenCost.agents.map((agent: AgentCostEntry) => (
<div
key={agent.agent_name}
data-testid={`token-cost-agent-${agent.agent_name}`}
style={{
fontSize: "0.75em",
color: "#888",
padding: "4px 0",
borderTop: "1px solid #222",
}}
>
<div
style={{
display: "flex",
justifyContent: "space-between",
marginBottom: "2px",
}}
>
<span style={{ color: "#ccc", fontWeight: 600 }}>
{agent.agent_name}
{agent.model ? (
<span
style={{ color: "#666", fontWeight: 400 }}
>{` (${agent.model})`}</span>
) : null}
</span>
<span style={{ color: "#aaa" }}>
${agent.total_cost_usd.toFixed(6)}
</span>
</div>
<div style={{ color: "#555" }}>
in {agent.input_tokens.toLocaleString()} / out{" "}
{agent.output_tokens.toLocaleString()}
{(agent.cache_creation_input_tokens > 0 ||
agent.cache_read_input_tokens > 0) && (
<>
{" "}
/ cache +
{agent.cache_creation_input_tokens.toLocaleString()}{" "}
read {agent.cache_read_input_tokens.toLocaleString()}
</>
)}
</div>
</div>
))}
</div>
) : (
<div
data-testid="token-cost-empty"
style={{ fontSize: "0.75em", color: "#444" }}
>
No token data recorded
</div>
)}
</div>
{/* Test Results section */} {/* Test Results section */}
<div <div
data-testid="test-results-section" data-testid="test-results-section"

View File

@@ -33,7 +33,7 @@ function makeProps(
describe("SelectionScreen", () => { describe("SelectionScreen", () => {
it("renders the title and description", () => { it("renders the title and description", () => {
render(<SelectionScreen {...makeProps()} />); render(<SelectionScreen {...makeProps()} />);
expect(screen.getByText("Story Kit")).toBeInTheDocument(); expect(screen.getByText("Storkit")).toBeInTheDocument();
expect( expect(
screen.getByText("Paste or complete a project path to start."), screen.getByText("Paste or complete a project path to start."),
).toBeInTheDocument(); ).toBeInTheDocument();

View File

@@ -54,7 +54,7 @@ export function SelectionScreen({
className="selection-screen" className="selection-screen"
style={{ padding: "2rem", maxWidth: "800px", margin: "0 auto" }} style={{ padding: "2rem", maxWidth: "800px", margin: "0 auto" }}
> >
<h1>Story Kit</h1> <h1>Storkit</h1>
<p>Paste or complete a project path to start.</p> <p>Paste or complete a project path to start.</p>
{knownProjects.length > 0 && ( {knownProjects.length > 0 && (

View File

@@ -1 +1,20 @@
import "@testing-library/jest-dom"; import "@testing-library/jest-dom";
import { beforeEach, vi } from "vitest";
// Provide a default fetch mock so components that call API endpoints on mount
// don't throw URL-parse errors in the jsdom test environment. Tests that need
// specific responses should mock the relevant `api.*` method as usual.
beforeEach(() => {
  vi.stubGlobal(
    "fetch",
    vi.fn((input: string | URL | Request) => {
      // Normalize all three fetch input shapes to a URL string for matching.
      const url = typeof input === "string" ? input : input.toString();
      // Endpoints that return arrays need [] not {} to avoid "not iterable" errors.
      const arrayEndpoints = ["/agents", "/agents/config"];
      const body = arrayEndpoints.some((ep) => url.endsWith(ep))
        ? JSON.stringify([])
        : JSON.stringify({});
      // NOTE(review): relies on a global Response constructor — modern
      // Node/jsdom provide one; confirm the test runtime exposes it.
      return Promise.resolve(new Response(body, { status: 200 }));
    }),
  );
});

View File

@@ -23,6 +23,13 @@ export default defineConfig(() => {
}); });
}, },
}, },
"/agents": {
target: `http://127.0.0.1:${String(backendPort)}`,
timeout: 120000,
configure: (proxy) => {
proxy.on("error", (_err) => {});
},
},
}, },
watch: { watch: {
ignored: [ ignored: [

View File

@@ -8,6 +8,11 @@ export default defineConfig({
}, },
test: { test: {
environment: "jsdom", environment: "jsdom",
environmentOptions: {
jsdom: {
url: "http://localhost:3000",
},
},
globals: true, globals: true,
testTimeout: 10_000, testTimeout: 10_000,
setupFiles: ["./src/setupTests.ts"], setupFiles: ["./src/setupTests.ts"],

View File

@@ -1,7 +1,7 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::process::Command; use std::process::Command;
use crate::io::story_metadata::clear_front_matter_field; use crate::io::story_metadata::{clear_front_matter_field, write_rejection_notes};
use crate::slog; use crate::slog;
pub(super) fn item_type_from_id(item_id: &str) -> &'static str { pub(super) fn item_type_from_id(item_id: &str) -> &'static str {
@@ -131,9 +131,11 @@ pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(),
std::fs::rename(&source_path, &done_path) std::fs::rename(&source_path, &done_path)
.map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?; .map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?;
// Strip stale merge_failure from front matter now that the story is done. // Strip stale pipeline fields from front matter now that the story is done.
if let Err(e) = clear_front_matter_field(&done_path, "merge_failure") { for field in &["merge_failure", "retry_count", "blocked"] {
slog!("[lifecycle] Warning: could not clear merge_failure from '{story_id}': {e}"); if let Err(e) = clear_front_matter_field(&done_path, field) {
slog!("[lifecycle] Warning: could not clear {field} from '{story_id}': {e}");
}
} }
let from_dir = if source_path == current_path { let from_dir = if source_path == current_path {
@@ -183,6 +185,14 @@ pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), St
} else { } else {
"work/3_qa/" "work/3_qa/"
}; };
// Reset retry count and blocked for the new stage.
if let Err(e) = clear_front_matter_field(&merge_path, "retry_count") {
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
}
if let Err(e) = clear_front_matter_field(&merge_path, "blocked") {
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
}
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/"); slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
Ok(()) Ok(())
@@ -214,11 +224,137 @@ pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), Strin
std::fs::rename(&current_path, &qa_path) std::fs::rename(&current_path, &qa_path)
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?; .map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
// Reset retry count for the new stage.
if let Err(e) = clear_front_matter_field(&qa_path, "retry_count") {
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
}
if let Err(e) = clear_front_matter_field(&qa_path, "blocked") {
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
}
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/"); slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
Ok(()) Ok(())
} }
/// Move a story from `work/3_qa/` back to `work/2_current/` and write rejection notes.
///
/// Used when a human reviewer rejects a story during manual QA.
/// Clears the `review_hold` front matter field and appends rejection notes to the story file.
pub fn reject_story_from_qa(
project_root: &Path,
story_id: &str,
notes: &str,
) -> Result<(), String> {
let sk = project_root.join(".story_kit").join("work");
let qa_path = sk.join("3_qa").join(format!("{story_id}.md"));
let current_dir = sk.join("2_current");
let current_path = current_dir.join(format!("{story_id}.md"));
if current_path.exists() {
return Ok(()); // Already in 2_current — idempotent.
}
if !qa_path.exists() {
return Err(format!(
"Work item '{story_id}' not found in work/3_qa/. Cannot reject."
));
}
std::fs::create_dir_all(&current_dir)
.map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?;
std::fs::rename(&qa_path, &current_path)
.map_err(|e| format!("Failed to move '{story_id}' from 3_qa/ to 2_current/: {e}"))?;
// Clear review_hold since the story is going back for rework.
if let Err(e) = clear_front_matter_field(&current_path, "review_hold") {
slog!("[lifecycle] Warning: could not clear review_hold from '{story_id}': {e}");
}
// Write rejection notes into the story file so the coder can see what needs fixing.
if !notes.is_empty()
&& let Err(e) = write_rejection_notes(&current_path, notes)
{
slog!("[lifecycle] Warning: could not write rejection notes to '{story_id}': {e}");
}
slog!("[lifecycle] Rejected '{story_id}' from work/3_qa/ back to work/2_current/");
Ok(())
}
/// Move any work item to an arbitrary pipeline stage by searching all stages.
///
/// Accepts `target_stage` as one of: `backlog`, `current`, `qa`, `merge`, `done`.
/// Idempotent: if the item is already in the target stage, returns Ok.
/// Returns `(from_stage, to_stage)` on success.
/// Move any work item to an arbitrary pipeline stage by searching all stages.
///
/// Accepts `target_stage` as one of: `backlog`, `current`, `qa`, `merge`, `done`.
/// Idempotent: if the item is already in the target stage, returns Ok.
/// Returns `(from_stage, to_stage)` on success.
pub fn move_story_to_stage(
    project_root: &Path,
    story_id: &str,
    target_stage: &str,
) -> Result<(String, String), String> {
    // Single canonical stage table. The trailing "archived" entry is searched
    // as a source location but is not a valid move target, so target lookup
    // only considers the first five entries.
    const STAGES: [(&str, &str); 6] = [
        ("backlog", "1_backlog"),
        ("current", "2_current"),
        ("qa", "3_qa"),
        ("merge", "4_merge"),
        ("done", "5_done"),
        ("archived", "6_archived"),
    ];

    let target_dir_name = STAGES[..5]
        .iter()
        .find(|(name, _)| *name == target_stage)
        .map(|(_, dir)| *dir)
        .ok_or_else(|| {
            format!(
                "Invalid target_stage '{target_stage}'. Must be one of: backlog, current, qa, merge, done"
            )
        })?;

    let sk = project_root.join(".story_kit").join("work");
    let file_name = format!("{story_id}.md");
    let target_dir = sk.join(target_dir_name);
    let target_path = target_dir.join(&file_name);
    // Idempotent: already in the target stage.
    if target_path.exists() {
        return Ok((target_stage.to_string(), target_stage.to_string()));
    }

    // Locate the item in any stage, including the archive.
    let (from_stage, source_path) = STAGES
        .iter()
        .find_map(|(stage, dir)| {
            let candidate = sk.join(dir).join(&file_name);
            candidate.exists().then(|| (*stage, candidate))
        })
        .ok_or_else(|| format!("Work item '{story_id}' not found in any pipeline stage."))?;

    std::fs::create_dir_all(&target_dir)
        .map_err(|e| format!("Failed to create work/{target_dir_name}/ directory: {e}"))?;
    std::fs::rename(&source_path, &target_path)
        .map_err(|e| format!("Failed to move '{story_id}' to work/{target_dir_name}/: {e}"))?;
    slog!(
        "[lifecycle] Moved '{story_id}' from work/{from_stage}/ to work/{target_dir_name}/"
    );
    Ok((from_stage.to_string(), target_stage.to_string()))
}
/// Move a bug from `work/2_current/` or `work/1_backlog/` to `work/5_done/` and auto-commit. /// Move a bug from `work/2_current/` or `work/1_backlog/` to `work/5_done/` and auto-commit.
/// ///
/// * If the bug is in `2_current/`, it is moved to `5_done/` and committed. /// * If the bug is in `2_current/`, it is moved to `5_done/` and committed.
@@ -552,4 +688,142 @@ mod tests {
"should return false when no feature branch" "should return false when no feature branch"
); );
} }
// ── reject_story_from_qa tests ────────────────────────────────────────────
#[test]
// Happy path: the story leaves 3_qa/, lands in 2_current/, the rejection
// notes are appended, and the review_hold flag is stripped.
fn reject_story_from_qa_moves_to_current() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".story_kit/work/3_qa");
    let current_dir = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::create_dir_all(&current_dir).unwrap();
    fs::write(
        qa_dir.join("50_story_test.md"),
        "---\nname: Test\nreview_hold: true\n---\n# Story\n",
    )
    .unwrap();
    reject_story_from_qa(root, "50_story_test", "Button color wrong").unwrap();
    assert!(!qa_dir.join("50_story_test.md").exists());
    assert!(current_dir.join("50_story_test.md").exists());
    let contents = fs::read_to_string(current_dir.join("50_story_test.md")).unwrap();
    assert!(contents.contains("Button color wrong"));
    assert!(contents.contains("## QA Rejection Notes"));
    assert!(!contents.contains("review_hold"));
}
#[test]
// A story that exists nowhere in the pipeline yields a descriptive error.
fn reject_story_from_qa_errors_when_not_in_qa() {
    let tmp = tempfile::tempdir().unwrap();
    let result = reject_story_from_qa(tmp.path(), "99_nonexistent", "notes");
    assert!(result.unwrap_err().contains("not found in work/3_qa/"));
}
#[test]
// A story already in 2_current/ is left untouched and the call succeeds.
fn reject_story_from_qa_idempotent_when_in_current() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current_dir = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current_dir).unwrap();
    fs::write(current_dir.join("51_story_test.md"), "---\nname: Test\n---\n# Story\n").unwrap();
    reject_story_from_qa(root, "51_story_test", "notes").unwrap();
    assert!(current_dir.join("51_story_test.md").exists());
}
// ── move_story_to_stage tests ─────────────────────────────────
#[test]
// Forward move: backlog -> current, returning the (from, to) stage names.
fn move_story_to_stage_moves_from_backlog_to_current() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let backlog = root.join(".story_kit/work/1_backlog");
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&backlog).unwrap();
    fs::create_dir_all(&current).unwrap();
    fs::write(backlog.join("60_story_move.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "60_story_move", "current").unwrap();
    assert_eq!(from, "backlog");
    assert_eq!(to, "current");
    assert!(!backlog.join("60_story_move.md").exists());
    assert!(current.join("60_story_move.md").exists());
}
#[test]
// Backward move: current -> backlog is equally valid (stage order is not enforced).
fn move_story_to_stage_moves_from_current_to_backlog() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".story_kit/work/2_current");
    let backlog = root.join(".story_kit/work/1_backlog");
    fs::create_dir_all(&current).unwrap();
    fs::create_dir_all(&backlog).unwrap();
    fs::write(current.join("61_story_back.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "61_story_back", "backlog").unwrap();
    assert_eq!(from, "current");
    assert_eq!(to, "backlog");
    assert!(!current.join("61_story_back.md").exists());
    assert!(backlog.join("61_story_back.md").exists());
}
#[test]
// Already at the target stage: reports (target, target) and leaves the file alone.
fn move_story_to_stage_idempotent_when_already_in_target() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("62_story_idem.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "62_story_idem", "current").unwrap();
    assert_eq!(from, "current");
    assert_eq!(to, "current");
    assert!(current.join("62_story_idem.md").exists());
}
#[test]
// An unrecognized stage name is rejected before any filesystem work happens.
fn move_story_to_stage_invalid_target_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_stage(tmp.path(), "1_story_test", "invalid");
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("Invalid target_stage"));
}
#[test]
// A story missing from every stage directory yields a "not found" error.
fn move_story_to_stage_not_found_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let result = move_story_to_stage(tmp.path(), "99_story_ghost", "current");
    assert!(result.is_err());
    assert!(result.unwrap_err().contains("not found in any pipeline stage"));
}
#[test]
// The source search covers every stage directory, not just backlog/current.
fn move_story_to_stage_finds_in_qa_dir() {
    use std::fs;
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let qa_dir = root.join(".story_kit/work/3_qa");
    let backlog = root.join(".story_kit/work/1_backlog");
    fs::create_dir_all(&qa_dir).unwrap();
    fs::create_dir_all(&backlog).unwrap();
    fs::write(qa_dir.join("63_story_qa.md"), "test").unwrap();
    let (from, to) = move_story_to_stage(root, "63_story_qa", "backlog").unwrap();
    assert_eq!(from, "qa");
    assert_eq!(to, "backlog");
    assert!(!qa_dir.join("63_story_qa.md").exists());
    assert!(backlog.join("63_story_qa.md").exists());
}
} }

View File

@@ -3,13 +3,14 @@ pub mod lifecycle;
pub mod merge; pub mod merge;
mod pool; mod pool;
mod pty; mod pty;
pub mod token_usage;
use crate::config::AgentConfig; use crate::config::AgentConfig;
use serde::Serialize; use serde::{Deserialize, Serialize};
pub use lifecycle::{ pub use lifecycle::{
close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived, close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived,
move_story_to_merge, move_story_to_qa, move_story_to_merge, move_story_to_qa, move_story_to_stage, reject_story_from_qa,
}; };
pub use pool::AgentPool; pub use pool::AgentPool;
@@ -136,6 +137,45 @@ pub struct CompletionReport {
pub gate_output: String, pub gate_output: String,
} }
/// Token usage from a Claude Code session's `result` event.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TokenUsage {
    /// Input tokens sent to the model (excludes cached tokens, which are
    /// counted separately below).
    pub input_tokens: u64,
    /// Tokens generated by the model.
    pub output_tokens: u64,
    /// Tokens written into the prompt cache during this session.
    pub cache_creation_input_tokens: u64,
    /// Tokens served from the prompt cache during this session.
    pub cache_read_input_tokens: u64,
    /// Total session cost in USD, as reported by the `result` event.
    pub total_cost_usd: f64,
}
impl TokenUsage {
    /// Parse token usage from a Claude Code `result` JSON event.
    ///
    /// Returns `None` when the event carries no `usage` object; any counter
    /// missing inside `usage` defaults to zero, and a missing
    /// `total_cost_usd` defaults to 0.0.
    pub fn from_result_event(json: &serde_json::Value) -> Option<Self> {
        let usage = json.get("usage")?;
        // All four counters follow the same lookup-or-zero pattern.
        let counter = |key: &str| usage.get(key).and_then(|v| v.as_u64()).unwrap_or(0);
        let cost = json
            .get("total_cost_usd")
            .and_then(|v| v.as_f64())
            .unwrap_or(0.0);
        Some(Self {
            input_tokens: counter("input_tokens"),
            output_tokens: counter("output_tokens"),
            cache_creation_input_tokens: counter("cache_creation_input_tokens"),
            cache_read_input_tokens: counter("cache_read_input_tokens"),
            total_cost_usd: cost,
        })
    }
}
#[derive(Debug, Serialize, Clone)] #[derive(Debug, Serialize, Clone)]
pub struct AgentInfo { pub struct AgentInfo {
pub story_id: String, pub story_id: String,

File diff suppressed because it is too large Load Diff

View File

@@ -5,11 +5,17 @@ use std::sync::{Arc, Mutex};
use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system}; use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system};
use tokio::sync::broadcast; use tokio::sync::broadcast;
use super::AgentEvent; use super::{AgentEvent, TokenUsage};
use crate::agent_log::AgentLogWriter; use crate::agent_log::AgentLogWriter;
use crate::slog; use crate::slog;
use crate::slog_warn; use crate::slog_warn;
/// Result from a PTY agent session, containing the session ID and token usage.
pub(super) struct PtyResult {
    /// Session ID captured from the agent's output stream, when one was seen.
    pub session_id: Option<String>,
    /// Token usage extracted from the final `result` event, when one was emitted.
    pub token_usage: Option<TokenUsage>,
}
fn composite_key(story_id: &str, agent_name: &str) -> String { fn composite_key(story_id: &str, agent_name: &str) -> String {
format!("{story_id}:{agent_name}") format!("{story_id}:{agent_name}")
} }
@@ -41,7 +47,7 @@ pub(super) async fn run_agent_pty_streaming(
log_writer: Option<Arc<Mutex<AgentLogWriter>>>, log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
inactivity_timeout_secs: u64, inactivity_timeout_secs: u64,
child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>, child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
) -> Result<Option<String>, String> { ) -> Result<PtyResult, String> {
let sid = story_id.to_string(); let sid = story_id.to_string();
let aname = agent_name.to_string(); let aname = agent_name.to_string();
let cmd = command.to_string(); let cmd = command.to_string();
@@ -156,7 +162,7 @@ fn run_agent_pty_blocking(
log_writer: Option<&Mutex<AgentLogWriter>>, log_writer: Option<&Mutex<AgentLogWriter>>,
inactivity_timeout_secs: u64, inactivity_timeout_secs: u64,
child_killers: &Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>, child_killers: &Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
) -> Result<Option<String>, String> { ) -> Result<PtyResult, String> {
let pty_system = native_pty_system(); let pty_system = native_pty_system();
let pair = pty_system let pair = pty_system
@@ -251,6 +257,7 @@ fn run_agent_pty_blocking(
}; };
let mut session_id: Option<String> = None; let mut session_id: Option<String> = None;
let mut token_usage: Option<TokenUsage> = None;
loop { loop {
let recv_result = match timeout_dur { let recv_result = match timeout_dur {
@@ -334,7 +341,21 @@ fn run_agent_pty_blocking(
// Complete assistant events are skipped for content extraction // Complete assistant events are skipped for content extraction
// because thinking and text already arrived via stream_event. // because thinking and text already arrived via stream_event.
// The raw JSON is still forwarded as AgentJson below. // The raw JSON is still forwarded as AgentJson below.
"assistant" | "user" | "result" => {} "assistant" | "user" => {}
"result" => {
// Extract token usage from the result event.
if let Some(usage) = TokenUsage::from_result_event(&json) {
slog!(
"[agent:{story_id}:{agent_name}] Token usage: in={} out={} cache_create={} cache_read={} cost=${:.4}",
usage.input_tokens,
usage.output_tokens,
usage.cache_creation_input_tokens,
usage.cache_read_input_tokens,
usage.total_cost_usd,
);
token_usage = Some(usage);
}
}
_ => {} _ => {}
} }
@@ -359,7 +380,10 @@ fn run_agent_pty_blocking(
session_id session_id
); );
Ok(session_id) Ok(PtyResult {
session_id,
token_usage,
})
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -0,0 +1,202 @@
use std::fs;
use std::path::Path;
use chrono::Utc;
use serde::{Deserialize, Serialize};
use super::TokenUsage;
/// A single token usage record persisted to disk.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TokenUsageRecord {
    /// Work item this usage was recorded for.
    pub story_id: String,
    /// Agent that produced the usage.
    pub agent_name: String,
    /// RFC 3339 timestamp of when the record was built (see `build_record`).
    pub timestamp: String,
    /// Model name when known; `#[serde(default)]` keeps older records
    /// without this field deserializable.
    #[serde(default)]
    pub model: Option<String>,
    /// Raw counters and cost from the Claude Code `result` event.
    pub usage: TokenUsage,
}
/// Append a token usage record to the persistent JSONL file.
///
/// Each line is a self-contained JSON object, making appends atomic and
/// reads simple. The file lives at `.story_kit/token_usage.jsonl`.
///
/// # Errors
/// Returns a descriptive `String` if the directory cannot be created, the
/// record cannot be serialized, or the file cannot be opened/written.
pub fn append_record(project_root: &Path, record: &TokenUsageRecord) -> Result<(), String> {
    use std::io::Write;

    let path = token_usage_path(project_root);
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)
            .map_err(|e| format!("Failed to create token_usage directory: {e}"))?;
    }
    let mut line =
        serde_json::to_string(record).map_err(|e| format!("Failed to serialize record: {e}"))?;
    line.push('\n');
    // Write the line directly: a BufWriter buys nothing for a single write,
    // and its Drop impl silently swallows flush errors. File writes are
    // unbuffered, so no explicit flush is needed for the data to reach the OS.
    let mut file = fs::OpenOptions::new()
        .create(true)
        .append(true)
        .open(&path)
        .map_err(|e| format!("Failed to open token_usage file: {e}"))?;
    file.write_all(line.as_bytes())
        .map_err(|e| format!("Failed to write token_usage record: {e}"))?;
    Ok(())
}
/// Read all token usage records from the persistent file.
///
/// A missing file is treated as an empty history. Malformed lines are logged
/// and skipped so one corrupt entry never poisons the whole ledger.
pub fn read_all(project_root: &Path) -> Result<Vec<TokenUsageRecord>, String> {
    let path = token_usage_path(project_root);
    if !path.exists() {
        return Ok(Vec::new());
    }
    let raw =
        fs::read_to_string(&path).map_err(|e| format!("Failed to read token_usage file: {e}"))?;
    let mut parsed = Vec::new();
    // Blank lines are filtered up front; every remaining line must be one
    // self-contained JSON object.
    for entry in raw.lines().map(str::trim).filter(|l| !l.is_empty()) {
        match serde_json::from_str::<TokenUsageRecord>(entry) {
            Ok(record) => parsed.push(record),
            Err(e) => {
                crate::slog_warn!("[token_usage] Skipping malformed line: {e}");
            }
        }
    }
    Ok(parsed)
}
/// Build a `TokenUsageRecord` from the parts available at completion time.
///
/// The timestamp is taken at call time and stored as an RFC 3339 string.
pub fn build_record(
    story_id: &str,
    agent_name: &str,
    model: Option<String>,
    usage: TokenUsage,
) -> TokenUsageRecord {
    let timestamp = Utc::now().to_rfc3339();
    TokenUsageRecord {
        story_id: story_id.to_owned(),
        agent_name: agent_name.to_owned(),
        timestamp,
        model,
        usage,
    }
}
/// Location of the JSONL token-usage ledger inside the project.
fn token_usage_path(project_root: &Path) -> std::path::PathBuf {
    let mut path = project_root.to_path_buf();
    path.push(".story_kit");
    path.push("token_usage.jsonl");
    path
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    /// Fixed usage payload shared by the persistence round-trip tests.
    fn sample_usage() -> TokenUsage {
        TokenUsage {
            input_tokens: 100,
            output_tokens: 200,
            cache_creation_input_tokens: 5000,
            cache_read_input_tokens: 10000,
            total_cost_usd: 1.57,
        }
    }

    // One append followed by a read preserves every field of the record.
    #[test]
    fn append_and_read_roundtrip() {
        let dir = TempDir::new().unwrap();
        let root = dir.path();
        let record = build_record("42_story_foo", "coder-1", None, sample_usage());
        append_record(root, &record).unwrap();
        let records = read_all(root).unwrap();
        assert_eq!(records.len(), 1);
        assert_eq!(records[0].story_id, "42_story_foo");
        assert_eq!(records[0].agent_name, "coder-1");
        assert_eq!(records[0].usage, sample_usage());
    }

    // Successive appends accumulate in file order.
    #[test]
    fn multiple_appends_accumulate() {
        let dir = TempDir::new().unwrap();
        let root = dir.path();
        let r1 = build_record("s1", "coder-1", None, sample_usage());
        let r2 = build_record("s2", "coder-2", None, sample_usage());
        append_record(root, &r1).unwrap();
        append_record(root, &r2).unwrap();
        let records = read_all(root).unwrap();
        assert_eq!(records.len(), 2);
        assert_eq!(records[0].story_id, "s1");
        assert_eq!(records[1].story_id, "s2");
    }

    // A project with no ledger file reads as an empty history, not an error.
    #[test]
    fn read_empty_returns_empty() {
        let dir = TempDir::new().unwrap();
        let records = read_all(dir.path()).unwrap();
        assert!(records.is_empty());
    }

    // Corrupt lines (non-JSON or wrong shape) are skipped rather than failing the read.
    #[test]
    fn malformed_lines_are_skipped() {
        let dir = TempDir::new().unwrap();
        let root = dir.path();
        let path = root.join(".story_kit").join("token_usage.jsonl");
        fs::create_dir_all(path.parent().unwrap()).unwrap();
        fs::write(&path, "not json\n{\"bad\":true}\n").unwrap();
        let records = read_all(root).unwrap();
        assert!(records.is_empty());
    }

    // A fully-populated `result` event parses into matching counters and cost.
    #[test]
    fn token_usage_from_result_event() {
        let json = serde_json::json!({
            "type": "result",
            "total_cost_usd": 1.57,
            "usage": {
                "input_tokens": 7,
                "output_tokens": 475,
                "cache_creation_input_tokens": 185020,
                "cache_read_input_tokens": 810585
            }
        });
        let usage = TokenUsage::from_result_event(&json).unwrap();
        assert_eq!(usage.input_tokens, 7);
        assert_eq!(usage.output_tokens, 475);
        assert_eq!(usage.cache_creation_input_tokens, 185020);
        assert_eq!(usage.cache_read_input_tokens, 810585);
        assert!((usage.total_cost_usd - 1.57).abs() < f64::EPSILON);
    }

    // Without a `usage` object the event is not parseable at all.
    #[test]
    fn token_usage_from_result_event_missing_usage() {
        let json = serde_json::json!({"type": "result"});
        assert!(TokenUsage::from_result_event(&json).is_none());
    }

    // Missing individual counters default to zero rather than failing the parse.
    #[test]
    fn token_usage_from_result_event_partial_fields() {
        let json = serde_json::json!({
            "type": "result",
            "total_cost_usd": 0.5,
            "usage": {
                "input_tokens": 10,
                "output_tokens": 20
            }
        });
        let usage = TokenUsage::from_result_event(&json).unwrap();
        assert_eq!(usage.input_tokens, 10);
        assert_eq!(usage.output_tokens, 20);
        assert_eq!(usage.cache_creation_input_tokens, 0);
        assert_eq!(usage.cache_read_input_tokens, 0);
    }
}

View File

@@ -11,6 +11,25 @@ pub struct ProjectConfig {
pub agent: Vec<AgentConfig>, pub agent: Vec<AgentConfig>,
#[serde(default)] #[serde(default)]
pub watcher: WatcherConfig, pub watcher: WatcherConfig,
/// Project-wide default QA mode: "server", "agent", or "human".
/// Per-story `qa` front matter overrides this. Default: "server".
#[serde(default = "default_qa")]
pub default_qa: String,
/// Default model for coder-stage agents (e.g. "sonnet").
/// When set, `find_free_agent_for_stage` only considers coder agents whose
/// model matches this value, so opus agents are only used when explicitly
/// requested via story front matter `agent:` field.
#[serde(default)]
pub default_coder_model: Option<String>,
/// Maximum number of concurrent coder-stage agents.
/// When set, `auto_assign_available_work` will not start more than this many
/// coder agents at once. Stories wait in `2_current/` until a slot frees up.
#[serde(default)]
pub max_coders: Option<usize>,
/// Maximum number of retries per story per pipeline stage before marking as blocked.
/// Default: 2. Set to 0 to disable retry limits.
#[serde(default = "default_max_retries")]
pub max_retries: u32,
} }
/// Configuration for the filesystem watcher's sweep behaviour. /// Configuration for the filesystem watcher's sweep behaviour.
@@ -46,6 +65,14 @@ fn default_done_retention_secs() -> u64 {
4 * 60 * 60 // 4 hours 4 * 60 * 60 // 4 hours
} }
fn default_qa() -> String {
"server".to_string()
}
fn default_max_retries() -> u32 {
2
}
#[derive(Debug, Clone, Deserialize)] #[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)] #[allow(dead_code)]
pub struct ComponentConfig { pub struct ComponentConfig {
@@ -124,6 +151,14 @@ struct LegacyProjectConfig {
agent: Option<AgentConfig>, agent: Option<AgentConfig>,
#[serde(default)] #[serde(default)]
watcher: WatcherConfig, watcher: WatcherConfig,
#[serde(default = "default_qa")]
default_qa: String,
#[serde(default)]
default_coder_model: Option<String>,
#[serde(default)]
max_coders: Option<usize>,
#[serde(default = "default_max_retries")]
max_retries: u32,
} }
impl Default for ProjectConfig { impl Default for ProjectConfig {
@@ -145,6 +180,10 @@ impl Default for ProjectConfig {
inactivity_timeout_secs: default_inactivity_timeout_secs(), inactivity_timeout_secs: default_inactivity_timeout_secs(),
}], }],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: default_qa(),
default_coder_model: None,
max_coders: None,
max_retries: default_max_retries(),
} }
} }
} }
@@ -186,6 +225,10 @@ impl ProjectConfig {
component: legacy.component, component: legacy.component,
agent: vec![agent], agent: vec![agent],
watcher: legacy.watcher, watcher: legacy.watcher,
default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}; };
validate_agents(&config.agent)?; validate_agents(&config.agent)?;
return Ok(config); return Ok(config);
@@ -206,6 +249,10 @@ impl ProjectConfig {
component: legacy.component, component: legacy.component,
agent: vec![agent], agent: vec![agent],
watcher: legacy.watcher, watcher: legacy.watcher,
default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}; };
validate_agents(&config.agent)?; validate_agents(&config.agent)?;
Ok(config) Ok(config)
@@ -214,12 +261,23 @@ impl ProjectConfig {
component: legacy.component, component: legacy.component,
agent: Vec::new(), agent: Vec::new(),
watcher: legacy.watcher, watcher: legacy.watcher,
default_qa: legacy.default_qa,
default_coder_model: legacy.default_coder_model,
max_coders: legacy.max_coders,
max_retries: legacy.max_retries,
}) })
} }
} }
} }
} }
/// Return the project-wide default QA mode parsed from `default_qa`.
/// Falls back to `Server` if the value is unrecognised.
pub fn default_qa_mode(&self) -> crate::io::story_metadata::QaMode {
crate::io::story_metadata::QaMode::from_str(&self.default_qa)
.unwrap_or(crate::io::story_metadata::QaMode::Server)
}
/// Look up an agent config by name. /// Look up an agent config by name.
pub fn find_agent(&self, name: &str) -> Option<&AgentConfig> { pub fn find_agent(&self, name: &str) -> Option<&AgentConfig> {
self.agent.iter().find(|a| a.name == name) self.agent.iter().find(|a| a.name == name)
@@ -689,4 +747,68 @@ command = "claude"
assert_eq!(config.watcher.done_retention_secs, 900); assert_eq!(config.watcher.done_retention_secs, 900);
assert_eq!(config.agent.len(), 1); assert_eq!(config.agent.len(), 1);
} }
// ── default_coder_model & max_coders ─────────────────────────────────
#[test]
fn parse_default_coder_model_and_max_coders() {
let toml_str = r#"
default_coder_model = "sonnet"
max_coders = 3
[[agent]]
name = "coder-1"
stage = "coder"
model = "sonnet"
[[agent]]
name = "coder-opus"
stage = "coder"
model = "opus"
"#;
let config = ProjectConfig::parse(toml_str).unwrap();
assert_eq!(config.default_coder_model, Some("sonnet".to_string()));
assert_eq!(config.max_coders, Some(3));
}
#[test]
fn default_coder_model_and_max_coders_default_to_none() {
let toml_str = r#"
[[agent]]
name = "coder-1"
"#;
let config = ProjectConfig::parse(toml_str).unwrap();
assert_eq!(config.default_coder_model, None);
assert_eq!(config.max_coders, None);
}
#[test]
fn project_toml_has_default_coder_model_and_max_coders() {
// Verify the actual project.toml has the new settings.
let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
let project_root = manifest_dir.parent().unwrap();
let config = ProjectConfig::load(project_root).unwrap();
assert_eq!(config.default_coder_model, Some("sonnet".to_string()));
assert_eq!(config.max_coders, Some(3));
}
#[test]
fn project_toml_has_three_sonnet_coders() {
let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
let project_root = manifest_dir.parent().unwrap();
let config = ProjectConfig::load(project_root).unwrap();
let sonnet_coders: Vec<_> = config
.agent
.iter()
.filter(|a| a.stage.as_deref() == Some("coder") && a.model.as_deref() == Some("sonnet"))
.collect();
assert_eq!(
sonnet_coders.len(),
3,
"Expected 3 sonnet coders (coder-1, coder-2, coder-3), found {}",
sonnet_coders.len()
);
}
} }

View File

@@ -112,6 +112,45 @@ struct AgentOutputResponse {
output: String, output: String,
} }
/// Per-agent cost breakdown entry for the token cost endpoint.
#[derive(Object, Serialize)]
struct AgentCostEntry {
    // Agent identifier as recorded in the usage log (e.g. "coder-1").
    agent_name: String,
    // Model of the agent's first recorded session, when known.
    model: Option<String>,
    // Token counts summed across all of this agent's recorded sessions.
    input_tokens: u64,
    output_tokens: u64,
    cache_creation_input_tokens: u64,
    cache_read_input_tokens: u64,
    // Summed cost in USD across this agent's sessions.
    total_cost_usd: f64,
}
/// Response for the work item token cost endpoint.
#[derive(Object, Serialize)]
struct TokenCostResponse {
    // Sum of every recorded session cost for the story, across all agents.
    total_cost_usd: f64,
    // Per-agent breakdown, sorted by agent name.
    agents: Vec<AgentCostEntry>,
}
/// A single token usage record in the all-usage response.
#[derive(Object, Serialize)]
struct TokenUsageRecordResponse {
    story_id: String,
    agent_name: String,
    model: Option<String>,
    // RFC 3339 timestamp of when the record was written.
    timestamp: String,
    input_tokens: u64,
    output_tokens: u64,
    cache_creation_input_tokens: u64,
    cache_read_input_tokens: u64,
    total_cost_usd: f64,
}
/// Response for the all token usage endpoint.
#[derive(Object, Serialize)]
struct AllTokenUsageResponse {
    // Full history from token_usage.jsonl, in on-disk (append) order.
    records: Vec<TokenUsageRecordResponse>,
}
/// Returns true if the story file exists in `work/5_done/` or `work/6_archived/`. /// Returns true if the story file exists in `work/5_done/` or `work/6_archived/`.
/// ///
/// Used to exclude agents for already-archived stories from the `list_agents` /// Used to exclude agents for already-archived stories from the `list_agents`
@@ -463,6 +502,94 @@ impl AgentsApi {
Ok(Json(true)) Ok(Json(true))
} }
/// Get the total token cost and per-agent breakdown for a work item.
///
/// Returns the sum of all recorded token usage for the given story_id.
/// If no usage has been recorded, returns zero cost with an empty agents list.
#[oai(path = "/work-items/:story_id/token-cost", method = "get")]
async fn get_work_item_token_cost(
    &self,
    story_id: Path<String>,
) -> OpenApiResult<Json<TokenCostResponse>> {
    let project_root = self
        .ctx
        .agents
        .get_project_root(&self.ctx.state)
        .map_err(bad_request)?;
    // The JSONL log is the single source of truth: the full history is
    // re-read on every request; there is no in-memory cache to invalidate.
    let all_records = crate::agents::token_usage::read_all(&project_root)
        .map_err(|e| bad_request(format!("Failed to read token usage: {e}")))?;
    // Aggregate per-agent sums, keyed by agent name.
    let mut agent_map: std::collections::HashMap<String, AgentCostEntry> =
        std::collections::HashMap::new();
    let mut total_cost_usd = 0.0_f64;
    for record in all_records.into_iter().filter(|r| r.story_id == story_id.0) {
        total_cost_usd += record.usage.total_cost_usd;
        let entry = agent_map
            .entry(record.agent_name.clone())
            // `model` comes from the first record seen for this agent; later
            // records with a different model do not overwrite it.
            .or_insert_with(|| AgentCostEntry {
                agent_name: record.agent_name.clone(),
                model: record.model.clone(),
                input_tokens: 0,
                output_tokens: 0,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
                total_cost_usd: 0.0,
            });
        entry.input_tokens += record.usage.input_tokens;
        entry.output_tokens += record.usage.output_tokens;
        entry.cache_creation_input_tokens += record.usage.cache_creation_input_tokens;
        entry.cache_read_input_tokens += record.usage.cache_read_input_tokens;
        entry.total_cost_usd += record.usage.total_cost_usd;
    }
    // Sort for a stable response order (HashMap iteration order is unspecified).
    let mut agents: Vec<AgentCostEntry> = agent_map.into_values().collect();
    agents.sort_by(|a, b| a.agent_name.cmp(&b.agent_name));
    Ok(Json(TokenCostResponse {
        total_cost_usd,
        agents,
    }))
}
/// Get all token usage records across all stories.
///
/// Returns the full history from the persistent token_usage.jsonl log.
#[oai(path = "/token-usage", method = "get")]
async fn get_all_token_usage(
    &self,
) -> OpenApiResult<Json<AllTokenUsageResponse>> {
    let project_root = self
        .ctx
        .agents
        .get_project_root(&self.ctx.state)
        .map_err(bad_request)?;
    let records = crate::agents::token_usage::read_all(&project_root)
        .map_err(|e| bad_request(format!("Failed to read token usage: {e}")))?;
    // Flatten each record's nested `usage` struct into the flat wire shape,
    // preserving on-disk (append) order.
    let response_records: Vec<TokenUsageRecordResponse> = records
        .into_iter()
        .map(|r| TokenUsageRecordResponse {
            story_id: r.story_id,
            agent_name: r.agent_name,
            model: r.model,
            timestamp: r.timestamp,
            input_tokens: r.usage.input_tokens,
            output_tokens: r.usage.output_tokens,
            cache_creation_input_tokens: r.usage.cache_creation_input_tokens,
            cache_read_input_tokens: r.usage.cache_read_input_tokens,
            total_cost_usd: r.usage.total_cost_usd,
        })
        .collect();
    Ok(Json(AllTokenUsageResponse {
        records: response_records,
    }))
}
} }
#[cfg(test)] #[cfg(test)]

View File

@@ -49,6 +49,9 @@ pub struct AppContext {
/// Receiver for permission requests. The active WebSocket handler locks /// Receiver for permission requests. The active WebSocket handler locks
/// this and polls for incoming permission forwards. /// this and polls for incoming permission forwards.
pub perm_rx: Arc<tokio::sync::Mutex<mpsc::UnboundedReceiver<PermissionForward>>>, pub perm_rx: Arc<tokio::sync::Mutex<mpsc::UnboundedReceiver<PermissionForward>>>,
/// Child process of the QA app launched for manual testing.
/// Only one instance runs at a time.
pub qa_app_process: Arc<std::sync::Mutex<Option<std::process::Child>>>,
} }
#[cfg(test)] #[cfg(test)]
@@ -69,6 +72,7 @@ impl AppContext {
reconciliation_tx, reconciliation_tx,
perm_tx, perm_tx,
perm_rx: Arc::new(tokio::sync::Mutex::new(perm_rx)), perm_rx: Arc::new(tokio::sync::Mutex::new(perm_rx)),
qa_app_process: Arc::new(std::sync::Mutex::new(None)),
} }
} }
} }

View File

@@ -1,4 +1,4 @@
use crate::agents::{close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived, move_story_to_merge, move_story_to_qa, AgentStatus, PipelineStage}; use crate::agents::{close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived, move_story_to_merge, move_story_to_qa, move_story_to_stage, reject_story_from_qa, AgentStatus, PipelineStage};
use crate::config::ProjectConfig; use crate::config::ProjectConfig;
use crate::log_buffer; use crate::log_buffer;
use crate::slog; use crate::slog;
@@ -639,7 +639,7 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
}, },
{ {
"name": "update_story", "name": "update_story",
"description": "Update an existing story file. Can replace the '## User Story' and/or '## Description' section content, and/or set YAML front matter fields (e.g. agent, manual_qa). Auto-commits via the filesystem watcher.", "description": "Update an existing story file. Can replace the '## User Story' and/or '## Description' section content, and/or set YAML front matter fields (e.g. agent, qa). Auto-commits via the filesystem watcher.",
"inputSchema": { "inputSchema": {
"type": "object", "type": "object",
"properties": { "properties": {
@@ -862,6 +862,52 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
"required": ["story_id"] "required": ["story_id"]
} }
}, },
{
"name": "approve_qa",
"description": "Approve a story that passed machine QA and is awaiting human review. Moves the story from work/3_qa/ to work/4_merge/ and starts the mergemaster agent.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Story identifier (e.g. '247_story_human_qa_gate')"
}
},
"required": ["story_id"]
}
},
{
"name": "reject_qa",
"description": "Reject a story during human QA review. Moves the story from work/3_qa/ back to work/2_current/ with rejection notes so the coder agent can fix the issues.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Story identifier (e.g. '247_story_human_qa_gate')"
},
"notes": {
"type": "string",
"description": "Explanation of what is broken or needs fixing"
}
},
"required": ["story_id", "notes"]
}
},
{
"name": "launch_qa_app",
"description": "Launch the app from a story's worktree for manual QA testing. Automatically assigns a free port, writes it to .story_kit_port, and starts the backend server. Only one QA app instance runs at a time.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Story identifier whose worktree app to launch"
}
},
"required": ["story_id"]
}
},
{ {
"name": "get_pipeline_status", "name": "get_pipeline_status",
"description": "Return a structured snapshot of the full work item pipeline. Includes all active stages (current, qa, merge, done) with each item's stage, name, and assigned agent. Also includes upcoming backlog items.", "description": "Return a structured snapshot of the full work item pipeline. Includes all active stages (current, qa, merge, done) with each item's stage, name, and assigned agent. Also includes upcoming backlog items.",
@@ -916,6 +962,38 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
}, },
"required": ["tool_name", "input"] "required": ["tool_name", "input"]
} }
},
{
"name": "get_token_usage",
"description": "Return per-agent token usage records from the persistent log. Shows input tokens, output tokens, cache tokens, and cost in USD for each agent session. Optionally filter by story_id.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Optional: filter records to a specific story (e.g. '42_my_story')"
}
}
}
},
{
"name": "move_story",
"description": "Move a work item (story, bug, spike, or refactor) to an arbitrary pipeline stage. Prefer dedicated tools when available: use accept_story to mark items done, move_story_to_merge to queue for merging, or request_qa to trigger QA review. Use move_story only for arbitrary moves that lack a dedicated tool — for example, moving a story back to backlog or recovering a ghost story by moving it back to current.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Work item identifier (filename stem, e.g. '28_story_my_feature')"
},
"target_stage": {
"type": "string",
"enum": ["backlog", "current", "qa", "merge", "done"],
"description": "Target pipeline stage: backlog (1_backlog), current (2_current), qa (3_qa), merge (4_merge), done (5_done)"
}
},
"required": ["story_id", "target_stage"]
}
} }
] ]
}), }),
@@ -979,6 +1057,9 @@ async fn handle_tools_call(
"report_merge_failure" => tool_report_merge_failure(&args, ctx), "report_merge_failure" => tool_report_merge_failure(&args, ctx),
// QA tools // QA tools
"request_qa" => tool_request_qa(&args, ctx).await, "request_qa" => tool_request_qa(&args, ctx).await,
"approve_qa" => tool_approve_qa(&args, ctx).await,
"reject_qa" => tool_reject_qa(&args, ctx).await,
"launch_qa_app" => tool_launch_qa_app(&args, ctx).await,
// Pipeline status // Pipeline status
"get_pipeline_status" => tool_get_pipeline_status(ctx), "get_pipeline_status" => tool_get_pipeline_status(ctx),
// Diagnostics // Diagnostics
@@ -987,6 +1068,10 @@ async fn handle_tools_call(
"rebuild_and_restart" => tool_rebuild_and_restart(ctx).await, "rebuild_and_restart" => tool_rebuild_and_restart(ctx).await,
// Permission bridge (Claude Code → frontend dialog) // Permission bridge (Claude Code → frontend dialog)
"prompt_permission" => tool_prompt_permission(&args, ctx).await, "prompt_permission" => tool_prompt_permission(&args, ctx).await,
// Token usage
"get_token_usage" => tool_get_token_usage(&args, ctx),
// Arbitrary pipeline movement
"move_story" => tool_move_story(&args, ctx),
_ => Err(format!("Unknown tool: {tool_name}")), _ => Err(format!("Unknown tool: {tool_name}")),
}; };
@@ -1071,7 +1156,7 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
items items
.iter() .iter()
.map(|s| { .map(|s| {
json!({ let mut item = json!({
"story_id": s.story_id, "story_id": s.story_id,
"name": s.name, "name": s.name,
"stage": stage, "stage": stage,
@@ -1080,7 +1165,19 @@ fn tool_get_pipeline_status(ctx: &AppContext) -> Result<String, String> {
"model": a.model, "model": a.model,
"status": a.status, "status": a.status,
})), })),
}) });
// Include blocked/retry_count when present so callers can
// identify stories stuck in the pipeline.
if let Some(true) = s.blocked {
item["blocked"] = json!(true);
}
if let Some(rc) = s.retry_count {
item["retry_count"] = json!(rc);
}
if let Some(ref mf) = s.merge_failure {
item["merge_failure"] = json!(mf);
}
item
}) })
.collect() .collect()
} }
@@ -1947,6 +2044,159 @@ async fn tool_request_qa(args: &Value, ctx: &AppContext) -> Result<String, Strin
.map_err(|e| format!("Serialization error: {e}")) .map_err(|e| format!("Serialization error: {e}"))
} }
/// MCP tool: approve a story that passed machine QA and awaits human review.
///
/// Clears any `review_hold` front matter flag, moves the story from
/// `work/3_qa/` to `work/4_merge/`, starts the mergemaster agent, and
/// returns a JSON summary of the started agent.
///
/// Errors if `story_id` is missing, the project root cannot be resolved,
/// the move fails, or the agent fails to start. NOTE(review): if
/// `start_agent` fails, the story has already been moved to `work/4_merge/`
/// and the move is not rolled back.
async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Clear review_hold before moving so the flag does not travel with the
    // file into work/4_merge/. A failed clear is deliberately ignored.
    let qa_path = project_root
        .join(".story_kit/work/3_qa")
        .join(format!("{story_id}.md"));
    if qa_path.exists() {
        let _ = crate::io::story_metadata::clear_front_matter_field(&qa_path, "review_hold");
    }
    // Move story from work/3_qa/ to work/4_merge/
    move_story_to_merge(&project_root, story_id)?;
    // Start the mergemaster agent
    let info = ctx
        .agents
        .start_agent(&project_root, story_id, Some("mergemaster"), None)
        .await?;
    serde_json::to_string_pretty(&json!({
        "story_id": info.story_id,
        "agent_name": info.agent_name,
        "status": info.status.to_string(),
        "message": format!(
            "Story '{story_id}' approved. Moved to work/4_merge/ and mergemaster agent '{}' started.",
            info.agent_name
        ),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: reject a story during human QA review.
///
/// Moves the story from `work/3_qa/` back to `work/2_current/` with the
/// given rejection `notes`, then restarts a coder agent with the notes
/// appended as extra context.
///
/// Errors when `story_id` or `notes` is missing, the project root cannot
/// be resolved, or the move fails. A coder restart failure is only logged;
/// NOTE(review): the success message still claims the agent was restarted
/// in that case — consider reflecting the failure in the reply.
async fn tool_reject_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let notes = args
        .get("notes")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: notes")?;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Move story from work/3_qa/ back to work/2_current/ with rejection notes
    reject_story_from_qa(&project_root, story_id, notes)?;
    // Prefer the agent pinned in the story's front matter (`agent:` field),
    // read back from the story's new location in work/2_current/.
    let story_path = project_root
        .join(".story_kit/work/2_current")
        .join(format!("{story_id}.md"));
    let agent_name = if story_path.exists() {
        let contents = std::fs::read_to_string(&story_path).unwrap_or_default();
        crate::io::story_metadata::parse_front_matter(&contents)
            .ok()
            .and_then(|meta| meta.agent)
    } else {
        None
    };
    // Hard-coded fallback when the story pins no agent.
    // NOTE(review): assumes an agent named "coder-opus" exists in the
    // project config — confirm, or derive the fallback from config defaults.
    let agent_name = agent_name.as_deref().unwrap_or("coder-opus");
    let context = format!(
        "\n\n---\n## QA Rejection\n\
        Your previous implementation was rejected during human QA review.\n\
        Rejection notes:\n{notes}\n\n\
        Please fix the issues described above and try again."
    );
    if let Err(e) = ctx
        .agents
        .start_agent(&project_root, story_id, Some(agent_name), Some(&context))
        .await
    {
        slog_warn!("[qa] Failed to restart coder for '{story_id}' after rejection: {e}");
    }
    Ok(format!(
        "Story '{story_id}' rejected and moved back to work/2_current/. Coder agent '{agent_name}' restarted with rejection notes."
    ))
}
/// MCP tool: launch the app from a story's worktree for manual QA testing.
///
/// Finds the story's worktree, kills any previously launched QA app (only
/// one instance runs at a time), picks a free TCP port starting at 3100,
/// writes it to `.story_kit_port` in the worktree, and spawns `cargo run`
/// there with `STORYKIT_PORT` set. Returns a JSON summary with the chosen
/// port and worktree path.
async fn tool_launch_qa_app(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    // Find the worktree path for this story
    let worktrees = crate::worktree::list_worktrees(&project_root)?;
    let wt = worktrees
        .iter()
        .find(|w| w.story_id == story_id)
        .ok_or_else(|| format!("No worktree found for story '{story_id}'"))?;
    let wt_path = wt.path.clone();
    // Stop any existing QA app instance. The block scopes the mutex guard so
    // it is released before the (potentially slow) port probe and spawn.
    {
        let mut guard = ctx.qa_app_process.lock().unwrap();
        if let Some(mut child) = guard.take() {
            let _ = child.kill();
            let _ = child.wait();
            slog!("[qa-app] Stopped previous QA app instance.");
        }
    }
    // Find a free port starting from 3100.
    // NOTE(review): the probed port can be taken by another process between
    // this check and the spawn below (inherent TOCTOU race).
    let port = find_free_port(3100);
    // Write .story_kit_port so the frontend dev server knows where to connect
    let port_file = wt_path.join(".story_kit_port");
    std::fs::write(&port_file, port.to_string())
        .map_err(|e| format!("Failed to write .story_kit_port: {e}"))?;
    // Launch the server from the worktree; its stdout/stderr are discarded.
    let child = std::process::Command::new("cargo")
        .args(["run"])
        .env("STORYKIT_PORT", port.to_string())
        .current_dir(&wt_path)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .spawn()
        .map_err(|e| format!("Failed to launch QA app: {e}"))?;
    // Record the child so the next launch can kill it.
    {
        let mut guard = ctx.qa_app_process.lock().unwrap();
        *guard = Some(child);
    }
    serde_json::to_string_pretty(&json!({
        "story_id": story_id,
        "port": port,
        "worktree_path": wt_path.to_string_lossy(),
        "message": format!("QA app launched on port {port} from worktree at {}", wt_path.display()),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
/// Find a free TCP port, scanning up to 100 ports starting from `start`.
///
/// Returns `start` itself as a fallback when no port in the scanned window
/// can be bound; the caller's subsequent spawn/bind will surface the real
/// error. The scan is clamped at `u16::MAX` so a `start` near the top of the
/// port range cannot overflow — the original `start..start + 100` would
/// panic in debug builds for `start > u16::MAX - 100`.
fn find_free_port(start: u16) -> u16 {
    (start..=u16::MAX)
        .take(100)
        .find(|&port| std::net::TcpListener::bind(("127.0.0.1", port)).is_ok())
        .unwrap_or(start)
}
/// Run `git log <base>..HEAD --oneline` in the worktree and return the commit /// Run `git log <base>..HEAD --oneline` in the worktree and return the commit
/// summaries, or `None` if git is unavailable or there are no new commits. /// summaries, or `None` if git is unavailable or there are no new commits.
async fn get_worktree_commits(worktree_path: &str, base_branch: &str) -> Option<Vec<String>> { async fn get_worktree_commits(worktree_path: &str, base_branch: &str) -> Option<Vec<String>> {
@@ -2281,6 +2531,74 @@ async fn tool_prompt_permission(args: &Value, ctx: &AppContext) -> Result<String
} }
} }
/// MCP tool: return per-agent token usage records plus aggregate totals.
///
/// Reads the persistent token_usage.jsonl log via `token_usage::read_all`
/// and optionally filters to one story via the `story_id` argument. Each
/// record now includes `model`, matching the REST `TokenUsageRecordResponse`
/// shape, and a `totals` summary object is appended.
fn tool_get_token_usage(args: &Value, ctx: &AppContext) -> Result<String, String> {
    let root = ctx.state.get_project_root()?;
    let filter_story = args.get("story_id").and_then(|v| v.as_str());
    let records: Vec<_> = crate::agents::token_usage::read_all(&root)?
        .into_iter()
        .filter(|r| filter_story.is_none_or(|s| r.story_id == s))
        .collect();
    // Accumulate all five totals in a single pass over the records instead
    // of one iterator scan per field.
    let (mut total_input, mut total_output) = (0u64, 0u64);
    let (mut total_cache_create, mut total_cache_read) = (0u64, 0u64);
    let mut total_cost = 0.0_f64;
    for r in &records {
        total_input += r.usage.input_tokens;
        total_output += r.usage.output_tokens;
        total_cache_create += r.usage.cache_creation_input_tokens;
        total_cache_read += r.usage.cache_read_input_tokens;
        total_cost += r.usage.total_cost_usd;
    }
    serde_json::to_string_pretty(&json!({
        "records": records.iter().map(|r| json!({
            "story_id": r.story_id,
            "agent_name": r.agent_name,
            // Included for parity with the REST endpoint's record shape.
            "model": r.model,
            "timestamp": r.timestamp,
            "input_tokens": r.usage.input_tokens,
            "output_tokens": r.usage.output_tokens,
            "cache_creation_input_tokens": r.usage.cache_creation_input_tokens,
            "cache_read_input_tokens": r.usage.cache_read_input_tokens,
            "total_cost_usd": r.usage.total_cost_usd,
        })).collect::<Vec<_>>(),
        "totals": {
            "records": records.len(),
            "input_tokens": total_input,
            "output_tokens": total_output,
            "cache_creation_input_tokens": total_cache_create,
            "cache_read_input_tokens": total_cache_read,
            "total_cost_usd": total_cost,
        }
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
fn tool_move_story(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: story_id")?;
let target_stage = args
.get("target_stage")
.and_then(|v| v.as_str())
.ok_or("Missing required argument: target_stage")?;
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let (from_stage, to_stage) = move_story_to_stage(&project_root, story_id, target_stage)?;
serde_json::to_string_pretty(&json!({
"story_id": story_id,
"from_stage": from_stage,
"to_stage": to_stage,
"message": format!("Work item '{story_id}' moved from '{from_stage}' to '{to_stage}'.")
}))
.map_err(|e| format!("Serialization error: {e}"))
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@@ -2383,11 +2701,16 @@ mod tests {
assert!(names.contains(&"move_story_to_merge")); assert!(names.contains(&"move_story_to_merge"));
assert!(names.contains(&"report_merge_failure")); assert!(names.contains(&"report_merge_failure"));
assert!(names.contains(&"request_qa")); assert!(names.contains(&"request_qa"));
assert!(names.contains(&"approve_qa"));
assert!(names.contains(&"reject_qa"));
assert!(names.contains(&"launch_qa_app"));
assert!(names.contains(&"get_server_logs")); assert!(names.contains(&"get_server_logs"));
assert!(names.contains(&"prompt_permission")); assert!(names.contains(&"prompt_permission"));
assert!(names.contains(&"get_pipeline_status")); assert!(names.contains(&"get_pipeline_status"));
assert!(names.contains(&"rebuild_and_restart")); assert!(names.contains(&"rebuild_and_restart"));
assert_eq!(tools.len(), 36); assert!(names.contains(&"get_token_usage"));
assert!(names.contains(&"move_story"));
assert_eq!(tools.len(), 41);
} }
#[test] #[test]
@@ -3524,6 +3847,72 @@ mod tests {
let _ = result; let _ = result;
} }
// ── tool_get_token_usage tests ────────────────────────────────
#[test]
fn tool_get_token_usage_empty_returns_zero_totals() {
let tmp = tempfile::tempdir().unwrap();
let ctx = test_ctx(tmp.path());
let result = tool_get_token_usage(&json!({}), &ctx).unwrap();
let parsed: Value = serde_json::from_str(&result).unwrap();
assert_eq!(parsed["records"].as_array().unwrap().len(), 0);
assert_eq!(parsed["totals"]["records"], 0);
assert_eq!(parsed["totals"]["total_cost_usd"], 0.0);
}
#[test]
fn tool_get_token_usage_returns_written_records() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
let ctx = test_ctx(root);
let usage = crate::agents::TokenUsage {
input_tokens: 100,
output_tokens: 200,
cache_creation_input_tokens: 5000,
cache_read_input_tokens: 10000,
total_cost_usd: 1.57,
};
let record =
crate::agents::token_usage::build_record("42_story_foo", "coder-1", None, usage);
crate::agents::token_usage::append_record(root, &record).unwrap();
let result = tool_get_token_usage(&json!({}), &ctx).unwrap();
let parsed: Value = serde_json::from_str(&result).unwrap();
assert_eq!(parsed["records"].as_array().unwrap().len(), 1);
assert_eq!(parsed["records"][0]["story_id"], "42_story_foo");
assert_eq!(parsed["records"][0]["agent_name"], "coder-1");
assert_eq!(parsed["records"][0]["input_tokens"], 100);
assert_eq!(parsed["totals"]["records"], 1);
assert!((parsed["totals"]["total_cost_usd"].as_f64().unwrap() - 1.57).abs() < f64::EPSILON);
}
#[test]
fn tool_get_token_usage_filters_by_story_id() {
let tmp = tempfile::tempdir().unwrap();
let root = tmp.path();
let ctx = test_ctx(root);
let usage = crate::agents::TokenUsage {
input_tokens: 50,
output_tokens: 60,
cache_creation_input_tokens: 0,
cache_read_input_tokens: 0,
total_cost_usd: 0.5,
};
let r1 = crate::agents::token_usage::build_record("10_story_a", "coder-1", None, usage.clone());
let r2 = crate::agents::token_usage::build_record("20_story_b", "coder-2", None, usage);
crate::agents::token_usage::append_record(root, &r1).unwrap();
crate::agents::token_usage::append_record(root, &r2).unwrap();
let result =
tool_get_token_usage(&json!({"story_id": "10_story_a"}), &ctx).unwrap();
let parsed: Value = serde_json::from_str(&result).unwrap();
assert_eq!(parsed["records"].as_array().unwrap().len(), 1);
assert_eq!(parsed["records"][0]["story_id"], "10_story_a");
assert_eq!(parsed["totals"]["records"], 1);
}
// ── tool_list_worktrees tests ───────────────────────────────── // ── tool_list_worktrees tests ─────────────────────────────────
#[test] #[test]
@@ -3934,6 +4323,80 @@ stage = "coder"
assert!(!req_names.contains(&"agent_name")); assert!(!req_names.contains(&"agent_name"));
} }
// ── approve_qa in tools list ──────────────────────────────────
#[test]
fn approve_qa_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "approve_qa");
assert!(tool.is_some(), "approve_qa missing from tools list");
let t = tool.unwrap();
let required = t["inputSchema"]["required"].as_array().unwrap();
let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
assert!(req_names.contains(&"story_id"));
}
// ── reject_qa in tools list ──────────────────────────────────
#[test]
fn reject_qa_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "reject_qa");
assert!(tool.is_some(), "reject_qa missing from tools list");
let t = tool.unwrap();
let required = t["inputSchema"]["required"].as_array().unwrap();
let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
assert!(req_names.contains(&"story_id"));
assert!(req_names.contains(&"notes"));
}
// ── launch_qa_app in tools list ──────────────────────────────
#[test]
fn launch_qa_app_in_tools_list() {
let resp = handle_tools_list(Some(json!(1)));
let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
let tool = tools.iter().find(|t| t["name"] == "launch_qa_app");
assert!(tool.is_some(), "launch_qa_app missing from tools list");
let t = tool.unwrap();
let required = t["inputSchema"]["required"].as_array().unwrap();
let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
assert!(req_names.contains(&"story_id"));
}
// ── approve_qa missing story_id ──────────────────────────────
#[tokio::test]
async fn tool_approve_qa_missing_story_id() {
    // Calling approve_qa with no arguments must fail, naming the missing field.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let err = tool_approve_qa(&json!({}), &ctx)
        .await
        .expect_err("expected an error for missing story_id");
    assert!(err.contains("story_id"));
}
// ── reject_qa missing arguments ──────────────────────────────
#[tokio::test]
async fn tool_reject_qa_missing_story_id() {
    // notes alone is not enough: story_id is also mandatory.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let err = tool_reject_qa(&json!({"notes": "broken"}), &ctx)
        .await
        .expect_err("expected an error for missing story_id");
    assert!(err.contains("story_id"));
}
#[tokio::test]
async fn tool_reject_qa_missing_notes() {
    // Rejection without notes is invalid: the coder needs the feedback text.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let err = tool_reject_qa(&json!({"story_id": "1_story_test"}), &ctx)
        .await
        .expect_err("expected an error for missing notes");
    assert!(err.contains("notes"));
}
// ── tool_validate_stories with file content ─────────────────── // ── tool_validate_stories with file content ───────────────────
#[test] #[test]
@@ -4326,4 +4789,137 @@ stage = "coder"
target/debug/)" target/debug/)"
); );
} }
// ── move_story tool tests ─────────────────────────────────────
#[test]
fn move_story_in_tools_list() {
    // move_story must be advertised with a description and both required args.
    let resp = handle_tools_list(Some(json!(1)));
    let result = resp.result.unwrap();
    let tools = result["tools"].as_array().unwrap();
    let tool = tools
        .iter()
        .find(|t| t["name"] == "move_story")
        .expect("move_story missing from tools list");
    assert!(tool["description"].is_string());
    let required: Vec<&str> = tool["inputSchema"]["required"]
        .as_array()
        .unwrap()
        .iter()
        .map(|v| v.as_str().unwrap())
        .collect();
    assert!(required.contains(&"story_id"));
    assert!(required.contains(&"target_stage"));
}
#[test]
fn tool_move_story_missing_story_id() {
    // Omitting story_id must produce an error that names the missing field.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let err = tool_move_story(&json!({"target_stage": "current"}), &ctx)
        .expect_err("expected an error for missing story_id");
    assert!(err.contains("story_id"));
}
#[test]
fn tool_move_story_missing_target_stage() {
    // Omitting target_stage must produce an error that names the missing field.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let err = tool_move_story(&json!({"story_id": "1_story_test"}), &ctx)
        .expect_err("expected an error for missing target_stage");
    assert!(err.contains("target_stage"));
}
#[test]
fn tool_move_story_invalid_target_stage() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    // Seed project root in state so get_project_root works.
    let backlog = root.join(".story_kit/work/1_backlog");
    fs::create_dir_all(&backlog).unwrap();
    fs::write(backlog.join("1_story_test.md"), "---\nname: Test\n---\n").unwrap();
    let ctx = test_ctx(root);
    let args = json!({"story_id": "1_story_test", "target_stage": "invalid"});
    let err = tool_move_story(&args, &ctx).expect_err("invalid stage must be rejected");
    assert!(err.contains("Invalid target_stage"));
}
#[test]
fn tool_move_story_moves_from_backlog_to_current() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let backlog = root.join(".story_kit/work/1_backlog");
    let current = root.join(".story_kit/work/2_current");
    for dir in [&backlog, &current] {
        fs::create_dir_all(dir).unwrap();
    }
    fs::write(backlog.join("5_story_test.md"), "---\nname: Test\n---\n").unwrap();
    let ctx = test_ctx(root);
    let args = json!({"story_id": "5_story_test", "target_stage": "current"});
    let raw = tool_move_story(&args, &ctx).unwrap();
    // The file physically moves between stage directories...
    assert!(!backlog.join("5_story_test.md").exists());
    assert!(current.join("5_story_test.md").exists());
    // ...and the JSON result reports the transition.
    let parsed: Value = serde_json::from_str(&raw).unwrap();
    assert_eq!(parsed["story_id"], "5_story_test");
    assert_eq!(parsed["from_stage"], "backlog");
    assert_eq!(parsed["to_stage"], "current");
}
#[test]
fn tool_move_story_moves_from_current_to_backlog() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".story_kit/work/2_current");
    let backlog = root.join(".story_kit/work/1_backlog");
    for dir in [&current, &backlog] {
        fs::create_dir_all(dir).unwrap();
    }
    fs::write(current.join("6_story_back.md"), "---\nname: Back\n---\n").unwrap();
    let ctx = test_ctx(root);
    let args = json!({"story_id": "6_story_back", "target_stage": "backlog"});
    let raw = tool_move_story(&args, &ctx).unwrap();
    // Reverse direction works too: current → backlog.
    assert!(!current.join("6_story_back.md").exists());
    assert!(backlog.join("6_story_back.md").exists());
    let parsed: Value = serde_json::from_str(&raw).unwrap();
    assert_eq!(parsed["from_stage"], "current");
    assert_eq!(parsed["to_stage"], "backlog");
}
#[test]
fn tool_move_story_idempotent_when_already_in_target() {
    let tmp = tempfile::tempdir().unwrap();
    let root = tmp.path();
    let current = root.join(".story_kit/work/2_current");
    fs::create_dir_all(&current).unwrap();
    fs::write(current.join("7_story_idem.md"), "---\nname: Idem\n---\n").unwrap();
    let ctx = test_ctx(root);
    let args = json!({"story_id": "7_story_idem", "target_stage": "current"});
    let raw = tool_move_story(&args, &ctx).unwrap();
    // Moving into the stage it already occupies is a no-op, not an error.
    assert!(current.join("7_story_idem.md").exists());
    let parsed: Value = serde_json::from_str(&raw).unwrap();
    assert_eq!(parsed["from_stage"], "current");
    assert_eq!(parsed["to_stage"], "current");
}
#[test]
fn tool_move_story_error_when_not_found() {
    // A story that exists in no stage directory yields a descriptive error.
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    let args = json!({"story_id": "99_story_ghost", "target_stage": "current"});
    let err = tool_move_story(&args, &ctx).expect_err("unknown story must fail");
    assert!(err.contains("not found in any pipeline stage"));
}
} }

View File

@@ -102,7 +102,7 @@ pub fn build_openapi_service(ctx: Arc<AppContext>) -> (ApiService, ApiService) {
); );
let api_service = let api_service =
OpenApiService::new(api, "Story Kit API", "1.0").server("http://127.0.0.1:3001/api"); OpenApiService::new(api, "Storkit API", "1.0").server("http://127.0.0.1:3001/api");
let docs_api = ( let docs_api = (
ProjectApi { ctx: ctx.clone() }, ProjectApi { ctx: ctx.clone() },
@@ -116,7 +116,7 @@ pub fn build_openapi_service(ctx: Arc<AppContext>) -> (ApiService, ApiService) {
); );
let docs_service = let docs_service =
OpenApiService::new(docs_api, "Story Kit API", "1.0").server("http://127.0.0.1:3001/api"); OpenApiService::new(docs_api, "Storkit API", "1.0").server("http://127.0.0.1:3001/api");
(api_service, docs_service) (api_service, docs_service)
} }

View File

@@ -24,6 +24,18 @@ pub struct UpcomingStory {
pub merge_failure: Option<String>, pub merge_failure: Option<String>,
/// Active agent working on this item, if any. /// Active agent working on this item, if any.
pub agent: Option<AgentAssignment>, pub agent: Option<AgentAssignment>,
/// True when the item is held in QA for human review.
#[serde(skip_serializing_if = "Option::is_none")]
pub review_hold: Option<bool>,
/// QA mode for this item: "human", "server", or "agent".
#[serde(skip_serializing_if = "Option::is_none")]
pub qa: Option<String>,
/// Number of retries at the current pipeline stage.
#[serde(skip_serializing_if = "Option::is_none")]
pub retry_count: Option<u32>,
/// True when the story has exceeded its retry limit and will not be auto-assigned.
#[serde(skip_serializing_if = "Option::is_none")]
pub blocked: Option<bool>,
} }
pub struct StoryValidationResult { pub struct StoryValidationResult {
@@ -117,12 +129,12 @@ fn load_stage_items(
.to_string(); .to_string();
let contents = fs::read_to_string(&path) let contents = fs::read_to_string(&path)
.map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?; .map_err(|e| format!("Failed to read story file {}: {e}", path.display()))?;
let (name, error, merge_failure) = match parse_front_matter(&contents) { let (name, error, merge_failure, review_hold, qa, retry_count, blocked) = match parse_front_matter(&contents) {
Ok(meta) => (meta.name, None, meta.merge_failure), Ok(meta) => (meta.name, None, meta.merge_failure, meta.review_hold, meta.qa.map(|m| m.as_str().to_string()), meta.retry_count, meta.blocked),
Err(e) => (None, Some(e.to_string()), None), Err(e) => (None, Some(e.to_string()), None, None, None, None, None),
}; };
let agent = agent_map.get(&story_id).cloned(); let agent = agent_map.get(&story_id).cloned();
stories.push(UpcomingStory { story_id, name, error, merge_failure, agent }); stories.push(UpcomingStory { story_id, name, error, merge_failure, agent, review_hold, qa, retry_count, blocked });
} }
stories.sort_by(|a, b| a.story_id.cmp(&b.story_id)); stories.sort_by(|a, b| a.story_id.cmp(&b.story_id));
@@ -1685,12 +1697,12 @@ mod tests {
fs::write(&filepath, "---\nname: T\n---\n\n## User Story\n\nSome story\n").unwrap(); fs::write(&filepath, "---\nname: T\n---\n\n## User Story\n\nSome story\n").unwrap();
let mut fields = HashMap::new(); let mut fields = HashMap::new();
fields.insert("manual_qa".to_string(), "true".to_string()); fields.insert("qa".to_string(), "human".to_string());
fields.insert("priority".to_string(), "high".to_string()); fields.insert("priority".to_string(), "high".to_string());
update_story_in_file(tmp.path(), "25_test", None, None, Some(&fields)).unwrap(); update_story_in_file(tmp.path(), "25_test", None, None, Some(&fields)).unwrap();
let result = fs::read_to_string(&filepath).unwrap(); let result = fs::read_to_string(&filepath).unwrap();
assert!(result.contains("manual_qa: \"true\""), "manual_qa field should be set"); assert!(result.contains("qa: \"human\""), "qa field should be set");
assert!(result.contains("priority: \"high\""), "priority field should be set"); assert!(result.contains("priority: \"high\""), "priority field should be set");
assert!(result.contains("name: T"), "name field preserved"); assert!(result.contains("name: T"), "name field preserved");
} }

View File

@@ -4,6 +4,7 @@ use crate::io::onboarding;
use crate::io::watcher::WatcherEvent; use crate::io::watcher::WatcherEvent;
use crate::llm::chat; use crate::llm::chat;
use crate::llm::types::Message; use crate::llm::types::Message;
use crate::log_buffer;
use futures::{SinkExt, StreamExt}; use futures::{SinkExt, StreamExt};
use poem::handler; use poem::handler;
use poem::web::Data; use poem::web::Data;
@@ -132,6 +133,13 @@ enum WsResponse {
SideQuestionDone { SideQuestionDone {
response: String, response: String,
}, },
/// A single server log entry. Sent in bulk on connect (recent history),
/// then streamed live as new entries arrive.
LogEntry {
timestamp: String,
level: String,
message: String,
},
} }
impl From<WatcherEvent> for Option<WsResponse> { impl From<WatcherEvent> for Option<WsResponse> {
@@ -208,6 +216,42 @@ pub async fn ws_handler(ws: WebSocket, ctx: Data<&Arc<AppContext>>) -> impl poem
}); });
} }
// Push recent server log entries so the client has history on connect.
{
let entries = log_buffer::global().get_recent_entries(100, None, None);
for entry in entries {
let _ = tx.send(WsResponse::LogEntry {
timestamp: entry.timestamp,
level: entry.level.as_str().to_string(),
message: entry.message,
});
}
}
// Subscribe to live log entries and forward them to the client.
let tx_logs = tx.clone();
let mut log_rx = log_buffer::global().subscribe();
tokio::spawn(async move {
loop {
match log_rx.recv().await {
Ok(entry) => {
if tx_logs
.send(WsResponse::LogEntry {
timestamp: entry.timestamp,
level: entry.level.as_str().to_string(),
message: entry.message,
})
.is_err()
{
break;
}
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue,
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
}
}
});
// Subscribe to filesystem watcher events and forward them to the client. // Subscribe to filesystem watcher events and forward them to the client.
// After each work-item event, also push the updated pipeline state. // After each work-item event, also push the updated pipeline state.
// Config-changed events are forwarded as-is without a pipeline refresh. // Config-changed events are forwarded as-is without a pipeline refresh.
@@ -693,6 +737,10 @@ mod tests {
error: None, error: None,
merge_failure: None, merge_failure: None,
agent: None, agent: None,
review_hold: None,
qa: None,
retry_count: None,
blocked: None,
}; };
let resp = WsResponse::PipelineState { let resp = WsResponse::PipelineState {
backlog: vec![story], backlog: vec![story],
@@ -830,6 +878,10 @@ mod tests {
error: None, error: None,
merge_failure: None, merge_failure: None,
agent: None, agent: None,
review_hold: None,
qa: None,
retry_count: None,
blocked: None,
}], }],
current: vec![UpcomingStory { current: vec![UpcomingStory {
story_id: "2_story_b".to_string(), story_id: "2_story_b".to_string(),
@@ -837,6 +889,10 @@ mod tests {
error: None, error: None,
merge_failure: None, merge_failure: None,
agent: None, agent: None,
review_hold: None,
qa: None,
retry_count: None,
blocked: None,
}], }],
qa: vec![], qa: vec![],
merge: vec![], merge: vec![],
@@ -846,6 +902,10 @@ mod tests {
error: None, error: None,
merge_failure: None, merge_failure: None,
agent: None, agent: None,
review_hold: None,
qa: None,
retry_count: None,
blocked: None,
}], }],
}; };
let resp: WsResponse = state.into(); let resp: WsResponse = state.into();
@@ -1002,6 +1062,10 @@ mod tests {
model: Some("claude-3-5-sonnet".to_string()), model: Some("claude-3-5-sonnet".to_string()),
status: "running".to_string(), status: "running".to_string(),
}), }),
review_hold: None,
qa: None,
retry_count: None,
blocked: None,
}], }],
qa: vec![], qa: vec![],
merge: vec![], merge: vec![],
@@ -1126,10 +1190,30 @@ mod tests {
"expected onboarding_status, got: {onboarding}" "expected onboarding_status, got: {onboarding}"
); );
// Drain any log_entry messages sent as initial history on connect.
// These are buffered before tests send their own requests.
loop {
// Use a very short timeout: if nothing arrives quickly, the burst is done.
let Ok(Some(Ok(msg))) =
tokio::time::timeout(std::time::Duration::from_millis(200), stream.next()).await
else {
break;
};
let val: serde_json::Value = match msg {
tungstenite::Message::Text(t) => serde_json::from_str(t.as_ref()).unwrap(),
_ => break,
};
if val["type"] != "log_entry" {
// Unexpected non-log message during drain — this shouldn't happen.
panic!("unexpected message during log drain: {val}");
}
}
(sink, stream, initial) (sink, stream, initial)
} }
/// Read next text message from the stream with a timeout. /// Read next non-log_entry text message from the stream with a timeout.
/// Skips any `log_entry` messages that arrive between events.
async fn next_msg( async fn next_msg(
stream: &mut futures::stream::SplitStream< stream: &mut futures::stream::SplitStream<
tokio_tungstenite::WebSocketStream< tokio_tungstenite::WebSocketStream<
@@ -1137,14 +1221,19 @@ mod tests {
>, >,
>, >,
) -> serde_json::Value { ) -> serde_json::Value {
loop {
let msg = tokio::time::timeout(std::time::Duration::from_secs(2), stream.next()) let msg = tokio::time::timeout(std::time::Duration::from_secs(2), stream.next())
.await .await
.expect("timeout waiting for message") .expect("timeout waiting for message")
.expect("stream ended") .expect("stream ended")
.expect("ws error"); .expect("ws error");
match msg { let val: serde_json::Value = match msg {
tungstenite::Message::Text(t) => serde_json::from_str(t.as_ref()).unwrap(), tungstenite::Message::Text(t) => serde_json::from_str(t.as_ref()).unwrap(),
other => panic!("expected text message, got: {other:?}"), other => panic!("expected text message, got: {other:?}"),
};
if val["type"] != "log_entry" {
return val;
}
} }
} }

View File

@@ -99,7 +99,11 @@ const STORY_KIT_CLAUDE_SETTINGS: &str = r#"{
} }
"#; "#;
const DEFAULT_PROJECT_AGENTS_TOML: &str = r#"[[agent]] const DEFAULT_PROJECT_AGENTS_TOML: &str = r#"# Project-wide default QA mode: "server", "agent", or "human".
# Per-story `qa` front matter overrides this setting.
default_qa = "server"
[[agent]]
name = "coder-1" name = "coder-1"
stage = "coder" stage = "coder"
role = "Full-stack engineer. Implements features across all components." role = "Full-stack engineer. Implements features across all components."

View File

@@ -2,6 +2,45 @@ use serde::Deserialize;
use std::fs; use std::fs;
use std::path::Path; use std::path::Path;
/// QA mode for a story: determines how the pipeline handles post-coder review.
///
/// - `Server` — skip the QA agent; rely on server gate checks (clippy + tests).
///   If gates pass, advance straight to merge.
/// - `Agent` — spin up a QA agent (Claude session) to review code and run gates.
/// - `Human` — hold in QA for human approval after server gates pass.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum QaMode {
    Server,
    Agent,
    Human,
}

impl QaMode {
    /// Parse a string into a `QaMode`. Matching is case-insensitive and
    /// ignores surrounding whitespace. Returns `None` for unrecognised values.
    pub fn from_str(s: &str) -> Option<Self> {
        match s.trim().to_lowercase().as_str() {
            "server" => Some(Self::Server),
            "agent" => Some(Self::Agent),
            "human" => Some(Self::Human),
            _ => None,
        }
    }

    /// Canonical lowercase name of the mode, as written in front matter.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Server => "server",
            Self::Agent => "agent",
            Self::Human => "human",
        }
    }
}

// Standard-trait parsing so callers can use `"server".parse::<QaMode>()`.
// Kept in addition to the inherent `from_str` (which returns `Option`) so
// existing callers are unaffected; clippy's `should_implement_trait` lint
// expects the trait form to exist when a `from_str` method is provided.
impl std::str::FromStr for QaMode {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Inherent method wins name resolution here, so this does not recurse.
        QaMode::from_str(s).ok_or_else(|| format!("invalid QA mode: {s:?}"))
    }
}

impl std::fmt::Display for QaMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
#[derive(Debug, Clone, PartialEq, Eq, Default)] #[derive(Debug, Clone, PartialEq, Eq, Default)]
pub struct StoryMetadata { pub struct StoryMetadata {
pub name: Option<String>, pub name: Option<String>,
@@ -9,6 +48,11 @@ pub struct StoryMetadata {
pub merge_failure: Option<String>, pub merge_failure: Option<String>,
pub agent: Option<String>, pub agent: Option<String>,
pub review_hold: Option<bool>, pub review_hold: Option<bool>,
pub qa: Option<QaMode>,
/// Number of times this story has been retried at its current pipeline stage.
pub retry_count: Option<u32>,
/// When `true`, auto-assign will skip this story (retry limit exceeded).
pub blocked: Option<bool>,
} }
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
@@ -33,6 +77,14 @@ struct FrontMatter {
merge_failure: Option<String>, merge_failure: Option<String>,
agent: Option<String>, agent: Option<String>,
review_hold: Option<bool>, review_hold: Option<bool>,
/// New configurable QA mode field: "human", "server", or "agent".
qa: Option<String>,
/// Legacy boolean field — mapped to `qa: human` (true) or ignored (false/absent).
manual_qa: Option<bool>,
/// Number of times this story has been retried at its current pipeline stage.
retry_count: Option<u32>,
/// When `true`, auto-assign will skip this story (retry limit exceeded).
blocked: Option<bool>,
} }
pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> { pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> {
@@ -61,12 +113,22 @@ pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaErro
} }
fn build_metadata(front: FrontMatter) -> StoryMetadata { fn build_metadata(front: FrontMatter) -> StoryMetadata {
// Resolve qa mode: prefer the new `qa` field, fall back to legacy `manual_qa`.
let qa = if let Some(ref qa_str) = front.qa {
QaMode::from_str(qa_str)
} else {
front.manual_qa.and_then(|v| if v { Some(QaMode::Human) } else { None })
};
StoryMetadata { StoryMetadata {
name: front.name, name: front.name,
coverage_baseline: front.coverage_baseline, coverage_baseline: front.coverage_baseline,
merge_failure: front.merge_failure, merge_failure: front.merge_failure,
agent: front.agent, agent: front.agent,
review_hold: front.review_hold, review_hold: front.review_hold,
qa,
retry_count: front.retry_count,
blocked: front.blocked,
} }
} }
@@ -193,6 +255,67 @@ pub fn set_front_matter_field(contents: &str, key: &str, value: &str) -> String
result result
} }
/// Increment the `retry_count` field in the story file's front matter.
///
/// Reads the current value (defaulting to 0 when the field is absent or the
/// front matter is unparsable), increments by 1, and writes the file back.
/// Returns the new retry count.
///
/// # Errors
/// Returns a descriptive `String` when the file cannot be read or written.
pub fn increment_retry_count(path: &Path) -> Result<u32, String> {
    let contents =
        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
    // Missing or broken front matter counts as zero retries so far.
    let current = parse_front_matter(&contents)
        .ok()
        .and_then(|m| m.retry_count)
        .unwrap_or(0);
    // Saturate rather than wrap: a corrupt `retry_count: 4294967295` must not
    // trigger a debug-build overflow panic.
    let new_count = current.saturating_add(1);
    let updated = set_front_matter_field(&contents, "retry_count", &new_count.to_string());
    fs::write(path, &updated).map_err(|e| format!("Failed to write story file: {e}"))?;
    Ok(new_count)
}
/// Write `blocked: true` to the YAML front matter of a story file.
///
/// Used to mark stories that have exceeded the retry limit and should not
/// be auto-assigned again.
pub fn write_blocked(path: &Path) -> Result<(), String> {
    let original =
        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
    let with_flag = set_front_matter_field(&original, "blocked", "true");
    // Persist the updated front matter in place.
    fs::write(path, &with_flag).map_err(|e| format!("Failed to write story file: {e}"))
}
/// Append rejection notes to a story file body.
///
/// Adds a `## QA Rejection Notes` section at the end of the file so the coder
/// agent can see what needs fixing.
pub fn write_rejection_notes(path: &Path, notes: &str) -> Result<(), String> {
    let mut contents =
        fs::read_to_string(path).map_err(|e| format!("Failed to read story file: {e}"))?;
    // Build the section in place rather than allocating a second buffer.
    contents.push_str("\n\n## QA Rejection Notes\n\n");
    contents.push_str(notes);
    contents.push('\n');
    fs::write(path, &contents).map_err(|e| format!("Failed to write story file: {e}"))
}
/// Resolve the effective QA mode for a story file.
///
/// Reads the `qa` front matter field. If absent, falls back to `default`.
/// Spikes are **not** handled here — the caller is responsible for overriding
/// to `Human` for spikes.
pub fn resolve_qa_mode(path: &Path, default: QaMode) -> QaMode {
    // Any failure along the chain (unreadable file, broken front matter,
    // missing field) degrades gracefully to the supplied default.
    fs::read_to_string(path)
        .ok()
        .and_then(|contents| parse_front_matter(&contents).ok())
        .and_then(|meta| meta.qa)
        .unwrap_or(default)
}
pub fn parse_unchecked_todos(contents: &str) -> Vec<String> { pub fn parse_unchecked_todos(contents: &str) -> Vec<String> {
contents contents
.lines() .lines()
@@ -367,4 +490,81 @@ workflow: tdd
assert!(contents.contains("review_hold: true")); assert!(contents.contains("review_hold: true"));
assert!(contents.contains("name: My Spike")); assert!(contents.contains("name: My Spike"));
} }
#[test]
fn parses_qa_mode_from_front_matter() {
    // Each recognised mode string maps to its enum variant.
    for (value, expected) in [
        ("server", QaMode::Server),
        ("agent", QaMode::Agent),
        ("human", QaMode::Human),
    ] {
        let input = format!("---\nname: Story\nqa: {value}\n---\n# Story\n");
        let meta = parse_front_matter(&input).expect("front matter");
        assert_eq!(meta.qa, Some(expected));
    }
}
#[test]
fn qa_mode_defaults_to_none() {
    // No qa field and no legacy manual_qa → no mode recorded.
    let meta =
        parse_front_matter("---\nname: Story\n---\n# Story\n").expect("front matter");
    assert_eq!(meta.qa, None);
}
#[test]
fn legacy_manual_qa_true_maps_to_human() {
    // Backward compatibility: the old boolean flag means "human review".
    let meta = parse_front_matter("---\nname: Story\nmanual_qa: true\n---\n# Story\n")
        .expect("front matter");
    assert_eq!(meta.qa, Some(QaMode::Human));
}
#[test]
fn legacy_manual_qa_false_maps_to_none() {
    // An explicit `manual_qa: false` is treated the same as absent.
    let meta = parse_front_matter("---\nname: Story\nmanual_qa: false\n---\n# Story\n")
        .expect("front matter");
    assert_eq!(meta.qa, None);
}
#[test]
fn qa_field_takes_precedence_over_manual_qa() {
    // When both fields are present, the new `qa` field wins.
    let meta =
        parse_front_matter("---\nname: Story\nqa: server\nmanual_qa: true\n---\n# Story\n")
            .expect("front matter");
    assert_eq!(meta.qa, Some(QaMode::Server));
}
#[test]
fn resolve_qa_mode_uses_file_value() {
    let tmp = tempfile::tempdir().unwrap();
    let story = tmp.path().join("story.md");
    std::fs::write(&story, "---\nname: Test\nqa: human\n---\n# Story\n").unwrap();
    // The file's own qa field wins over the supplied default.
    assert_eq!(resolve_qa_mode(&story, QaMode::Server), QaMode::Human);
}
#[test]
fn resolve_qa_mode_falls_back_to_default() {
    let tmp = tempfile::tempdir().unwrap();
    let story = tmp.path().join("story.md");
    std::fs::write(&story, "---\nname: Test\n---\n# Story\n").unwrap();
    // No qa field in the file → whatever default the caller supplies is used.
    for default in [QaMode::Server, QaMode::Agent] {
        assert_eq!(resolve_qa_mode(&story, default), default);
    }
}
#[test]
fn resolve_qa_mode_missing_file_uses_default() {
    // An unreadable path degrades gracefully to the default.
    let ghost = std::path::Path::new("/nonexistent/story.md");
    assert_eq!(resolve_qa_mode(ghost, QaMode::Server), QaMode::Server);
}
#[test]
fn write_rejection_notes_appends_section() {
    let tmp = tempfile::tempdir().unwrap();
    let story = tmp.path().join("story.md");
    std::fs::write(&story, "---\nname: Test\n---\n# Story\n").unwrap();
    write_rejection_notes(&story, "Button color is wrong").unwrap();
    // Both the section header and the notes text land in the file body.
    let contents = std::fs::read_to_string(&story).unwrap();
    for needle in ["## QA Rejection Notes", "Button color is wrong"] {
        assert!(contents.contains(needle), "missing: {needle}");
    }
}
} }

View File

@@ -11,6 +11,7 @@ use std::fs::OpenOptions;
use std::io::Write; use std::io::Write;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::{Mutex, OnceLock}; use std::sync::{Mutex, OnceLock};
use tokio::sync::broadcast;
const CAPACITY: usize = 1000; const CAPACITY: usize = 1000;
@@ -72,16 +73,25 @@ impl LogEntry {
pub struct LogBuffer { pub struct LogBuffer {
entries: Mutex<VecDeque<LogEntry>>, entries: Mutex<VecDeque<LogEntry>>,
log_file: Mutex<Option<PathBuf>>, log_file: Mutex<Option<PathBuf>>,
/// Broadcast channel for live log streaming to WebSocket subscribers.
broadcast_tx: broadcast::Sender<LogEntry>,
} }
impl LogBuffer { impl LogBuffer {
fn new() -> Self { fn new() -> Self {
let (broadcast_tx, _) = broadcast::channel(512);
Self { Self {
entries: Mutex::new(VecDeque::with_capacity(CAPACITY)), entries: Mutex::new(VecDeque::with_capacity(CAPACITY)),
log_file: Mutex::new(None), log_file: Mutex::new(None),
broadcast_tx,
} }
} }
/// Subscribe to live log entries as they are pushed.
pub fn subscribe(&self) -> broadcast::Receiver<LogEntry> {
self.broadcast_tx.subscribe()
}
/// Set the persistent log file path. Call once at startup after the /// Set the persistent log file path. Call once at startup after the
/// project root is known. /// project root is known.
pub fn set_log_file(&self, path: PathBuf) { pub fn set_log_file(&self, path: PathBuf) {
@@ -112,8 +122,11 @@ impl LogBuffer {
if buf.len() >= CAPACITY { if buf.len() >= CAPACITY {
buf.pop_front(); buf.pop_front();
} }
buf.push_back(entry); buf.push_back(entry.clone());
} }
// Best-effort broadcast to WebSocket subscribers.
let _ = self.broadcast_tx.send(entry);
} }
/// Return up to `count` recent log lines as formatted strings, /// Return up to `count` recent log lines as formatted strings,
@@ -140,6 +153,31 @@ impl LogBuffer {
let start = filtered.len().saturating_sub(count); let start = filtered.len().saturating_sub(count);
filtered[start..].to_vec() filtered[start..].to_vec()
} }
/// Return up to `count` recent `LogEntry` structs (not formatted strings),
/// optionally filtered by substring and/or severity level.
/// Entries are returned in chronological order (oldest first).
pub fn get_recent_entries(
&self,
count: usize,
filter: Option<&str>,
severity: Option<&LogLevel>,
) -> Vec<LogEntry> {
let buf = match self.entries.lock() {
Ok(b) => b,
Err(_) => return vec![],
};
let filtered: Vec<LogEntry> = buf
.iter()
.filter(|entry| {
severity.is_none_or(|s| &entry.level == s)
&& filter.is_none_or(|f| entry.message.contains(f) || entry.formatted().contains(f))
})
.cloned()
.collect();
let start = filtered.len().saturating_sub(count);
filtered[start..].to_vec()
}
} }
static GLOBAL: OnceLock<LogBuffer> = OnceLock::new(); static GLOBAL: OnceLock<LogBuffer> = OnceLock::new();
@@ -208,10 +246,7 @@ mod tests {
#[test] #[test]
fn evicts_oldest_at_capacity() { fn evicts_oldest_at_capacity() {
let buf = LogBuffer { let buf = LogBuffer::new();
entries: Mutex::new(VecDeque::with_capacity(CAPACITY)),
log_file: Mutex::new(None),
};
// Fill past capacity // Fill past capacity
for i in 0..=CAPACITY { for i in 0..=CAPACITY {
buf.push_entry(LogLevel::Info, format!("line {i}")); buf.push_entry(LogLevel::Info, format!("line {i}"));

View File

@@ -188,6 +188,7 @@ async fn main() -> Result<(), std::io::Error> {
reconciliation_tx, reconciliation_tx,
perm_tx, perm_tx,
perm_rx, perm_rx,
qa_app_process: Arc::new(std::sync::Mutex::new(None)),
}; };
let app = build_routes(ctx); let app = build_routes(ctx);

View File

@@ -1,5 +1,4 @@
use crate::agents::{AgentPool, AgentStatus}; use crate::agents::AgentPool;
use crate::config::ProjectConfig;
use crate::http::context::{PermissionDecision, PermissionForward}; use crate::http::context::{PermissionDecision, PermissionForward};
use crate::llm::providers::claude_code::{ClaudeCodeProvider, ClaudeCodeResult}; use crate::llm::providers::claude_code::{ClaudeCodeProvider, ClaudeCodeResult};
use crate::slog; use crate::slog;
@@ -32,7 +31,7 @@ use matrix_sdk::encryption::verification::{
}; };
use matrix_sdk::ruma::events::key::verification::request::ToDeviceKeyVerificationRequestEvent; use matrix_sdk::ruma::events::key::verification::request::ToDeviceKeyVerificationRequestEvent;
use super::config::{BotConfig, save_ambient_rooms}; use super::config::BotConfig;
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Conversation history types // Conversation history types
@@ -168,11 +167,14 @@ pub struct BotContext {
/// in bot.toml; defaults to "Assistant" when unset. /// in bot.toml; defaults to "Assistant" when unset.
pub bot_name: String, pub bot_name: String,
/// Set of room IDs where ambient mode is active. In ambient mode the bot /// Set of room IDs where ambient mode is active. In ambient mode the bot
/// responds to all messages rather than only addressed ones. This is /// responds to all messages rather than only addressed ones.
/// in-memory only — the state does not survive a bot restart. /// Uses a sync mutex since locks are never held across await points.
pub ambient_rooms: Arc<TokioMutex<HashSet<OwnedRoomId>>>, pub ambient_rooms: Arc<std::sync::Mutex<HashSet<OwnedRoomId>>>,
/// Agent pool for checking agent availability. /// Agent pool for checking agent availability.
pub agents: Arc<AgentPool>, pub agents: Arc<AgentPool>,
/// Per-room htop monitoring sessions. Keyed by room ID; each entry holds
/// a stop-signal sender that the background task watches.
pub htop_sessions: super::htop::HtopSessions,
} }
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -191,167 +193,6 @@ pub fn format_startup_announcement(bot_name: &str) -> String {
// Command extraction // Command extraction
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
/// Extract the command portion from a bot-addressed message.
///
/// Strips the leading bot mention (full Matrix user ID, `@localpart`, or
/// display name) plus any trailing punctuation (`,`, `:`) and whitespace,
/// then returns the remainder in lowercase. Returns `None` when no
/// recognized mention prefix is found in the message.
pub fn extract_command(body: &str, bot_name: &str, bot_user_id: &OwnedUserId) -> Option<String> {
    // Word-boundary guard: a mention must not be the prefix of a longer
    // identifier (e.g. "@timmy" must not swallow "@timmybot").
    fn continues_identifier(rest: &str) -> bool {
        rest.chars()
            .next()
            .is_some_and(|c| c.is_alphanumeric() || c == '-' || c == '_')
    }

    let message = body.trim().to_lowercase();
    let full_id = bot_user_id.as_str().to_lowercase();
    let localpart_mention = format!("@{}", bot_user_id.localpart().to_lowercase());
    let display_name = bot_name.to_lowercase();

    // Try the three mention forms in order of specificity. The full user ID
    // needs no boundary guard because it ends with the server name.
    let remainder = if let Some(rest) = message.strip_prefix(&full_id) {
        rest
    } else if let Some(rest) = message.strip_prefix(&localpart_mention) {
        if continues_identifier(rest) {
            return None;
        }
        rest
    } else if let Some(rest) = message.strip_prefix(&display_name) {
        if continues_identifier(rest) {
            return None;
        }
        rest
    } else {
        return None;
    };

    // Drop separators (`,`, `:`) and whitespace between mention and command.
    let command =
        remainder.trim_start_matches(|c: char| c == ':' || c == ',' || c.is_whitespace());
    Some(command.trim().to_string())
}
// ---------------------------------------------------------------------------
// Pipeline status formatter
// ---------------------------------------------------------------------------
/// Read all story IDs and their human-readable names from one pipeline
/// stage directory (`.story_kit/work/<stage_dir>`).
///
/// Returns `(file_stem, optional front-matter name)` pairs sorted by story
/// ID. A missing directory, unreadable entries, or unparsable front matter
/// simply yield fewer entries / `None` names — never an error.
fn read_stage_items(
    project_root: &std::path::Path,
    stage_dir: &str,
) -> Vec<(String, Option<String>)> {
    let dir = project_root
        .join(".story_kit")
        .join("work")
        .join(stage_dir);
    if !dir.exists() {
        return Vec::new();
    }
    let entries = match std::fs::read_dir(&dir) {
        Ok(entries) => entries,
        Err(_) => return Vec::new(),
    };
    let mut items: Vec<(String, Option<String>)> = entries
        .flatten()
        .filter_map(|entry| {
            let path = entry.path();
            // Only markdown story files participate in the pipeline.
            if path.extension().and_then(|e| e.to_str()) != Some("md") {
                return None;
            }
            let stem = path.file_stem().and_then(|s| s.to_str())?.to_string();
            // Best-effort name lookup from the YAML front matter.
            let name = std::fs::read_to_string(&path).ok().and_then(|contents| {
                crate::io::story_metadata::parse_front_matter(&contents)
                    .ok()
                    .and_then(|m| m.name)
            });
            Some((stem, name))
        })
        .collect();
    items.sort_by(|a, b| a.0.cmp(&b.0));
    items
}
/// Build the full pipeline status text formatted for Matrix (markdown).
///
/// Walks the five pipeline stage directories under `.story_kit/work/`,
/// listing each story (with its front-matter name when available) and
/// annotating entries that have a Running/Pending agent with the agent's
/// name, configured model, and status. A trailing "Free Agents" section
/// lists configured agents that are not currently busy.
pub fn build_pipeline_status(project_root: &std::path::Path, agents: &AgentPool) -> String {
    // Build a map from story_id → active AgentInfo for quick lookup.
    let active_agents = agents.list_agents().unwrap_or_default();
    let active_map: std::collections::HashMap<String, &crate::agents::AgentInfo> = active_agents
        .iter()
        .filter(|a| matches!(a.status, AgentStatus::Running | AgentStatus::Pending))
        .map(|a| (a.story_id.clone(), a))
        .collect();
    // Config is optional: without it stories still render, but model names
    // fall back to "?" and the free-agent list to a placeholder.
    let config = ProjectConfig::load(project_root).ok();
    let mut out = String::from("**Pipeline Status**\n\n");
    let stages = [
        ("1_upcoming", "Upcoming"),
        ("2_current", "In Progress"),
        ("3_qa", "QA"),
        ("4_merge", "Merge"),
        ("5_done", "Done"),
    ];
    for (dir, label) in &stages {
        let items = read_stage_items(project_root, dir);
        let count = items.len();
        out.push_str(&format!("**{label}** ({count})\n"));
        if items.is_empty() {
            out.push_str(" *(none)*\n");
        } else {
            for (story_id, name) in &items {
                // NOTE(review): id and name are concatenated with no
                // separator here — confirm a separator wasn't lost upstream.
                let display = match name {
                    Some(n) => format!("{story_id}{n}"),
                    None => story_id.clone(),
                };
                if let Some(agent) = active_map.get(story_id) {
                    // Resolve the agent's configured model; "?" when the
                    // config is missing or has no entry for this agent.
                    let model_str = config
                        .as_ref()
                        .and_then(|cfg| cfg.find_agent(&agent.agent_name))
                        .and_then(|ac| ac.model.as_deref())
                        .unwrap_or("?");
                    out.push_str(&format!(
                        "{display}{} ({}) [{}]\n",
                        agent.agent_name, model_str, agent.status
                    ));
                } else {
                    out.push_str(&format!("{display}\n"));
                }
            }
        }
        out.push('\n');
    }
    // Free agents: configured agents not currently running or pending.
    out.push_str("**Free Agents**\n");
    if let Some(cfg) = &config {
        let busy_names: std::collections::HashSet<String> = active_agents
            .iter()
            .filter(|a| matches!(a.status, AgentStatus::Running | AgentStatus::Pending))
            .map(|a| a.agent_name.clone())
            .collect();
        let free: Vec<String> = cfg
            .agent
            .iter()
            .filter(|a| !busy_names.contains(&a.name))
            .map(|a| match &a.model {
                Some(m) => format!("{} ({})", a.name, m),
                None => a.name.clone(),
            })
            .collect();
        if free.is_empty() {
            out.push_str(" *(none — all agents busy)*\n");
        } else {
            for name in &free {
                out.push_str(&format!("{name}\n"));
            }
        }
    } else {
        out.push_str(" *(no agent config found)*\n");
    }
    out
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Bot entry point // Bot entry point
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@@ -384,7 +225,7 @@ pub async fn run_bot(
let mut login_builder = client let mut login_builder = client
.matrix_auth() .matrix_auth()
.login_username(&config.username, &config.password) .login_username(&config.username, &config.password)
.initial_device_display_name("Story Kit Bot"); .initial_device_display_name("Storkit Bot");
if let Some(ref device_id) = saved_device_id { if let Some(ref device_id) = saved_device_id {
login_builder = login_builder.device_id(device_id); login_builder = login_builder.device_id(device_id);
@@ -536,8 +377,9 @@ pub async fn run_bot(
pending_perm_replies: Arc::new(TokioMutex::new(HashMap::new())), pending_perm_replies: Arc::new(TokioMutex::new(HashMap::new())),
permission_timeout_secs: config.permission_timeout_secs, permission_timeout_secs: config.permission_timeout_secs,
bot_name, bot_name,
ambient_rooms: Arc::new(TokioMutex::new(persisted_ambient)), ambient_rooms: Arc::new(std::sync::Mutex::new(persisted_ambient)),
agents, agents,
htop_sessions: Arc::new(TokioMutex::new(HashMap::new())),
}; };
slog!("[matrix-bot] Cryptographic identity verification is always ON — commands from unencrypted rooms or unverified devices are rejected"); slog!("[matrix-bot] Cryptographic identity verification is always ON — commands from unencrypted rooms or unverified devices are rejected");
@@ -654,50 +496,6 @@ fn contains_word(haystack: &str, needle: &str) -> bool {
false false
} }
/// Parse an ambient-mode toggle command from a message body.
///
/// Recognises `ambient on` / `ambient off` (case-insensitive), optionally
/// preceded by a bot mention in any of these forms:
///
/// - `@botname ambient on` / `@botname:server ambient on`
/// - `botname ambient on`
/// - `ambient on`
///
/// Returns `Some(true)` for "ambient on", `Some(false)` for "ambient off",
/// and `None` when the body is not an ambient-mode command.
pub fn parse_ambient_command(
    body: &str,
    bot_user_id: &OwnedUserId,
    bot_name: &str,
) -> Option<bool> {
    let lower = body.trim().to_ascii_lowercase();
    let display_lower = bot_name.to_ascii_lowercase();
    let localpart_lower = bot_user_id.localpart().to_ascii_lowercase();

    // Drop a leading mention token, if any. An "@..." mention is skipped up
    // to the first whitespace (covers both "@localpart" and
    // "@localpart:server"); otherwise try display name, then localpart.
    let rest = match lower.strip_prefix('@') {
        Some(after_at) => {
            let token_len = after_at.find(char::is_whitespace).unwrap_or(after_at.len());
            after_at[token_len..].trim()
        }
        None => lower
            .strip_prefix(display_lower.as_str())
            .or_else(|| lower.strip_prefix(localpart_lower.as_str()))
            .map_or(lower.as_str(), str::trim),
    };

    match rest {
        "ambient on" => Some(true),
        "ambient off" => Some(false),
        _ => None,
    }
}
/// Returns `true` if the message's `relates_to` field references an event that /// Returns `true` if the message's `relates_to` field references an event that
/// the bot previously sent (i.e. the message is a reply or thread-reply to a /// the bot previously sent (i.e. the message is a reply or thread-reply to a
/// bot message). /// bot message).
@@ -906,7 +704,7 @@ async fn on_room_message(
// ambient mode is enabled for this room. // ambient mode is enabled for this room.
let is_addressed = mentions_bot(&body, formatted_body.as_deref(), &ctx.bot_user_id) let is_addressed = mentions_bot(&body, formatted_body.as_deref(), &ctx.bot_user_id)
|| is_reply_to_bot(ev.content.relates_to.as_ref(), &ctx.bot_sent_event_ids).await; || is_reply_to_bot(ev.content.relates_to.as_ref(), &ctx.bot_sent_event_ids).await;
let is_ambient = ctx.ambient_rooms.lock().await.contains(&incoming_room_id); let is_ambient = ctx.ambient_rooms.lock().unwrap().contains(&incoming_room_id);
if !is_addressed && !is_ambient { if !is_addressed && !is_ambient {
slog!( slog!(
@@ -977,57 +775,82 @@ async fn on_room_message(
} }
} }
// Check for ambient mode toggle commands. Commands are only recognised
// from addressed messages so they can't be accidentally triggered by
// ambient-mode traffic from other users.
let ambient_cmd = is_addressed
.then(|| parse_ambient_command(&body, &ctx.bot_user_id, &ctx.bot_name))
.flatten();
if let Some(enable) = ambient_cmd {
let ambient_room_ids: Vec<String> = {
let mut ambient = ctx.ambient_rooms.lock().await;
if enable {
ambient.insert(incoming_room_id.clone());
} else {
ambient.remove(&incoming_room_id);
}
ambient.iter().map(|r| r.to_string()).collect()
}; // lock released before the async send below
// Persist updated ambient rooms to bot.toml so the state survives restarts.
save_ambient_rooms(&ctx.project_root, &ambient_room_ids);
let confirmation = if enable {
"Ambient mode on. I'll respond to all messages in this room."
} else {
"Ambient mode off. I'll only respond when mentioned."
};
let html = markdown_to_html(confirmation);
if let Ok(resp) = room
.send(RoomMessageEventContent::text_html(confirmation, html))
.await
{
ctx.bot_sent_event_ids.lock().await.insert(resp.event_id);
}
slog!(
"[matrix-bot] Ambient mode {} for room {}",
if enable { "enabled" } else { "disabled" },
incoming_room_id
);
return;
}
let sender = ev.sender.to_string(); let sender = ev.sender.to_string();
let user_message = body; let user_message = body;
slog!("[matrix-bot] Message from {sender}: {user_message}"); slog!("[matrix-bot] Message from {sender}: {user_message}");
// Check for bot-level commands (e.g. "help") before invoking the LLM. // Check for bot-level commands (help, status, ambient, …) before invoking
if let Some(response) = super::commands::try_handle_command( // the LLM. All commands are registered in commands.rs — no special-casing
// needed here.
let dispatch = super::commands::CommandDispatch {
bot_name: &ctx.bot_name,
bot_user_id: ctx.bot_user_id.as_str(),
project_root: &ctx.project_root,
agents: &ctx.agents,
ambient_rooms: &ctx.ambient_rooms,
room_id: &incoming_room_id,
is_addressed,
};
if let Some(response) = super::commands::try_handle_command(&dispatch, &user_message) {
slog!("[matrix-bot] Handled bot command from {sender}");
let html = markdown_to_html(&response);
if let Ok(resp) = room
.send(RoomMessageEventContent::text_html(response, html))
.await
{
ctx.bot_sent_event_ids.lock().await.insert(resp.event_id);
}
return;
}
// Check for the htop command, which requires async Matrix access (Room)
// and cannot be handled by the sync command registry.
if let Some(htop_cmd) =
super::htop::extract_htop_command(&user_message, &ctx.bot_name, ctx.bot_user_id.as_str())
{
slog!("[matrix-bot] Handling htop command from {sender}: {htop_cmd:?}");
match htop_cmd {
super::htop::HtopCommand::Stop => {
super::htop::handle_htop_stop(&room, &incoming_room_id, &ctx.htop_sessions).await;
}
super::htop::HtopCommand::Start { duration_secs } => {
super::htop::handle_htop_start(
&room,
&incoming_room_id,
&ctx.htop_sessions,
Arc::clone(&ctx.agents),
duration_secs,
)
.await;
}
}
return;
}
// Check for the delete command, which requires async agent/worktree ops
// and cannot be handled by the sync command registry.
if let Some(del_cmd) = super::delete::extract_delete_command(
&user_message,
&ctx.bot_name, &ctx.bot_name,
ctx.bot_user_id.as_str(), ctx.bot_user_id.as_str(),
&user_message,
) { ) {
slog!("[matrix-bot] Handled bot command from {sender}"); let response = match del_cmd {
super::delete::DeleteCommand::Delete { story_number } => {
slog!(
"[matrix-bot] Handling delete command from {sender}: story {story_number}"
);
super::delete::handle_delete(
&ctx.bot_name,
&story_number,
&ctx.project_root,
&ctx.agents,
)
.await
}
super::delete::DeleteCommand::BadArgs => {
format!("Usage: `{} delete <number>`", ctx.bot_name)
}
};
let html = markdown_to_html(&response); let html = markdown_to_html(&response);
if let Ok(resp) = room if let Ok(resp) = room
.send(RoomMessageEventContent::text_html(response, html)) .send(RoomMessageEventContent::text_html(response, html))
@@ -1062,22 +885,6 @@ async fn handle_message(
sender: String, sender: String,
user_message: String, user_message: String,
) { ) {
// Handle built-in commands before invoking Claude.
if let Some(cmd) = extract_command(&user_message, &ctx.bot_name, &ctx.bot_user_id)
&& cmd == "status"
{
let project_root = ctx.project_root.clone();
let status_text = build_pipeline_status(&project_root, &ctx.agents);
let html = markdown_to_html(&status_text);
if let Ok(resp) = room
.send(RoomMessageEventContent::text_html(status_text, html))
.await
{
ctx.bot_sent_event_ids.lock().await.insert(resp.event_id);
}
return;
}
// Look up the room's existing Claude Code session ID (if any) so we can // Look up the room's existing Claude Code session ID (if any) so we can
// resume the conversation with structured API messages instead of // resume the conversation with structured API messages instead of
// flattening history into a text prefix. // flattening history into a text prefix.
@@ -1562,8 +1369,9 @@ mod tests {
pending_perm_replies: Arc::new(TokioMutex::new(HashMap::new())), pending_perm_replies: Arc::new(TokioMutex::new(HashMap::new())),
permission_timeout_secs: 120, permission_timeout_secs: 120,
bot_name: "Assistant".to_string(), bot_name: "Assistant".to_string(),
ambient_rooms: Arc::new(TokioMutex::new(HashSet::new())), ambient_rooms: Arc::new(std::sync::Mutex::new(HashSet::new())),
agents: Arc::new(AgentPool::new_test(3000)), agents: Arc::new(AgentPool::new_test(3000)),
htop_sessions: Arc::new(TokioMutex::new(HashMap::new())),
}; };
// Clone must work (required by Matrix SDK event handler injection). // Clone must work (required by Matrix SDK event handler injection).
let _cloned = ctx.clone(); let _cloned = ctx.clone();
@@ -2020,114 +1828,6 @@ mod tests {
assert_eq!(format_startup_announcement("Assistant"), "Assistant is online."); assert_eq!(format_startup_announcement("Assistant"), "Assistant is online.");
} }
// -- extract_command (status trigger) ------------------------------------
// One test per accepted mention form (display name, "@localpart", full
// Matrix user ID), plus punctuation handling and the no-mention rejection.

#[test]
fn extract_command_returns_status_for_bot_name_prefix() {
    let uid = make_user_id("@assistant:example.com");
    let result = extract_command("Assistant status", "Assistant", &uid);
    assert_eq!(result.as_deref(), Some("status"));
}

#[test]
fn extract_command_returns_status_for_at_localpart_prefix() {
    let uid = make_user_id("@assistant:example.com");
    let result = extract_command("@assistant status", "Assistant", &uid);
    assert_eq!(result.as_deref(), Some("status"));
}

#[test]
fn extract_command_returns_status_for_full_id_prefix() {
    let uid = make_user_id("@assistant:example.com");
    let result = extract_command("@assistant:example.com status", "Assistant", &uid);
    assert_eq!(result.as_deref(), Some("status"));
}

#[test]
fn extract_command_returns_none_when_no_bot_mention() {
    // A bare "status" with no mention must not be treated as a command.
    let uid = make_user_id("@assistant:example.com");
    let result = extract_command("status", "Assistant", &uid);
    assert!(result.is_none());
}

#[test]
fn extract_command_handles_punctuation_after_mention() {
    // Separator punctuation after the mention ("@assistant:") is stripped.
    let uid = make_user_id("@assistant:example.com");
    let result = extract_command("@assistant: status", "Assistant", &uid);
    assert_eq!(result.as_deref(), Some("status"));
}
// -- build_pipeline_status -----------------------------------------------
/// Test helper: create `<dir>/.story_kit/work/<stage>/<filename>` containing
/// minimal YAML front matter (a quoted `name:`) followed by a heading.
fn write_story_file(dir: &std::path::Path, stage: &str, filename: &str, name: &str) {
    let stage_dir = dir.join(".story_kit").join("work").join(stage);
    std::fs::create_dir_all(&stage_dir).unwrap();
    std::fs::write(
        stage_dir.join(filename),
        format!("---\nname: \"{name}\"\n---\n\n# {name}\n"),
    )
    .unwrap();
}
#[test]
fn build_pipeline_status_includes_all_stages() {
    let dir = tempfile::tempdir().unwrap();
    let pool = AgentPool::new_test(3001);
    let out = build_pipeline_status(dir.path(), &pool);
    // All five stage labels appear even when no stage directory exists.
    assert!(out.contains("Upcoming"), "missing Upcoming: {out}");
    assert!(out.contains("In Progress"), "missing In Progress: {out}");
    assert!(out.contains("QA"), "missing QA: {out}");
    assert!(out.contains("Merge"), "missing Merge: {out}");
    assert!(out.contains("Done"), "missing Done: {out}");
}

#[test]
fn build_pipeline_status_shows_story_id_and_name() {
    let dir = tempfile::tempdir().unwrap();
    // One story in Upcoming with a front-matter name.
    write_story_file(
        dir.path(),
        "1_upcoming",
        "42_story_do_something.md",
        "Do Something",
    );
    let pool = AgentPool::new_test(3001);
    let out = build_pipeline_status(dir.path(), &pool);
    assert!(
        out.contains("42_story_do_something"),
        "missing story id: {out}"
    );
    assert!(out.contains("Do Something"), "missing story name: {out}");
}

#[test]
fn build_pipeline_status_includes_free_agents_section() {
    let dir = tempfile::tempdir().unwrap();
    let pool = AgentPool::new_test(3001);
    let out = build_pipeline_status(dir.path(), &pool);
    assert!(out.contains("Free Agents"), "missing Free Agents section: {out}");
}

#[test]
fn build_pipeline_status_uses_markdown_bold_headings() {
    let dir = tempfile::tempdir().unwrap();
    let pool = AgentPool::new_test(3001);
    let out = build_pipeline_status(dir.path(), &pool);
    // Stages and headers should use markdown bold (**text**).
    assert!(out.contains("**Pipeline Status**"), "missing bold title: {out}");
    assert!(out.contains("**Upcoming**"), "stage should use bold: {out}");
}

#[test]
fn build_pipeline_status_shows_none_for_empty_stages() {
    let dir = tempfile::tempdir().unwrap();
    let pool = AgentPool::new_test(3001);
    let out = build_pipeline_status(dir.path(), &pool);
    // Empty stages show *(none)*
    assert!(out.contains("*(none)*"), "expected none marker: {out}");
}
// -- bot_name / system prompt ------------------------------------------- // -- bot_name / system prompt -------------------------------------------
#[test] #[test]
@@ -2152,96 +1852,4 @@ mod tests {
assert_eq!(resolve_bot_name(Some("Timmy".to_string())), "Timmy"); assert_eq!(resolve_bot_name(Some("Timmy".to_string())), "Timmy");
} }
// -- parse_ambient_command ------------------------------------------------
// Covers every accepted mention form ("@localpart", full user ID, display
// name, bare), case-insensitivity, and rejection of non-ambient messages.

#[test]
fn ambient_command_on_with_at_mention() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("@timmy ambient on", &uid, "Timmy"), Some(true));
}

#[test]
fn ambient_command_off_with_at_mention() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("@timmy ambient off", &uid, "Timmy"), Some(false));
}

#[test]
fn ambient_command_on_with_full_user_id() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(
        parse_ambient_command("@timmy:homeserver.local ambient on", &uid, "Timmy"),
        Some(true)
    );
}

#[test]
fn ambient_command_on_with_display_name() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("timmy ambient on", &uid, "Timmy"), Some(true));
}

#[test]
fn ambient_command_off_with_display_name() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("timmy ambient off", &uid, "Timmy"), Some(false));
}

#[test]
fn ambient_command_on_bare() {
    // "ambient on" without any bot mention is also recognised.
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("ambient on", &uid, "Timmy"), Some(true));
}

#[test]
fn ambient_command_off_bare() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("ambient off", &uid, "Timmy"), Some(false));
}

#[test]
fn ambient_command_case_insensitive() {
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("@Timmy AMBIENT ON", &uid, "Timmy"), Some(true));
    assert_eq!(parse_ambient_command("TIMMY AMBIENT OFF", &uid, "Timmy"), Some(false));
}

#[test]
fn ambient_command_unrelated_message_returns_none() {
    // Ordinary questions and a bare "ambient" (no on/off) must not toggle.
    let uid = make_user_id("@timmy:homeserver.local");
    assert_eq!(parse_ambient_command("@timmy what is the status?", &uid, "Timmy"), None);
    assert_eq!(parse_ambient_command("hello there", &uid, "Timmy"), None);
    assert_eq!(parse_ambient_command("ambient", &uid, "Timmy"), None);
}
// -- ambient mode state ---------------------------------------------------

#[tokio::test]
async fn ambient_rooms_defaults_to_empty() {
    // A fresh set means no room is in ambient mode until explicitly enabled.
    let ambient_rooms: Arc<TokioMutex<HashSet<OwnedRoomId>>> =
        Arc::new(TokioMutex::new(HashSet::new()));
    let room_id: OwnedRoomId = "!room:example.com".parse().unwrap();
    assert!(!ambient_rooms.lock().await.contains(&room_id));
}

#[tokio::test]
async fn ambient_mode_can_be_toggled_per_room() {
    let ambient_rooms: Arc<TokioMutex<HashSet<OwnedRoomId>>> =
        Arc::new(TokioMutex::new(HashSet::new()));
    let room_a: OwnedRoomId = "!room_a:example.com".parse().unwrap();
    let room_b: OwnedRoomId = "!room_b:example.com".parse().unwrap();
    // Enable ambient mode for room_a only.
    ambient_rooms.lock().await.insert(room_a.clone());
    // Hold one guard for both assertions, then release before mutating again.
    let guard = ambient_rooms.lock().await;
    assert!(guard.contains(&room_a), "room_a should be in ambient mode");
    assert!(!guard.contains(&room_b), "room_b should NOT be in ambient mode");
    drop(guard);
    // Disable ambient mode for room_a.
    ambient_rooms.lock().await.remove(&room_a);
    assert!(!ambient_rooms.lock().await.contains(&room_a), "room_a ambient mode should be off");
}
} }

File diff suppressed because it is too large Load Diff

362
server/src/matrix/delete.rs Normal file
View File

@@ -0,0 +1,362 @@
//! Delete command: remove a story/bug/spike from the pipeline.
//!
//! `{bot_name} delete {number}` finds the work item by number across all pipeline
//! stages, stops any running agent, removes the worktree, deletes the file, and
//! commits the change to git.
use crate::agents::{AgentPool, AgentStatus};
use std::path::Path;
/// A parsed delete command from a Matrix message body.
///
/// Produced by [`extract_delete_command`]; the `BadArgs` variant lets the
/// caller reply with usage help instead of silently ignoring the message.
#[derive(Debug, PartialEq)]
pub enum DeleteCommand {
    /// Delete the story with this number (digits only, e.g. `"42"`).
    Delete { story_number: String },
    /// The user typed `delete` but without a valid numeric argument.
    BadArgs,
}
/// Parse a delete command from a raw Matrix message body.
///
/// The bot mention prefix is stripped first, then leading punctuation and
/// whitespace; the message is a delete command when the first remaining word
/// is `delete` (case-insensitive). Returns `None` for anything else, and
/// `BadArgs` when the argument is missing or not purely numeric.
pub fn extract_delete_command(
    message: &str,
    bot_name: &str,
    bot_user_id: &str,
) -> Option<DeleteCommand> {
    let without_mention = strip_mention(message, bot_name, bot_user_id);
    // Drop separator punctuation too (e.g. the comma in "@bot, delete 1").
    let text = without_mention
        .trim()
        .trim_start_matches(|c: char| !c.is_alphanumeric());
    let mut words = text.splitn(2, char::is_whitespace);
    let cmd = words.next().unwrap_or("");
    let args = words.next().map(str::trim).unwrap_or("");
    if !cmd.eq_ignore_ascii_case("delete") {
        return None;
    }
    let is_number = !args.is_empty() && args.chars().all(|c| c.is_ascii_digit());
    if is_number {
        Some(DeleteCommand::Delete {
            story_number: args.to_string(),
        })
    } else {
        Some(DeleteCommand::BadArgs)
    }
}
/// Handle a delete command asynchronously.
///
/// Finds the work item by `story_number` across all pipeline stages, stops any
/// running agent, removes the worktree, deletes the file, and commits to git.
/// Returns a markdown-formatted response string.
pub async fn handle_delete(
    bot_name: &str,
    story_number: &str,
    project_root: &Path,
    agents: &AgentPool,
) -> String {
    // NOTE(review): these stage directory names ("1_backlog" … "6_archived")
    // differ from the set used by build_pipeline_status in bot.rs
    // ("1_upcoming" …) — confirm which list matches the on-disk layout.
    const STAGES: &[&str] = &[
        "1_backlog",
        "2_current",
        "3_qa",
        "4_merge",
        "5_done",
        "6_archived",
    ];
    // Find the story file across all pipeline stages. A story's number is the
    // leading digit run of its file stem (e.g. "42" in "42_story_feature.md").
    let mut found: Option<(std::path::PathBuf, &str, String)> = None; // (path, stage, story_id)
    'outer: for stage in STAGES {
        let dir = project_root.join(".story_kit").join("work").join(stage);
        if !dir.exists() {
            continue;
        }
        if let Ok(entries) = std::fs::read_dir(&dir) {
            for entry in entries.flatten() {
                let path = entry.path();
                if path.extension().and_then(|e| e.to_str()) != Some("md") {
                    continue;
                }
                if let Some(stem) = path
                    .file_stem()
                    .and_then(|s| s.to_str())
                    .map(|s| s.to_string())
                {
                    // Compare the numeric prefix only, so "42" matches
                    // "42_story_x.md" but not "420_story_y.md".
                    let file_num = stem
                        .split('_')
                        .next()
                        .filter(|s| !s.is_empty() && s.chars().all(|c| c.is_ascii_digit()))
                        .unwrap_or("")
                        .to_string();
                    if file_num == story_number {
                        found = Some((path, stage, stem));
                        break 'outer;
                    }
                }
            }
        }
    }
    let (path, stage, story_id) = match found {
        Some(f) => f,
        None => {
            return format!(
                "No story, bug, or spike with number **{story_number}** found."
            );
        }
    };
    // Read the human-readable name from front matter for the confirmation message.
    // Falls back to the file stem when front matter is missing or unparsable.
    let story_name = std::fs::read_to_string(&path)
        .ok()
        .and_then(|contents| {
            crate::io::story_metadata::parse_front_matter(&contents)
                .ok()
                .and_then(|m| m.name)
        })
        .unwrap_or_else(|| story_id.clone());
    // Stop any running or pending agents for this story.
    let running_agents: Vec<(String, String)> = agents
        .list_agents()
        .unwrap_or_default()
        .into_iter()
        .filter(|a| {
            a.story_id == story_id
                && matches!(a.status, AgentStatus::Running | AgentStatus::Pending)
        })
        .map(|a| (a.story_id.clone(), a.agent_name.clone()))
        .collect();
    let mut stopped_agents: Vec<String> = Vec::new();
    for (sid, agent_name) in &running_agents {
        // A stop failure aborts the delete: don't remove a story while an
        // agent may still be working against it.
        if let Err(e) = agents.stop_agent(project_root, sid, agent_name).await {
            return format!(
                "Failed to stop agent '{agent_name}' for story {story_number}: {e}"
            );
        }
        stopped_agents.push(agent_name.clone());
    }
    // Remove the worktree if one exists (best-effort; ignore errors).
    let _ = crate::worktree::prune_worktree_sync(project_root, &story_id);
    // Delete the story file.
    if let Err(e) = std::fs::remove_file(&path) {
        return format!("Failed to delete story {story_number}: {e}");
    }
    // Commit the deletion to git. Best-effort: both command results are
    // discarded, so a failed commit still reports the deletion as done.
    let commit_msg = format!("story-kit: delete {story_id}");
    let work_rel = std::path::PathBuf::from(".story_kit").join("work");
    let _ = std::process::Command::new("git")
        .args(["add", "-A"])
        .arg(&work_rel)
        .current_dir(project_root)
        .output();
    let _ = std::process::Command::new("git")
        .args(["commit", "-m", &commit_msg])
        .current_dir(project_root)
        .output();
    // Build the response.
    let stage_label = stage_display_name(stage);
    let mut response = format!("Deleted **{story_name}** from **{stage_label}**.");
    if !stopped_agents.is_empty() {
        let agent_list = stopped_agents.join(", ");
        response.push_str(&format!(" Stopped agent(s): {agent_list}."));
    }
    crate::slog!(
        "[matrix-bot] delete command: removed {story_id} from {stage} (bot={bot_name})"
    );
    response
}
/// Human-readable label for a pipeline stage directory name.
///
/// Unknown stage directory names are passed through unchanged.
fn stage_display_name(stage: &str) -> &str {
    const LABELS: [(&str, &str); 6] = [
        ("1_backlog", "backlog"),
        ("2_current", "in-progress"),
        ("3_qa", "QA"),
        ("4_merge", "merge"),
        ("5_done", "done"),
        ("6_archived", "archived"),
    ];
    LABELS
        .iter()
        .find(|(dir, _)| *dir == stage)
        .map(|(_, label)| *label)
        .unwrap_or(stage)
}
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` and `htop::strip_mention`
/// so delete detection works without depending on private symbols.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
/// Case-insensitively strip `prefix` from the start of `text`.
///
/// The prefix only matches at a word boundary: the character after it must
/// not be alphanumeric, `-`, or `_` (so bot "timmy" does not match a message
/// addressed to "timmybot"). Returns `None` when the prefix is absent.
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
    // Use `get` rather than direct slicing: it returns None both when `text`
    // is shorter than `prefix` AND when `prefix.len()` falls inside a
    // multi-byte character. The previous `text[..prefix.len()]` slice
    // panicked on the latter for non-ASCII message bodies — a crash
    // reachable from arbitrary user input.
    let head = text.get(..prefix.len())?;
    if !head.eq_ignore_ascii_case(prefix) {
        return None;
    }
    // Safe: `get` above proved `prefix.len()` is a char boundary of `text`.
    let rest = &text[prefix.len()..];
    match rest.chars().next() {
        None => Some(rest),
        Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
        _ => Some(rest),
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
use super::*;
// -- extract_delete_command ---------------------------------------------
// Mention forms mirror extract_command's: full user ID, display name, and
// "@localpart"; plus argument validation and non-command rejection.

#[test]
fn extract_with_full_user_id() {
    let cmd =
        extract_delete_command("@timmy:home.local delete 42", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::Delete { story_number: "42".to_string() }));
}

#[test]
fn extract_with_display_name() {
    let cmd = extract_delete_command("Timmy delete 310", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::Delete { story_number: "310".to_string() }));
}

#[test]
fn extract_with_localpart() {
    let cmd = extract_delete_command("@timmy delete 7", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::Delete { story_number: "7".to_string() }));
}

#[test]
fn extract_case_insensitive_command() {
    let cmd = extract_delete_command("Timmy DELETE 99", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::Delete { story_number: "99".to_string() }));
}

#[test]
fn extract_no_args_is_bad_args() {
    let cmd = extract_delete_command("Timmy delete", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::BadArgs));
}

#[test]
fn extract_non_numeric_arg_is_bad_args() {
    let cmd = extract_delete_command("Timmy delete foo", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, Some(DeleteCommand::BadArgs));
}

#[test]
fn extract_non_delete_command_returns_none() {
    let cmd = extract_delete_command("Timmy help", "Timmy", "@timmy:home.local");
    assert_eq!(cmd, None);
}

// NOTE(review): this test's name says "returns_none" but the assertion
// expects Some — the name should be updated to match the documented
// behaviour below.
#[test]
fn extract_no_bot_prefix_returns_none() {
    let cmd = extract_delete_command("delete 42", "Timmy", "@timmy:home.local");
    // Without mention prefix the raw text is "delete 42" — cmd is "delete", args "42"
    // strip_mention returns the full trimmed text when no prefix matches,
    // so this is a valid delete command addressed to no-one (ambient mode).
    assert_eq!(cmd, Some(DeleteCommand::Delete { story_number: "42".to_string() }));
}
// -- handle_delete (integration-style, uses temp filesystem) -----------

#[tokio::test]
async fn handle_delete_returns_not_found_for_unknown_number() {
    let tmp = tempfile::tempdir().unwrap();
    let project_root = tmp.path();
    // Create the pipeline directories.
    for stage in &["1_backlog", "2_current", "3_qa", "4_merge", "5_done", "6_archived"] {
        std::fs::create_dir_all(project_root.join(".story_kit").join("work").join(stage))
            .unwrap();
    }
    let agents = std::sync::Arc::new(crate::agents::AgentPool::new_test(3000));
    let response = handle_delete("Timmy", "999", project_root, &agents).await;
    // The not-found reply must name the requested number.
    assert!(
        response.contains("No story") && response.contains("999"),
        "unexpected response: {response}"
    );
}
#[tokio::test]
async fn handle_delete_removes_story_file_and_confirms() {
    let tmp = tempfile::tempdir().unwrap();
    let project_root = tmp.path();
    // Init a bare git repo so the commit step doesn't fail fatally.
    std::process::Command::new("git")
        .args(["init"])
        .current_dir(project_root)
        .output()
        .unwrap();
    // Identity config so `git commit` works in a clean environment (CI).
    std::process::Command::new("git")
        .args(["config", "user.email", "test@test.com"])
        .current_dir(project_root)
        .output()
        .unwrap();
    std::process::Command::new("git")
        .args(["config", "user.name", "Test"])
        .current_dir(project_root)
        .output()
        .unwrap();
    let backlog_dir = project_root.join(".story_kit").join("work").join("1_backlog");
    std::fs::create_dir_all(&backlog_dir).unwrap();
    let story_path = backlog_dir.join("42_story_some_feature.md");
    std::fs::write(
        &story_path,
        "---\nname: Some Feature\n---\n\n# Story 42\n",
    )
    .unwrap();
    // Initial commit so git doesn't complain about no commits.
    std::process::Command::new("git")
        .args(["add", "-A"])
        .current_dir(project_root)
        .output()
        .unwrap();
    std::process::Command::new("git")
        .args(["commit", "-m", "init"])
        .current_dir(project_root)
        .output()
        .unwrap();
    let agents = std::sync::Arc::new(crate::agents::AgentPool::new_test(3000));
    let response = handle_delete("Timmy", "42", project_root, &agents).await;
    // The confirmation names the story and its stage, and the file is gone.
    assert!(
        response.contains("Some Feature") && response.contains("backlog"),
        "unexpected response: {response}"
    );
    assert!(!story_path.exists(), "story file should have been deleted");
}
}

572
server/src/matrix/htop.rs Normal file
View File

@@ -0,0 +1,572 @@
//! htop command: live-updating system and agent process dashboard.
//!
//! Sends an initial message to a Matrix room showing load average and
//! per-agent process info, then edits it in-place every 5 seconds using
//! Matrix replacement events. A single htop session per room is enforced;
//! a new `htop` invocation stops any existing session and starts a fresh one.
//! Sessions auto-stop after the configured duration (default 5 minutes).
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;
use matrix_sdk::room::Room;
use matrix_sdk::ruma::OwnedEventId;
use matrix_sdk::ruma::events::room::message::{
ReplacementMetadata, RoomMessageEventContent, RoomMessageEventContentWithoutRelation,
};
use tokio::sync::{Mutex as TokioMutex, watch};
use crate::agents::{AgentPool, AgentStatus};
use crate::slog;
use super::bot::markdown_to_html;
/// A parsed htop command from a Matrix message body.
///
/// Produced by `extract_htop_command`. `PartialEq` is derived so parsed
/// results can be compared directly in tests.
#[derive(Debug, PartialEq)]
pub enum HtopCommand {
    /// Start (or restart) monitoring. `duration_secs` is the auto-stop
    /// timeout; defaults to 300 (5 minutes) when the message gives no
    /// explicit duration.
    Start { duration_secs: u64 },
    /// Stop any active monitoring session for the room.
    Stop,
}
/// Per-room htop session: holds the stop-signal sender so callers can cancel.
pub struct HtopSession {
    /// Send `true` to request a graceful stop of the background loop.
    /// Merely dropping the sender does not stop the loop: the loop's
    /// `changed()` select branch only fires on a successful receive, so a
    /// dropped sender leaves it running until its timeout.
    pub stop_tx: watch::Sender<bool>,
}
/// Per-room htop session map type alias.
///
/// Shared between the Matrix command handlers and the spawned update tasks;
/// a tokio `Mutex` is used because the map is locked across `.await` points.
pub type HtopSessions = Arc<TokioMutex<HashMap<matrix_sdk::ruma::OwnedRoomId, HtopSession>>>;
/// Parse an htop command from a raw Matrix message body.
///
/// Strips the bot mention prefix and checks whether the first word is `htop`.
/// Returns `None` when the message is not an htop command.
///
/// Recognised forms (after stripping the bot mention):
/// - `htop` → `Start { duration_secs: 300 }`
/// - `htop stop` → `Stop`
/// - `htop 10m` → `Start { duration_secs: 600 }`
/// - `htop 120` → `Start { duration_secs: 120 }` (bare seconds)
pub fn extract_htop_command(message: &str, bot_name: &str, bot_user_id: &str) -> Option<HtopCommand> {
    let body = strip_mention(message, bot_name, bot_user_id).trim();
    // Drop leading punctuation, e.g. the comma in "@timmy, htop".
    let body = body.trim_start_matches(|c: char| !c.is_alphanumeric());
    // Split into the command word and the (possibly empty) argument tail.
    let (head, tail) = body
        .split_once(char::is_whitespace)
        .map_or((body, ""), |(h, t)| (h, t.trim()));
    if !head.eq_ignore_ascii_case("htop") {
        return None;
    }
    if tail.eq_ignore_ascii_case("stop") {
        return Some(HtopCommand::Stop);
    }
    // Anything else (including no argument or an unparsable one) starts a
    // session with the parsed — or default 5-minute — duration.
    Some(HtopCommand::Start {
        duration_secs: parse_duration(tail).unwrap_or(300),
    })
}
/// Parse an optional duration argument.
///
/// Accepts `""` (empty → `None`), a unit-suffixed integer — `"5m"`/`"10M"`
/// (minutes), `"2h"`/`"2H"` (hours), `"90s"`/`"90S"` (seconds) — or a bare
/// integer interpreted as seconds. Returns `None` for anything that does not
/// parse, or whose value would overflow `u64` seconds.
fn parse_duration(s: &str) -> Option<u64> {
    if s.is_empty() {
        return None;
    }
    // The suffix is a single ASCII byte in every matched arm, so slicing one
    // byte off the end cannot split a UTF-8 character.
    let (digits, unit_secs) = match s.as_bytes().last() {
        Some(b'm' | b'M') => (&s[..s.len() - 1], 60),
        Some(b'h' | b'H') => (&s[..s.len() - 1], 3600),
        Some(b's' | b'S') => (&s[..s.len() - 1], 1),
        _ => (s, 1),
    };
    // checked_mul: reject absurd values instead of overflowing.
    digits.parse::<u64>().ok()?.checked_mul(unit_secs)
}
/// Strip the bot mention prefix from a raw Matrix message body.
///
/// Mirrors the logic in `commands::strip_bot_mention` so htop detection works
/// without depending on private symbols in that module.
fn strip_mention<'a>(message: &'a str, bot_name: &str, bot_user_id: &str) -> &'a str {
let trimmed = message.trim();
if let Some(rest) = strip_prefix_ci(trimmed, bot_user_id) {
return rest;
}
if let Some(localpart) = bot_user_id.split(':').next()
&& let Some(rest) = strip_prefix_ci(trimmed, localpart)
{
return rest;
}
if let Some(rest) = strip_prefix_ci(trimmed, bot_name) {
return rest;
}
trimmed
}
/// Case-insensitively strip `prefix` from the start of `text`.
///
/// Returns the remainder only when the prefix matches and the following
/// character is not alphanumeric/`-`/`_` (so `"Timmy"` does not match inside
/// `"TimmyBot"`); returns `None` otherwise.
fn strip_prefix_ci<'a>(text: &'a str, prefix: &str) -> Option<&'a str> {
    // `str::get` returns None when the range is out of bounds OR would split
    // a multi-byte UTF-8 character. The previous direct `text[..len]` index
    // panicked on non-ASCII message bodies where `prefix.len()` landed
    // inside a multi-byte character.
    let head = text.get(..prefix.len())?;
    if !head.eq_ignore_ascii_case(prefix) {
        return None;
    }
    // Safe: `get` above proved prefix.len() is a char boundary.
    let rest = &text[prefix.len()..];
    match rest.chars().next() {
        None => Some(rest),
        // A word character right after the prefix means we matched inside a
        // longer identifier — reject.
        Some(c) if c.is_alphanumeric() || c == '-' || c == '_' => None,
        _ => Some(rest),
    }
}
// ---------------------------------------------------------------------------
// System stats
// ---------------------------------------------------------------------------
/// Read the system load average using the `uptime` command.
///
/// Returns a short string like `"load average: 1.23, 0.98, 0.75"` on success,
/// or `"load: unknown"` on failure (command missing, non-UTF-8 output, or no
/// recognisable load section).
fn get_load_average() -> String {
    let stdout = std::process::Command::new("uptime")
        .output()
        .ok()
        .and_then(|out| String::from_utf8(out.stdout).ok())
        .unwrap_or_default();
    // uptime output typically contains "load average: X, Y, Z" (Linux/macOS)
    // or "load averages: X Y Z" (some BSD variants).
    match stdout.find("load average") {
        Some(pos) => stdout[pos..].trim().trim_end_matches('\n').to_string(),
        None => String::from("load: unknown"),
    }
}
/// Process stats for a single agent, gathered from `ps`.
#[derive(Debug, Default)]
struct AgentProcessStats {
    cpu_pct: f64,
    mem_pct: f64,
    num_procs: usize,
}
/// Gather CPU% and MEM% for processes whose command line contains `worktree_path`.
///
/// Runs `ps aux` and sums all matching lines. Returns `None` when `ps`
/// fails, its output is not UTF-8, or no matching process is found.
fn gather_process_stats(worktree_path: &str) -> Option<AgentProcessStats> {
    let ps_output = std::process::Command::new("ps")
        .args(["aux"])
        .output()
        .ok()
        .and_then(|o| String::from_utf8(o.stdout).ok())?;
    let mut totals = AgentProcessStats::default();
    // Skip the ps header row, then sum every row mentioning the worktree.
    for row in ps_output.lines().skip(1) {
        // Avoid matching against our own status display (the ps command itself)
        if !row.contains(worktree_path) {
            continue;
        }
        // ps aux columns: USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND...
        let cols: Vec<&str> = row.split_whitespace().collect();
        if cols.len() < 4 {
            continue;
        }
        if let (Ok(cpu), Ok(mem)) = (cols[2].parse::<f64>(), cols[3].parse::<f64>()) {
            totals.cpu_pct += cpu;
            totals.mem_pct += mem;
            totals.num_procs += 1;
        }
    }
    (totals.num_procs > 0).then_some(totals)
}
// ---------------------------------------------------------------------------
// Message formatting
// ---------------------------------------------------------------------------
/// Build the Markdown text for the htop dashboard.
///
/// `tick` is the number of updates sent so far (0 = initial).
/// `total_duration_secs` is the configured auto-stop timeout.
///
/// The output is a header line with the load average, a countdown/usage
/// hint, then either a "no agents" note or a Markdown table with one row
/// per Running/Pending agent.
pub fn build_htop_message(agents: &AgentPool, tick: u32, total_duration_secs: u64) -> String {
    // Each tick corresponds to one 5-second update interval.
    let elapsed_secs = u64::from(tick) * 5;
    let remaining = total_duration_secs.saturating_sub(elapsed_secs);
    let mut body: Vec<String> = Vec::new();
    body.push(format!("**htop** — {}", get_load_average()));
    body.push(format!(
        "*Updates every 5s · auto-stops in {}m{}s · send `htop stop` to stop*",
        remaining / 60,
        remaining % 60
    ));
    body.push(String::new());
    let roster = agents.list_agents().unwrap_or_default();
    let running: Vec<_> = roster
        .iter()
        .filter(|a| matches!(a.status, AgentStatus::Running | AgentStatus::Pending))
        .collect();
    if running.is_empty() {
        body.push("*No agents currently running.*".to_string());
        return body.join("\n");
    }
    body.push("| Agent | Story | CPU% | MEM% | Procs |".to_string());
    body.push("|-------|-------|-----:|-----:|------:|".to_string());
    for agent in running {
        // Story label: the numeric prefix before the first underscore.
        let story = agent.story_id.split('_').next().unwrap_or(&agent.story_id);
        // Missing worktree or no matching processes → all-zero stats row.
        let stats = agent
            .worktree_path
            .as_deref()
            .and_then(gather_process_stats)
            .unwrap_or_default();
        body.push(format!(
            "| {} | {} | {:.1} | {:.1} | {} |",
            agent.agent_name, story, stats.cpu_pct, stats.mem_pct, stats.num_procs,
        ));
    }
    body.join("\n")
}
// ---------------------------------------------------------------------------
// Matrix replacement helper
// ---------------------------------------------------------------------------
/// Edit an existing Matrix message by sending a replacement event.
///
/// Uses `RoomMessageEventContentWithoutRelation::make_replacement` with
/// `ReplacementMetadata` so the replacement carries the original event ID.
/// Returns a human-readable error string on send failure.
async fn send_replacement(
    room: &Room,
    original_event_id: &OwnedEventId,
    plain: &str,
    html: &str,
) -> Result<(), String> {
    let metadata = ReplacementMetadata::new(original_event_id.clone(), None);
    let content =
        RoomMessageEventContentWithoutRelation::text_html(plain.to_string(), html.to_string())
            .make_replacement(metadata);
    match room.send(content).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!("Matrix send error: {e}")),
    }
}
// ---------------------------------------------------------------------------
// Background monitoring loop
// ---------------------------------------------------------------------------
/// Run the htop background loop: update the message every 5 seconds until
/// the stop signal is received or the timeout expires.
///
/// Edits the message identified by `initial_event_id` in `room` via Matrix
/// replacement events, rebuilding the dashboard from `agents` each tick.
/// Exits when: `stop_rx` observes `true` (posts a final "stopped" edit),
/// a replacement send fails, or `duration_secs` elapses (auto-stop).
pub async fn run_htop_loop(
    room: Room,
    initial_event_id: OwnedEventId,
    agents: Arc<AgentPool>,
    mut stop_rx: watch::Receiver<bool>,
    duration_secs: u64,
) {
    let interval_secs: u64 = 5;
    // At least one tick so very short durations still produce an update.
    let max_ticks = (duration_secs / interval_secs).max(1);
    for tick in 1..=max_ticks {
        // Wait for the interval or a stop signal.
        let sleep = tokio::time::sleep(Duration::from_secs(interval_secs));
        tokio::pin!(sleep);
        tokio::select! {
            _ = &mut sleep => {}
            // `changed()` errors if the sender is dropped; the `Ok(())`
            // pattern then disables this branch and we fall back to the
            // timer — so a dropped sender alone never stops the loop.
            Ok(()) = stop_rx.changed() => {
                if *stop_rx.borrow() {
                    send_stopped_message(&room, &initial_event_id).await;
                    return;
                }
            }
        }
        // Re-check after waking — the sender might have signalled while we slept.
        if *stop_rx.borrow() {
            send_stopped_message(&room, &initial_event_id).await;
            return;
        }
        let text = build_htop_message(&agents, tick as u32, duration_secs);
        let html = markdown_to_html(&text);
        // A failed edit likely means the room/message is gone; bail out
        // rather than spamming further attempts.
        if let Err(e) = send_replacement(&room, &initial_event_id, &text, &html).await {
            slog!("[htop] Failed to update message: {e}");
            return;
        }
    }
    // Auto-stop: timeout reached.
    send_stopped_message(&room, &initial_event_id).await;
}
/// Replace the dashboard message with a final "monitoring stopped" notice.
/// Failures are logged but otherwise ignored — this is a best-effort edit.
async fn send_stopped_message(room: &Room, event_id: &OwnedEventId) {
    const TEXT: &str = "**htop** — monitoring stopped.";
    if let Err(err) = send_replacement(room, event_id, TEXT, &markdown_to_html(TEXT)).await {
        slog!("[htop] Failed to send stop message: {err}");
    }
}
// ---------------------------------------------------------------------------
// Public command handlers (called from on_room_message in bot.rs)
// ---------------------------------------------------------------------------
/// Start a new htop monitoring session for `room_id`.
///
/// Stops any existing session for the room, sends the initial dashboard
/// message, and spawns a background task that edits it every 5 seconds.
/// Does nothing further if the initial message cannot be sent.
pub async fn handle_htop_start(
    room: &Room,
    room_id: &matrix_sdk::ruma::OwnedRoomId,
    htop_sessions: &HtopSessions,
    agents: Arc<AgentPool>,
    duration_secs: u64,
) {
    // Stop any existing session (best-effort; ignore errors if already done).
    stop_existing_session(htop_sessions, room_id).await;
    // Send the initial message.
    let initial_text = build_htop_message(&agents, 0, duration_secs);
    let initial_html = markdown_to_html(&initial_text);
    let send_result = room
        .send(RoomMessageEventContent::text_html(
            initial_text,
            initial_html,
        ))
        .await;
    let event_id = match send_result {
        Ok(r) => r.event_id,
        Err(e) => {
            slog!("[htop] Failed to send initial message: {e}");
            return;
        }
    };
    // Create the stop channel and register the session.
    let (stop_tx, stop_rx) = watch::channel(false);
    {
        let mut sessions = htop_sessions.lock().await;
        sessions.insert(room_id.clone(), HtopSession { stop_tx });
    }
    // Spawn the background update loop.
    let room_clone = room.clone();
    let sessions_clone = Arc::clone(htop_sessions);
    let room_id_clone = room_id.clone();
    tokio::spawn(async move {
        run_htop_loop(room_clone, event_id, agents, stop_rx, duration_secs).await;
        // Clean up the session entry when the loop exits — but only if the
        // entry still belongs to THIS loop. A stale task from a superseded
        // session must not evict a newer session's entry, or `htop stop`
        // could no longer find (and stop) the new session. Our receiver
        // (`stop_rx`) was dropped when `run_htop_loop` returned, so our own
        // sender reports closed; a live session's sender does not.
        let mut sessions = sessions_clone.lock().await;
        if sessions
            .get(&room_id_clone)
            .is_some_and(|s| s.stop_tx.is_closed())
        {
            sessions.remove(&room_id_clone);
        }
    });
}
/// Stop the active htop session for `room_id`, if any.
///
/// When there is no active session, sends a "no active session" reply
/// to the room so the user knows the command was received.
pub async fn handle_htop_stop(
    room: &Room,
    room_id: &matrix_sdk::ruma::OwnedRoomId,
    htop_sessions: &HtopSessions,
) {
    if stop_existing_session(htop_sessions, room_id).await {
        // A session was active; its background task posts the final edit.
        return;
    }
    let msg = "No active htop session in this room.";
    let html = markdown_to_html(msg);
    let reply = RoomMessageEventContent::text_html(msg, html);
    if let Err(e) = room.send(reply).await {
        slog!("[htop] Failed to send no-session reply: {e}");
    }
}
/// Signal and remove the existing session for `room_id`.
///
/// Returns `true` if a session was found and stopped.
async fn stop_existing_session(
    htop_sessions: &HtopSessions,
    room_id: &matrix_sdk::ruma::OwnedRoomId,
) -> bool {
    let mut sessions = htop_sessions.lock().await;
    match sessions.remove(room_id) {
        Some(session) => {
            // Ignore send errors — the background task may already be done.
            let _ = session.stop_tx.send(true);
            true
        }
        None => false,
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand: parse `msg` with the fixture bot identity used throughout.
    fn parse(msg: &str) -> Option<HtopCommand> {
        extract_htop_command(msg, "Timmy", "@timmy:homeserver.local")
    }

    /// Shorthand for the expected `Start` result.
    fn start(duration_secs: u64) -> Option<HtopCommand> {
        Some(HtopCommand::Start { duration_secs })
    }

    // -- extract_htop_command -----------------------------------------------

    #[test]
    fn htop_bare_command() {
        assert_eq!(parse("@timmy htop"), start(300));
    }

    #[test]
    fn htop_with_display_name() {
        assert_eq!(parse("Timmy htop"), start(300));
    }

    #[test]
    fn htop_stop() {
        assert_eq!(parse("@timmy htop stop"), Some(HtopCommand::Stop));
    }

    #[test]
    fn htop_duration_minutes() {
        assert_eq!(parse("@timmy htop 10m"), start(600));
    }

    #[test]
    fn htop_duration_uppercase_m() {
        assert_eq!(parse("@timmy htop 2M"), start(120));
    }

    #[test]
    fn htop_duration_seconds() {
        assert_eq!(parse("@timmy htop 90"), start(90));
    }

    #[test]
    fn non_htop_command_returns_none() {
        assert!(parse("@timmy status").is_none());
    }

    #[test]
    fn unrelated_message_returns_none() {
        assert!(parse("hello world").is_none());
    }

    #[test]
    fn htop_case_insensitive() {
        assert_eq!(parse("@timmy HTOP"), start(300));
    }

    #[test]
    fn htop_full_user_id() {
        assert_eq!(parse("@timmy:homeserver.local htop"), start(300));
    }

    #[test]
    fn htop_with_comma_after_mention() {
        // Some Matrix clients format mentions as "@timmy, htop"
        assert_eq!(parse("@timmy, htop"), start(300));
    }

    // -- parse_duration -----------------------------------------------------

    #[test]
    fn parse_duration_empty_returns_none() {
        assert_eq!(parse_duration(""), None);
    }

    #[test]
    fn parse_duration_minutes() {
        assert_eq!(parse_duration("5m"), Some(300));
    }

    #[test]
    fn parse_duration_seconds() {
        assert_eq!(parse_duration("120"), Some(120));
    }

    #[test]
    fn parse_duration_invalid_returns_none() {
        assert_eq!(parse_duration("abc"), None);
    }

    // -- build_htop_message -------------------------------------------------

    /// Shorthand: an empty test agent pool on a fixed base port.
    fn test_pool() -> Arc<crate::agents::AgentPool> {
        Arc::new(crate::agents::AgentPool::new_test(3000))
    }

    #[test]
    fn build_htop_message_no_agents() {
        let text = build_htop_message(&test_pool(), 0, 300);
        assert!(text.contains("htop"), "should mention htop: {text}");
        assert!(
            text.contains("No agents currently running"),
            "should note no agents: {text}"
        );
    }

    #[test]
    fn build_htop_message_contains_load() {
        let text = build_htop_message(&test_pool(), 0, 300);
        // Load average is gathered via `uptime`; it should appear in some form.
        assert!(
            text.contains("load"),
            "message should contain load info: {text}"
        );
    }

    #[test]
    fn build_htop_message_shows_remaining_time() {
        let text = build_htop_message(&test_pool(), 0, 300);
        assert!(
            text.contains("auto-stops in"),
            "should show remaining time: {text}"
        );
    }
}

View File

@@ -18,6 +18,8 @@
mod bot; mod bot;
pub mod commands; pub mod commands;
mod config; mod config;
pub mod delete;
pub mod htop;
pub mod notifications; pub mod notifications;
pub use config::BotConfig; pub use config::BotConfig;

View File

@@ -219,6 +219,19 @@ pub async fn remove_worktree_by_story_id(
} }
/// List all worktrees under `{project_root}/.story_kit/worktrees/`. /// List all worktrees under `{project_root}/.story_kit/worktrees/`.
/// Find the worktree path for a given story ID, if it exists.
///
/// Looks for `{project_root}/.story_kit/worktrees/{story_id}` and returns
/// the path only when that directory is present on disk.
pub fn find_worktree_path(project_root: &Path, story_id: &str) -> Option<PathBuf> {
    let candidate = project_root
        .join(".story_kit")
        .join("worktrees")
        .join(story_id);
    candidate.is_dir().then_some(candidate)
}
pub fn list_worktrees(project_root: &Path) -> Result<Vec<WorktreeListEntry>, String> { pub fn list_worktrees(project_root: &Path) -> Result<Vec<WorktreeListEntry>, String> {
let worktrees_dir = project_root.join(".story_kit").join("worktrees"); let worktrees_dir = project_root.join(".story_kit").join("worktrees");
if !worktrees_dir.exists() { if !worktrees_dir.exists() {
@@ -507,6 +520,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Should complete without panic // Should complete without panic
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -524,6 +541,10 @@ mod tests {
}], }],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Should complete without panic // Should complete without panic
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -541,6 +562,10 @@ mod tests {
}], }],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Setup command failures are non-fatal — should not panic or propagate // Setup command failures are non-fatal — should not panic or propagate
run_setup_commands(tmp.path(), &config).await; run_setup_commands(tmp.path(), &config).await;
@@ -558,6 +583,10 @@ mod tests {
}], }],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Teardown failures are best-effort — should not propagate // Teardown failures are best-effort — should not propagate
assert!(run_teardown_commands(tmp.path(), &config).await.is_ok()); assert!(run_teardown_commands(tmp.path(), &config).await.is_ok());
@@ -574,6 +603,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
let info = create_worktree(&project_root, "42_fresh_test", &config, 3001) let info = create_worktree(&project_root, "42_fresh_test", &config, 3001)
.await .await
@@ -597,6 +630,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// First creation // First creation
let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001) let _info1 = create_worktree(&project_root, "43_reuse_test", &config, 3001)
@@ -636,6 +673,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await; let result = remove_worktree_by_story_id(tmp.path(), "99_nonexistent", &config).await;
@@ -658,6 +699,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
create_worktree(&project_root, "88_remove_by_id", &config, 3001) create_worktree(&project_root, "88_remove_by_id", &config, 3001)
.await .await
@@ -711,6 +756,10 @@ mod tests {
}], }],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Even though setup commands fail, create_worktree must succeed // Even though setup commands fail, create_worktree must succeed
// so the agent can start and fix the problem itself. // so the agent can start and fix the problem itself.
@@ -736,6 +785,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// First creation — no setup commands, should succeed // First creation — no setup commands, should succeed
create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001) create_worktree(&project_root, "173_reuse_fail", &empty_config, 3001)
@@ -751,6 +804,10 @@ mod tests {
}], }],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
// Second call — worktree exists, setup commands fail, must still succeed // Second call — worktree exists, setup commands fail, must still succeed
let result = let result =
@@ -773,6 +830,10 @@ mod tests {
component: vec![], component: vec![],
agent: vec![], agent: vec![],
watcher: WatcherConfig::default(), watcher: WatcherConfig::default(),
default_qa: "server".to_string(),
default_coder_model: None,
max_coders: None,
max_retries: 2,
}; };
let info = create_worktree(&project_root, "77_remove_async", &config, 3001) let info = create_worktree(&project_root, "77_remove_async", &config, 3001)
.await .await