Compare commits
343 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 358f177584 | |||
| b60bb57aa4 | |||
| 7003fca873 | |||
| b5d825356e | |||
| 896eb4fc52 | |||
| f8d7438eec | |||
| f7f4e8f95b | |||
| af76910f36 | |||
| f06111f045 | |||
| c6020b7f43 | |||
| 488b798275 | |||
| 0df19967ca | |||
| 6e04015676 | |||
| acaf9477a1 | |||
| 46a89d481a | |||
| c51428414e | |||
| 50405800c6 | |||
| 4aca056bc9 | |||
| 5e725340b4 | |||
| 3fa2064e3e | |||
| 16f9722851 | |||
| 5f0680c6c1 | |||
| 57e0197d75 | |||
| dc4bac3a85 | |||
| f16545ec36 | |||
| d132ed8e64 | |||
| 2a633d604a | |||
| 6a44c0b8ee | |||
| 3f97e34f21 | |||
| 49a8a23d75 | |||
| 1358a32476 | |||
| 9b79160c95 | |||
| 0cbe99677f | |||
| 46b1609528 | |||
| 2b0b08ceda | |||
| 19cc684433 | |||
| fecb157291 | |||
| ac84e7240e | |||
| d5d82bdb00 | |||
| f10edd6718 | |||
| 3f6cd55833 | |||
| a9e8bc4d87 | |||
| 063e0fa76e | |||
| 9e7bd33822 | |||
| 7427865e46 | |||
| ff5f9c76fd | |||
| 641bbfbe2e | |||
| 5516ec4595 | |||
| 762467efd4 | |||
| 3f54bda360 | |||
| 4d1e388a48 | |||
| 10be86587a | |||
| 6a10591413 | |||
| 321c88e05e | |||
| 23562dfa61 | |||
| cb6ebf1d69 | |||
| a006985faf | |||
| 3fce9ec082 | |||
| 03026c70cc | |||
| b75679175b | |||
| 440081016d | |||
| e8f3629c76 | |||
| c5cdc0f594 | |||
| fec417cb16 | |||
| a70a06a5fb | |||
| 0a617e1c18 | |||
| 4527f71857 | |||
| 6e0d12d145 | |||
| d471d29c72 | |||
| 0b652eec21 | |||
| b32fdf7d65 | |||
| 2da0e1eb55 | |||
| 269124a1fd | |||
| 5992f9bd19 | |||
| a53967453e | |||
| ab4b218ac7 | |||
| d5b936c88d | |||
| 07cc0e3f29 | |||
| db4a84c70f | |||
| 3048d26e66 | |||
| 8e45b2a08d | |||
| ddc4a57cd2 | |||
| d216f3c267 | |||
| 8cd881c8f1 | |||
| 2867e1d15f | |||
| c2c9d3f9cb | |||
| f734b4a3c6 | |||
| 890693efda | |||
| 5403b29261 | |||
| 8ee59f5dc1 | |||
| 5dcc35a1b3 | |||
| af70b68cd1 | |||
| e356f9b2dd | |||
| 96793de11b | |||
| bfe70f5599 | |||
| 98aedaddf0 | |||
| 496ce864d7 | |||
| 243738551c | |||
| 20f2d97f06 | |||
| b6edc1bff7 | |||
| c45613a3ad | |||
| 7efed33851 | |||
| b00a477070 | |||
| 52f2e89659 | |||
| 08db28d9d6 | |||
| 77ff0ce093 | |||
| 0ab1b1232b | |||
| 209e01bc06 | |||
| 2650b1a42e | |||
| 3595df4d9d | |||
| 5d84100c41 | |||
| dd436ad186 | |||
| b811b9188f | |||
| 9935311c35 | |||
| be0036922a | |||
| 361f9dff0d | |||
| fc160b5c5f | |||
| 9092b8a2c9 | |||
| dfe3d96313 | |||
| bcefa6a25d | |||
| 50bfeddcb5 | |||
| 8e6b8ef338 | |||
| d363eb63e2 | |||
| 422cec370d | |||
| 973b7d6f72 | |||
| 49b78f3642 | |||
| 93576e3f83 | |||
| dd7f71dd87 | |||
| 9a8492c72f | |||
| ac9bdde164 | |||
| 0b2ec64c74 | |||
| fe0a032e8e | |||
| eff8f6a6a6 | |||
| e45eab82f2 | |||
| 310ad365e6 | |||
| 0b50c66caa | |||
| 9feed0f882 | |||
| bb3301c5af | |||
| a2123274a5 | |||
| 3cbbc5387a | |||
| 4e828fbdd1 | |||
| 6d88595e0d | |||
| aa90646edf | |||
| 7235ab7c7c | |||
| a0326dae78 | |||
| 953fce2ca6 | |||
| 5035b84de5 | |||
| c2f477dde6 | |||
| b098c8ff9f | |||
| 7fea543f60 | |||
| f8bb23a6d4 | |||
| 0016841770 | |||
| 3639d64da6 | |||
| ebdcf18134 | |||
| d83f2ae4c1 | |||
| f6c0d35f11 | |||
| facbf51f05 | |||
| 847ebc292f | |||
| 065ca2bd8f | |||
| 34988855bc | |||
| 7fc788baea | |||
| 40575924b5 | |||
| 4f56fa6cbe | |||
| 52513b55ff | |||
| 1ae2fa9b9b | |||
| 6077f74dbd | |||
| 8ab2e19e98 | |||
| b44f3a33e3 | |||
| 57407aed51 | |||
| a29677b3c7 | |||
| 95df450fca | |||
| 6c6bc35785 | |||
| 7652bbba9c | |||
| efd89a26ac | |||
| 71d4746009 | |||
| 98b5475160 | |||
| 740f1b5e6e | |||
| c0bab1e671 | |||
| 306810e4d5 | |||
| 1193b7ac9a | |||
| 05db012aaf | |||
| bc3c852509 | |||
| 04051282da | |||
| 081b33a8a6 | |||
| cf5424f9a6 | |||
| 1ec9aaab8a | |||
| d6f82393f5 | |||
| f4ce0e017b | |||
| c0ea5f0cb8 | |||
| d375c4b1d3 | |||
| 4ea4be1462 | |||
| bc1c1cd2c9 | |||
| c1e4c40f31 | |||
| 203e8f22be | |||
| 665c036a56 | |||
| 73304f08ac | |||
| fe9fc69f96 | |||
| 3b0542cd41 | |||
| 102919e0b3 | |||
| d63aa0a3c2 | |||
| 7f7db57933 | |||
| 043791194f | |||
| 710f839c65 | |||
| b0e21abb6e | |||
| 6b71c07f5b | |||
| 9cff3c753d | |||
| 6acd7f5249 | |||
| 26f5b25f22 | |||
| 8bc0bd592e | |||
| 7c25aca39b | |||
| 5173bf4aef | |||
| 7f7f49d757 | |||
| e88b9bbc63 | |||
| db22ab2229 | |||
| c30ad79398 | |||
| 16853328fa | |||
| 8ac8cdba88 | |||
| c046edebda | |||
| eef9669c95 | |||
| a9cdd3a354 | |||
| b4eeb499e9 | |||
| fca46c3806 | |||
| 2510fe44bc | |||
| e152cf3cb8 | |||
| 7d3b256fff | |||
| f6d632139e | |||
| 204a99c2e7 | |||
| f28a03e42e | |||
| 26f4edadcc | |||
| fd58631e65 | |||
| f70399a28f | |||
| 02d08faaa2 | |||
| 2dc77479ad | |||
| c5761ae968 | |||
| 67754781ca | |||
| 3436507a21 | |||
| 93bc08574b | |||
| 3571511349 | |||
| 04214ca155 | |||
| 4d48df152c | |||
| e0a70a4c1c | |||
| bae50fbc5b | |||
| 8998dac593 | |||
| e6b300e70e | |||
| b22e2b9274 | |||
| 24b1aa6e7f | |||
| 85e37e03a8 | |||
| f22a2666b8 | |||
| 507889627a | |||
| c4cee72938 | |||
| 33cb363651 | |||
| cd3ded278d | |||
| b5bf75aa5a | |||
| f6b5b1b01a | |||
| 26d34245f9 | |||
| de54265c35 | |||
| a52d1e098f | |||
| 015fa48c32 | |||
| abc30c93d1 | |||
| cf2faa9bff | |||
| 92aa1ebccf | |||
| 877f69c897 | |||
| 710b604b7c | |||
| ab4ce2db92 | |||
| 61f6fd60a8 | |||
| e66149e07c | |||
| 108a697483 | |||
| 1a7f419ecf | |||
| 96b1ce373b | |||
| 58e41f7e0b | |||
| c9a2fa58eb | |||
| 64c0f190cf | |||
| fc443ed987 | |||
| 7939a19816 | |||
| 46b5087157 | |||
| a8d6524b56 | |||
| 61d63db84c | |||
| aa4ec8c779 | |||
| 3777042ad3 | |||
| feb340beba | |||
| 23369c514d | |||
| 832da16b6f | |||
| 131964cbc3 | |||
| 81db0504ed | |||
| 584a44a516 | |||
| c7c4a57533 | |||
| 0a67c28f8c | |||
| 6476492caa | |||
| faf8734ea8 | |||
| 862f0704be | |||
| d3df1586c6 | |||
| 8a1996e0e4 | |||
| 61f5a0c3be | |||
| d7bc785de1 | |||
| eaac665a9f | |||
| d702aa59c4 | |||
| 9df9a1454a | |||
| 47163d235c | |||
| a7342fc9d3 | |||
| 5dd8feb75c | |||
| f5024b2648 | |||
| 6521c83eec | |||
| 65e3643655 | |||
| fc95b57a78 | |||
| 7c1a970b13 | |||
| 64e2df20b7 | |||
| 90e3612fd3 | |||
| 962bfe37c6 | |||
| f05c6a42b0 | |||
| 077288e7b7 | |||
| 580ab1ce68 | |||
| 71a6c72614 | |||
| fae7b3be20 | |||
| 775b9ac7e3 | |||
| 5a87d55dd4 | |||
| 0457fbfecc | |||
| 13b16138b5 | |||
| 8249896449 | |||
| dbd932bf46 | |||
| eef49678ce | |||
| 58ee82c988 | |||
| 49ac23044a | |||
| 84a775be77 | |||
| 60c0c95f38 | |||
| a1a30bcc42 | |||
| 96ebd7ecb8 | |||
| 25c8b1ec25 | |||
| bcb7cfabee | |||
| d4dad1d556 | |||
| 195c7c51c4 | |||
| 968d973cff | |||
| 4394ab3fed | |||
| 11bbfca3da | |||
| a9aa88b655 | |||
| b62974dd88 | |||
| ac52a8bb4e | |||
| 18755aac96 | |||
| 5d37421f70 | |||
| 224d269971 | |||
| 6146a173f1 | |||
| 821345d266 | |||
| 0fa63e2de3 | |||
| d8cbec8268 |
@@ -1,5 +1,7 @@
|
||||
{
|
||||
"enabledMcpjsonServers": ["storkit"],
|
||||
"enabledMcpjsonServers": [
|
||||
"storkit"
|
||||
],
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(./server/target/debug/storkit:*)",
|
||||
@@ -67,7 +69,8 @@
|
||||
"Bash(tail *)",
|
||||
"Bash(wc *)",
|
||||
"Bash(npx vite:*)",
|
||||
"Bash(npm run dev:*)"
|
||||
"Bash(npm run dev:*)",
|
||||
"Bash(stat *)"
|
||||
]
|
||||
}
|
||||
}
|
||||
+10
-4
@@ -9,16 +9,22 @@
|
||||
|
||||
When you start a new session with this project:
|
||||
|
||||
1. **Check for MCP Tools:** Read `.mcp.json` to discover the MCP server endpoint. Then list available tools by calling:
|
||||
1. **Check Setup Wizard:** Call `wizard_status` to check if project setup is complete. If the wizard is not complete, guide the user through the remaining steps. Important rules for the wizard flow:
|
||||
- **Be conversational.** Don't show tool names, step numbers, or raw wizard output to the user.
|
||||
- **On projects with existing code:** Read the codebase and generate each file, then show the user what you wrote and ask if it looks right.
|
||||
- **On bare projects with no code:** Ask the user what they want to build, what language/framework they plan to use, and generate files from their answers.
|
||||
- **You must actually generate the files.** The workflow for each step is: (1) call `wizard_generate` with no args to get a hint, (2) write the file content yourself based on the conversation, (3) call `wizard_generate` again with the `content` argument containing the full file body, (4) show the user what you wrote, (5) call `wizard_confirm` (they approve), `wizard_retry` (they want changes), or `wizard_skip` (they want to skip). Do not stop after discussing — follow through and write the files.
|
||||
- **Keep moving.** After each step is confirmed, immediately proceed to the next wizard step without waiting for the user to ask.
|
||||
2. **Check for MCP Tools:** Read `.mcp.json` to discover the MCP server endpoint. Then list available tools by calling:
|
||||
```bash
|
||||
curl -s "$(jq -r '.mcpServers["storkit"].url' .mcp.json)" \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}'
|
||||
```
|
||||
This returns the full tool catalog (create stories, spawn agents, record tests, manage worktrees, etc.). Familiarize yourself with the available tools before proceeding. These tools allow you to directly manipulate the workflow and spawn subsidiary agents without manual file manipulation.
|
||||
2. **Read Context:** Check `.story_kit/specs/00_CONTEXT.md` for high-level project goals.
|
||||
3. **Read Stack:** Check `.story_kit/specs/tech/STACK.md` for technical constraints and patterns.
|
||||
4. **Check Work Items:** Look at `.story_kit/work/1_backlog/` and `.story_kit/work/2_current/` to see what work is pending.
|
||||
3. **Read Context:** Check `.storkit/specs/00_CONTEXT.md` for high-level project goals.
|
||||
4. **Read Stack:** Check `.storkit/specs/tech/STACK.md` for technical constraints and patterns.
|
||||
5. **Check Work Items:** Look at `.storkit/work/1_backlog/` and `.storkit/work/2_current/` to see what work is pending.
|
||||
|
||||
|
||||
---
|
||||
|
||||
@@ -26,3 +26,8 @@ whatsapp_verify_token = "my-secret-verify-token"
|
||||
|
||||
# Maximum conversation turns to remember per user (default: 20).
|
||||
# history_size = 20
|
||||
|
||||
# Optional: restrict which phone numbers can interact with the bot.
|
||||
# When set, only listed numbers are processed; all others are silently ignored.
|
||||
# When absent or empty, all numbers are allowed (open by default).
|
||||
# whatsapp_allowed_phones = ["+15551234567", "+15559876543"]
|
||||
|
||||
@@ -22,3 +22,8 @@ twilio_whatsapp_number = "+14155238886"
|
||||
|
||||
# Maximum conversation turns to remember per user (default: 20).
|
||||
# history_size = 20
|
||||
|
||||
# Optional: restrict which phone numbers can interact with the bot.
|
||||
# When set, only listed numbers are processed; all others are silently ignored.
|
||||
# When absent or empty, all numbers are allowed (open by default).
|
||||
# whatsapp_allowed_phones = ["+15551234567", "+15559876543"]
|
||||
|
||||
+121
-50
@@ -11,12 +11,17 @@ max_coders = 3
|
||||
|
||||
# Maximum retries per story per pipeline stage before marking as blocked.
|
||||
# Set to 0 to disable retry limits.
|
||||
max_retries = 2
|
||||
max_retries = 3
|
||||
|
||||
# Base branch name for this project. Worktree creation, merges, and agent prompts
|
||||
# use this value for {{base_branch}}. When not set, falls back to auto-detection
|
||||
# (reads current HEAD branch).
|
||||
base_branch = "master"
|
||||
|
||||
[[component]]
|
||||
name = "frontend"
|
||||
path = "frontend"
|
||||
setup = ["npm install", "npm run build"]
|
||||
setup = ["npm ci", "npm run build"]
|
||||
teardown = []
|
||||
|
||||
[[component]]
|
||||
@@ -58,30 +63,52 @@ system_prompt = "You are a full-stack engineer working autonomously in a git wor
|
||||
[[agent]]
|
||||
name = "qa-2"
|
||||
stage = "qa"
|
||||
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings."
|
||||
role = "Reviews coder work in worktrees: runs quality gates, verifies acceptance criteria, and reports findings."
|
||||
model = "sonnet"
|
||||
max_turns = 40
|
||||
max_budget_usd = 4.00
|
||||
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report.
|
||||
prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
|
||||
|
||||
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||
|
||||
## Your Workflow
|
||||
|
||||
### 1. Code Quality Scan
|
||||
- Run `git diff master...HEAD --stat` to see what files changed
|
||||
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values)
|
||||
- Run `cargo clippy --all-targets --all-features` and note any warnings
|
||||
### 0. Read the Story
|
||||
- Read the story file at `.storkit/work/3_qa/{{story_id}}.md`
|
||||
- Extract every acceptance criterion (the `- [ ]` checkbox lines)
|
||||
- Keep this list in mind for Step 3
|
||||
|
||||
### 1. Deterministic Gates (Prerequisites)
|
||||
Run these first — if any fail, reject immediately without proceeding to AC review:
|
||||
- Run `cargo clippy --all-targets --all-features` — must show 0 errors, 0 warnings
|
||||
- Run `cargo test` and verify all tests pass
|
||||
- If a `frontend/` directory exists:
|
||||
- Run `npm run build` and note any TypeScript errors
|
||||
- Run `npx @biomejs/biome check src/` and note any linting issues
|
||||
- Run `npm test` and verify all frontend tests pass
|
||||
|
||||
### 2. Test Verification
|
||||
- Run `cargo test` and verify all tests pass
|
||||
- If `frontend/` exists: run `npm test` and verify all frontend tests pass
|
||||
- Review test quality: look for tests that are trivial or don't assert meaningful behavior
|
||||
### 2. Code Change Review
|
||||
- Run `git diff master...HEAD --stat` to see what files changed
|
||||
- Run `git diff master...HEAD` to review the actual changes
|
||||
- Flag any incomplete implementations:
|
||||
- `todo!()`, `unimplemented!()`, `panic!()` used as stubs
|
||||
- Placeholder strings like "TODO", "FIXME", "not implemented"
|
||||
- Empty match arms or arms that just return `Default::default()`
|
||||
- Hardcoded values where real logic is expected
|
||||
- Note any obvious coding mistakes (unused imports, dead code, unhandled errors)
|
||||
|
||||
### 3. Manual Testing Support
|
||||
### 3. Acceptance Criteria Review
|
||||
For each AC extracted in Step 0:
|
||||
- Review the diff and test files to determine if the code addresses this AC
|
||||
- PASS: describe specifically how the code addresses it (which file/function/test)
|
||||
- FAIL: explain exactly what is missing or incorrect
|
||||
|
||||
An AC fails if:
|
||||
- No code change or test relates to it
|
||||
- The implementation is stubbed out (todo!/unimplemented!)
|
||||
- A test exists but doesn't actually assert the behaviour described
|
||||
|
||||
### 4. Manual Testing Support (only if all gates PASS and all ACs PASS)
|
||||
- Build the server: run `cargo build` and note success/failure
|
||||
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server
|
||||
- Generate a testing plan including:
|
||||
@@ -90,8 +117,8 @@ Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||
- curl commands to exercise relevant API endpoints
|
||||
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
|
||||
|
||||
### 4. Produce Structured Report
|
||||
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format:
|
||||
### 5. Produce Structured Report and Verdict
|
||||
Print your QA report to stdout. Then call `approve_qa` or `reject_qa` via the MCP tool based on the overall result. Use this format:
|
||||
|
||||
```
|
||||
## QA Report for {{story_id}}
|
||||
@@ -100,27 +127,38 @@ Print your QA report to stdout before your process exits. The server will automa
|
||||
- clippy: PASS/FAIL (details)
|
||||
- TypeScript build: PASS/FAIL/SKIP (details)
|
||||
- Biome lint: PASS/FAIL/SKIP (details)
|
||||
- Code review findings: (list any issues found, or "None")
|
||||
|
||||
### Test Verification
|
||||
- cargo test: PASS/FAIL (N tests)
|
||||
- npm test: PASS/FAIL/SKIP (N tests)
|
||||
- Test quality issues: (list any trivial/weak tests, or "None")
|
||||
- Incomplete implementations: (list any todo!/unimplemented!/stubs found, or "None")
|
||||
- Other code review findings: (list any issues found, or "None")
|
||||
|
||||
### Acceptance Criteria Review
|
||||
- AC: <criterion text>
|
||||
Result: PASS/FAIL
|
||||
Evidence: <how the code addresses it, or what is missing>
|
||||
|
||||
(repeat for each AC)
|
||||
|
||||
### Manual Testing Plan
|
||||
- Server URL: http://localhost:PORT (or "Build failed")
|
||||
- Pages to visit: (list)
|
||||
- Things to check: (list)
|
||||
- curl commands: (list)
|
||||
- Server URL: http://localhost:PORT (or "Skipped — gate/AC failure" or "Build failed")
|
||||
- Pages to visit: (list, or "N/A")
|
||||
- Things to check: (list, or "N/A")
|
||||
- curl commands: (list, or "N/A")
|
||||
|
||||
### Overall: PASS/FAIL
|
||||
Reason: (summary of why it passed or the primary reason it failed)
|
||||
```
|
||||
|
||||
After printing the report:
|
||||
- If Overall is PASS: call `approve_qa(story_id='{{story_id}}')` via MCP
|
||||
- If Overall is FAIL: call `reject_qa(story_id='{{story_id}}', notes='<concise reason>')` via MCP so the coder knows exactly what to fix
|
||||
|
||||
## Rules
|
||||
- Do NOT modify any code — read-only review only
|
||||
- If the server fails to start, still provide the testing plan with curl commands
|
||||
- The server automatically runs acceptance gates when your process exits"""
|
||||
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits."
|
||||
- Gates must pass before AC review — a gate failure is an automatic reject
|
||||
- If any AC is not met, the overall result is FAIL
|
||||
- Always call approve_qa or reject_qa — never leave the story without a verdict"""
|
||||
system_prompt = "You are a QA agent. Your job is read-only: run quality gates, verify each acceptance criterion against the diff, and produce a structured QA report. Always call approve_qa or reject_qa via MCP to record your verdict. Do not modify code."
|
||||
|
||||
[[agent]]
|
||||
name = "coder-opus"
|
||||
@@ -135,30 +173,52 @@ system_prompt = "You are a senior full-stack engineer working autonomously in a
|
||||
[[agent]]
|
||||
name = "qa"
|
||||
stage = "qa"
|
||||
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings."
|
||||
role = "Reviews coder work in worktrees: runs quality gates, verifies acceptance criteria, and reports findings."
|
||||
model = "sonnet"
|
||||
max_turns = 40
|
||||
max_budget_usd = 4.00
|
||||
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report.
|
||||
prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
|
||||
|
||||
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||
|
||||
## Your Workflow
|
||||
|
||||
### 1. Code Quality Scan
|
||||
- Run `git diff master...HEAD --stat` to see what files changed
|
||||
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values)
|
||||
- Run `cargo clippy --all-targets --all-features` and note any warnings
|
||||
### 0. Read the Story
|
||||
- Read the story file at `.storkit/work/3_qa/{{story_id}}.md`
|
||||
- Extract every acceptance criterion (the `- [ ]` checkbox lines)
|
||||
- Keep this list in mind for Step 3
|
||||
|
||||
### 1. Deterministic Gates (Prerequisites)
|
||||
Run these first — if any fail, reject immediately without proceeding to AC review:
|
||||
- Run `cargo clippy --all-targets --all-features` — must show 0 errors, 0 warnings
|
||||
- Run `cargo test` and verify all tests pass
|
||||
- If a `frontend/` directory exists:
|
||||
- Run `npm run build` and note any TypeScript errors
|
||||
- Run `npx @biomejs/biome check src/` and note any linting issues
|
||||
- Run `npm test` and verify all frontend tests pass
|
||||
|
||||
### 2. Test Verification
|
||||
- Run `cargo test` and verify all tests pass
|
||||
- If `frontend/` exists: run `npm test` and verify all frontend tests pass
|
||||
- Review test quality: look for tests that are trivial or don't assert meaningful behavior
|
||||
### 2. Code Change Review
|
||||
- Run `git diff master...HEAD --stat` to see what files changed
|
||||
- Run `git diff master...HEAD` to review the actual changes
|
||||
- Flag any incomplete implementations:
|
||||
- `todo!()`, `unimplemented!()`, `panic!()` used as stubs
|
||||
- Placeholder strings like "TODO", "FIXME", "not implemented"
|
||||
- Empty match arms or arms that just return `Default::default()`
|
||||
- Hardcoded values where real logic is expected
|
||||
- Note any obvious coding mistakes (unused imports, dead code, unhandled errors)
|
||||
|
||||
### 3. Manual Testing Support
|
||||
### 3. Acceptance Criteria Review
|
||||
For each AC extracted in Step 0:
|
||||
- Review the diff and test files to determine if the code addresses this AC
|
||||
- PASS: describe specifically how the code addresses it (which file/function/test)
|
||||
- FAIL: explain exactly what is missing or incorrect
|
||||
|
||||
An AC fails if:
|
||||
- No code change or test relates to it
|
||||
- The implementation is stubbed out (todo!/unimplemented!)
|
||||
- A test exists but doesn't actually assert the behaviour described
|
||||
|
||||
### 4. Manual Testing Support (only if all gates PASS and all ACs PASS)
|
||||
- Build the server: run `cargo build` and note success/failure
|
||||
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server
|
||||
- Generate a testing plan including:
|
||||
@@ -167,8 +227,8 @@ Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||
- curl commands to exercise relevant API endpoints
|
||||
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
|
||||
|
||||
### 4. Produce Structured Report
|
||||
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format:
|
||||
### 5. Produce Structured Report and Verdict
|
||||
Print your QA report to stdout. Then call `approve_qa` or `reject_qa` via the MCP tool based on the overall result. Use this format:
|
||||
|
||||
```
|
||||
## QA Report for {{story_id}}
|
||||
@@ -177,27 +237,38 @@ Print your QA report to stdout before your process exits. The server will automa
|
||||
- clippy: PASS/FAIL (details)
|
||||
- TypeScript build: PASS/FAIL/SKIP (details)
|
||||
- Biome lint: PASS/FAIL/SKIP (details)
|
||||
- Code review findings: (list any issues found, or "None")
|
||||
|
||||
### Test Verification
|
||||
- cargo test: PASS/FAIL (N tests)
|
||||
- npm test: PASS/FAIL/SKIP (N tests)
|
||||
- Test quality issues: (list any trivial/weak tests, or "None")
|
||||
- Incomplete implementations: (list any todo!/unimplemented!/stubs found, or "None")
|
||||
- Other code review findings: (list any issues found, or "None")
|
||||
|
||||
### Acceptance Criteria Review
|
||||
- AC: <criterion text>
|
||||
Result: PASS/FAIL
|
||||
Evidence: <how the code addresses it, or what is missing>
|
||||
|
||||
(repeat for each AC)
|
||||
|
||||
### Manual Testing Plan
|
||||
- Server URL: http://localhost:PORT (or "Build failed")
|
||||
- Pages to visit: (list)
|
||||
- Things to check: (list)
|
||||
- curl commands: (list)
|
||||
- Server URL: http://localhost:PORT (or "Skipped — gate/AC failure" or "Build failed")
|
||||
- Pages to visit: (list, or "N/A")
|
||||
- Things to check: (list, or "N/A")
|
||||
- curl commands: (list, or "N/A")
|
||||
|
||||
### Overall: PASS/FAIL
|
||||
Reason: (summary of why it passed or the primary reason it failed)
|
||||
```
|
||||
|
||||
After printing the report:
|
||||
- If Overall is PASS: call `approve_qa(story_id='{{story_id}}')` via MCP
|
||||
- If Overall is FAIL: call `reject_qa(story_id='{{story_id}}', notes='<concise reason>')` via MCP so the coder knows exactly what to fix
|
||||
|
||||
## Rules
|
||||
- Do NOT modify any code — read-only review only
|
||||
- If the server fails to start, still provide the testing plan with curl commands
|
||||
- The server automatically runs acceptance gates when your process exits"""
|
||||
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits."
|
||||
- Gates must pass before AC review — a gate failure is an automatic reject
|
||||
- If any AC is not met, the overall result is FAIL
|
||||
- Always call approve_qa or reject_qa — never leave the story without a verdict"""
|
||||
system_prompt = "You are a QA agent. Your job is read-only: run quality gates, verify each acceptance criterion against the diff, and produce a structured QA report. Always call approve_qa or reject_qa via MCP to record your verdict. Do not modify code."
|
||||
|
||||
[[agent]]
|
||||
name = "mergemaster"
|
||||
|
||||
@@ -0,0 +1,43 @@
|
||||
# Example project.toml — copy to .storkit/project.toml and customise.
|
||||
# This file is checked in; project.toml itself is gitignored (it may contain
|
||||
# instance-specific settings).
|
||||
|
||||
# Project-wide default QA mode: "server", "agent", or "human".
|
||||
# Per-story `qa` front matter overrides this setting.
|
||||
default_qa = "server"
|
||||
|
||||
# Default model for coder agents. Only agents with this model are auto-assigned.
|
||||
# Opus coders are reserved for explicit per-story `agent:` front matter requests.
|
||||
default_coder_model = "sonnet"
|
||||
|
||||
# Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full.
|
||||
max_coders = 3
|
||||
|
||||
# Maximum retries per story per pipeline stage before marking as blocked.
|
||||
# Set to 0 to disable retry limits.
|
||||
max_retries = 2
|
||||
|
||||
# Base branch name for this project. Worktree creation, merges, and agent prompts
|
||||
# use this value for {{base_branch}}. When not set, falls back to auto-detection
|
||||
# (reads current HEAD branch).
|
||||
base_branch = "main"
|
||||
|
||||
[[component]]
|
||||
name = "server"
|
||||
path = "."
|
||||
setup = ["cargo build"]
|
||||
teardown = []
|
||||
|
||||
[[agent]]
|
||||
name = "coder-1"
|
||||
role = "Full-stack engineer"
|
||||
stage = "coder"
|
||||
model = "sonnet"
|
||||
max_turns = 50
|
||||
max_budget_usd = 5.00
|
||||
prompt = """
|
||||
You are working in a git worktree on story {{story_id}}.
|
||||
Read CLAUDE.md first, then .storkit/README.md to understand the dev process.
|
||||
Run: cd "{{worktree_path}}" && git difftool {{base_branch}}...HEAD
|
||||
Commit all your work before your process exits.
|
||||
"""
|
||||
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "WhatsApp webhook HMAC signature verification"
|
||||
retry_count: 3
|
||||
blocked: true
|
||||
---
|
||||
|
||||
# Story 388: WhatsApp webhook HMAC signature verification
|
||||
|
||||
## User Story
|
||||
|
||||
As a bot operator, I want incoming WhatsApp webhook requests to be cryptographically verified, so that forged requests from unauthorized sources are rejected.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Meta webhooks: validate X-Hub-Signature-256 HMAC-SHA256 header using the app secret before processing
|
||||
- [ ] Twilio webhooks: validate request signature using the auth token before processing
|
||||
- [ ] Requests with missing or invalid signatures are rejected with 403 Forbidden
|
||||
- [ ] Verification is fail-closed: if signature checking is configured, unsigned requests are rejected
|
||||
- [ ] Existing bot.toml config is extended with any needed secrets (e.g. Meta app_secret for HMAC verification)
|
||||
- [ ] MUST use audited crypto crates (hmac, sha2, sha1, base64) — no hand-rolled cryptographic primitives
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+40
@@ -0,0 +1,40 @@
|
||||
---
|
||||
name: "Fly.io Machines API integration for multi-tenant storkit SaaS"
|
||||
---
|
||||
|
||||
# Spike 408: Fly.io Machines API integration for multi-tenant storkit SaaS
|
||||
|
||||
## Question
|
||||
|
||||
Can we build a working Rust integration that creates and manages per-tenant Fly.io Machines, attaches volumes, injects Claude credentials, and proxies JWT-authenticated HTTP/WebSocket traffic to the right machine?
|
||||
|
||||
## Hypothesis
|
||||
|
||||
A thin Rust service using `reqwest` for the Machines API and `axum` for the reverse proxy is sufficient. No heavyweight orchestration framework needed.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Fly.io account with API token (set `FLY_API_TOKEN` env var)
|
||||
- Spike 407 findings reviewed
|
||||
|
||||
## Timebox
|
||||
|
||||
4 hours
|
||||
|
||||
## Investigation Plan
|
||||
|
||||
- [ ] Create a minimal Rust crate in `spikes/fly_machines/` — do not touch production code
|
||||
- [ ] Implement machine lifecycle: create, start, stop, destroy via Fly Machines REST API using `reqwest`
|
||||
- [ ] Test attaching a persistent volume to a machine and verify it persists across stop/start
|
||||
- [ ] Test secret injection — pass a dummy `credentials.json` as a Fly secret and verify it's readable inside the machine
|
||||
- [ ] Sketch the auth proxy: JWT validation → machine lookup → reverse proxy to machine's private IP; verify WebSocket proxying works
|
||||
- [ ] Measure actual cold start time for a minimal storkit container image
|
||||
- [ ] Document any API quirks, rate limits, or sharp edges discovered during testing
|
||||
|
||||
## Findings
|
||||
|
||||
- TBD
|
||||
|
||||
## Recommendation
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "Multi-account OAuth token rotation on rate limit"
|
||||
---
|
||||
|
||||
# Story 411: Multi-account OAuth token rotation on rate limit
|
||||
|
||||
## User Story
|
||||
|
||||
As a storkit user with multiple Claude Max subscriptions, I want the system to automatically rotate to a different account when one gets rate limited, so that agents and chat don't stall out waiting for limits to reset.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] OAuth login flow stores credentials per-account (keyed by email), not overwriting previous accounts
|
||||
- [ ] GET /oauth/status returns all stored accounts and their status (active, rate-limited, expired)
|
||||
- [ ] When the active account hits a rate limit, storkit automatically swaps to the next available account's refresh token, refreshes, and retries
|
||||
- [ ] The bot sends a notification in Matrix/WhatsApp when it swaps accounts
|
||||
- [ ] If all accounts are rate limited, the bot surfaces a clear message with the time until the earliest reset
|
||||
- [ ] A new /oauth/authorize login adds to the account pool rather than replacing the current credentials
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+24
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "Recheck bot command to re-run gates without restarting agent"
|
||||
---
|
||||
|
||||
# Story 412: Recheck bot command to re-run gates without restarting agent
|
||||
|
||||
## User Story
|
||||
|
||||
As a user, I want to send `recheck <number>` to the bot so that it re-runs acceptance gates on an existing worktree without spawning a new agent, so I can unblock stories that failed due to environment issues without wasting agent turns.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] recheck command is registered in chat/commands/mod.rs and appears in help output
|
||||
- [ ] `recheck <number>` runs run_acceptance_gates on the story's existing worktree
|
||||
- [ ] If gates pass, the story advances through the pipeline (same as if a coder completed successfully)
|
||||
- [ ] If gates fail, the error output is returned to the user (not silently retried)
|
||||
- [ ] If no worktree exists for the story, returns a clear error
|
||||
- [ ] Does not spawn a new agent or increment retry_count
|
||||
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||
- [ ] Works from web UI slash commands
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+21
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: "Unblock command handles all stuck states not just blocked flag"
|
||||
---
|
||||
|
||||
# Story 435: Unblock command handles all stuck states not just blocked flag
|
||||
|
||||
## User Story
|
||||
|
||||
As a project owner, I want the unblock command to clear any stuck state on a story — not just the blocked flag — so that I have a single command to unstick stories regardless of why they're stuck.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Unblock clears merge_failure field in addition to blocked flag
|
||||
- [ ] Unblock clears review_hold field
|
||||
- [ ] Unblock reports which fields were cleared in the confirmation message
|
||||
- [ ] Unblock works on stories in any pipeline stage (backlog, current, qa, merge, done)
|
||||
- [ ] If no stuck state is found (no blocked, merge_failure, or review_hold), returns a clear message saying so
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+26
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: "Unify story stuck states into a single status field"
|
||||
---
|
||||
|
||||
# Refactor 436: Unify story stuck states into a single status field
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Replace the separate blocked, merge_failure, and review_hold front matter fields with a single status field (e.g. status: blocked, status: merge_failure, status: review_hold). Simplifies the unblock command, auto-assign checks, and pipeline advance logic.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Replace blocked: true, merge_failure: string, and review_hold: true with a single status: field in story front matter
|
||||
- [ ] Auto-assign checks a single field instead of three separate ones
|
||||
- [ ] Pipeline advance and lifecycle code reads/writes the unified status field
|
||||
- [ ] Unblock command clears the status field regardless of which stuck state it was
|
||||
- [ ] retry_count remains a separate field (it's a counter, not a state)
|
||||
- [ ] Migration: existing stories with old fields are handled gracefully on read
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,31 @@
|
||||
---
|
||||
name: "Rename project from \"storkit\" to \"huskies\""
|
||||
---
|
||||
|
||||
# Story 455: Rename project from "storkit" to "huskies"
|
||||
|
||||
## User Story
|
||||
|
||||
As a project maintainer, I want to rename the project from "storkit" to "huskies" so that the product has its new identity throughout the codebase, tooling, and documentation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Rust crate name in server/Cargo.toml changed from 'storkit' to 'huskies'
|
||||
- [ ] Binary name changed to 'huskies' (Dockerfile CMD, release script binary names)
|
||||
- [ ] Environment variables renamed: STORKIT_PORT → HUSKIES_PORT, STORKIT_HOST → HUSKIES_HOST
|
||||
- [ ] Docker service name, container_name, image name, and volume names updated in docker-compose.yml
|
||||
- [ ] Docker user/group renamed from 'storkit' to 'huskies' in Dockerfile (groupadd, useradd, home dir /home/huskies/.claude)
|
||||
- [ ] MCP server registration renamed from 'storkit' to 'huskies' in scaffold-generated .mcp.json and in server/src/http/mcp/mod.rs serverInfo name
|
||||
- [ ] All 35+ MCP tool permission patterns updated from mcp__storkit__* to mcp__huskies__* across code and permission configs
|
||||
- [ ] The .storkit/ project directory marker renamed to .huskies/ throughout all Rust source (paths.rs, config.rs, scaffold.rs, watcher.rs, prompts.rs, and all agent/pipeline code)
|
||||
- [ ] Release script updated: Gitea repo path dave/storkit → dave/huskies, changelog regex updated to match ^(huskies|storkit|story-kit): for backwards-compatible history parsing, binary artifact names updated
|
||||
- [ ] Git commit prefix convention updated from 'storkit:' to 'huskies:' in storkit README and agent prompts
|
||||
- [ ] Website updated: page title, headings, and contact email (hello@storkit.dev) if domain changes
|
||||
- [ ] README.md updated: all CLI examples use 'huskies' binary name, all .storkit/ references become .huskies/
|
||||
- [ ] A migration path exists for existing installs: either storkit auto-detects and migrates .storkit/ → .huskies/, or a migration script (script/migrate) is provided
|
||||
- [ ] All Claude Code .mcp.json files in existing worktrees are regenerated via scaffold or migration
|
||||
- [ ] Gitea repository renamed from dave/storkit to dave/huskies (external action required, noted in story)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+48
@@ -0,0 +1,48 @@
|
||||
---
|
||||
name: "Zombie process accumulation from unreaped child processes"
|
||||
---
|
||||
|
||||
# Bug 452: Zombie process accumulation from unreaped child processes
|
||||
|
||||
## Description
|
||||
|
||||
Storkit accumulates zombie processes over time from unreaped child and grandchild processes. Observed 101 zombies in Docker container, 27 on macOS host. Breakdown: 51 esbuild, 36 echo, 5 claude, 5 sh, 2 bash, 1 cargo.
|
||||
|
||||
Root cause: storkit does not reap orphaned grandchild processes. The zombies are mostly grandchildren (`esbuild`, `echo`, `sh`, `cargo`) spawned by `npm run build`, `cargo test`, etc. during worktree setup and gate checks. This happens both natively (observed 27 zombies on macOS host) and in Docker containers. When the intermediate parent exits, these grandchildren get reparented to storkit (or PID 1 in Docker) and become zombies because nobody calls `waitpid` for them.
|
||||
|
||||
**Already fixed:**
|
||||
- `docker-compose.yml` now has `init: true` which uses tini as PID 1 in Docker — this handles zombie reaping inside containers
|
||||
- `llm/providers/claude_code.rs` now has `child.wait()` after `child.kill()` in all code paths, and the reader thread is joined before returning
|
||||
- `agents/pty.rs` reader thread is now joined before returning
|
||||
|
||||
**Remaining:** Storkit running natively (e.g. on macOS) still accumulates zombie grandchildren because there is no tini. The fix is to add a background reaper thread that periodically calls `waitpid(-1, WNOHANG)` in a loop to clean up any orphaned children. This should be spawned early in `main()` on Unix platforms. Example:
|
||||
|
||||
```rust
|
||||
#[cfg(unix)]
|
||||
std::thread::spawn(|| {
|
||||
loop {
|
||||
unsafe { while libc::waitpid(-1, std::ptr::null_mut(), libc::WNOHANG) > 0 {} }
|
||||
std::thread::sleep(std::time::Duration::from_secs(5));
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Run several agent sessions. Check with `ps -eo stat,comm | grep Z | awk '{print $2}' | sort | uniq -c | sort -rn`.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Zombie processes accumulate continuously. Never reaped.
|
||||
|
||||
## Expected Result
|
||||
|
||||
No zombie accumulation during normal operation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [x] `child.wait()` is called after `child.kill()` in all code paths in `claude_code.rs`
|
||||
- [x] Reader threads are joined in both `pty.rs` and `claude_code.rs`
|
||||
- [x] `init: true` added to docker-compose.yml for Docker deployments
|
||||
- [ ] Background reaper thread added for native (non-Docker) deployments
|
||||
- [ ] Verified with `ps aux | grep '<defunct>'` after running multiple agent sessions natively on macOS
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "Deduplicate work item display in web UI story panel"
|
||||
---
|
||||
|
||||
# Story 454: Deduplicate work item display in web UI story panel
|
||||
|
||||
## User Story
|
||||
|
||||
As a user, I want the work item detail panel to display cleanly without redundant information, so that I can read story details without noise.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] The story title is not shown twice (remove the duplicate heading)
|
||||
- [ ] The work item type label is not shown twice
|
||||
- [ ] The word 'name' is not shown as a prefix before the story title
|
||||
- [ ] The story ID/title line (e.g. 'Story 3: ...') is left-justified with no extra indentation
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+29
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: "Matrix bot ignores in-room verification requests from Element"
|
||||
---
|
||||
|
||||
# Bug 456: Matrix bot ignores in-room verification requests from Element
|
||||
|
||||
## Description
|
||||
|
||||
The Matrix bot (Sally) only registers a handler for to-device verification events (`ToDeviceKeyVerificationRequestEvent`). Modern Element clients use in-room verification (`m.key.verification.request` as a room message event) by default. When a user initiates "Start Verification" from Element, the request is sent as a room event and the bot never sees it — nothing appears in the bot logs and the verification flow hangs indefinitely. As a result, Sally's device remains unverified (Big Red Dot), and if Element has "never send to unverified sessions" enabled, it will not share Megolm room keys with Sally's device, making her deaf to all encrypted room messages.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Run the storkit Matrix bot (Sally) in a room with E2EE enabled.
2. In Element, open the room member list, click Sally's device, and press "Start Verification".
3. Watch the bot logs: grep for "verif\|Incoming".
|
||||
|
||||
## Actual Result
|
||||
|
||||
Nothing appears in the bot logs. The verification flow hangs in Element and eventually times out. Sally's device remains unverified. If Element is set to encrypt only to verified sessions, Sally cannot decrypt any messages in the room.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot receives the in-room verification request, accepts it, drives the SAS emoji flow to completion, and logs "Verification with @user completed successfully!". Sally's device shows as verified in Element.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Bot registers an in-room verification event handler for m.key.verification.request room events (in addition to the existing to-device handler)
|
||||
- [ ] When Element initiates 'Start Verification' from the device list, the bot logs 'Incoming verification request from ...'
|
||||
- [ ] The SAS emoji flow completes: bot logs the emoji string, confirms, and logs 'Verification ... completed successfully!'
|
||||
- [ ] Sally's device shows as verified (no Big Red Dot) in Element after the flow completes
|
||||
- [ ] Existing to-device verification handler is preserved for clients that use the older flow
|
||||
@@ -1,6 +1,5 @@
|
||||
---
|
||||
name: "Work item titles render too large in expanded view"
|
||||
merge_failure: "Merge pipeline infrastructure failure: squash merge committed successfully on merge-queue branch, but cherry-pick onto master failed with 'fatal: bad revision merge-queue/237_bug_work_item_titles_render_too_large_in_expanded_view'. The merge worktree setup also failed (ENOENT for .story_kit/merge_workspace — pnpm install, pnpm build, cargo check all skipped). The merge-queue branch appears to have been cleaned up before the cherry-pick step could reference it. Master is untouched."
|
||||
---
|
||||
|
||||
# Bug 237: Work item titles render too large in expanded view
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
---
|
||||
name: "Add refactor work item type"
|
||||
merge_failure: "merge_agent_work tool returned empty output on two attempts. The merge-queue branch (merge-queue/254_story_add_refactor_work_item_type) was created with squash merge commit 27d24b2, and the merge workspace worktree exists at .story_kit/merge_workspace, but the pipeline never completed (no success/failure logged after MERGE-DEBUG calls). The stale merge workspace worktree may be blocking completion. Possibly related to bug 250 (merge pipeline cherry-pick fails with bad revision on merge-queue branch). Human intervention needed to: 1) clean up the merge-queue worktree and branch, 2) investigate why the merge pipeline hangs after creating the squash merge commit, 3) retry the merge."
|
||||
---
|
||||
|
||||
# Story 254: Add refactor work item type
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
---
|
||||
name: "Show agent logs in expanded story popup"
|
||||
merge_failure: "merge_agent_work tool returned empty output. The merge pipeline created the merge-queue branch (merge-queue/255_story_show_agent_logs_in_expanded_story_popup) and merge workspace worktree at .story_kit/merge_workspace, but hung without completing. This is the same issue that affected story 254 — likely related to bug 250 (merge pipeline cherry-pick fails with bad revision on merge-queue branch). The stale merge workspace worktree on the merge-queue branch may be blocking completion. Human intervention needed to: 1) clean up the merge workspace worktree and merge-queue branch, 2) investigate the root cause in the merge pipeline (possibly the cherry-pick/fast-forward step after squash merge), 3) retry the merge."
|
||||
---
|
||||
|
||||
# Story 255: Show agent logs in expanded story popup
|
||||
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "WhatsApp markdown-to-WhatsApp formatting conversion"
|
||||
---
|
||||
|
||||
# Story 384: WhatsApp markdown-to-WhatsApp formatting conversion
|
||||
|
||||
## User Story
|
||||
|
||||
As a WhatsApp user, I want bot messages to use WhatsApp-native formatting instead of raw markdown, so that headers, bold text, and links render properly.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Headers (# ## ### etc.) are converted to bold text (*Header*) in WhatsApp messages
|
||||
- [ ] Markdown bold (**text**) is converted to WhatsApp bold (*text*)
|
||||
- [ ] Markdown strikethrough (~~text~~) is converted to WhatsApp strikethrough (~text~)
|
||||
- [ ] Markdown links [text](url) are converted to readable format: text (url)
|
||||
- [ ] Code blocks and inline code are preserved as-is (already compatible)
|
||||
- [ ] Matrix bot formatting is completely unaffected (conversion only applied in WhatsApp send paths)
|
||||
- [ ] Existing WhatsApp chunking (4096 char limit) still works correctly after conversion
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Slack markdown-to-mrkdwn formatting conversion"
|
||||
---
|
||||
|
||||
# Story 385: Slack markdown-to-mrkdwn formatting conversion
|
||||
|
||||
## User Story
|
||||
|
||||
As a Slack user, I want bot messages to use Slack-native mrkdwn formatting instead of raw markdown, so that headers, bold text, and links render properly.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Headers (# ## ### etc.) are converted to bold text (*Header*) in Slack messages
|
||||
- [ ] Markdown bold (**text**) is converted to Slack bold (*text*)
|
||||
- [ ] Markdown strikethrough (~~text~~) is converted to Slack strikethrough (~text~)
|
||||
- [ ] Markdown links [text](url) are converted to Slack format: <url|text>
|
||||
- [ ] Code blocks and inline code are preserved as-is (already compatible)
|
||||
- [ ] WhatsApp and Matrix bot formatting are completely unaffected (conversion only applied in Slack send paths)
|
||||
- [ ] Conversion is applied to all Slack send paths: command responses, LLM streaming, htop snapshots, delete responses, and slash command responses
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+22
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "Unreleased command shows list of stories since last release"
|
||||
---
|
||||
|
||||
# Story 386: Unreleased command shows list of stories since last release
|
||||
|
||||
## User Story
|
||||
|
||||
As a user, I want a bot command and web UI slash command called "unreleased" that shows a list of stories completed since the last release, so that I can see what's ready to ship.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Bot command `unreleased` returns a list of stories merged to master since the last release tag
|
||||
- [ ] Web UI slash command /unreleased returns the same list
|
||||
- [ ] Each entry shows story number and name
|
||||
- [ ] If there are no unreleased stories, a clear message is shown
|
||||
- [ ] Command is registered in the help command output
|
||||
- [ ] WhatsApp, Slack, and Matrix transports all support the command via the shared command dispatcher
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Configurable base branch name in project.toml"
|
||||
---
|
||||
|
||||
# Story 387: Configurable base branch name in project.toml
|
||||
|
||||
## User Story
|
||||
|
||||
As a project owner, I want to configure the main branch name in project.toml (e.g. "main", "master", "develop"), so that the system doesn't hardcode "master" and works with any branching convention.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] New optional `base_branch` setting in project.toml (e.g. base_branch = "main")
|
||||
- [ ] When set, all worktree creation, merge operations, and agent prompts use the configured branch name
|
||||
- [ ] When not set, falls back to the existing auto-detection logic (detect_base_branch) which reads the current git branch
|
||||
- [ ] The hardcoded "master" fallback in detect_base_branch is replaced by the project.toml setting when available
|
||||
- [ ] Agent prompt template {{base_branch}} resolves to the configured value
|
||||
- [ ] Existing projects without the setting continue to work unchanged (backwards compatible)
|
||||
- [ ] project.toml.example uses base_branch = "main" as the example value; the actual project.toml uses base_branch = "master"
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: "WhatsApp phone number allowlist authorization"
|
||||
---
|
||||
|
||||
# Story 389: WhatsApp phone number allowlist authorization
|
||||
|
||||
## User Story
|
||||
|
||||
As a bot operator, I want to restrict which phone numbers can interact with the bot, so that only authorized users can send commands.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] New optional allowed_phones list in bot.toml for WhatsApp (similar to Matrix allowed_users)
|
||||
- [ ] When configured, only messages from listed phone numbers are processed; all others are silently ignored
|
||||
- [ ] When not configured (empty or absent), all phone numbers are allowed (backwards compatible)
|
||||
- [ ] Unauthorized senders are logged but receive no response
|
||||
- [ ] The allowlist applies to all message types: commands, LLM conversations, and async commands (htop, delete)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+31
@@ -0,0 +1,31 @@
|
||||
---
|
||||
name: "WhatsApp missing async command handlers for start, rebuild, reset, rmtree, assign"
|
||||
---
|
||||
|
||||
# Bug 390: WhatsApp missing async command handlers for start, rebuild, reset, rmtree, assign
|
||||
|
||||
## Description
|
||||
|
||||
Five bot commands listed in help don't work in WhatsApp. Matrix's on_room_message pre-dispatches these via extract_*_command() functions before calling try_handle_command(), but WhatsApp's handle_incoming_message only pre-dispatches htop and delete. The missing commands have fallback handlers that return None, so they silently fall through to the LLM instead of executing.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Send "rebuild" (or "start 386", "reset", "rmtree 386", "assign 386 opus") to the WhatsApp bot
2. Observe the message is forwarded to the LLM instead of executing the command
|
||||
|
||||
## Actual Result
|
||||
|
||||
The 5 commands (start, rebuild, reset, rmtree, assign) fall through to the LLM and generate a conversational response instead of executing the bot command.
|
||||
|
||||
## Expected Result
|
||||
|
||||
All commands listed in help should work in WhatsApp, matching Matrix behavior. start should spawn an agent, rebuild should rebuild the server, reset should clear the session, rmtree should remove a worktree, assign should pre-assign a model.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] start command works in WhatsApp (extract_start_command dispatch)
|
||||
- [ ] rebuild command works in WhatsApp (extract_rebuild_command dispatch)
|
||||
- [ ] reset command works in WhatsApp (extract_reset_command dispatch)
|
||||
- [ ] rmtree command works in WhatsApp (extract_rmtree_command dispatch)
|
||||
- [ ] assign command works in WhatsApp (extract_assign_command dispatch)
|
||||
- [ ] Same 5 commands also work in Slack transport if similarly missing
|
||||
- [ ] RETRY: Previous attempt was marked done without any code changes — the mergemaster moved the story to done but no async command handlers were actually added to whatsapp.rs. The fix must add extract_start_command, extract_rebuild_command, extract_reset_command, extract_rmtree_command, and extract_assign_command dispatch blocks to handle_incoming_message in whatsapp.rs, following the existing pattern used for htop and delete. Also check and fix Slack if similarly missing.
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "strip_prefix_ci panics on multi-byte UTF-8 characters"
|
||||
---
|
||||
|
||||
# Bug 391: strip_prefix_ci panics on multi-byte UTF-8 characters
|
||||
|
||||
## Description
|
||||
|
||||
strip_prefix_ci in commands/mod.rs slices text by byte offset using prefix.len(), which panics when the slice boundary falls inside a multi-byte UTF-8 character (e.g. right single quote U+2019, emojis). The function assumes ASCII-safe byte boundaries but real WhatsApp/Matrix messages contain Unicode.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Send a message to the bot containing a smart quote or emoji within the first N bytes (where N = bot name length)
2. e.g. "For now let’s just deal with it" where the bot name prefix check slices at byte 12, inside the 3-byte ’ character
|
||||
|
||||
## Actual Result
|
||||
|
||||
Thread panics: "byte index 12 is not a char boundary; it is inside '’' (bytes 11..14)"
|
||||
|
||||
## Expected Result
|
||||
|
||||
The function should safely handle multi-byte UTF-8 without panicking. If the slice boundary isn't a char boundary, the prefix doesn't match — return None.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] strip_prefix_ci does not panic on messages containing multi-byte UTF-8 characters (smart quotes, emojis, CJK, etc.)
|
||||
- [ ] Use text.get(..prefix.len()) or text.is_char_boundary() instead of direct indexing
|
||||
- [ ] Add test cases for messages with emojis and smart quotes
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Extract shared transport utilities from matrix module into chat submodule"
|
||||
agent: "coder-opus"
|
||||
---
|
||||
|
||||
# Refactor 392: Extract shared transport utilities from matrix module into chat submodule
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Several functions currently living in the matrix transport module are used by all transports (WhatsApp, Slack, Matrix). These should be pulled up into a shared location under the chat module. Candidates include: strip_prefix_ci, strip_bot_mention, try_handle_command, drain_complete_paragraphs, markdown_to_whatsapp (pattern could generalize), chunk_for_whatsapp, and the command dispatch infrastructure. A chat::util or chat::text submodule would be a natural home for string utilities like strip_prefix_ci. The command dispatch (try_handle_command, CommandDispatch, BotCommand registry) could live in chat::commands.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Shared string utilities (strip_prefix_ci, strip_bot_mention, drain_complete_paragraphs) moved to a chat::util or chat::text submodule
|
||||
- [ ] Command dispatch infrastructure (try_handle_command, CommandDispatch, BotCommand, command registry) moved to chat::commands
|
||||
- [ ] Per-transport formatting functions (markdown_to_whatsapp, markdown_to_slack) remain in their respective transport modules
|
||||
- [ ] All transports import from the new shared location instead of reaching into matrix::
|
||||
- [ ] No functional changes — purely structural refactor
|
||||
- [ ] All existing tests pass and move with their code
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Pipeline stage notifications for WhatsApp and Slack transports"
|
||||
---
|
||||
|
||||
# Story 393: Pipeline stage notifications for WhatsApp and Slack transports
|
||||
|
||||
## User Story
|
||||
|
||||
As a WhatsApp or Slack user, I want to receive pipeline stage transition notifications (e.g. "story moved from Current to QA") just like Matrix users do, so I can track story progress from any transport.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport spawns a notification listener at startup using the existing spawn_notification_listener infrastructure
|
||||
- [ ] Slack transport spawns a notification listener at startup using the same infrastructure
|
||||
- [ ] Notifications are sent to all active ambient senders/channels for the respective transport
|
||||
- [ ] Stage transition notifications (story moved between pipeline stages) are delivered
|
||||
- [ ] Error notifications (story failures) are delivered
|
||||
- [ ] Rate limit warnings are delivered with debouncing
|
||||
- [ ] Matrix notification behavior is completely unaffected
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "WhatsApp and Slack permission prompt forwarding"
|
||||
---
|
||||
|
||||
# Story 394: WhatsApp and Slack permission prompt forwarding
|
||||
|
||||
## User Story
|
||||
|
||||
As a WhatsApp or Slack user, I want permission requests from Claude Code to be forwarded to my chat so I can approve or deny them, rather than having them silently fail.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Permission requests are sent as messages to the WhatsApp sender with tool name and input details
|
||||
- [ ] User can reply yes/y/approve or no/n/deny to approve or deny the permission
|
||||
- [ ] Permission requests time out and auto-deny (fail-closed) if not answered within the configured timeout
|
||||
- [ ] Slack receives the same permission forwarding treatment
|
||||
- [ ] Reuses the existing permission channel infrastructure (perm_rx, PermissionForward, PermissionDecision)
|
||||
- [ ] Matrix permission handling is completely unaffected
|
||||
- [ ] handle_llm_message uses a tokio::select! loop (like Matrix bot.rs) to listen for both LLM output and permission requests concurrently
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "Fix npm deprecated module warnings"
|
||||
---
|
||||
|
||||
# Refactor 395: Fix npm deprecated module warnings
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Address npm warnings about deprecated modules in the frontend dependencies. Update or replace deprecated packages to eliminate warnings during npm install.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] npm install runs with zero deprecation warnings
|
||||
- [ ] All existing frontend tests (npm test) still pass
|
||||
- [ ] npm run build succeeds without errors
|
||||
- [ ] No functional regressions in the frontend
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: "WhatsApp bot startup announcement after restart"
|
||||
---
|
||||
|
||||
# Story 396: WhatsApp bot startup announcement after restart
|
||||
|
||||
## User Story
|
||||
|
||||
As a WhatsApp user, I want the bot to announce its presence when it starts up or restarts, like it does in Matrix, so I know it's back online and ready.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Bot sends a startup message to all known WhatsApp senders (from conversation history or ambient rooms) when the server starts
|
||||
- [ ] Startup message includes the bot name and indicates it is online/ready
|
||||
- [ ] Slack transport gets the same startup announcement treatment
|
||||
- [ ] Matrix startup announcement behavior is unaffected
|
||||
- [ ] After a rebuild command, the new process sends the announcement on startup
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+30
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: "Selection screen directory picker unreadable in dark mode"
|
||||
---
|
||||
|
||||
# Bug 397: Selection screen directory picker unreadable in dark mode
|
||||
|
||||
## Description
|
||||
|
||||
The ProjectPathInput component in the selection screen uses hardcoded light-theme inline styles (white backgrounds, dark borders, dark text highlights) that don't adapt to dark mode. When the browser/OS uses dark mode, the global CSS sets text color to #f6f6f6 (white) but the dropdown keeps background: #fff — resulting in white text on a white background, making the directory picker completely unreadable.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Run storkit under Docker (or locally) with a browser set to dark mode (prefers-color-scheme: dark).
|
||||
2. Open http://localhost:3001 in the browser.
|
||||
3. Click into the project path input and start typing a path to trigger the autocomplete dropdown.
|
||||
|
||||
## Actual Result
|
||||
|
||||
The suggestion dropdown has white background with white/light text inherited from the dark-mode global styles. Match highlights use color: #222 which is barely visible. The close button and header bar also use light-only colors. The entire directory picker is effectively unreadable.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The directory picker dropdown should be readable in both light and dark mode. Colors for background, text, borders, and highlights should adapt to the active color scheme.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] ProjectPathInput dropdown is readable in dark mode (prefers-color-scheme: dark)
|
||||
- [ ] ProjectPathInput dropdown remains readable in light mode
|
||||
- [ ] Suggestion highlight text is visible against the dropdown background in both themes
|
||||
- [ ] No hardcoded light-only colors remain in ProjectPathInput inline styles
|
||||
@@ -0,0 +1,31 @@
|
||||
---
|
||||
name: "CLI --port flag with project.toml persistence"
|
||||
---
|
||||
|
||||
# Story 399: CLI --port flag with project.toml persistence
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer, I want to set the server port via a --port CLI flag that persists to project.toml, so that I don't have to remember an environment variable on every run.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] `storkit --help` shows a `--port` option
|
||||
- [ ] `storkit --port 4000` starts the server on port 4000
|
||||
- [ ] After first run with `--port`, the port is saved to `project.toml`
|
||||
- [ ] On subsequent runs without `--port`, the port from `project.toml` is used
|
||||
- [ ] CLI `--port` overrides the value in `project.toml`
|
||||
- [ ] Default port is 3001 when neither `--port` nor `project.toml` port is set
|
||||
- [ ] `STORKIT_PORT` env var is removed — no longer read or respected
|
||||
- [ ] `.storkit_port` lock file mechanism is removed (`write_port_file` / `remove_port_file`)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- Docker compose changes (can update `STORKIT_PORT` references separately)
|
||||
- Adding other CLI flags beyond `--port`
|
||||
|
||||
## Technical Notes
|
||||
|
||||
Port resolution priority: `--port` flag > `project.toml` `port` field > default 3001
|
||||
|
||||
The port should be written to `project.toml` on startup so subsequent runs remember it. Use the existing `config.rs` / `ProjectConfig` struct — add a `port` field.
|
||||
@@ -0,0 +1,45 @@
|
||||
---
|
||||
name: "WhatsApp and Slack missing reset command handler"
|
||||
---
|
||||
|
||||
# Bug 400: WhatsApp and Slack missing reset command handler
|
||||
|
||||
## Description
|
||||
|
||||
The reset command has a fallback handler in chat/commands/mod.rs that returns None with a comment saying it's handled before try_handle_command. This is only true for Matrix. WhatsApp and Slack don't have pre-dispatch handling, so None causes fallthrough to LLM. This caused a real outage when stale session IDs couldn't be cleared via the bot after switching from Docker to bare-metal.
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Follow the **rebuild pattern** established in story 402, with one complication: `handle_reset` in `server/src/chat/transport/matrix/reset.rs` takes a Matrix-specific `ConversationHistory` (`Arc<TokioMutex<HashMap<OwnedRoomId, RoomConversation>>>`), so it cannot be called directly from WhatsApp or Slack.
|
||||
|
||||
**WhatsApp session storage** (`server/src/chat/transport/whatsapp.rs`):
|
||||
- Type: `WhatsAppConversationHistory = Arc<TokioMutex<HashMap<String, RoomConversation>>>` (key = sender phone number)
|
||||
- Persisted to `.storkit/whatsapp_history.json` via `save_whatsapp_history`
|
||||
|
||||
**Slack session storage** (`server/src/chat/transport/slack.rs`):
|
||||
- Type: `SlackConversationHistory = Arc<TokioMutex<HashMap<String, RoomConversation>>>` (key = channel ID)
|
||||
- Persisted to `.storkit/slack_history.json` via `save_slack_history`
|
||||
|
||||
**Approach:**
|
||||
- Use `extract_reset_command` from `server/src/chat/transport/matrix/reset.rs` to detect the command (it works transport-agnostically)
|
||||
- Implement the reset inline in each transport's async message handler: clear `session_id` and `entries` for the sender/channel key, call the transport's own `save_*_history`, reply with confirmation
|
||||
- Add async intercepts in `whatsapp.rs` (~line 1107, after the rebuild intercept) and `slack.rs` (~line 845, after the rebuild intercept)
|
||||
- The fallback handler in `chat/commands/mod.rs` (`handle_reset_fallback`) stays as-is
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "reset" to the bot
3. Check server logs
|
||||
|
||||
## Actual Result
|
||||
|
||||
Log shows "No command matched, forwarding to LLM" — reset is sent to the LLM as a conversational message instead of clearing the session.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot clears the sender's session_id from conversation history and replies with confirmation like "Session cleared."
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport handles reset command: clears sender session_id and replies with confirmation
|
||||
- [ ] Slack transport handles reset command: clears channel session_id and replies with confirmation
|
||||
- [ ] Fallback handler in chat/commands/mod.rs no longer silently swallows the reset command
|
||||
@@ -0,0 +1,35 @@
|
||||
---
|
||||
name: "WhatsApp and Slack missing start command handler"
|
||||
---
|
||||
|
||||
# Bug 401: WhatsApp and Slack missing start command handler
|
||||
|
||||
## Description
|
||||
|
||||
The start command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Follow the **rebuild pattern** established in story 402.
|
||||
|
||||
- `extract_start_command` and `handle_start` already exist in `server/src/chat/transport/matrix/start.rs`
|
||||
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||
- Call `crate::chat::transport::matrix::start::extract_start_command` to detect the command, then `crate::chat::transport::matrix::start::handle_start` to execute it
|
||||
- The fallback handler in `chat/commands/mod.rs` (`handle_start_fallback`) stays as-is — it exists only so `help` lists the command
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "start <story_id>" to the bot
3. Check server logs
|
||||
|
||||
## Actual Result
|
||||
|
||||
Command falls through to LLM instead of starting an agent.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot starts an agent for the specified story and replies with confirmation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport handles start command: starts agent and replies with confirmation
|
||||
- [ ] Slack transport handles start command: starts agent and replies with confirmation
|
||||
+26
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: "WhatsApp and Slack missing rebuild command handler"
|
||||
---
|
||||
|
||||
# Bug 402: WhatsApp and Slack missing rebuild command handler
|
||||
|
||||
## Description
|
||||
|
||||
The rebuild command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "rebuild" to the bot
3. Check server logs
|
||||
|
||||
## Actual Result
|
||||
|
||||
Command falls through to LLM instead of triggering a server rebuild.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot triggers a server rebuild and replies with confirmation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport handles rebuild command: triggers rebuild and replies with confirmation
|
||||
- [ ] Slack transport handles rebuild command: triggers rebuild and replies with confirmation
|
||||
@@ -0,0 +1,37 @@
|
||||
---
|
||||
name: "WhatsApp and Slack missing rmtree command handler"
|
||||
retry_count: 2
|
||||
blocked: true
|
||||
---
|
||||
|
||||
# Bug 403: WhatsApp and Slack missing rmtree command handler
|
||||
|
||||
## Description
|
||||
|
||||
The rmtree command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Follow the **rebuild pattern** established in story 402.
|
||||
|
||||
- `extract_rmtree_command` and `handle_rmtree` already exist in `server/src/chat/transport/matrix/rmtree.rs`
|
||||
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||
- Call `crate::chat::transport::matrix::rmtree::extract_rmtree_command` to detect the command, then `crate::chat::transport::matrix::rmtree::handle_rmtree` to execute it
|
||||
- The fallback handler in `chat/commands/mod.rs` (`handle_rmtree_fallback`) stays as-is — it exists only so `help` lists the command
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "rmtree <story_id>" to the bot
3. Check server logs
|
||||
|
||||
## Actual Result
|
||||
|
||||
Command falls through to LLM instead of removing the worktree.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot removes the worktree for the specified story and replies with confirmation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport handles rmtree command: removes worktree and replies with confirmation
|
||||
- [ ] Slack transport handles rmtree command: removes worktree and replies with confirmation
|
||||
@@ -0,0 +1,36 @@
|
||||
---
|
||||
name: "WhatsApp and Slack missing assign command handler"
|
||||
---
|
||||
|
||||
# Bug 404: WhatsApp and Slack missing assign command handler
|
||||
|
||||
## Description
|
||||
|
||||
The assign command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Follow the **rebuild pattern** established in story 402.
|
||||
|
||||
- `extract_assign_command` and `handle_assign` already exist in `server/src/chat/transport/matrix/assign.rs`
|
||||
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||
- Call `crate::chat::transport::matrix::assign::extract_assign_command` to detect the command, then `crate::chat::transport::matrix::assign::handle_assign` to execute it
|
||||
- The fallback handler in `chat/commands/mod.rs` (`handle_assign_fallback` — note: the registry entry for `assign` currently calls `assign::handle_assign` synchronously; verify this doesn't conflict) stays as-is for `help` listing
|
||||
- The fallback in `chat/commands/assign.rs` may need to return `None` instead of a real response once the async path handles it
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "assign <story_id> <agent>" to the bot
3. Check server logs
|
||||
|
||||
## Actual Result
|
||||
|
||||
Command falls through to LLM instead of assigning the agent.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The bot assigns the specified agent to the story and replies with confirmation.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WhatsApp transport handles assign command: assigns agent and replies with confirmation
|
||||
- [ ] Slack transport handles assign command: assigns agent and replies with confirmation
|
||||
+30
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: "Auto-refresh expired OAuth token for Claude Code PTY"
|
||||
---
|
||||
|
||||
# Story 405: Auto-refresh expired OAuth token for Claude Code PTY
|
||||
|
||||
## User Story
|
||||
|
||||
As a storkit user with a Claude Max subscription, I want the server to automatically refresh my expired OAuth token so that chat, Matrix, and WhatsApp integrations don't stop working when the token expires.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
### Detection
|
||||
- [ ] When the Claude Code PTY returns an `authentication_failed` error, storkit detects it instead of passing the raw 401 JSON to the user
|
||||
|
||||
### Auto-refresh (credentials exist, refresh token valid)
|
||||
- [ ] Storkit reads the OAuth refresh token from `~/.claude/.credentials.json`
|
||||
- [ ] Storkit calls the Anthropic OAuth token refresh endpoint (`https://console.anthropic.com/v1/oauth/token` with `grant_type=refresh_token`) to obtain a new access token
|
||||
- [ ] Storkit writes the refreshed access token (and new expiresAt) back to `~/.claude/.credentials.json`
|
||||
- [ ] After a successful refresh, storkit automatically retries the original chat request
|
||||
- [ ] The refresh+retry is transparent to the user — they see no error
|
||||
|
||||
### Full login required (no credentials, or refresh token also expired)
|
||||
- [ ] If `.credentials.json` doesn't exist or the refresh call itself fails, storkit surfaces a clear error: "OAuth session expired. Please run `claude login` to re-authenticate."
|
||||
- [ ] The error message is surfaced through the normal chat stream (not just server logs)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- Implementing the full interactive `claude login` browser OAuth flow inside storkit
|
||||
- Proactive token refresh before expiry (refreshing on demand when the error occurs is sufficient)
|
||||
+21
@@ -0,0 +1,21 @@
|
||||
---
|
||||
name: "Browser-based OAuth login flow from web UI and chat integrations"
|
||||
---
|
||||
|
||||
# Story 406: Browser-based OAuth login flow from web UI and chat integrations
|
||||
|
||||
## User Story
|
||||
|
||||
As a new storkit user (or one whose refresh token has expired), I want to complete the full Claude OAuth login flow from the web UI, Matrix, or WhatsApp so that I don't need terminal access to run `claude login`.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] From the web UI, the user can initiate OAuth login — storkit generates the Anthropic authorize URL and opens it in a new tab
|
||||
- [ ] After the user authenticates in the browser, the OAuth callback writes accessToken, refreshToken, and expiresAt to ~/.claude/.credentials.json
|
||||
- [ ] From Matrix or WhatsApp, storkit sends the user a clickable OAuth authorize link when credentials are missing or fully expired
|
||||
- [ ] After successful login, the user can immediately start chatting without restarting storkit
|
||||
- [ ] If the OAuth callback fails or the user cancels, a clear error is shown
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,195 @@
|
||||
---
|
||||
name: "Fly.io Machines for multi-tenant storkit SaaS — docs, security & pricing"
|
||||
retry_count: 2
|
||||
blocked: true
|
||||
---
|
||||
|
||||
# Spike 407: Fly.io Machines for multi-tenant storkit SaaS — docs, security & pricing
|
||||
|
||||
## Question
|
||||
|
||||
What do Fly.io's published docs, security claims, and pricing say about using Machines as the isolation layer for a multi-tenant storkit SaaS? Is there anything that rules it out before we write code?
|
||||
|
||||
## Hypothesis
|
||||
|
||||
Fly.io Machines (Firecracker-based microVMs) are a viable isolation primitive for tenants running arbitrary shell commands, and the pricing model is workable at early SaaS scale.
|
||||
|
||||
## Timebox
|
||||
|
||||
2 hours
|
||||
|
||||
## Investigation Plan
|
||||
|
||||
- [x] Read Fly.io Machines API docs — what are the core primitives (machine lifecycle, networking, volumes, secrets)?
|
||||
- [x] Research Fly.io's published isolation model — what security guarantees do they document for Firecracker microVMs? Summarise claims and explicitly flag what would require independent security review before production use.
|
||||
- [x] Research cold start time — what do Fly.io docs and community benchmarks claim? Note that real numbers require a test account (covered in spike 408).
|
||||
- [x] Research persistent volume support — can a volume be attached per-tenant? What are the size/count limits?
|
||||
- [x] Research secret injection options — env vars, Fly Secrets API, volume mounts. What's the right approach for per-tenant `~/.claude/.credentials.json`?
|
||||
- [x] Research machine count and org limits — any hard caps that would block SaaS growth?
|
||||
- [x] Research pricing — always-on vs stop-on-idle machine costs at 10, 100, 1000 tenants. Include volume and egress costs.
|
||||
- [x] Identify any documented showstoppers.
|
||||
|
||||
## Findings
|
||||
|
||||
### 1. Core API Primitives
|
||||
|
||||
Base URL: `https://api.machines.dev` (or `http://_api.internal:4280` from within 6PN).
|
||||
Auth: `Authorization: Bearer <fly_api_token>`.
|
||||
|
||||
**Machine lifecycle** — full REST API:
|
||||
- `POST /v1/apps/{app}/machines` — create (+ optionally start via `skip_launch: false`)
|
||||
- `POST /v1/apps/{app}/machines/{id}/start` — start stopped machine (~10ms same-region)
|
||||
- `POST /v1/apps/{app}/machines/{id}/stop` — stop (SIGINT/SIGKILL, retains disk)
|
||||
- `POST /v1/apps/{app}/machines/{id}/suspend` — snapshot RAM to disk for fast resume
|
||||
- `DELETE /v1/apps/{app}/machines/{id}` — destroy (irreversible)
|
||||
- `GET /v1/apps/{app}/machines/{id}/wait?state=started` — synchronize on state transitions
|
||||
|
||||
Machine states: `created → started → stopped/suspended → destroyed`.
|
||||
Leases (`POST .../lease`) provide exclusive mutation locks — useful for orchestration.
|
||||
|
||||
**Rate limits**: 1 req/s per action per machine/app ID (burst to 3). Matters for rapid tenant provisioning.
|
||||
|
||||
### 2. Isolation Model
|
||||
|
||||
Each Fly Machine is a **Firecracker microVM** — a separate Linux kernel, not a container. Defense in depth:
|
||||
1. KVM hardware-enforced memory and CPU isolation
|
||||
2. Minimal device model (5 virtual devices vs QEMU's hundreds)
|
||||
3. Rust VMM implementation (no C memory-safety bugs in VMM)
|
||||
4. `seccomp-bpf` limits Firecracker process to ~40 syscalls with argument filters
|
||||
5. Jailer chroots + namespaces + drops privileges around the Firecracker process
|
||||
|
||||
From official docs: *"MicroVMs provide strong hardware-virtualization-based security and workload isolation, which allows us to safely run applications from different customers on shared hardware."* Full VM isolation prevents kernel sharing between apps.
|
||||
|
||||
Tenants have full root inside their VM by design — the kernel boundary contains blast radius.
|
||||
|
||||
**Claims requiring independent verification before production use:**
|
||||
- Whether SMT/hyperthreading is disabled on hosts (directly relevant to Spectre/MDS side-channel attacks — Firecracker's own docs recommend disabling SMT for strict multi-tenancy, but Fly.io does not publicly document this)
|
||||
- CPU dedication is explicitly described as "best-effort", not a hard guarantee
|
||||
- Pentest scope/dates/findings for three named firms (Atredis Partners, Doyensec, Tetrel) are not published
|
||||
- Whether the SOC 2 Type II report scope covers the Firecracker isolation layer specifically
|
||||
|
||||
**Compliance**: SOC 2 Type II certified (report available on request), ISO 27001 datacenters (Equinix), HIPAA BAA available, GDPR DPA available.
|
||||
|
||||
### 3. Network Isolation
|
||||
|
||||
Each machine gets a private IPv6 (6PN) address. Key isolation controls:
|
||||
- Cross-organization: Fly.io platform blocks all cross-org traffic at the platform level — strong boundary
|
||||
- Intra-organization: **open by default** — any machine in the same org can reach any other
|
||||
|
||||
For multi-tenant SaaS, this means tenant machines in the same Fly.io org are NOT network-isolated from each other unless you use **Custom Private Networks (6PNs)**:
|
||||
- `POST /v1/apps` with a `network` field assigns that app to an isolated 6PN
|
||||
- Apps on different 6PNs cannot reach each other via private networking (only via public IPs)
|
||||
- **Assignment is permanent** — cannot be changed after app creation; plan upfront
|
||||
|
||||
Stable machine addressing: `<machine_id>.vm.<appname>.internal` (6PN addresses change on migration).
|
||||
|
||||
### 4. Cold Start Times
|
||||
|
||||
| Scenario | Documented Latency |
|
||||
|---|---|
|
||||
| Cold boot (create + start, same region) | ~300 ms |
|
||||
| Start existing stopped machine (same region) | ~10 ms |
|
||||
| Start stopped machine (cross-region) | ~150 ms |
|
||||
| Resume from suspend (same region) | Sub-100ms (implied) |
|
||||
|
||||
Community-observed: 400–600ms end-to-end (including app init) for stopped machine cold starts.
|
||||
FLAME workloads report 3–8s in some restart-race conditions.
|
||||
|
||||
Real latency numbers with our actual image size require a test account — covered by spike 408.
|
||||
|
||||
### 5. Persistent Volume Support
|
||||
|
||||
- Volumes are created via `POST /v1/apps/{app}/volumes` with `size_gb` (default 3 GB), region, encryption flag
|
||||
- Attached to machine via `config.mounts[].volume` at create/update time
|
||||
- **1:1 constraint**: one volume per machine, one machine per volume, same region required
|
||||
- Volumes persist across machine stop/start/suspend/destroy — they are a separate resource
|
||||
- Can extend volume online (`PUT .../volumes/{id}/extend`)
|
||||
- Volume snapshots available (billed at $0.08/GB/month as of Jan 2026)
|
||||
- No documented per-org volume count cap (separate from machine cap)
|
||||
|
||||
For per-tenant `~/.claude/` home directories, attach one volume per tenant machine — straightforward.
|
||||
|
||||
### 6. Secret Injection
|
||||
|
||||
Four methods, in order of recommendation for sensitive credentials:
|
||||
|
||||
1. **Fly Secrets** (`fly secrets set KEY=value`) — encrypted at rest, injected as env vars at boot to all machines in the app. **Secrets are per-app, not per-machine** — all machines in an app share the same secret set. For per-tenant isolated secrets, each tenant needs their own app (or use method 3).
|
||||
|
||||
2. **`config.files` with `secret_name`** — writes a named secret to a file path inside the machine at start time:
|
||||
```json
|
||||
{"guest_path": "/root/.claude/.credentials.json", "secret_name": "TENANT_CREDENTIALS"}
|
||||
```
|
||||
This is the right approach for per-tenant `~/.claude/.credentials.json` if tenants share an app — pair with `ignore_app_secrets: true` and per-process secret scoping.
|
||||
|
||||
3. **`config.env`** — plain env vars in machine config, not encrypted at rest. Non-sensitive config only.
|
||||
|
||||
4. **`config.processes[].secrets`** — inject named secrets only to specific process groups; `ignore_app_secrets: true` prevents inheritance of app-level secrets.
|
||||
|
||||
**Recommended architecture**: One app per tenant (isolated 6PN + isolated secrets) is the cleanest security model. Secrets stored per app via Fly Secrets, credentials file written via `config.files` at boot.
|
||||
|
||||
### 7. Machine Count and Org Limits
|
||||
|
||||
| Limit | Default | Hard Cap |
|
||||
|---|---|---|
|
||||
| Machines per org (all states) | 50 | None architectural |
|
||||
|
||||
- The 50-machine default is a **fail-safe**, not an architectural limit. Fly.io runs customers with 100,000+ machines.
|
||||
- To raise: email `billing@fly.io` with requirements.
|
||||
- **This limit will be hit immediately in any real multi-tenant deployment** — must budget for an early limit-raise request before launching.
|
||||
- API rate limit of 1 req/s per action also needs consideration for bulk tenant provisioning scripts.
|
||||
|
||||
### 8. Pricing (as of March 2026)
|
||||
|
||||
**Compute (per second, billed only while running):**
|
||||
|
||||
| Preset | Per Month always-on |
|
||||
|---|---|
|
||||
| shared-cpu-1x (256 MB) | $2.05 |
|
||||
| shared-cpu-2x (512 MB) | $4.10 |
|
||||
| performance-1x (2 GB) | $32.64 |
|
||||
|
||||
**Storage**: $0.15/GB/month (provisioned, regardless of machine state)
|
||||
**Egress**: $0.02/GB (North America/Europe), $0.04/GB (APAC/SA), $0.12/GB (Africa/India)
|
||||
**Dedicated IPv4**: $2.00/month per app (shared IPv6 is free)
|
||||
|
||||
**No free tier** for new orgs (eliminated 2024). No minimum spend, no base fee.
|
||||
|
||||
**Monthly cost estimates** (1x shared-cpu-1x, 1 GB volume, 1 GB egress/tenant, US East):
|
||||
|
||||
| Scenario | Per Tenant | 10 Tenants | 100 Tenants | 1,000 Tenants |
|
||||
|---|---|---|---|---|
|
||||
| Always-on (730h/month) | $2.22 | $22 | $222 | $2,220 |
|
||||
| Autostop, 8h/day active | $0.92 | $9 | $92 | $920 |
|
||||
| Autostop, 2h/day active | $0.53 | $5 | $53 | $530 |
|
||||
|
||||
At scale, volume storage becomes the dominant cost when machines are idle. At 1,000 tenants autostopped, storage is ~$150/month vs compute of $170–$370/month.
|
||||
|
||||
### 9. Showstoppers
|
||||
|
||||
**None identified** that rule it out. The following require action before launch:
|
||||
|
||||
| Risk | Severity | Mitigation |
|
||||
|---|---|---|
|
||||
| Default 50-machine org cap | High (blocks launch) | Email billing@fly.io early; no architectural cap |
|
||||
| SMT/hyperthreading not documented | Medium (security) | Request confirmation from Fly.io support before production; mitigated by VM-level isolation |
|
||||
| Intra-org network open by default | Medium (security) | Use one app per tenant with custom 6PNs |
|
||||
| Secrets are per-app not per-machine | Low | Use one app per tenant or `config.files` with `secret_name` |
|
||||
| Volume and machine must be same region | Low (ops) | Enforce region consistency in provisioning code |
|
||||
| API rate limit 1 req/s per machine | Low | Throttle bulk provisioning loops |
|
||||
|
||||
## Recommendation
|
||||
|
||||
**Proceed.** Fly.io Machines are a viable isolation layer for multi-tenant storkit SaaS.
|
||||
|
||||
**Architecture to validate in spike 408:**
|
||||
- One Fly.io app per tenant (provides 6PN network isolation + isolated secrets)
|
||||
- One Firecracker microVM per tenant app (shared-cpu-1x 256 MB baseline; adjust per observed usage)
|
||||
- One persistent volume per tenant (1 GB baseline for `~/.claude/`, repos, storkit state)
|
||||
- Autostop/autoresume enabled — 70–92% compute cost reduction vs always-on for typical dev tool usage
|
||||
- Tenant credentials injected via `config.files` + Fly Secrets at machine start
|
||||
|
||||
**Pricing verdict**: Workable at early SaaS scale. At 100 tenants with autostop (8h/day), costs ~$92/month; at 1,000 tenants ~$920/month. Margins are viable if per-tenant pricing is $5–$20/month.
|
||||
|
||||
**Before production**: Confirm with Fly.io support whether SMT is disabled on worker hosts. Request org machine limit raised to 200–500 during private beta.
|
||||
|
||||
**Spike 408 scope**: Validate cold start latency, autostop resume behavior, and volume persistence with a real test machine running the storkit container image.
|
||||
@@ -0,0 +1,69 @@
|
||||
---
|
||||
name: "Split whatsapp.rs into focused modules"
|
||||
retry_count: 2
|
||||
blocked: true
|
||||
---
|
||||
|
||||
# Refactor 409: Split whatsapp.rs into focused modules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
whatsapp.rs (currently 2000+ lines, which makes it expensive for agents to navigate and edit) is split into focused modules under chat/transport/whatsapp/.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [x] mod.rs contains webhook handlers, WebhookContext, and re-exports
|
||||
- [x] meta.rs contains WhatsAppTransport, ChatTransport impl, and Graph API structs/calls
|
||||
- [x] twilio.rs contains TwilioWhatsAppTransport, ChatTransport impl, and Twilio structs/calls
|
||||
- [x] history.rs contains WhatsAppConversationHistory, load/save_whatsapp_history, and MessagingWindowTracker
|
||||
- [x] commands.rs contains handle_incoming_message, handle_llm_message, and all async command dispatch
|
||||
- [x] format.rs contains markdown_to_whatsapp and chunk_for_whatsapp
|
||||
- [x] All existing tests pass
|
||||
- [x] No behaviour changes — pure structural refactor
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
|
||||
## Test Results
|
||||
|
||||
<!-- storkit-test-results: {"unit":[{"name":"whatsapp::format::tests::chunk_short_message_returns_single_chunk","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_exactly_at_limit_returns_single_chunk","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_splits_on_paragraph_boundary","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_splits_on_line_boundary_when_no_paragraph_break","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_hard_splits_continuous_text","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_empty_string_returns_single_empty","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_headers_to_bold","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_bold","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_bold_italic","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_strikethrough","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_links","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_removes_horizontal_rules","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_preserves_inline_code","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_preserves_code_blocks","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_mixed_message","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_passthrough_plain_text","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_basics","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_expiry","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_reset","status":"pass","details":null},{"name":"whatsapp::history::tests::load_empty_histo
ry","status":"pass","details":null},{"name":"whatsapp::history::tests::save_and_load_history","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_valid","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_missing_body","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_missing_from","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_help","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_status","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_unknown","status":"pass","details":null},{"name":"whatsapp::mod::tests::webhook_context_basics","status":"pass","details":null}],"integration":[]} -->
|
||||
|
||||
### Unit Tests (28 passed, 0 failed)
|
||||
|
||||
- ✅ whatsapp::format::tests::chunk_short_message_returns_single_chunk
|
||||
- ✅ whatsapp::format::tests::chunk_exactly_at_limit_returns_single_chunk
|
||||
- ✅ whatsapp::format::tests::chunk_splits_on_paragraph_boundary
|
||||
- ✅ whatsapp::format::tests::chunk_splits_on_line_boundary_when_no_paragraph_break
|
||||
- ✅ whatsapp::format::tests::chunk_hard_splits_continuous_text
|
||||
- ✅ whatsapp::format::tests::chunk_empty_string_returns_single_empty
|
||||
- ✅ whatsapp::format::tests::md_to_wa_converts_headers_to_bold
|
||||
- ✅ whatsapp::format::tests::md_to_wa_converts_bold
|
||||
- ✅ whatsapp::format::tests::md_to_wa_converts_bold_italic
|
||||
- ✅ whatsapp::format::tests::md_to_wa_converts_strikethrough
|
||||
- ✅ whatsapp::format::tests::md_to_wa_converts_links
|
||||
- ✅ whatsapp::format::tests::md_to_wa_removes_horizontal_rules
|
||||
- ✅ whatsapp::format::tests::md_to_wa_preserves_inline_code
|
||||
- ✅ whatsapp::format::tests::md_to_wa_preserves_code_blocks
|
||||
- ✅ whatsapp::format::tests::md_to_wa_mixed_message
|
||||
- ✅ whatsapp::format::tests::md_to_wa_passthrough_plain_text
|
||||
- ✅ whatsapp::history::tests::messaging_window_tracker_basics
|
||||
- ✅ whatsapp::history::tests::messaging_window_tracker_expiry
|
||||
- ✅ whatsapp::history::tests::messaging_window_tracker_reset
|
||||
- ✅ whatsapp::history::tests::load_empty_history
|
||||
- ✅ whatsapp::history::tests::save_and_load_history
|
||||
- ✅ whatsapp::twilio::tests::parse_twilio_form_valid
|
||||
- ✅ whatsapp::twilio::tests::parse_twilio_form_missing_body
|
||||
- ✅ whatsapp::twilio::tests::parse_twilio_form_missing_from
|
||||
- ✅ whatsapp::commands::tests::parse_command_help
|
||||
- ✅ whatsapp::commands::tests::parse_command_status
|
||||
- ✅ whatsapp::commands::tests::parse_command_unknown
|
||||
- ✅ whatsapp::mod::tests::webhook_context_basics
|
||||
|
||||
### Integration Tests (0 passed, 0 failed)
|
||||
|
||||
*No integration tests recorded.*
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "loc bot command — top files by line count"
|
||||
---
|
||||
|
||||
# Story 410: loc bot command — top files by line count
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer, I want to send `loc` to the bot and see the top files by line count, so I can spot files that are getting too large before they become a problem for agents.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] loc command is registered in chat/commands/mod.rs and appears in help output
|
||||
- [ ] `loc` returns the top 10 source files by line count (excluding generated files, node_modules, target/, .storkit/worktrees/)
|
||||
- [ ] `loc 5` returns the top 5 files
|
||||
- [ ] `loc 20` returns the top 20 files
|
||||
- [ ] Output includes file path, line count, and rank
|
||||
- [ ] Command works from all transports (Matrix, WhatsApp, Slack)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: "Split slack.rs into focused modules"
|
||||
---
|
||||
|
||||
# Refactor 413: Split slack.rs into focused modules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/chat/transport/slack.rs (1902 lines) into a slack/ directory with focused modules, mirroring the whatsapp/ module structure from story 409.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] slack.rs is replaced by a slack/ directory with mod.rs re-exporting all public types
|
||||
- [ ] meta.rs contains SlackTransport struct, ChatTransport trait impl, and Slack API request/response types
|
||||
- [ ] commands.rs contains incoming message dispatch, permission logic, and slash command handling
|
||||
- [ ] format.rs contains markdown_to_slack() conversion
|
||||
- [ ] history.rs contains load_slack_history(), save_slack_history(), and SlackHistoryDump
|
||||
- [ ] verify.rs contains verify_slack_signature(), sha256(), and constant_time_eq()
|
||||
- [ ] mod.rs contains Slack event types, webhook handlers, and SlackWebhookContext
|
||||
- [ ] All existing tests are preserved and pass in their respective modules
|
||||
- [ ] No public API changes — all existing imports from other crates continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,19 @@
|
||||
---
|
||||
name: "loc command filters out known-huge files"
|
||||
---
|
||||
|
||||
# Story 414: loc command filters out known-huge files
|
||||
|
||||
## User Story
|
||||
|
||||
As a ..., I want ..., so that ...
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] loc command excludes lockfiles and generated files (e.g. package-lock.json, Cargo.lock, frontend/package-lock.json) from results
|
||||
- [ ] Exclusion list is defined as a constant, easy to extend
|
||||
- [ ] Excluded files do not count toward line totals
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: "Split agents/pool/mod.rs into submodules"
|
||||
---
|
||||
|
||||
# Refactor 415: Split agents/pool/mod.rs into submodules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/agents/pool/mod.rs (2407 lines) into focused submodules within the pool/ directory.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] types.rs contains StoryAgent, PendingGuard, AgentInfo, composite_key, and related helper structs
|
||||
- [ ] lifecycle.rs contains start_agent, stop_agent, wait_for_agent and their unit tests
|
||||
- [ ] worktree.rs contains create_worktree, get_project_root, find_active_story_stage and their unit tests
|
||||
- [ ] query.rs contains list_agents, available_agents_for_stage, get_log_info, subscribe, drain_events and their unit tests
|
||||
- [ ] process.rs contains kill_all_children, kill_child_for_key, ChildKiller registry methods and their unit tests
|
||||
- [ ] test_helpers.rs contains inject_test_agent and its variants (4 methods)
|
||||
- [ ] mod.rs contains AgentPool struct, new(), and re-exports all public types
|
||||
- [ ] Unit tests live in their respective module files, not in a separate tests module
|
||||
- [ ] No public API changes — all existing imports continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: "Split io/fs.rs into submodules"
|
||||
---
|
||||
|
||||
# Refactor 416: Split io/fs.rs into submodules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/io/fs.rs (2007 lines) into focused submodules within an fs/ directory.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] scaffold.rs contains scaffold_story_kit, write_file_if_missing, write_script_if_missing, write_story_kit_gitignore, append_root_gitignore_entries, detect_components_toml, detect_script_test, generate_project_toml and their unit tests
|
||||
- [ ] project.rs contains open_project, close_project, get_current_project, get_known_projects, forget_known_project, ensure_project_root_with_story_kit, validate_project_path and their unit tests
|
||||
- [ ] files.rs contains read_file, write_file, list_directory, list_project_files, FileEntry, create_directory_absolute and their unit tests
|
||||
- [ ] paths.rs contains resolve_cli_path, resolve_path, resolve_path_impl, find_story_kit_root, get_home_directory and their unit tests
|
||||
- [ ] preferences.rs contains get_model_preference, set_model_preference and their unit tests
|
||||
- [ ] mod.rs re-exports all public types and functions
|
||||
- [ ] Unit tests live in their respective module files
|
||||
- [ ] No public API changes — all existing imports continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,30 @@
|
||||
---
|
||||
name: "Split matrix/bot.rs into focused modules"
|
||||
---
|
||||
|
||||
# Refactor 417: Split matrix/bot.rs into focused modules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/chat/transport/matrix/bot.rs (1926 lines) into focused submodules.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] history.rs contains ConversationRole, ConversationEntry, RoomConversation, PersistedHistory, load_history, save_history and their unit tests
|
||||
- [ ] context.rs contains BotContext struct
|
||||
- [ ] run.rs contains run_bot main event loop
|
||||
- [ ] messages.rs contains on_room_message, handle_message, format_user_prompt, is_permission_approval and their unit tests
|
||||
- [ ] mentions.rs contains mentions_bot, contains_word, is_reply_to_bot and their unit tests
|
||||
- [ ] verification.rs contains check_sender_verified, on_to_device_verification_request, handle_sas_verification and their unit tests
|
||||
- [ ] format.rs contains markdown_to_html, format_startup_announcement and their unit tests
|
||||
- [ ] mod.rs re-exports all public types
|
||||
- [ ] Unit tests live in their respective module files
|
||||
- [ ] No public API changes — all existing imports continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: "Split pool/auto_assign.rs into submodules"
|
||||
---
|
||||
|
||||
# Refactor 418: Split pool/auto_assign.rs into submodules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/agents/pool/auto_assign.rs (1813 lines) into focused submodules.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] auto_assign.rs contains auto_assign_available_work and its unit tests
|
||||
- [ ] reconcile.rs contains reconcile_on_startup and its unit tests
|
||||
- [ ] watchdog.rs contains run_watchdog_once, spawn_watchdog, check_orphaned_agents and their unit tests
|
||||
- [ ] scan.rs contains scan_stage_items, is_story_assigned_for_stage, count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free and their unit tests
|
||||
- [ ] story_checks.rs contains read_story_front_matter_agent, has_review_hold, is_story_blocked, has_merge_failure and their unit tests
|
||||
- [ ] mod.rs wires the submodules and re-exports all public items
|
||||
- [ ] Unit tests live in their respective module files
|
||||
- [ ] No public API changes — all existing imports continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+29
@@ -0,0 +1,29 @@
|
||||
---
|
||||
name: "Matrix bot crashes on transient network error instead of retrying"
|
||||
---
|
||||
|
||||
# Bug 419: Matrix bot crashes on transient network error instead of retrying
|
||||
|
||||
## Description
|
||||
|
||||
The Matrix bot treats a transient sync error as fatal and stops entirely. A single failed HTTP request to the homeserver kills the bot, requiring a full server rebuild to recover.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Run storkit with Matrix bot enabled
2. Homeserver becomes temporarily unreachable (network blip, DNS hiccup, server restart)
3. Bot hits sync error and crashes
|
||||
|
||||
## Actual Result
|
||||
|
||||
Bot logs "Fatal error: Matrix sync error: error sending request for url (...)" and stops responding. No retry, no recovery.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Bot logs a warning, backs off with exponential delay, and retries the sync. Only crash on unrecoverable errors (invalid credentials, banned, etc).
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Transient network errors (connection refused, timeout, DNS failure) trigger a retry with exponential backoff
|
||||
- [ ] Bot logs a warning on each failed retry attempt
|
||||
- [ ] Bot resumes normal operation once the homeserver is reachable again
|
||||
- [ ] Unrecoverable errors (401, 403) still cause a clean shutdown with a clear error message
|
||||
- [ ] Bot sends a notification after recovering from a network outage
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "loc for a specified file — bot command and web UI slash command"
|
||||
---
|
||||
|
||||
# Story 420: loc for a specified file — bot command and web UI slash command
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer, I want to send `loc <filepath>` to the bot or use it as a slash command in the web UI to see the line count for a specific file, so I can quickly check how large a file is without leaving my workflow.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] loc <filepath> returns the line count for the specified file
|
||||
- [ ] Relative paths are resolved against the project root
|
||||
- [ ] If the file does not exist, returns a clear error
|
||||
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||
- [ ] Works as a slash command in the web UI
|
||||
- [ ] loc with no argument retains existing behavior (top files by line count)
|
||||
- [ ] Exposed as an MCP tool so agents can query file line counts programmatically
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "Timer command for deferred agent start"
|
||||
---
|
||||
|
||||
# Story 421: Timer command for deferred agent start
|
||||
|
||||
## User Story
|
||||
|
||||
As a ..., I want ..., so that ...
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Bot command `timer <story_id> <HH:MM>` schedules a one-shot deferred start for the given story at the next occurrence of that time (server-local timezone)
|
||||
- [ ] Bot command `timer list` shows all pending timers with story ID and scheduled time
|
||||
- [ ] Bot command `timer cancel <story_id>` removes the pending timer for that story
|
||||
- [ ] Timers are persisted to .storkit/timers.json so they survive server restarts
|
||||
- [ ] A 30s tick loop (tokio task, same pattern as watchdog) checks for due timers and calls start_agent when triggered
|
||||
- [ ] When a timer fires, the story must already be in current — timer does not move stories between stages
|
||||
- [ ] Fired timers are removed after execution (one-shot, not recurring)
|
||||
- [ ] Multiple timers for the same time are supported and respect agent slot contention via auto-assign
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "Unblock command to reset blocked stories"
|
||||
---
|
||||
|
||||
# Story 422: Unblock command to reset blocked stories
|
||||
|
||||
## User Story
|
||||
|
||||
As a ..., I want ..., so that ...
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Bot command `unblock <story_id>` clears blocked flag and resets retry_count to 0 on the story front matter
|
||||
- [ ] Replies with confirmation including story ID and name
|
||||
- [ ] Returns clear error if story is not found or not blocked
|
||||
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||
- [ ] Exposed as an MCP tool so agents can unblock stories programmatically
|
||||
- [ ] Works as a slash command in the web UI
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+22
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "Auto-schedule timer on rate limit to resume after reset"
|
||||
---
|
||||
|
||||
# Story 423: Auto-schedule timer on rate limit to resume after reset
|
||||
|
||||
## User Story
|
||||
|
||||
As a ..., I want ..., so that ...
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] When a rate_limit_event with a hard block (not just allowed_warning) is received from the PTY stream, parse the reset time from rate_limit_info
|
||||
- [ ] Automatically create a timer (via TimerStore from story 421) for the blocked story at the parsed reset time
|
||||
- [ ] If a timer already exists for that story, update it to the later reset time rather than creating a duplicate
|
||||
- [ ] Log the auto-scheduled timer with story ID, agent name, and scheduled resume time
|
||||
- [ ] Notify chat transports that the story was rate-limited and will auto-resume at the scheduled time
|
||||
- [ ] When the timer fires and restarts the agent, the existing worktree and committed work are preserved
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Rate limit traffic light status and hard block alerts"
|
||||
agent: coder-opus
|
||||
---
|
||||
|
||||
# Story 424: Rate limit traffic light status and hard block alerts
|
||||
|
||||
## User Story
|
||||
|
||||
As a ..., I want ..., so that ...
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Remove repetitive per-message throttle warnings (allowed_warning) from chat transports entirely
|
||||
- [ ] Pipeline status messages show a coloured dot next to each work item: green for running normally, yellow for throttled, red for hard blocked, white/grey for idle/no agent
|
||||
- [ ] Hard block events (429 / rate_limit_exceeded) still send an individual chat notification with a red icon, including the reset time
|
||||
- [ ] Throttle and block state tracked per-agent so the status dot updates in real time
|
||||
- [ ] Server-side logging of throttle warnings is preserved for debugging
|
||||
- [ ] Traffic light dots in status report should be small/compact, not large emoji
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "Chat notification when a story blocks with reason"
|
||||
---
|
||||
|
||||
# Story 425: Chat notification when a story blocks with reason
|
||||
|
||||
## User Story
|
||||
|
||||
As a project owner monitoring agent progress via chat, I want to receive a notification when a story gets blocked, including the reason, so that I can decide whether to unblock it or investigate the failure.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] When a story transitions to blocked state, send a chat notification to all configured transports
|
||||
- [ ] Notification includes the story ID, story name, and the reason for blocking (e.g. gate failure output, max retries exceeded, empty diff)
|
||||
- [ ] Notification uses a red or warning icon to distinguish from normal status messages
|
||||
- [ ] Works across Matrix, WhatsApp, and Slack transports
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+77
@@ -0,0 +1,77 @@
|
||||
---
|
||||
name: "Mergemaster pipeline marks story done without verifying code landed on master"
|
||||
retry_count: 1
|
||||
---
|
||||
|
||||
# Bug 426: Mergemaster pipeline marks story done without verifying code landed on master
|
||||
|
||||
## Description
|
||||
|
||||
The mergemaster pipeline can mark a story as done even when the feature code never makes it to master. The cherry-pick step in merge.rs may fail or be skipped, but the pipeline still advances the story to done via the filesystem watcher. There is no post-merge verification that the code actually exists on master before marking done.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Observed on stories 422 and 403. For 422: mergemaster created merge-queue branch, resolved 2 conflicts in chat/commands/mod.rs and http/mcp/mod.rs, passed quality gates, created merge-queue commit cb2ef6b (4 files, 333 insertions including unblock.rs). But the done commit on master (05db012) only moves the story file — zero code changes. There is no 'storkit: merge 422' commit on master at all. The feature branch (db3157f) still has the code but it was never cherry-picked onto master.
|
||||
|
||||
## Manual Merge Notes
|
||||
|
||||
When manually cherry-picking 422 onto master, two conflicts arose:
|
||||
|
||||
1. `server/src/chat/commands/mod.rs` — both 421 (timer) and 422 (unblock) added entries to the same BotCommand registry. Resolution: keep both.
|
||||
2. `server/src/http/mcp/mod.rs` — 420 (loc_file) and 422 (unblock) both bumped the tool count assertion from 49→50. Resolution: keep loc_file assertion, bump count to 51.
|
||||
|
||||
Additionally, the cherry-pick could not proceed at all because master was on the `merge-queue/424` branch with 3 unresolved files (notifications.rs, ws.rs, watcher.rs). A concurrent in-progress merge left the working tree dirty, which likely caused the original cherry-pick to fail silently. This suggests a race condition: the filesystem watcher commits (story file moves) can leave master in a state where the cherry-pick step in merge.rs fails.
|
||||
|
||||
## Full Audit of Done Stories (2026-03-28)
|
||||
|
||||
Audited all 10 stories listed below (those in `5_done/` plus story 424, since moved back to backlog) to check whether their code actually landed on master:
|
||||
|
||||
| Story | Merge Commit | Code on Master |
|
||||
|-------|-------------|----------------|
|
||||
| 417 — Split matrix/bot.rs | `665c036` (9 files, +1973/-1926) | YES |
|
||||
| 418 — Split pool/auto_assign.rs | `d375c4b` (7 files, +1901/-1813) | YES |
|
||||
| 419 — Matrix bot network error | `1193b7a` (1 file, +121/-3) | YES |
|
||||
| 420 — loc file command | `d6f8239` (5 files, +112/-32) | YES |
|
||||
| 421 — Timer command | `cf5424f` (7 files, +836) | YES |
|
||||
| 422 — Unblock command | `6c6bc35` (4 files, +336) — manual cherry-pick | YES |
|
||||
| 423 — Auto-schedule timer on rate limit | `b44f3a3` + `8ab2e19` (6 files, +375/-8) — manual cherry-pick | YES |
|
||||
| **424 — Rate limit traffic light** | **None** | **NO — moved back to backlog for redo** |
|
||||
| 425 — Chat notification on story block | `98b5475` (5 files, +184/-15) | YES |
|
||||
| **427 — Text normalization for line breaks** | **None** | **NO — phantom done, code never landed** |
|
||||
|
||||
**4 out of 10 stories (422, 423, 424, 427) had broken merges.** 422 and 423 were fixed via manual cherry-pick. 424 was moved back to backlog for a fresh run. 427 also hit the same bug — marked done without code on master.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Story moved to done with no code on master. The merge-queue commit exists on a detached branch but was never applied to master. No merge commit appears in git log on master.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Pipeline should verify that the cherry-pick produced a merge commit on master before advancing to done. If cherry-pick fails or is missing, the story should remain in merge stage with a merge_failure flag.
|
||||
|
||||
## Suggested Fix
|
||||
|
||||
The code path is: `merge.rs::run_squash_merge` → `pipeline/merge.rs::start_merge_agent_work` → `lifecycle.rs::move_story_to_archived`.
|
||||
|
||||
`run_squash_merge` (merge.rs:354) cherry-picks the merge-queue commit onto `project_root` and checks `cp.status.success()`. If it returns `success: true`, `start_merge_agent_work` (pipeline/merge.rs:106) immediately calls `move_story_to_archived`, which moves the story file to `5_done/`. The watcher then commits "storkit: done".
|
||||
|
||||
The gap: between the cherry-pick returning success and the story moving to done, nobody verifies the cherry-pick actually produced a code commit on master. Possible failure modes:
|
||||
|
||||
1. `project_root` is not on master (e.g. checked out to a merge-queue branch from a concurrent merge)
|
||||
2. Cherry-pick exits 0 but produces an empty commit (no code diff)
|
||||
3. Cherry-pick succeeds on the wrong branch
|
||||
|
||||
**Fix:** After the cherry-pick in `run_squash_merge` succeeds (line 384), before returning `success: true`:
|
||||
|
||||
1. Verify `project_root` is on master: `git rev-parse --abbrev-ref HEAD` must equal the base branch
|
||||
2. Verify the HEAD commit on master contains the expected merge message (e.g. matches `storkit: merge <story_id>`) or has a non-empty diff
|
||||
3. If either check fails, abort the cherry-pick and return `success: false`
|
||||
|
||||
This keeps the fix entirely within `run_squash_merge` — no changes needed to the pipeline advance or lifecycle code.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Pipeline must not move a story to done unless a merge commit containing the feature code exists on master
|
||||
- [ ] If cherry-pick fails or produces no code diff on master, the merge must be reported as failed
|
||||
- [ ] Add a post-merge verification step that checks git log on master for the expected merge commit before advancing to done
|
||||
- [ ] When verification fails, emit a merge_failure and leave the story in the merge stage for retry
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "Server-side text normalization for chat message line breaks"
|
||||
---
|
||||
|
||||
# Story 427: Server-side text normalization for chat message line breaks
|
||||
|
||||
## User Story
|
||||
|
||||
As a user reading bot messages in Matrix, I want single newlines between sentences to render correctly, so that messages don't show up with words joined together like "sentence one.Sentence two".
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Add a text normalization step before markdown-to-HTML conversion in the Matrix transport that converts single newlines between non-empty prose lines into double newlines
|
||||
- [ ] Preserve intentional single-newline formatting in bullet lists, headings, table rows, and code fences
|
||||
- [ ] Apply the same normalization in WhatsApp and Slack transports
|
||||
- [ ] Unit tests covering prose paragraphs, bullet lists, code blocks, and mixed content
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: "Split pool/pipeline.rs into submodules"
|
||||
---
|
||||
|
||||
# Refactor 428: Split pool/pipeline.rs into submodules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Refactor the monolithic server/src/agents/pool/pipeline.rs (1789 lines) into focused submodules.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] advance.rs contains run_pipeline_advance, spawn_pipeline_advance, should_block_story and their unit tests
|
||||
- [ ] completion.rs contains run_server_owned_completion, report_completion and their unit tests
|
||||
- [ ] merge.rs contains start_merge_agent_work, run_merge_pipeline, get_merge_status, set_merge_failure_reported and their unit tests
|
||||
- [ ] mod.rs re-exports all public items and wires the submodules
|
||||
- [ ] Unit tests live in their respective module files
|
||||
- [ ] No public API changes — all existing imports continue to work
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Interactive project setup wizard for new storkit projects"
|
||||
agent: coder-opus
|
||||
---
|
||||
|
||||
# Story 429: Interactive project setup wizard for new storkit projects
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer adopting storkit on an existing project, I want a guided setup process that scaffolds the .storkit directory and has an agent generate project-specific configuration files, so that I can get up and running without manually writing specs and scripts.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] storkit init scaffolds .storkit/ directory structure, project.toml, and .mcp.json without clobbering any existing files (especially CLAUDE.md)
|
||||
- [ ] Setup wizard tracks progress through ordered steps, resumable if interrupted
|
||||
- [ ] Step 1: scaffold .storkit/ directory structure and project.toml
|
||||
- [ ] Step 2: agent reads codebase and generates specs/00_CONTEXT.md, user confirms or requests revision
|
||||
- [ ] Step 3: agent reads tech stack and generates specs/tech/STACK.md, user confirms or requests revision
|
||||
- [ ] Step 4: agent creates script/test that runs the project's actual test suite, user runs it to verify, then confirms
|
||||
- [ ] Step 5: agent creates script/release tailored to the project's deployment, user confirms
|
||||
- [ ] Step 6: agent creates script/test_coverage if the stack supports it, user confirms
|
||||
- [ ] Each step gates on user confirmation before advancing to the next
|
||||
- [ ] Existing CLAUDE.md is preserved — storkit appends its content or leaves it untouched
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Status command traffic light dots not coloured in Matrix"
|
||||
---
|
||||
|
||||
# Bug 430: Status command traffic light dots not coloured in Matrix
|
||||
|
||||
## Description
|
||||
|
||||
The traffic light dots in the status command use plain Unicode characters (○ ● ◑ ✗) which render without colour in Matrix. The HTML formatted_body should use data-mx-color to colour them green/yellow/red.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Send the status command to the bot in Matrix. Observe the dots are monochrome.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Dots render as plain monochrome Unicode characters.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Dots render in colour: green (● running), yellow (◑ throttled), red (✗ blocked), grey (○ idle). Use font tag with data-mx-color attribute for Matrix HTML formatted_body.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] HTML formatted_body uses <font data-mx-color="#colour">dot</font> for each traffic light state
|
||||
- [ ] Green (#00cc00) for running, yellow (#ffaa00) for throttled, red (#cc0000) for blocked, grey (#888888) for idle
|
||||
- [ ] Plain text fallback remains unchanged (Unicode dots for non-HTML transports)
|
||||
+24
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "QA agent reviews code changes against acceptance criteria"
|
||||
---
|
||||
|
||||
# Story 431: QA agent reviews code changes against acceptance criteria
|
||||
|
||||
## User Story
|
||||
|
||||
As a project owner, I want the QA agent to actually verify that the coder's implementation matches the story's acceptance criteria, so that incomplete or incorrect work is caught before merge.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] QA agent reads the story's acceptance criteria before reviewing code
|
||||
- [ ] QA agent reads the full diff against master to understand what changed
|
||||
- [ ] For each AC, QA agent verifies the code addresses it and explains how
|
||||
- [ ] QA agent flags incomplete implementations: todo!(), unimplemented!(), missing match arms, placeholder values
|
||||
- [ ] QA agent checks that new code has corresponding test coverage
|
||||
- [ ] QA agent produces a structured report: each AC with pass/fail and explanation
|
||||
- [ ] If any AC is not met, QA rejects the story with a clear reason so the coder can fix it
|
||||
- [ ] Deterministic gates (clippy, tests) still run as a prerequisite before the AC review
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Complete setup wizard with MCP tools and agent-driven file generation"
|
||||
agent: "coder-opus"
|
||||
---
|
||||
|
||||
# Story 432: Complete setup wizard with MCP tools and agent-driven file generation
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer running storkit init on a new project, I want the setup wizard to walk me through each step interactively — generating files, letting me review them, and confirming before moving on — so that my project is correctly configured without manual file editing.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] MCP tool wizard_status returns the current wizard state: which step is active, which are done/skipped/pending
|
||||
- [ ] MCP tool wizard_generate triggers the agent to read the codebase and generate content for the current step (CONTEXT.md, STACK.md, script/test, script/release, script/test_coverage)
|
||||
- [ ] MCP tool wizard_confirm confirms the current step and advances to the next
|
||||
- [ ] MCP tool wizard_skip skips the current step and advances to the next
|
||||
- [ ] MCP tool wizard_retry re-generates content for the current step if the user isn't happy with it
|
||||
- [ ] Bot command setup shows wizard progress and the current step with instructions
|
||||
- [ ] Bot command setup confirm / setup skip / setup retry drive the wizard from chat
|
||||
- [ ] Generated files are written to disk only after user confirmation, not during generation preview
|
||||
- [ ] The wizard works from Claude Code terminal via MCP tools without requiring the web UI or chat bot
|
||||
- [ ] Existing files (especially CLAUDE.md) are never overwritten — wizard appends or skips
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+24
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "Setup wizard interviews user on bare projects with no existing code"
|
||||
agent: coder-opus
|
||||
---
|
||||
|
||||
# Story 433: Setup wizard interviews user on bare projects with no existing code
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer starting a brand new project from an empty directory, I want the setup wizard to ask me what I'm building and what tech stack I plan to use, so that it can generate meaningful CONTEXT.md and STACK.md without any codebase to analyze.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] wizard_generate detects when the project directory has no source code files
|
||||
- [ ] On bare projects, the wizard asks the user what they want to build instead of trying to analyze code
|
||||
- [ ] Wizard asks about intended tech stack, frameworks, and language choices
|
||||
- [ ] Conversation continues until the user confirms the generated CONTEXT.md captures their intent
|
||||
- [ ] STACK.md is generated from the user's stated tech choices rather than from codebase detection
|
||||
- [ ] script/test and script/release are generated with appropriate stubs for the stated stack
|
||||
- [ ] The interview flow works via both MCP tools (Claude Code terminal) and bot commands (Matrix/WhatsApp/Slack)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "Wizard auto-checks completion on first conversation"
|
||||
---
|
||||
|
||||
# Story 434: Wizard auto-checks completion on first conversation
|
||||
|
||||
## User Story
|
||||
|
||||
As a developer opening Claude Code on a storkit project for the first time, I want the wizard to automatically check if setup is complete and prompt me through remaining steps, so I don't have to know to ask for it.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Scaffolded CLAUDE.md includes an IMPORTANT instruction telling Claude to call wizard_status on first conversation
|
||||
- [ ] If wizard is incomplete, Claude guides the user through remaining steps without being asked
|
||||
- [ ] If wizard is already complete, no wizard prompt appears — Claude behaves normally
|
||||
- [ ] Works on both existing projects with code and bare projects with no code
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: "strip_prefix_ci panics on multi-byte UTF-8 input"
|
||||
---
|
||||
|
||||
# Bug 437: strip_prefix_ci panics on multi-byte UTF-8 input
|
||||
|
||||
## Description
|
||||
|
||||
The `strip_prefix_ci` function in `server/src/chat/transport/matrix/assign.rs` slices the input string at `prefix.len()` bytes without checking that the offset is a valid UTF-8 char boundary. When the input message starts with multi-byte characters (e.g. `⏺` which is 3 bytes), the slice can land mid-character, causing a panic.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Send a Matrix message to the bot that starts with a multi-byte UTF-8 character (e.g. `⏺ storkit - wizard_confirm`) where the bot name byte length falls inside a multi-byte character.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Thread panics: `byte index 6 is not a char boundary; it is inside '⏺' (bytes 4..7)`
|
||||
|
||||
## Expected Result
|
||||
|
||||
The function should return `None` (no match) without panicking, since an ASCII bot name cannot match a slice containing multi-byte characters.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] strip_prefix_ci checks is_char_boundary before slicing
|
||||
- [ ] No panic when input contains multi-byte UTF-8 characters at the prefix boundary
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Slash command autocomplete in web UI text input"
|
||||
---
|
||||
|
||||
# Story 438: Slash command autocomplete in web UI text input
|
||||
|
||||
## User Story
|
||||
|
||||
As a user, I want to type `/` at the start of the text box and see a filtered list of available slash commands, so that I can discover and quickly invoke commands without memorizing them.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Typing `/` at position 0 in the ChatInput textarea shows a command picker overlay above the input
|
||||
- [ ] The overlay lists all slash commands with name and description
|
||||
- [ ] Typing further characters after `/` fuzzy-filters the list
|
||||
- [ ] Arrow keys navigate the list, Tab/Enter selects, Escape dismisses
|
||||
- [ ] Selecting a command inserts `/<command> ` into the input (with trailing space)
|
||||
- [ ] Command list is a single shared source of truth used by both the picker and HelpOverlay
|
||||
- [ ] The overlay follows the same visual style as the existing file picker (@-mention overlay)
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+25
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: "Deduplicate strip_prefix_ci / strip_bot_mention into chat::util"
|
||||
---
|
||||
|
||||
# Refactor 439: Deduplicate strip_prefix_ci / strip_bot_mention into chat::util
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Eight Matrix transport files (assign.rs, delete.rs, start.rs, rebuild.rs, reset.rs, rmtree.rs, htop.rs, timer.rs) each contain their own private copies of `strip_prefix_ci` and `strip_bot_mention`. The canonical versions already live in `chat::util` with the correct `is_char_boundary` guard. The duplicates should be removed and all call sites should use `util::strip_bot_mention` instead.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] All 8 private copies of strip_prefix_ci are removed
|
||||
- [ ] All 8 private copies of strip_bot_mention are removed
|
||||
- [ ] All call sites use chat::util::strip_bot_mention instead
|
||||
- [ ] Existing tests in util.rs continue to pass
|
||||
- [ ] No new copies of strip_prefix_ci exist outside util.rs
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+24
@@ -0,0 +1,24 @@
|
||||
---
|
||||
name: "Consolidate is_permission_approval into chat::util"
|
||||
---
|
||||
|
||||
# Refactor 440: Consolidate is_permission_approval into chat::util
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Three copies of `is_permission_approval` exist across Slack (`chat/transport/slack/commands.rs`), WhatsApp (`chat/transport/whatsapp/commands.rs`), and Matrix (`chat/transport/matrix/bot/messages.rs`). The Slack and WhatsApp versions are identical; the Matrix version is a superset that also strips @mentions. Consolidate into a single `pub` function in `chat::util` using the Matrix superset behavior, then delete the 3 private copies.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Single pub fn is_permission_approval exists in chat::util
|
||||
- [ ] All 3 private copies are removed
|
||||
- [ ] Matrix @mention-stripping behavior is preserved in the shared version
|
||||
- [ ] All call sites use the shared version
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+22
@@ -0,0 +1,22 @@
|
||||
---
|
||||
name: "Deduplicate get_project_root wrappers in io modules"
|
||||
---
|
||||
|
||||
# Refactor 441: Deduplicate get_project_root wrappers in io modules
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Both `io/shell.rs` and `io/search.rs` contain identical private one-liner wrappers around `state.get_project_root()`. Either inline the call at each usage site or create a single shared helper, then delete the duplicate wrappers.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] No duplicate private get_project_root wrappers in io/shell.rs and io/search.rs
|
||||
- [ ] All call sites use the canonical version or inline the call
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+23
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Deduplicate stage_display_name into shared module"
|
||||
---
|
||||
|
||||
# Refactor 442: Deduplicate stage_display_name into shared module
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
`stage_display_name` has a `pub fn` in `chat/transport/matrix/notifications.rs` and a private copy in `chat/transport/matrix/delete.rs` with slightly different casing ("backlog" vs "Backlog", "in-progress" vs "Current"). The delete.rs copy should use the canonical version from notifications.rs, adjusting the callsite if the casing difference matters.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Private stage_display_name in delete.rs is removed
|
||||
- [ ] delete.rs uses the pub version from notifications.rs
|
||||
- [ ] Display casing is consistent or callsite is adjusted to handle the difference
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,23 @@
|
||||
---
|
||||
name: "Extract shared find_story_name from commands"
|
||||
---
|
||||
|
||||
# Refactor 443: Extract shared find_story_name from commands
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
`find_story_name` is nearly identical in `chat/commands/overview.rs` and `chat/commands/unreleased.rs` (minor style diff: `let stages` vs `const STAGES`). Extract to a shared location (e.g. `chat::commands::util` or `io::stories`) and have both callers use it.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Single shared find_story_name function exists
|
||||
- [ ] Both overview.rs and unreleased.rs use the shared version
|
||||
- [ ] Private copies are removed
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+26
@@ -0,0 +1,26 @@
|
||||
---
|
||||
name: "Extract shared test helpers (test_ctx, write_story_file, make_api)"
|
||||
agent: "coder-opus"
|
||||
---
|
||||
|
||||
# Refactor 444: Extract shared test helpers (test_ctx, write_story_file, make_api)
|
||||
|
||||
## Current State
|
||||
|
||||
- TBD
|
||||
|
||||
## Desired State
|
||||
|
||||
Several test helper functions are copy-pasted across many test modules: `test_ctx` (10 copies across http/ modules), `write_story_file` (5 copies across chat/commands/ and matrix/), `make_api` (5 copies across http/ modules), `setup_project` (3 copies in io/). Extract each into a shared `#[cfg(test)]` utility module so test scaffolding is maintained in one place.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] test_ctx has a single shared definition used by all 10 http test modules
|
||||
- [ ] write_story_file has a single shared definition used by all 5 callers
|
||||
- [ ] make_api has a single shared definition used by all 5 callers
|
||||
- [ ] setup_project has a single shared definition used by all 3 callers
|
||||
- [ ] All private copies in individual test modules are removed
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+28
@@ -0,0 +1,28 @@
|
||||
---
|
||||
name: "Rate-limited mergemaster exits advance stories to done without merging"
|
||||
---
|
||||
|
||||
# Bug 445: Rate-limited mergemaster exits advance stories to done without merging
|
||||
|
||||
## Description
|
||||
|
||||
When the mergemaster agent is immediately rate-limited (zero turns, zero tool calls), it exits and run_server_owned_completion runs acceptance gates on the existing worktree. Since the coder already committed working code, the gates pass, and the pipeline advances the story to done — even though the mergemaster never executed run_squash_merge and the code was never cherry-picked onto master.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Observed on stories 439 and 442. All mergemaster log entries show: init → rate_limit_event → error result. Zero turns, zero MCP tool calls, duration under 350ms. Yet both stories ended up in done with no merge commit on master.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Stories advance to done with no code on master. The mergemaster never ran but the pipeline treated its exit as a successful completion.
|
||||
|
||||
## Expected Result
|
||||
|
||||
If the mergemaster exits without completing its work (no merge commit produced), the story should stay in the merge stage for retry, not advance to done.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] run_server_owned_completion must not run for mergemaster agents — mergemaster has its own completion path via start_merge_agent_work
|
||||
- [ ] If the mergemaster process exits without producing a SquashMergeResult, the story stays in merge stage
|
||||
- [ ] Rate-limited mergemaster exits are treated as transient failures, not gate-passing completions
|
||||
- [ ] Story remains eligible for retry when mergemaster fails due to rate limiting
|
||||
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "OAuth login button in web UI"
|
||||
---
|
||||
|
||||
# Story 446: OAuth login button in web UI
|
||||
|
||||
## User Story
|
||||
|
||||
As a user of the storkit web UI, I want a login button that triggers the Anthropic OAuth flow, so that I can authenticate without manually navigating to /oauth/authorize.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Web UI shows a login/authenticate button when no OAuth token is active
|
||||
- [ ] Clicking the button navigates to /oauth/authorize which starts the Anthropic OAuth flow
|
||||
- [ ] After successful OAuth callback, the UI updates to show the authenticated state
|
||||
- [ ] If already authenticated, the button is hidden or shows the current auth status
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Element tab-completion display name breaks bot command matching"
|
||||
---
|
||||
|
||||
# Bug 447: Element tab-completion display name breaks bot command matching
|
||||
|
||||
## Description
|
||||
|
||||
When a user tab-completes a bot mention in Element, the Matrix client inserts the display name (e.g. `timmy ⚡️`) rather than the user ID (`@timmy`). If the display name contains emoji or special characters, the `strip_bot_mention` function in chat::util may fail to match it against the bot name, causing commands like `ambient on` to not be recognized.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Set bot display_name to include emoji (e.g. `timmy ⚡️`) in bot.toml
2. In Element, tab-complete the bot name to get `timmy ⚡️`
3. Send `timmy ⚡️ ambient on`
4. The bot does not respond — command not matched
|
||||
|
||||
## Actual Result
|
||||
|
||||
Bot ignores the command. The display name with emoji doesn't match during strip_bot_mention, so the command text is not correctly extracted.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Bot should recognize commands regardless of whether the mention was tab-completed with the display name (including emoji) or typed manually as @localpart.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] strip_bot_mention handles display names containing emoji and special characters
|
||||
- [ ] strip_bot_mention handles Element's tab-completion format (display name followed by colon or comma)
|
||||
- [ ] Commands work whether the user types @timmy, timmy, or tab-completes timmy ⚡️
|
||||
+20
@@ -0,0 +1,20 @@
|
||||
---
|
||||
name: "Send OAuth login link via chat when credentials are missing"
|
||||
---
|
||||
|
||||
# Story 448: Send OAuth login link via chat when credentials are missing
|
||||
|
||||
## User Story
|
||||
|
||||
As a storkit user on Matrix or WhatsApp, I want the bot to send me a clickable OAuth authorize link when credentials are missing or expired, so that I can authenticate without terminal access or manually constructing the URL.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] When storkit detects missing or expired credentials during a chat interaction, it sends the user a clickable /oauth/authorize link
|
||||
- [ ] Works on Matrix and WhatsApp transports
|
||||
- [ ] After successful OAuth callback, the user can immediately resume chatting without restarting storkit
|
||||
- [ ] If credentials are already valid, no login link is sent
|
||||
|
||||
## Out of Scope
|
||||
|
||||
- TBD
|
||||
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "OAuth callback URL ignores --port CLI flag"
|
||||
---
|
||||
|
||||
# Bug 449: OAuth callback URL ignores --port CLI flag
|
||||
|
||||
## Description
|
||||
|
||||
OAuthState is initialized with `resolve_port()` (reads STORKIT_PORT env var, defaults to 3001) instead of the actual port the server is listening on. When the server is started with `--port 4000`, the OAuth callback URL is still generated as `http://localhost:3001/callback`, so the Anthropic redirect lands on the wrong server and the state parameter lookup fails with "Unknown or expired state parameter".
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Start storkit with `--port 4000` (without setting STORKIT_PORT env var). Click the OAuth login button in the web UI. Authenticate with Anthropic. The callback redirect goes to localhost:3001 instead of localhost:4000.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Callback hits port 3001 (or wrong port). If a different storkit is running there, it returns "Invalid State". If nothing is running there, the page fails to load.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Callback URL should use the actual server port (from --port CLI flag), so the redirect returns to the correct server instance.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] build_routes receives the actual listening port and passes it to OAuthState::new
|
||||
- [ ] OAuth callback URL matches the port the server is actually listening on
|
||||
- [ ] Works with --port flag, STORKIT_PORT env var, and default port
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Web UI silently swallows chat errors including OAuth login link"
|
||||
---
|
||||
|
||||
# Bug 450: Web UI silently swallows chat errors including OAuth login link
|
||||
|
||||
## Description
|
||||
|
||||
When the WebSocket chat returns an error (e.g. OAuth authentication failed with a login URL), the `onError` handler in `Chat.tsx` only logs to `console.error` and resets loading state. The error message is never displayed to the user. This means the OAuth login link from story #448 works on Matrix/WhatsApp but is invisible in the web UI.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Use the web UI with missing or expired OAuth credentials. Send any chat message. The server detects auth failure, attempts token refresh, fails, and returns an error containing a login URL over WebSocket.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Nothing visible happens. The error is logged to browser console only. The user sees no feedback.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The error message (including the clickable OAuth login link) should be displayed in the chat as an assistant message so the user can act on it.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] WebSocket error messages are displayed in the chat UI as assistant messages
|
||||
- [ ] OAuth login URL in the error is rendered as a clickable link
|
||||
- [ ] Consistent with how Matrix and WhatsApp transports display the same error
|
||||
+27
@@ -0,0 +1,27 @@
|
||||
---
|
||||
name: "Chat.test.tsx /help test expects removed overlay behavior"
|
||||
---
|
||||
|
||||
# Bug 451: Chat.test.tsx /help test expects removed overlay behavior
|
||||
|
||||
## Description
|
||||
|
||||
The test `AC: /help shows help overlay` in `Chat.test.tsx:1645` expects `/help` to show the `help-overlay` testid element. However, the `/help` intercept was removed from Chat.tsx and `help` was added to `knownCommands`, so `/help` now goes through `api.botCommand()` like other commands. The test needs to be updated to expect a `botCommand("help", ...)` call instead of the overlay. This is blocking gates on stories 449 and 450.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
Run `cd frontend && npm test` — the test `AC: /help shows help overlay` fails.
|
||||
|
||||
## Actual Result
|
||||
|
||||
Test fails: `findByTestId("help-overlay")` times out because the overlay is never rendered. `/help` is dispatched to the backend via `botCommand` instead.
|
||||
|
||||
## Expected Result
|
||||
|
||||
Test should pass by expecting `/help` to call `api.botCommand("help", ...)` and display the response in chat, consistent with the current code behavior.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] The /help test in Chat.test.tsx is updated to expect botCommand dispatch
|
||||
- [ ] All frontend tests pass
|
||||
- [ ] HelpOverlay component and showHelp state can be removed from Chat.tsx if no longer used
|
||||
+53
@@ -0,0 +1,53 @@
|
||||
---
|
||||
name: "Agent PTY crashes with fatal runtime error on restart after gate failure"
|
||||
---
|
||||
|
||||
# Bug 453: Agent PTY crashes with fatal runtime error on restart after gate failure
|
||||
|
||||
## Description
|
||||
|
||||
When an agent completes coding and the acceptance gates fail (e.g. a test failure), the pipeline restarts the agent on the same worktree. The restarted Claude Code PTY process crashes immediately with `fatal runtime error: assertion failed: output.write(&bytes).is_ok(), aborting`. The process exits in the same second it spawns (Session: None), burns through all 3 retries, and blocks the story.
|
||||
|
||||
Key observations:
|
||||
- The crash is **deterministic, not intermittent**: the first PTY spawn in a worktree always works; the second spawn (restart) always crashes
|
||||
- Running `claude -p "hello"` manually in the same worktree works fine (no crash) — the issue is specific to spawning via portable-pty
|
||||
- The worktree is clean (all changes committed) — the agent has nothing to do but fix the gate failure
|
||||
- The crash is inside the Claude Code binary, not storkit code
|
||||
- Observed on every story that needed a restart: 329, 400, 420, 438, 446, 449, 450
|
||||
- Stories that passed gates on the first run were never affected — they never triggered a second spawn
|
||||
|
||||
Likely cause: the reader thread spawned by `std::thread::spawn` in `pty.rs` (line 248-255) is never joined. After `run_agent_pty_streaming` returns, the pipeline immediately calls `start_agent` for the retry, but the old reader thread may still be running and holding a cloned PTY reader fd. The new PTY allocation could collide with the still-open fd from the previous session.
|
||||
|
||||
The root cause is unknown. It is NOT caused by zombie process accumulation (that is a separate issue in #452).
|
||||
|
||||
**Timeline:** The crash first appeared on 2026-03-21. Agent logs go back to 2026-02-23 with no instances before that date. Stories that hit it: 329 (Mar 21), 400 (Mar 26), 420 (Mar 28), 438 (Mar 28), 446 (Mar 30), 449 (Mar 31), 450 (Mar 31).
|
||||
|
||||
**Suspect commits around 2026-03-21:**
|
||||
- `4344081b` — storkit: merge 343_refactor_abstract_agent_runtime_to_support_non_claude_code_backends (refactored agent runtime layer)
|
||||
- `c4e45b28` — The great storkit name conversion
|
||||
- Story 359 — Docker security hardening (`cap_drop: ALL`, added back only `SETUID`/`SETGID`) — could affect PTY allocation
|
||||
- Story 329 — Docker/OrbStack evaluation spike (first crash was on this story's mergemaster)
|
||||
|
||||
**Ruled out:** Docker capability restrictions (cap_drop: ALL) — tested by temporarily removing all cap_drop/security_opt; crash still occurs.
|
||||
|
||||
**Evidence of stale PTY fd:** After all agents stopped, storkit (PID 7) was still holding an open fd to `/dev/pts/ptmx` (fd 46). This is a leaked PTY master fd from a previous agent session. The reader thread spawned by `std::thread::spawn` in `pty.rs` is never joined, so the cloned reader fd stays open in the storkit process after the agent exits.
|
||||
|
||||
Remaining areas to investigate: the unjoined reader thread leaking PTY fds, and whether the leaked fd from the first session interferes with the second PTY allocation.
|
||||
|
||||
## How to Reproduce
|
||||
|
||||
1. Have a story in current stage with committed code in its worktree.
2. Introduce a test failure that causes gates to fail.
3. The pipeline restarts the agent on the same worktree.
4. The Claude Code process crashes immediately on spawn.
|
||||
|
||||
## Actual Result
|
||||
|
||||
`fatal runtime error: assertion failed: output.write(&bytes).is_ok(), aborting` — process exits instantly (same second as spawn), Session: None. Burns through retries and blocks the story.
|
||||
|
||||
## Expected Result
|
||||
|
||||
The restarted agent should start successfully, receive the gate failure context, and be able to fix the issue.
|
||||
|
||||
## Acceptance Criteria
|
||||
|
||||
- [ ] Agent restart after gate failure successfully spawns a Claude Code PTY session
|
||||
- [ ] No fatal runtime error on PTY restart in a worktree with prior committed work
|
||||
- [ ] If Claude Code fails to start, the error is handled gracefully without burning retries
|
||||
@@ -1 +0,0 @@
|
||||
3001
|
||||
Generated
+199
-140
@@ -26,7 +26,7 @@ version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d122413f284cf2d62fb1b7db97e02edb8cda96d769b16e443a4f6195e35662b0"
|
||||
dependencies = [
|
||||
"crypto-common",
|
||||
"crypto-common 0.1.7",
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
@@ -38,7 +38,7 @@ checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cipher",
|
||||
"cpufeatures",
|
||||
"cpufeatures 0.2.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -209,9 +209,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-lc-sys"
|
||||
version = "0.39.0"
|
||||
version = "0.39.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
||||
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cmake",
|
||||
@@ -265,16 +265,16 @@ checksum = "a1d084b0137aaa901caf9f1e8b21daa6aa24d41cd806e111335541eff9683bd6"
|
||||
|
||||
[[package]]
|
||||
name = "blake3"
|
||||
version = "1.8.3"
|
||||
version = "1.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d"
|
||||
checksum = "4d2d5991425dfd0785aed03aedcf0b321d61975c9b5b3689c774a2610ae0b51e"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"arrayvec",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"constant_time_eq",
|
||||
"cpufeatures",
|
||||
"cpufeatures 0.3.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -286,6 +286,15 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "block-buffer"
|
||||
version = "0.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cdd35008169921d80bc60d3d0ab416eecb028c4cd653352907921d95084790be"
|
||||
dependencies = [
|
||||
"hybrid-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "block-padding"
|
||||
version = "0.3.3"
|
||||
@@ -349,9 +358,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.57"
|
||||
version = "1.2.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
||||
checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"jobserver",
|
||||
@@ -391,7 +400,7 @@ checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cipher",
|
||||
"cpufeatures",
|
||||
"cpufeatures 0.2.17",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -427,16 +436,16 @@ version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
|
||||
dependencies = [
|
||||
"crypto-common",
|
||||
"crypto-common 0.1.7",
|
||||
"inout",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.57"
|
||||
version = "0.1.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
||||
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
@@ -492,6 +501,12 @@ version = "0.9.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
|
||||
|
||||
[[package]]
|
||||
name = "const-oid"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a6ef517f0926dd24a1582492c791b6a4818a4d94e789a334894aa15b0d12f55c"
|
||||
|
||||
[[package]]
|
||||
name = "const_panic"
|
||||
version = "0.2.15"
|
||||
@@ -551,6 +566,15 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cpufeatures"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201"
|
||||
dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crc32fast"
|
||||
version = "1.5.0"
|
||||
@@ -596,6 +620,15 @@ dependencies = [
|
||||
"typenum",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77727bb15fa921304124b128af125e7e3b968275d1b108b379190264f4423710"
|
||||
dependencies = [
|
||||
"hybrid-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ctr"
|
||||
version = "0.9.2"
|
||||
@@ -612,9 +645,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"cpufeatures 0.2.17",
|
||||
"curve25519-dalek-derive",
|
||||
"digest",
|
||||
"digest 0.10.7",
|
||||
"fiat-crypto",
|
||||
"rustc_version",
|
||||
"serde",
|
||||
@@ -740,7 +773,7 @@ version = "0.7.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb"
|
||||
dependencies = [
|
||||
"const-oid",
|
||||
"const-oid 0.9.6",
|
||||
"zeroize",
|
||||
]
|
||||
|
||||
@@ -813,11 +846,22 @@ version = "0.10.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||
dependencies = [
|
||||
"block-buffer",
|
||||
"crypto-common",
|
||||
"block-buffer 0.10.4",
|
||||
"crypto-common 0.1.7",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.11.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4850db49bf08e663084f7fb5c87d202ef91a3907271aff24a94eb97ff039153c"
|
||||
dependencies = [
|
||||
"block-buffer 0.12.0",
|
||||
"const-oid 0.10.2",
|
||||
"crypto-common 0.2.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "displaydoc"
|
||||
version = "0.2.5"
|
||||
@@ -862,7 +906,7 @@ dependencies = [
|
||||
"ed25519",
|
||||
"rand_core 0.6.4",
|
||||
"serde",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"subtle",
|
||||
"zeroize",
|
||||
]
|
||||
@@ -1371,7 +1415,7 @@ version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
|
||||
dependencies = [
|
||||
"digest",
|
||||
"digest 0.10.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1452,10 +1496,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
|
||||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "1.8.1"
|
||||
name = "hybrid-array"
|
||||
version = "0.4.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
|
||||
checksum = "3944cf8cf766b40e2a1a333ee5e9b563f854d5fa49d6a8ca2764e97c6eddb214"
|
||||
dependencies = [
|
||||
"typenum",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca"
|
||||
dependencies = [
|
||||
"atomic-waker",
|
||||
"bytes",
|
||||
@@ -1468,7 +1521,6 @@ dependencies = [
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
"want",
|
||||
@@ -1542,12 +1594,13 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "icu_collections"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
|
||||
checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"potential_utf",
|
||||
"utf8_iter",
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
"zerovec",
|
||||
@@ -1555,9 +1608,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "icu_locale_core"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
|
||||
checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"litemap",
|
||||
@@ -1568,9 +1621,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "icu_normalizer"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
|
||||
checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_normalizer_data",
|
||||
@@ -1582,15 +1635,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "icu_normalizer_data"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
|
||||
checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38"
|
||||
|
||||
[[package]]
|
||||
name = "icu_properties"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
|
||||
checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_locale_core",
|
||||
@@ -1602,15 +1655,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "icu_properties_data"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
|
||||
checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14"
|
||||
|
||||
[[package]]
|
||||
name = "icu_provider"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
|
||||
checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"icu_locale_core",
|
||||
@@ -1774,9 +1827,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
|
||||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.11"
|
||||
version = "0.7.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d8e7418f59cc01c88316161279a7f665217ae316b388e58a0d10e29f54f1e5eb"
|
||||
checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
@@ -1862,10 +1915,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
||||
checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
@@ -1948,9 +2003,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.183"
|
||||
version = "0.2.184"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
|
||||
checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af"
|
||||
|
||||
[[package]]
|
||||
name = "libredox"
|
||||
@@ -1983,9 +2038,9 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
|
||||
|
||||
[[package]]
|
||||
name = "litemap"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
|
||||
checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
@@ -2156,7 +2211,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_html_form",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"tempfile",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
@@ -2249,7 +2304,7 @@ dependencies = [
|
||||
"ruma",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"subtle",
|
||||
"thiserror 2.0.18",
|
||||
"time",
|
||||
@@ -2284,7 +2339,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde-wasm-bindgen",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
"tracing",
|
||||
@@ -2338,7 +2393,7 @@ dependencies = [
|
||||
"rmp-serde",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"thiserror 2.0.18",
|
||||
"zeroize",
|
||||
]
|
||||
@@ -2428,9 +2483,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.1.1"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
||||
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
@@ -2559,9 +2614,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.2.0"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
|
||||
|
||||
[[package]]
|
||||
name = "num-traits"
|
||||
@@ -2597,7 +2652,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_path_to_error",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"thiserror 1.0.69",
|
||||
"url",
|
||||
]
|
||||
@@ -2655,7 +2710,7 @@ version = "0.12.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2"
|
||||
dependencies = [
|
||||
"digest",
|
||||
"digest 0.10.7",
|
||||
"hmac",
|
||||
]
|
||||
|
||||
@@ -2709,12 +2764,6 @@ version = "0.2.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pkcs8"
|
||||
version = "0.10.2"
|
||||
@@ -2840,7 +2889,7 @@ version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
|
||||
dependencies = [
|
||||
"cpufeatures",
|
||||
"cpufeatures 0.2.17",
|
||||
"opaque-debug",
|
||||
"universal-hash",
|
||||
]
|
||||
@@ -2868,9 +2917,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
version = "0.1.4"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
|
||||
checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564"
|
||||
dependencies = [
|
||||
"zerovec",
|
||||
]
|
||||
@@ -3502,7 +3551,7 @@ dependencies = [
|
||||
"rand 0.8.5",
|
||||
"ruma-common",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"thiserror 2.0.18",
|
||||
]
|
||||
|
||||
@@ -3550,15 +3599,15 @@ version = "8.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5bcdef0be6fe7f6fa333b1073c949729274b05f123a0ad7efcb8efd878e5c3b1"
|
||||
dependencies = [
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"walkdir",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc-hash"
|
||||
version = "2.1.1"
|
||||
version = "2.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
||||
checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe"
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
@@ -3824,9 +3873,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "1.1.0"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98"
|
||||
checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
@@ -3874,8 +3923,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
"cpufeatures 0.2.17",
|
||||
"digest 0.10.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3885,8 +3934,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
"cpufeatures 0.2.17",
|
||||
"digest 0.10.7",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "446ba717509524cb3f22f17ecc096f10f4822d76ab5c0b9822c5f9c284e825f4"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures 0.3.0",
|
||||
"digest 0.11.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -3941,9 +4001,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "simd-adler32"
|
||||
version = "0.3.8"
|
||||
version = "0.3.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
|
||||
checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214"
|
||||
|
||||
[[package]]
|
||||
name = "similar"
|
||||
@@ -4017,7 +4077,7 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
|
||||
|
||||
[[package]]
|
||||
name = "storkit"
|
||||
version = "0.6.0"
|
||||
version = "0.8.6"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
@@ -4028,6 +4088,7 @@ dependencies = [
|
||||
"futures",
|
||||
"homedir",
|
||||
"ignore",
|
||||
"libc",
|
||||
"libsqlite3-sys",
|
||||
"matrix-sdk",
|
||||
"mime_guess",
|
||||
@@ -4037,17 +4098,19 @@ dependencies = [
|
||||
"poem-openapi",
|
||||
"portable-pty",
|
||||
"pulldown-cmark",
|
||||
"regex",
|
||||
"reqwest 0.13.2",
|
||||
"rust-embed",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"serde_yaml",
|
||||
"sha2 0.11.0",
|
||||
"strip-ansi-escapes",
|
||||
"tempfile",
|
||||
"tokio",
|
||||
"tokio-tungstenite 0.29.0",
|
||||
"toml 1.1.0+spec-1.1.0",
|
||||
"toml 1.1.2+spec-1.1.0",
|
||||
"uuid",
|
||||
"wait-timeout",
|
||||
"walkdir",
|
||||
@@ -4268,9 +4331,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tinystr"
|
||||
version = "0.8.2"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
|
||||
checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"zerovec",
|
||||
@@ -4394,17 +4457,17 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "1.1.0+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8195ca05e4eb728f4ba94f3e3291661320af739c4e43779cbdfae82ab239fcc"
|
||||
checksum = "81f3d15e84cbcd896376e6730314d59fb5a87f31e4b038454184435cd57defee"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"serde_core",
|
||||
"serde_spanned",
|
||||
"toml_datetime 1.1.0+spec-1.1.0",
|
||||
"toml_datetime 1.1.1+spec-1.1.0",
|
||||
"toml_parser",
|
||||
"toml_writer",
|
||||
"winnow 1.0.0",
|
||||
"winnow 1.0.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4418,39 +4481,39 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "1.1.0+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f"
|
||||
checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.25.8+spec-1.1.0"
|
||||
version = "0.25.10+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "16bff38f1d86c47f9ff0647e6838d7bb362522bdf44006c7068c2b1e606f1f3c"
|
||||
checksum = "a82418ca169e235e6c399a84e395ab6debeb3bc90edc959bf0f48647c6a32d1b"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"toml_datetime 1.1.0+spec-1.1.0",
|
||||
"toml_datetime 1.1.1+spec-1.1.0",
|
||||
"toml_parser",
|
||||
"winnow 1.0.0",
|
||||
"winnow 1.0.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_parser"
|
||||
version = "1.1.0+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011"
|
||||
checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526"
|
||||
dependencies = [
|
||||
"winnow 1.0.0",
|
||||
"winnow 1.0.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_writer"
|
||||
version = "1.1.0+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed"
|
||||
checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db"
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
@@ -4606,9 +4669,9 @@ checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
|
||||
|
||||
[[package]]
|
||||
name = "typewit"
|
||||
version = "1.14.2"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71"
|
||||
checksum = "06fee3a8df48c50c55ad646a4e03b00a370da6fe1850ebf467a8d0165dfcafae"
|
||||
dependencies = [
|
||||
"typewit_proc_macros",
|
||||
]
|
||||
@@ -4661,9 +4724,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "unicode-segmentation"
|
||||
version = "1.13.1"
|
||||
version = "1.13.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "da36089a805484bcccfffe0739803392c8298778a2d2f09febf76fac5ad9025b"
|
||||
checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
@@ -4677,7 +4740,7 @@ version = "0.5.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc1de2c688dc15305988b563c3854064043356019f97a4b46276fe734c4f07ea"
|
||||
dependencies = [
|
||||
"crypto-common",
|
||||
"crypto-common 0.1.7",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
@@ -4726,9 +4789,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
||||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.22.0"
|
||||
version = "1.23.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
||||
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
|
||||
dependencies = [
|
||||
"getrandom 0.4.2",
|
||||
"js-sys",
|
||||
@@ -4777,7 +4840,7 @@ dependencies = [
|
||||
"serde",
|
||||
"serde_bytes",
|
||||
"serde_json",
|
||||
"sha2",
|
||||
"sha2 0.10.9",
|
||||
"subtle",
|
||||
"thiserror 2.0.18",
|
||||
"x25519-dalek",
|
||||
@@ -4847,9 +4910,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
||||
checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"once_cell",
|
||||
@@ -4860,23 +4923,19 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-futures"
|
||||
version = "0.4.64"
|
||||
version = "0.4.67"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
||||
checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
||||
checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
@@ -4884,9 +4943,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
||||
checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"proc-macro2",
|
||||
@@ -4897,9 +4956,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
||||
checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
@@ -4984,9 +5043,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
||||
checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
@@ -5465,9 +5524,9 @@ checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945"
|
||||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
|
||||
checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
@@ -5595,9 +5654,9 @@ checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
|
||||
|
||||
[[package]]
|
||||
name = "yoke"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
|
||||
checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca"
|
||||
dependencies = [
|
||||
"stable_deref_trait",
|
||||
"yoke-derive",
|
||||
@@ -5606,9 +5665,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "yoke-derive"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
|
||||
checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -5618,18 +5677,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87"
|
||||
checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
|
||||
checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -5638,18 +5697,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerofrom"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
|
||||
checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df"
|
||||
dependencies = [
|
||||
"zerofrom-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerofrom-derive"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
|
||||
checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -5679,9 +5738,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerotrie"
|
||||
version = "0.2.3"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
|
||||
checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"yoke",
|
||||
@@ -5690,9 +5749,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerovec"
|
||||
version = "0.11.5"
|
||||
version = "0.11.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
|
||||
checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239"
|
||||
dependencies = [
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
@@ -5701,9 +5760,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerovec-derive"
|
||||
version = "0.11.2"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
|
||||
checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
||||
@@ -21,6 +21,7 @@ rust-embed = "8"
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
serde_urlencoded = "0.7"
|
||||
sha2 = "0.11.0"
|
||||
serde_yaml = "0.9"
|
||||
strip-ansi-escapes = "0.2"
|
||||
tempfile = "3"
|
||||
@@ -38,3 +39,5 @@ matrix-sdk = { version = "0.16.0", default-features = false, features = [
|
||||
pulldown-cmark = { version = "0.13.3", default-features = false, features = [
|
||||
"html",
|
||||
] }
|
||||
regex = "1"
|
||||
libc = "0.2"
|
||||
|
||||
@@ -0,0 +1,674 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||
@@ -1,182 +1,90 @@
|
||||
# Story Kit
|
||||
# Storkit
|
||||
|
||||
This app runs as a single Rust web server binary that serves the Vite/React frontend and exposes APIs.
|
||||
The frontend lives in the `frontend/` directory.
|
||||
A story-driven development server that manages work items, spawns coding agents, and runs them through a pipeline from backlog to done. Ships as a single Rust binary with an embedded React frontend.
|
||||
|
||||
You can also run the frontend and backend separately in development (Vite dev server + Rust API).
|
||||
## Getting started with Claude Code
|
||||
|
||||
## Running it in development
|
||||
1. Download the storkit binary (or build from source — see below).
|
||||
|
||||
2. From your project directory, scaffold and start the server:
|
||||
|
||||
```bash
|
||||
# Build the frontend
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
|
||||
# In another terminal - run the server (serves embedded frontend/dist/)
|
||||
cargo run
|
||||
storkit init --port 3000
|
||||
```
|
||||
|
||||
## Production
|
||||
This creates a `.storkit/` directory with the pipeline structure, `project.toml`, and `.mcp.json`. The `.mcp.json` file lets Claude Code discover storkit's MCP tools automatically.
|
||||
|
||||
3. Open a Claude Code session in the same project directory. Claude will pick up the MCP tools from `.mcp.json`.
|
||||
|
||||
4. Tell Claude: "help me set up this project with storkit." Claude will walk you through the setup wizard — generating project context, tech stack docs, and test/release scripts. Review each step and confirm or ask to retry.
|
||||
|
||||
Once setup is complete, Claude can create stories, start agents, check status, and manage the full pipeline via MCP tools — no commands to memorize.
|
||||
|
||||
## Web UI
|
||||
|
||||
Storkit also ships an embedded React frontend. Once the server is running, open `http://localhost:3000` to see the pipeline board, agent status, and chat interface.
|
||||
|
||||
## Chat transports
|
||||
|
||||
Storkit can be controlled via bot commands in **Matrix**, **WhatsApp**, and **Slack**. Configure a transport in `.storkit/bot.toml` — see the example files:
|
||||
|
||||
- `.storkit/bot.toml.matrix.example`
|
||||
- `.storkit/bot.toml.whatsapp-meta.example`
|
||||
- `.storkit/bot.toml.whatsapp-twilio.example`
|
||||
- `.storkit/bot.toml.slack.example`
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- Rust (2024 edition)
|
||||
- Node.js and npm
|
||||
- Docker (for Linux cross-compilation and container deployment)
|
||||
- `cross` (`cargo install cross`) for Linux static builds
|
||||
|
||||
## Building for production
|
||||
|
||||
```bash
|
||||
# Build the release binary (also builds the frontend via build.rs)
|
||||
cargo build --release
|
||||
|
||||
# Run the server (serves embedded frontend/dist/)
|
||||
./target/release/storkit
|
||||
```
|
||||
|
||||
## Cross-Platform Distribution
|
||||
The release binary embeds the frontend via `rust-embed`. Output: `target/release/storkit`.
|
||||
|
||||
Story Kit ships as a **single self-contained binary** with the React frontend embedded via
|
||||
`rust-embed`. No Rust toolchain, Node.js, or extra libraries are required on the target machine.
|
||||
|
||||
### macOS
|
||||
For a static Linux binary (musl, zero dynamic deps):
|
||||
|
||||
```bash
|
||||
# Native build – no extra tools required beyond Rust + npm
|
||||
make build-macos
|
||||
# Output: target/release/storkit
|
||||
|
||||
# Verify only system frameworks are linked (Security.framework, libSystem.B.dylib, etc.)
|
||||
otool -L target/release/storkit
|
||||
cross build --release --target x86_64-unknown-linux-musl
|
||||
```
|
||||
|
||||
### Linux (static x86_64, zero dynamic deps)
|
||||
|
||||
The Linux build uses the `x86_64-unknown-linux-musl` target to produce a fully static binary.
|
||||
|
||||
**Prerequisites:**
|
||||
Docker:
|
||||
|
||||
```bash
|
||||
# Install cross – a Rust cross-compilation tool backed by Docker
|
||||
cargo install cross
|
||||
|
||||
# Ensure Docker Desktop (or Docker Engine) is running
|
||||
docker compose -f docker/docker-compose.yml build
|
||||
```
|
||||
|
||||
**Build:**
|
||||
## Running in development
|
||||
|
||||
```bash
|
||||
make build-linux
|
||||
# Output: target/x86_64-unknown-linux-musl/release/storkit
|
||||
# Run tests
|
||||
script/test
|
||||
|
||||
# Verify the binary is statically linked
|
||||
file target/x86_64-unknown-linux-musl/release/storkit
|
||||
# Expected: ELF 64-bit LSB executable, x86-64, statically linked
|
||||
# Run the server
|
||||
cargo run -- --port 3000
|
||||
|
||||
ldd target/x86_64-unknown-linux-musl/release/storkit
|
||||
# Expected: not a dynamic executable
|
||||
# In another terminal, run the frontend dev server
|
||||
cd frontend && npm install && npm run dev
|
||||
```
|
||||
|
||||
**Running on any Linux x86_64 machine:**
|
||||
|
||||
```bash
|
||||
# No Rust, Node, glibc, or any other library needed – just copy and run
|
||||
./storkit
|
||||
```
|
||||
Configuration lives in `.storkit/project.toml`. See `.storkit/bot.toml.*.example` for transport setup.
|
||||
|
||||
## Releasing
|
||||
|
||||
Builds both macOS and Linux binaries locally, tags the repo, and publishes a Gitea release with a changelog.
|
||||
|
||||
**One-time setup:**
|
||||
|
||||
1. Create a Gitea API token at `https://code.crashlabs.io/user/settings/applications` (needs repository read/write)
|
||||
2. Add it to `.env` (gitignored): `GITEA_TOKEN=your_token`
|
||||
3. Ensure `cross` is installed (`cargo install cross`) and Docker is running
|
||||
|
||||
**To release:**
|
||||
Requires a Gitea API token in `.env` (`GITEA_TOKEN=your_token`).
|
||||
|
||||
```bash
|
||||
make release V=0.2.0
|
||||
script/release 0.7.1
|
||||
```
|
||||
|
||||
This will:
|
||||
- Build macOS arm64 (native) and Linux amd64 (static musl via cross/Docker)
|
||||
- Generate a changelog from commits since the last tag
|
||||
- Tag the repo as `v0.2.0` and push the tag
|
||||
- Create a Gitea release with both binaries and the changelog attached
|
||||
This bumps version in `Cargo.toml` and `package.json`, builds macOS arm64 and Linux amd64 binaries, tags the repo, and publishes a Gitea release with changelog and binaries attached.
|
||||
|
||||
## Testing
|
||||
## License
|
||||
|
||||
### Frontend Tests
|
||||
|
||||
The frontend uses **Vitest** for unit tests and **Playwright** for end-to-end tests.
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
|
||||
# Run unit tests
|
||||
npm test
|
||||
|
||||
# Run end-to-end tests
|
||||
npm run test:e2e
|
||||
```
|
||||
|
||||
### Backend Tests
|
||||
|
||||
This project uses **nextest** for running tests and **cargo-llvm-cov** for code coverage.
|
||||
|
||||
### Install Tools
|
||||
|
||||
```bash
|
||||
cargo install cargo-nextest cargo-llvm-cov
|
||||
```
|
||||
|
||||
### Run Tests
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
cargo nextest run
|
||||
|
||||
# Run specific module
|
||||
cargo nextest run search_files
|
||||
|
||||
# Run with verbose output
|
||||
cargo nextest run --no-capture
|
||||
```
|
||||
|
||||
### Generate Coverage
|
||||
|
||||
```bash
|
||||
# HTML report (opens in browser)
|
||||
cargo llvm-cov nextest --html --open
|
||||
|
||||
# Terminal output
|
||||
cargo llvm-cov nextest
|
||||
|
||||
# LCOV format (for CI)
|
||||
cargo llvm-cov nextest --lcov --output-path lcov.info
|
||||
|
||||
# Clean coverage data
|
||||
cargo llvm-cov clean
|
||||
```
|
||||
|
||||
### Configuration
|
||||
|
||||
- **Nextest config**: `.config/nextest.toml`
|
||||
- **Coverage output**: `target/llvm-cov/html/index.html`
|
||||
|
||||
## Current Coverage (search_files module)
|
||||
|
||||
```
|
||||
Module: commands/search.rs
|
||||
├── Region Coverage: 75.36%
|
||||
├── Function Coverage: 69.05%
|
||||
└── Line Coverage: 72.55%
|
||||
```
|
||||
|
||||
### Available Test Profiles
|
||||
|
||||
```bash
|
||||
# Development (default)
|
||||
cargo nextest run
|
||||
|
||||
# CI with retries
|
||||
cargo nextest run --profile ci
|
||||
|
||||
# Coverage optimized
|
||||
cargo nextest run --profile coverage
|
||||
```
|
||||
GPL-3.0. See [LICENSE](LICENSE).
|
||||
|
||||
@@ -91,7 +91,6 @@ services:
|
||||
- no-new-privileges:true
|
||||
|
||||
# Resource limits – cap the whole system.
|
||||
# Adjust based on your machine. These are conservative defaults.
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
@@ -109,6 +108,11 @@ services:
|
||||
retries: 3
|
||||
start_period: 10s
|
||||
|
||||
# Use tini as PID 1 to reap zombie child processes.
|
||||
# Without this, grandchild processes (esbuild, cargo, etc.) spawned by
|
||||
# npm/cargo during worktree setup and gate checks become zombies.
|
||||
init: true
|
||||
|
||||
# Restart policy – restart on crash but not on manual stop
|
||||
restart: unless-stopped
|
||||
|
||||
|
||||
@@ -14,8 +14,12 @@ if [ -z "$GIT_USER_EMAIL" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Use GIT_AUTHOR/COMMITTER env vars instead of git config --global,
|
||||
# so the root filesystem can stay read-only (no ~/.gitconfig write).
|
||||
# Set git identity globally so it persists for all shells (docker exec, etc.),
|
||||
# not just the entrypoint process tree.
|
||||
git config --global user.name "$GIT_USER_NAME"
|
||||
git config --global user.email "$GIT_USER_EMAIL"
|
||||
|
||||
# Also set env vars for backwards compatibility.
|
||||
export GIT_AUTHOR_NAME="$GIT_USER_NAME"
|
||||
export GIT_COMMITTER_NAME="$GIT_USER_NAME"
|
||||
export GIT_AUTHOR_EMAIL="$GIT_USER_EMAIL"
|
||||
@@ -27,7 +31,7 @@ export GIT_COMMITTER_EMAIL="$GIT_USER_EMAIL"
|
||||
# binaries on a Linux container). Reinstall to get the right ones.
|
||||
if [ -d /workspace/frontend ] && [ -f /workspace/frontend/package.json ]; then
|
||||
echo "Installing frontend dependencies for container platform..."
|
||||
cd /workspace/frontend && npm install --prefer-offline 2>/dev/null || true
|
||||
cd /workspace/frontend && npm ci --prefer-offline 2>/dev/null || true
|
||||
cd /workspace
|
||||
fi
|
||||
|
||||
|
||||
Generated
+331
-3926
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "living-spec-standalone",
|
||||
"private": true,
|
||||
"version": "0.6.0",
|
||||
"version": "0.8.6",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
@@ -20,6 +20,9 @@
|
||||
"react-markdown": "^10.1.0",
|
||||
"react-syntax-highlighter": "^16.1.0"
|
||||
},
|
||||
"overrides": {
|
||||
"glob": "^13.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@biomejs/biome": "^2.4.2",
|
||||
"@playwright/test": "^1.47.2",
|
||||
@@ -31,9 +34,7 @@
|
||||
"@types/react-dom": "^19.1.6",
|
||||
"@vitejs/plugin-react": "^4.6.0",
|
||||
"@vitest/coverage-v8": "^2.1.9",
|
||||
"jest": "^29.0.0",
|
||||
"jsdom": "^28.1.0",
|
||||
"ts-jest": "^29.0.0",
|
||||
"typescript": "~5.8.3",
|
||||
"vite": "^5.4.21",
|
||||
"vitest": "^2.1.4"
|
||||
|
||||
+50
-4
@@ -127,13 +127,13 @@ details summary::-webkit-details-marker {
|
||||
display: none;
|
||||
}
|
||||
|
||||
details[open] summary span:first-child {
|
||||
transform: rotate(90deg);
|
||||
display: inline-block;
|
||||
details summary span:first-child {
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
details summary span:first-child {
|
||||
details[open] summary span:first-child {
|
||||
transform: rotate(90deg);
|
||||
display: inline-block;
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
|
||||
@@ -236,3 +236,49 @@ body,
|
||||
opacity: 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* ProjectPathInput dropdown theming */
|
||||
.path-dropdown {
|
||||
border: 1px solid #ddd;
|
||||
background: #fff;
|
||||
color: #0f0f0f;
|
||||
}
|
||||
|
||||
.path-dropdown-header {
|
||||
border-bottom: 1px solid #eee;
|
||||
background: #fafafa;
|
||||
}
|
||||
|
||||
.path-dropdown-item {
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
.path-dropdown-item--selected {
|
||||
background: #f0f0f0;
|
||||
}
|
||||
|
||||
.path-match-highlight {
|
||||
font-weight: 600;
|
||||
color: #222;
|
||||
}
|
||||
|
||||
@media (prefers-color-scheme: dark) {
|
||||
.path-dropdown {
|
||||
border-color: #555;
|
||||
background: #1e1e1e;
|
||||
color: #f6f6f6;
|
||||
}
|
||||
|
||||
.path-dropdown-header {
|
||||
border-bottom-color: #444;
|
||||
background: #2a2a2a;
|
||||
}
|
||||
|
||||
.path-dropdown-item--selected {
|
||||
background: #3a3a3a;
|
||||
}
|
||||
|
||||
.path-match-highlight {
|
||||
color: #f6f6f6;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ vi.mock("./api/client", () => {
|
||||
setModelPreference: vi.fn(),
|
||||
cancelChat: vi.fn(),
|
||||
setAnthropicApiKey: vi.fn(),
|
||||
getOAuthStatus: vi.fn(),
|
||||
};
|
||||
class ChatWebSocket {
|
||||
connect() {}
|
||||
@@ -65,6 +66,12 @@ describe("App", () => {
|
||||
mockedApi.getAnthropicApiKeyExists.mockResolvedValue(false);
|
||||
mockedApi.getAnthropicModels.mockResolvedValue([]);
|
||||
mockedApi.getModelPreference.mockResolvedValue(null);
|
||||
mockedApi.getOAuthStatus.mockResolvedValue({
|
||||
authenticated: false,
|
||||
expired: false,
|
||||
expires_at: 0,
|
||||
has_refresh_token: false,
|
||||
});
|
||||
});
|
||||
|
||||
async function renderApp() {
|
||||
|
||||
+28
-1
@@ -1,4 +1,5 @@
|
||||
import * as React from "react";
|
||||
import type { OAuthStatus } from "./api/client";
|
||||
import { api } from "./api/client";
|
||||
import { Chat } from "./components/Chat";
|
||||
import { SelectionScreen } from "./components/selection/SelectionScreen";
|
||||
@@ -14,6 +15,27 @@ function App() {
|
||||
const [isOpening, setIsOpening] = React.useState(false);
|
||||
const [knownProjects, setKnownProjects] = React.useState<string[]>([]);
|
||||
const [homeDir, setHomeDir] = React.useState<string | null>(null);
|
||||
const [oauthStatus, setOauthStatus] = React.useState<OAuthStatus | null>(
|
||||
null,
|
||||
);
|
||||
|
||||
React.useEffect(() => {
|
||||
let active = true;
|
||||
function fetchOAuthStatus() {
|
||||
api
|
||||
.getOAuthStatus()
|
||||
.then((s) => {
|
||||
if (active) setOauthStatus(s);
|
||||
})
|
||||
.catch(() => {});
|
||||
}
|
||||
fetchOAuthStatus();
|
||||
const intervalId = window.setInterval(fetchOAuthStatus, 5000);
|
||||
return () => {
|
||||
active = false;
|
||||
window.clearInterval(intervalId);
|
||||
};
|
||||
}, []);
|
||||
|
||||
React.useEffect(() => {
|
||||
api
|
||||
@@ -182,10 +204,15 @@ function App() {
|
||||
onCloseSuggestions={closeSuggestions}
|
||||
completionError={completionError}
|
||||
currentPartial={currentPartial}
|
||||
oauthStatus={oauthStatus}
|
||||
/>
|
||||
) : (
|
||||
<div className="workspace" style={{ height: "100%" }}>
|
||||
<Chat projectPath={projectPath} onCloseProject={closeProject} />
|
||||
<Chat
|
||||
projectPath={projectPath}
|
||||
onCloseProject={closeProject}
|
||||
oauthStatus={oauthStatus}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
|
||||
|
||||
@@ -21,6 +21,19 @@ export type WsRequest =
|
||||
config: ProviderConfig;
|
||||
};
|
||||
|
||||
export interface WizardStepInfo {
|
||||
step: string;
|
||||
label: string;
|
||||
status: string;
|
||||
content?: string;
|
||||
}
|
||||
|
||||
export interface WizardStateData {
|
||||
steps: WizardStepInfo[];
|
||||
current_step_index: number;
|
||||
completed: boolean;
|
||||
}
|
||||
|
||||
export interface AgentAssignment {
|
||||
agent_name: string;
|
||||
model: string | null;
|
||||
@@ -80,6 +93,13 @@ export type WsResponse =
|
||||
| { type: "pong" }
|
||||
/** Sent on connect when the project still needs onboarding (specs are placeholders). */
|
||||
| { type: "onboarding_status"; needs_onboarding: boolean }
|
||||
/** Sent on connect when a setup wizard is active. */
|
||||
| {
|
||||
type: "wizard_state";
|
||||
steps: WizardStepInfo[];
|
||||
current_step_index: number;
|
||||
completed: boolean;
|
||||
}
|
||||
/** Streaming thinking token from an extended-thinking block, separate from regular text. */
|
||||
| { type: "thinking_token"; content: string }
|
||||
/** Streaming token from a /btw side question response. */
|
||||
@@ -185,6 +205,13 @@ export interface CommandOutput {
|
||||
exit_code: number;
|
||||
}
|
||||
|
||||
export interface OAuthStatus {
|
||||
authenticated: boolean;
|
||||
expired: boolean;
|
||||
expires_at: number;
|
||||
has_refresh_token: boolean;
|
||||
}
|
||||
|
||||
declare const __STORKIT_PORT__: string;
|
||||
|
||||
const DEFAULT_API_BASE = "/api";
|
||||
@@ -382,6 +409,10 @@ export const api = {
|
||||
deleteStory(storyId: string) {
|
||||
return callMcpTool("delete_story", { story_id: storyId });
|
||||
},
|
||||
/** Fetch OAuth status from the server. */
|
||||
getOAuthStatus() {
|
||||
return requestJson<OAuthStatus>("/oauth/status", {}, "");
|
||||
},
|
||||
/** Execute a bot slash command without LLM invocation. Returns markdown response text. */
|
||||
botCommand(command: string, args: string, baseUrl?: string) {
|
||||
return requestJson<{ response: string }>(
|
||||
@@ -438,6 +469,7 @@ export class ChatWebSocket {
|
||||
private onAgentConfigChanged?: () => void;
|
||||
private onAgentStateChanged?: () => void;
|
||||
private onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
||||
private onWizardState?: (state: WizardStateData) => void;
|
||||
private onSideQuestionToken?: (content: string) => void;
|
||||
private onSideQuestionDone?: (response: string) => void;
|
||||
private onLogEntry?: (
|
||||
@@ -528,6 +560,12 @@ export class ChatWebSocket {
|
||||
if (data.type === "agent_state_changed") this.onAgentStateChanged?.();
|
||||
if (data.type === "onboarding_status")
|
||||
this.onOnboardingStatus?.(data.needs_onboarding);
|
||||
if (data.type === "wizard_state")
|
||||
this.onWizardState?.({
|
||||
steps: data.steps,
|
||||
current_step_index: data.current_step_index,
|
||||
completed: data.completed,
|
||||
});
|
||||
if (data.type === "side_question_token")
|
||||
this.onSideQuestionToken?.(data.content);
|
||||
if (data.type === "side_question_done")
|
||||
@@ -587,6 +625,7 @@ export class ChatWebSocket {
|
||||
onAgentConfigChanged?: () => void;
|
||||
onAgentStateChanged?: () => void;
|
||||
onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
||||
onWizardState?: (state: WizardStateData) => void;
|
||||
onSideQuestionToken?: (content: string) => void;
|
||||
onSideQuestionDone?: (response: string) => void;
|
||||
onLogEntry?: (timestamp: string, level: string, message: string) => void;
|
||||
@@ -606,6 +645,7 @@ export class ChatWebSocket {
|
||||
this.onAgentConfigChanged = handlers.onAgentConfigChanged;
|
||||
this.onAgentStateChanged = handlers.onAgentStateChanged;
|
||||
this.onOnboardingStatus = handlers.onOnboardingStatus;
|
||||
this.onWizardState = handlers.onWizardState;
|
||||
this.onSideQuestionToken = handlers.onSideQuestionToken;
|
||||
this.onSideQuestionDone = handlers.onSideQuestionDone;
|
||||
this.onLogEntry = handlers.onLogEntry;
|
||||
|
||||
@@ -1481,6 +1481,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/status" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
@@ -1551,6 +1555,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/git" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
@@ -1569,6 +1577,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/cost" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
@@ -1595,6 +1607,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/reset" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
@@ -1626,7 +1642,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
expect(mockedApi.botCommand).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("AC: /help shows help overlay", async () => {
|
||||
it("AC: /help calls botCommand and displays response", async () => {
|
||||
mockedApi.botCommand.mockResolvedValue({
|
||||
response: "Available commands: status, help, ...",
|
||||
});
|
||||
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||
|
||||
@@ -1634,13 +1653,18 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/help" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
|
||||
expect(await screen.findByTestId("help-overlay")).toBeInTheDocument();
|
||||
await waitFor(() => {
|
||||
expect(mockedApi.botCommand).toHaveBeenCalledWith("help", "", undefined);
|
||||
});
|
||||
expect(lastSendChatArgs).toBeNull();
|
||||
expect(mockedApi.botCommand).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it("AC: botCommand API error shows error message in chat", async () => {
|
||||
@@ -1652,6 +1676,10 @@ describe("Slash command handling (Story 374)", () => {
|
||||
await act(async () => {
|
||||
fireEvent.change(input, { target: { value: "/git" } });
|
||||
});
|
||||
// First Enter selects the command from the picker; second Enter submits it
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
await act(async () => {
|
||||
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||
});
|
||||
@@ -1661,3 +1689,42 @@ describe("Slash command handling (Story 374)", () => {
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
});
|
||||
|
||||
describe("Bug 450: WebSocket error messages displayed in chat", () => {
|
||||
beforeEach(() => {
|
||||
capturedWsHandlers = null;
|
||||
setupMocks();
|
||||
});
|
||||
|
||||
it("AC1: WebSocket error message is shown in chat as an assistant message", async () => {
|
||||
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||
|
||||
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||
|
||||
act(() => {
|
||||
capturedWsHandlers?.onError("Something went wrong on the server.");
|
||||
});
|
||||
|
||||
expect(
|
||||
await screen.findByText("Something went wrong on the server."),
|
||||
).toBeInTheDocument();
|
||||
});
|
||||
|
||||
it("AC2: OAuth login URL in WebSocket error is rendered as a clickable link", async () => {
|
||||
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||
|
||||
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||
|
||||
act(() => {
|
||||
capturedWsHandlers?.onError(
|
||||
"OAuth login required. Please visit: https://example.com/oauth/login",
|
||||
);
|
||||
});
|
||||
|
||||
const link = await screen.findByRole("link", {
|
||||
name: /https:\/\/example\.com\/oauth\/login/,
|
||||
});
|
||||
expect(link).toBeInTheDocument();
|
||||
expect(link).toHaveAttribute("href", "https://example.com/oauth/login");
|
||||
});
|
||||
});
|
||||
|
||||
@@ -4,7 +4,12 @@ import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
|
||||
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism";
|
||||
import type { AgentConfigInfo } from "../api/agents";
|
||||
import { agentsApi } from "../api/agents";
|
||||
import type { AnthropicModelInfo, PipelineState } from "../api/client";
|
||||
import type {
|
||||
AnthropicModelInfo,
|
||||
OAuthStatus,
|
||||
PipelineState,
|
||||
WizardStateData,
|
||||
} from "../api/client";
|
||||
import { api, ChatWebSocket } from "../api/client";
|
||||
import { useChatHistory } from "../hooks/useChatHistory";
|
||||
import type { Message, ProviderConfig } from "../types";
|
||||
@@ -17,6 +22,7 @@ import { LozengeFlyProvider } from "./LozengeFlyContext";
|
||||
import { MessageItem } from "./MessageItem";
|
||||
import type { LogEntry } from "./ServerLogsPanel";
|
||||
import { ServerLogsPanel } from "./ServerLogsPanel";
|
||||
import SetupWizard from "./SetupWizard";
|
||||
import { SideQuestionOverlay } from "./SideQuestionOverlay";
|
||||
import { StagePanel } from "./StagePanel";
|
||||
import { WorkItemDetailPanel } from "./WorkItemDetailPanel";
|
||||
@@ -159,9 +165,14 @@ const getContextWindowSize = (
|
||||
interface ChatProps {
|
||||
projectPath: string;
|
||||
onCloseProject: () => void;
|
||||
oauthStatus?: OAuthStatus | null;
|
||||
}
|
||||
|
||||
export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
export function Chat({
|
||||
projectPath,
|
||||
onCloseProject,
|
||||
oauthStatus = null,
|
||||
}: ChatProps) {
|
||||
const { messages, setMessages, clearMessages } = useChatHistory(projectPath);
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [model, setModel] = useState("claude-code-pty");
|
||||
@@ -217,6 +228,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
new Map(),
|
||||
);
|
||||
const [needsOnboarding, setNeedsOnboarding] = useState(false);
|
||||
const [wizardState, setWizardState] = useState<WizardStateData | null>(null);
|
||||
const onboardingTriggeredRef = useRef(false);
|
||||
const [selectedWorkItemId, setSelectedWorkItemId] = useState<string | null>(
|
||||
null,
|
||||
@@ -401,6 +413,14 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
console.error("WebSocket error:", message);
|
||||
setLoading(false);
|
||||
setActivityStatus(null);
|
||||
const markdownMessage = message.replace(
|
||||
/(https?:\/\/[^\s]+)/g,
|
||||
"[$1]($1)",
|
||||
);
|
||||
setMessages((prev) => [
|
||||
...prev,
|
||||
{ role: "assistant", content: markdownMessage },
|
||||
]);
|
||||
if (queuedMessagesRef.current.length > 0) {
|
||||
const batch = queuedMessagesRef.current.map((item) => item.text);
|
||||
queuedMessagesRef.current = [];
|
||||
@@ -466,6 +486,9 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
onOnboardingStatus: (onboarding: boolean) => {
|
||||
setNeedsOnboarding(onboarding);
|
||||
},
|
||||
onWizardState: (state: WizardStateData) => {
|
||||
setWizardState(state);
|
||||
},
|
||||
onSideQuestionToken: (content) => {
|
||||
setSideQuestion((prev) =>
|
||||
prev ? { ...prev, response: prev.response + content } : prev,
|
||||
@@ -606,12 +629,6 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
const sendMessage = async (messageText: string) => {
|
||||
if (!messageText.trim()) return;
|
||||
|
||||
// /help — show available slash commands overlay
|
||||
if (/^\/help\s*$/i.test(messageText)) {
|
||||
setShowHelp(true);
|
||||
return;
|
||||
}
|
||||
|
||||
// /reset — clear session and message history without LLM
|
||||
if (/^\/reset\s*$/i.test(messageText)) {
|
||||
setMessages([]);
|
||||
@@ -647,6 +664,15 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
"git",
|
||||
"overview",
|
||||
"rebuild",
|
||||
"loc",
|
||||
"help",
|
||||
"ambient",
|
||||
"htop",
|
||||
"rmtree",
|
||||
"timer",
|
||||
"unblock",
|
||||
"unreleased",
|
||||
"setup",
|
||||
]);
|
||||
|
||||
if (knownCommands.has(cmd)) {
|
||||
@@ -930,6 +956,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
enableTools={enableTools}
|
||||
onToggleTools={setEnableTools}
|
||||
wsConnected={wsConnected}
|
||||
oauthStatus={oauthStatus}
|
||||
/>
|
||||
|
||||
{/* Two-column content area */}
|
||||
@@ -977,65 +1004,79 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
||||
gap: "24px",
|
||||
}}
|
||||
>
|
||||
{needsOnboarding && messages.length === 0 && !loading && (
|
||||
<div
|
||||
data-testid="onboarding-welcome"
|
||||
style={{
|
||||
padding: "24px",
|
||||
borderRadius: "12px",
|
||||
background: "#1c2a1c",
|
||||
border: "1px solid #2d4a2d",
|
||||
marginBottom: "8px",
|
||||
}}
|
||||
>
|
||||
<h3
|
||||
{wizardState &&
|
||||
!wizardState.completed &&
|
||||
messages.length === 0 &&
|
||||
!loading && (
|
||||
<SetupWizard
|
||||
wizardState={wizardState}
|
||||
onWizardUpdate={setWizardState}
|
||||
sendMessage={sendMessage}
|
||||
/>
|
||||
)}
|
||||
{needsOnboarding &&
|
||||
!wizardState &&
|
||||
messages.length === 0 &&
|
||||
!loading && (
|
||||
<div
|
||||
data-testid="onboarding-welcome"
|
||||
style={{
|
||||
margin: "0 0 8px 0",
|
||||
color: "#a0d4a0",
|
||||
fontSize: "1.1rem",
|
||||
padding: "24px",
|
||||
borderRadius: "12px",
|
||||
background: "#1c2a1c",
|
||||
border: "1px solid #2d4a2d",
|
||||
marginBottom: "8px",
|
||||
}}
|
||||
>
|
||||
Welcome to Storkit
|
||||
</h3>
|
||||
<p
|
||||
style={{
|
||||
margin: "0 0 16px 0",
|
||||
color: "#ccc",
|
||||
lineHeight: 1.5,
|
||||
}}
|
||||
>
|
||||
This project needs to be set up before you can start writing
|
||||
stories. The agent will guide you through configuring your
|
||||
project goals and tech stack.
|
||||
</p>
|
||||
<button
|
||||
type="button"
|
||||
data-testid="onboarding-start-button"
|
||||
onClick={() => {
|
||||
if (onboardingTriggeredRef.current) return;
|
||||
onboardingTriggeredRef.current = true;
|
||||
setNeedsOnboarding(false);
|
||||
sendMessage(
|
||||
"I just created a new project. Help me set it up.",
|
||||
);
|
||||
}}
|
||||
style={{
|
||||
padding: "10px 20px",
|
||||
borderRadius: "8px",
|
||||
border: "none",
|
||||
backgroundColor: "#a0d4a0",
|
||||
color: "#1a1a1a",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.95rem",
|
||||
fontWeight: 600,
|
||||
}}
|
||||
>
|
||||
Start Project Setup
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
<h3
|
||||
style={{
|
||||
margin: "0 0 8px 0",
|
||||
color: "#a0d4a0",
|
||||
fontSize: "1.1rem",
|
||||
}}
|
||||
>
|
||||
Welcome to Storkit
|
||||
</h3>
|
||||
<p
|
||||
style={{
|
||||
margin: "0 0 16px 0",
|
||||
color: "#ccc",
|
||||
lineHeight: 1.5,
|
||||
}}
|
||||
>
|
||||
This project needs to be set up before you can start
|
||||
writing stories. The agent will guide you through
|
||||
configuring your project goals and tech stack.
|
||||
</p>
|
||||
<button
|
||||
type="button"
|
||||
data-testid="onboarding-start-button"
|
||||
onClick={() => {
|
||||
if (onboardingTriggeredRef.current) return;
|
||||
onboardingTriggeredRef.current = true;
|
||||
setNeedsOnboarding(false);
|
||||
sendMessage(
|
||||
"I just created a new project. Help me set it up.",
|
||||
);
|
||||
}}
|
||||
style={{
|
||||
padding: "10px 20px",
|
||||
borderRadius: "8px",
|
||||
border: "none",
|
||||
backgroundColor: "#a0d4a0",
|
||||
color: "#1a1a1a",
|
||||
cursor: "pointer",
|
||||
fontSize: "0.95rem",
|
||||
fontWeight: 600,
|
||||
}}
|
||||
>
|
||||
Start Project Setup
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
{messages.map((msg: Message, idx: number) => (
|
||||
<MessageItem
|
||||
// biome-ignore lint/suspicious/noArrayIndexKey: Message has no stable ID
|
||||
key={`msg-${idx}-${msg.role}-${msg.content.substring(0, 20)}`}
|
||||
msg={msg}
|
||||
/>
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import * as React from "react";
|
||||
import type { OAuthStatus } from "../api/client";
|
||||
import { api } from "../api/client";
|
||||
|
||||
const { useState, useEffect } = React;
|
||||
@@ -32,6 +33,7 @@ interface ChatHeaderProps {
|
||||
enableTools: boolean;
|
||||
onToggleTools: (enabled: boolean) => void;
|
||||
wsConnected: boolean;
|
||||
oauthStatus?: OAuthStatus | null;
|
||||
}
|
||||
|
||||
const getContextEmoji = (percentage: number): string => {
|
||||
@@ -55,6 +57,7 @@ export function ChatHeader({
|
||||
enableTools,
|
||||
onToggleTools,
|
||||
wsConnected,
|
||||
oauthStatus = null,
|
||||
}: ChatHeaderProps) {
|
||||
const hasModelOptions = availableModels.length > 0 || claudeModels.length > 0;
|
||||
const [showConfirm, setShowConfirm] = useState(false);
|
||||
@@ -340,6 +343,63 @@ export function ChatHeader({
|
||||
</div>
|
||||
|
||||
<div style={{ display: "flex", alignItems: "center", gap: "16px" }}>
|
||||
{oauthStatus !== null &&
|
||||
(!oauthStatus.authenticated || oauthStatus.expired) && (
|
||||
<button
|
||||
type="button"
|
||||
title="Authenticate with Claude via OAuth"
|
||||
onClick={() => {
|
||||
window.open(
|
||||
"/oauth/authorize",
|
||||
"_blank",
|
||||
"noopener,noreferrer",
|
||||
);
|
||||
}}
|
||||
style={{
|
||||
padding: "6px 12px",
|
||||
borderRadius: "99px",
|
||||
border: "none",
|
||||
fontSize: "0.85em",
|
||||
backgroundColor: "#1a3a5c",
|
||||
color: "#7eb8f7",
|
||||
cursor: "pointer",
|
||||
outline: "none",
|
||||
transition: "all 0.2s",
|
||||
whiteSpace: "nowrap",
|
||||
}}
|
||||
onMouseOver={(e) => {
|
||||
e.currentTarget.style.backgroundColor = "#234d7a";
|
||||
e.currentTarget.style.color = "#a8d4ff";
|
||||
}}
|
||||
onMouseOut={(e) => {
|
||||
e.currentTarget.style.backgroundColor = "#1a3a5c";
|
||||
e.currentTarget.style.color = "#7eb8f7";
|
||||
}}
|
||||
onFocus={(e) => {
|
||||
e.currentTarget.style.backgroundColor = "#234d7a";
|
||||
e.currentTarget.style.color = "#a8d4ff";
|
||||
}}
|
||||
onBlur={(e) => {
|
||||
e.currentTarget.style.backgroundColor = "#1a3a5c";
|
||||
e.currentTarget.style.color = "#7eb8f7";
|
||||
}}
|
||||
>
|
||||
{oauthStatus.expired ? "Re-authenticate" : "Login with Claude"}
|
||||
</button>
|
||||
)}
|
||||
{oauthStatus?.authenticated && !oauthStatus.expired && (
|
||||
<span
|
||||
title="Authenticated with Claude via OAuth"
|
||||
style={{
|
||||
fontSize: "0.8em",
|
||||
color: "#4caf50",
|
||||
whiteSpace: "nowrap",
|
||||
}}
|
||||
>
|
||||
✓ Claude
|
||||
</span>
|
||||
)}
|
||||
|
||||
<div
|
||||
style={{
|
||||
fontSize: "0.75em",
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import * as React from "react";
|
||||
import { api } from "../api/client";
|
||||
import { SLASH_COMMANDS, type SlashCommand } from "../slashCommands";
|
||||
|
||||
const {
|
||||
forwardRef,
|
||||
@@ -113,6 +114,83 @@ function FilePickerOverlay({
|
||||
);
|
||||
}
|
||||
|
||||
interface SlashCommandPickerOverlayProps {
|
||||
query: string;
|
||||
selectedIndex: number;
|
||||
onSelect: (cmd: SlashCommand) => void;
|
||||
}
|
||||
|
||||
function SlashCommandPickerOverlay({
|
||||
query,
|
||||
selectedIndex,
|
||||
onSelect,
|
||||
}: SlashCommandPickerOverlayProps) {
|
||||
const filtered = SLASH_COMMANDS.filter((cmd) =>
|
||||
fuzzyMatch(cmd.name, query),
|
||||
).sort((a, b) => fuzzyScore(a.name, query) - fuzzyScore(b.name, query));
|
||||
|
||||
if (filtered.length === 0) return null;
|
||||
|
||||
return (
|
||||
<div
|
||||
data-testid="slash-command-picker"
|
||||
style={{
|
||||
position: "absolute",
|
||||
bottom: "100%",
|
||||
left: 0,
|
||||
right: 0,
|
||||
background: "#1e1e1e",
|
||||
border: "1px solid #444",
|
||||
borderRadius: "8px",
|
||||
marginBottom: "6px",
|
||||
overflow: "hidden",
|
||||
zIndex: 100,
|
||||
boxShadow: "0 4px 16px rgba(0,0,0,0.4)",
|
||||
maxHeight: "300px",
|
||||
overflowY: "auto",
|
||||
}}
|
||||
>
|
||||
{filtered.map((cmd, idx) => (
|
||||
<button
|
||||
key={cmd.name}
|
||||
type="button"
|
||||
data-testid={`slash-command-item-${idx}`}
|
||||
onClick={() => onSelect(cmd)}
|
||||
style={{
|
||||
display: "flex",
|
||||
flexDirection: "column",
|
||||
width: "100%",
|
||||
textAlign: "left",
|
||||
padding: "10px 14px",
|
||||
background: idx === selectedIndex ? "#2d4a6e" : "transparent",
|
||||
border: "none",
|
||||
cursor: "pointer",
|
||||
gap: "2px",
|
||||
}}
|
||||
>
|
||||
<code
|
||||
style={{
|
||||
fontSize: "0.88rem",
|
||||
color: idx === selectedIndex ? "#ececec" : "#e0e0e0",
|
||||
fontFamily: "monospace",
|
||||
}}
|
||||
>
|
||||
{cmd.name}
|
||||
</code>
|
||||
<span
|
||||
style={{
|
||||
fontSize: "0.78rem",
|
||||
color: idx === selectedIndex ? "#b0c0d0" : "#888",
|
||||
}}
|
||||
>
|
||||
{cmd.description}
|
||||
</span>
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
function ChatInput(
|
||||
{ loading, queuedMessages, onSubmit, onCancel, onRemoveQueuedMessage },
|
||||
@@ -127,6 +205,10 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
const [pickerSelectedIndex, setPickerSelectedIndex] = useState(0);
|
||||
const [pickerAtStart, setPickerAtStart] = useState(0);
|
||||
|
||||
// Slash command picker state
|
||||
const [slashQuery, setSlashQuery] = useState<string | null>(null);
|
||||
const [slashSelectedIndex, setSlashSelectedIndex] = useState(0);
|
||||
|
||||
useImperativeHandle(ref, () => ({
|
||||
appendToInput(text: string) {
|
||||
setInput((prev) => (prev ? `${prev}\n${text}` : text));
|
||||
@@ -153,6 +235,31 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
setPickerSelectedIndex(0);
|
||||
}, []);
|
||||
|
||||
// Compute filtered slash commands for current query
|
||||
const filteredCommands =
|
||||
slashQuery !== null
|
||||
? SLASH_COMMANDS.filter((cmd) => fuzzyMatch(cmd.name, slashQuery)).sort(
|
||||
(a, b) =>
|
||||
fuzzyScore(a.name, slashQuery) - fuzzyScore(b.name, slashQuery),
|
||||
)
|
||||
: [];
|
||||
|
||||
const dismissSlashPicker = useCallback(() => {
|
||||
setSlashQuery(null);
|
||||
setSlashSelectedIndex(0);
|
||||
}, []);
|
||||
|
||||
const selectCommand = useCallback(
|
||||
(cmd: SlashCommand) => {
|
||||
// Extract base command (first word, e.g. "/assign" from "/assign <number> <model>")
|
||||
const baseCommand = cmd.name.split(" ")[0];
|
||||
setInput(`${baseCommand} `);
|
||||
dismissSlashPicker();
|
||||
setTimeout(() => inputRef.current?.focus(), 0);
|
||||
},
|
||||
[dismissSlashPicker],
|
||||
);
|
||||
|
||||
const selectFile = useCallback(
|
||||
(file: string) => {
|
||||
// Replace the @query portion with @file
|
||||
@@ -173,11 +280,20 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
setInput(val);
|
||||
|
||||
const cursor = e.target.selectionStart ?? val.length;
|
||||
// Find the last @ before the cursor that starts a reference token
|
||||
const textUpToCursor = val.slice(0, cursor);
|
||||
// Match @ not preceded by non-whitespace (i.e. @ at start or after space/newline)
|
||||
const atMatch = textUpToCursor.match(/(^|[\s\n])@([^\s@]*)$/);
|
||||
|
||||
// Slash command picker: triggered when input starts with / and no space yet
|
||||
const slashMatch = textUpToCursor.match(/^\/(\S*)$/);
|
||||
if (slashMatch) {
|
||||
setSlashQuery(slashMatch[1]);
|
||||
setSlashSelectedIndex(0);
|
||||
if (pickerQuery !== null) dismissPicker();
|
||||
return;
|
||||
}
|
||||
if (slashQuery !== null) dismissSlashPicker();
|
||||
|
||||
// File picker: triggered by @ at start or after whitespace
|
||||
const atMatch = textUpToCursor.match(/(^|[\s\n])@([^\s@]*)$/);
|
||||
if (atMatch) {
|
||||
const query = atMatch[2];
|
||||
const atPos = textUpToCursor.lastIndexOf("@");
|
||||
@@ -196,11 +312,50 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
if (pickerQuery !== null) dismissPicker();
|
||||
}
|
||||
},
|
||||
[projectFiles.length, pickerQuery, dismissPicker],
|
||||
[
|
||||
projectFiles.length,
|
||||
pickerQuery,
|
||||
dismissPicker,
|
||||
slashQuery,
|
||||
dismissSlashPicker,
|
||||
],
|
||||
);
|
||||
|
||||
const handleKeyDown = useCallback(
|
||||
(e: React.KeyboardEvent<HTMLTextAreaElement>) => {
|
||||
// Slash command picker navigation
|
||||
if (slashQuery !== null && filteredCommands.length > 0) {
|
||||
if (e.key === "ArrowDown") {
|
||||
e.preventDefault();
|
||||
setSlashSelectedIndex((i) =>
|
||||
Math.min(i + 1, filteredCommands.length - 1),
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (e.key === "ArrowUp") {
|
||||
e.preventDefault();
|
||||
setSlashSelectedIndex((i) => Math.max(i - 1, 0));
|
||||
return;
|
||||
}
|
||||
if (e.key === "Tab" || e.key === "Enter") {
|
||||
e.preventDefault();
|
||||
selectCommand(
|
||||
filteredCommands[slashSelectedIndex] ?? filteredCommands[0],
|
||||
);
|
||||
return;
|
||||
}
|
||||
if (e.key === "Escape") {
|
||||
e.preventDefault();
|
||||
dismissSlashPicker();
|
||||
return;
|
||||
}
|
||||
} else if (e.key === "Escape" && slashQuery !== null) {
|
||||
e.preventDefault();
|
||||
dismissSlashPicker();
|
||||
return;
|
||||
}
|
||||
|
||||
// File picker navigation
|
||||
if (pickerQuery !== null && filteredFiles.length > 0) {
|
||||
if (e.key === "ArrowDown") {
|
||||
e.preventDefault();
|
||||
@@ -236,6 +391,11 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
}
|
||||
},
|
||||
[
|
||||
slashQuery,
|
||||
filteredCommands,
|
||||
slashSelectedIndex,
|
||||
selectCommand,
|
||||
dismissSlashPicker,
|
||||
pickerQuery,
|
||||
filteredFiles,
|
||||
pickerSelectedIndex,
|
||||
@@ -249,6 +409,7 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
onSubmit(input);
|
||||
setInput("");
|
||||
dismissPicker();
|
||||
dismissSlashPicker();
|
||||
};
|
||||
|
||||
return (
|
||||
@@ -357,6 +518,13 @@ export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
|
||||
position: "relative",
|
||||
}}
|
||||
>
|
||||
{slashQuery !== null && (
|
||||
<SlashCommandPickerOverlay
|
||||
query={slashQuery}
|
||||
selectedIndex={slashSelectedIndex}
|
||||
onSelect={selectCommand}
|
||||
/>
|
||||
)}
|
||||
{pickerQuery !== null && (
|
||||
<FilePickerOverlay
|
||||
query={pickerQuery}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user