Compare commits
278 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| e45eab82f2 | |||
| 310ad365e6 | |||
| 0b50c66caa | |||
| 9feed0f882 | |||
| bb3301c5af | |||
| a2123274a5 | |||
| 3cbbc5387a | |||
| 4e828fbdd1 | |||
| 6d88595e0d | |||
| aa90646edf | |||
| 7235ab7c7c | |||
| a0326dae78 | |||
| 953fce2ca6 | |||
| 5035b84de5 | |||
| c2f477dde6 | |||
| b098c8ff9f | |||
| 7fea543f60 | |||
| f8bb23a6d4 | |||
| 0016841770 | |||
| 3639d64da6 | |||
| ebdcf18134 | |||
| d83f2ae4c1 | |||
| f6c0d35f11 | |||
| facbf51f05 | |||
| 847ebc292f | |||
| 065ca2bd8f | |||
| 34988855bc | |||
| 7fc788baea | |||
| 40575924b5 | |||
| 4f56fa6cbe | |||
| 52513b55ff | |||
| 1ae2fa9b9b | |||
| 6077f74dbd | |||
| 8ab2e19e98 | |||
| b44f3a33e3 | |||
| 57407aed51 | |||
| a29677b3c7 | |||
| 95df450fca | |||
| 6c6bc35785 | |||
| 7652bbba9c | |||
| efd89a26ac | |||
| 71d4746009 | |||
| 98b5475160 | |||
| 740f1b5e6e | |||
| c0bab1e671 | |||
| 306810e4d5 | |||
| 1193b7ac9a | |||
| 05db012aaf | |||
| bc3c852509 | |||
| 04051282da | |||
| 081b33a8a6 | |||
| cf5424f9a6 | |||
| 1ec9aaab8a | |||
| d6f82393f5 | |||
| f4ce0e017b | |||
| c0ea5f0cb8 | |||
| d375c4b1d3 | |||
| 4ea4be1462 | |||
| bc1c1cd2c9 | |||
| c1e4c40f31 | |||
| 203e8f22be | |||
| 665c036a56 | |||
| 73304f08ac | |||
| fe9fc69f96 | |||
| 3b0542cd41 | |||
| 102919e0b3 | |||
| d63aa0a3c2 | |||
| 7f7db57933 | |||
| 043791194f | |||
| 710f839c65 | |||
| b0e21abb6e | |||
| 6b71c07f5b | |||
| 9cff3c753d | |||
| 6acd7f5249 | |||
| 26f5b25f22 | |||
| 8bc0bd592e | |||
| 7c25aca39b | |||
| 5173bf4aef | |||
| 7f7f49d757 | |||
| e88b9bbc63 | |||
| db22ab2229 | |||
| c30ad79398 | |||
| 16853328fa | |||
| 8ac8cdba88 | |||
| c046edebda | |||
| eef9669c95 | |||
| a9cdd3a354 | |||
| b4eeb499e9 | |||
| fca46c3806 | |||
| 2510fe44bc | |||
| e152cf3cb8 | |||
| 7d3b256fff | |||
| f6d632139e | |||
| 204a99c2e7 | |||
| f28a03e42e | |||
| 26f4edadcc | |||
| fd58631e65 | |||
| f70399a28f | |||
| 02d08faaa2 | |||
| 2dc77479ad | |||
| c5761ae968 | |||
| 67754781ca | |||
| 3436507a21 | |||
| 93bc08574b | |||
| 3571511349 | |||
| 04214ca155 | |||
| 4d48df152c | |||
| e0a70a4c1c | |||
| bae50fbc5b | |||
| 8998dac593 | |||
| e6b300e70e | |||
| b22e2b9274 | |||
| 24b1aa6e7f | |||
| 85e37e03a8 | |||
| f22a2666b8 | |||
| 507889627a | |||
| c4cee72938 | |||
| 33cb363651 | |||
| cd3ded278d | |||
| b5bf75aa5a | |||
| f6b5b1b01a | |||
| 26d34245f9 | |||
| de54265c35 | |||
| a52d1e098f | |||
| 015fa48c32 | |||
| abc30c93d1 | |||
| cf2faa9bff | |||
| 92aa1ebccf | |||
| 877f69c897 | |||
| 710b604b7c | |||
| ab4ce2db92 | |||
| 61f6fd60a8 | |||
| e66149e07c | |||
| 108a697483 | |||
| 1a7f419ecf | |||
| 96b1ce373b | |||
| 58e41f7e0b | |||
| c9a2fa58eb | |||
| 64c0f190cf | |||
| fc443ed987 | |||
| 7939a19816 | |||
| 46b5087157 | |||
| a8d6524b56 | |||
| 61d63db84c | |||
| aa4ec8c779 | |||
| 3777042ad3 | |||
| feb340beba | |||
| 23369c514d | |||
| 832da16b6f | |||
| 131964cbc3 | |||
| 81db0504ed | |||
| 584a44a516 | |||
| c7c4a57533 | |||
| 0a67c28f8c | |||
| 6476492caa | |||
| faf8734ea8 | |||
| 862f0704be | |||
| d3df1586c6 | |||
| 8a1996e0e4 | |||
| 61f5a0c3be | |||
| d7bc785de1 | |||
| eaac665a9f | |||
| d702aa59c4 | |||
| 9df9a1454a | |||
| 47163d235c | |||
| a7342fc9d3 | |||
| 5dd8feb75c | |||
| f5024b2648 | |||
| 6521c83eec | |||
| 65e3643655 | |||
| fc95b57a78 | |||
| 7c1a970b13 | |||
| 64e2df20b7 | |||
| 90e3612fd3 | |||
| 962bfe37c6 | |||
| f05c6a42b0 | |||
| 077288e7b7 | |||
| 580ab1ce68 | |||
| 71a6c72614 | |||
| fae7b3be20 | |||
| 775b9ac7e3 | |||
| 5a87d55dd4 | |||
| 0457fbfecc | |||
| 13b16138b5 | |||
| 8249896449 | |||
| dbd932bf46 | |||
| eef49678ce | |||
| 58ee82c988 | |||
| 49ac23044a | |||
| 84a775be77 | |||
| 60c0c95f38 | |||
| a1a30bcc42 | |||
| 96ebd7ecb8 | |||
| 25c8b1ec25 | |||
| bcb7cfabee | |||
| d4dad1d556 | |||
| 195c7c51c4 | |||
| 968d973cff | |||
| 4394ab3fed | |||
| 11bbfca3da | |||
| a9aa88b655 | |||
| b62974dd88 | |||
| ac52a8bb4e | |||
| 18755aac96 | |||
| 5d37421f70 | |||
| 224d269971 | |||
| 6146a173f1 | |||
| 821345d266 | |||
| 0fa63e2de3 | |||
| d8cbec8268 | |||
| 618a2779ff | |||
| 721d12bcfe | |||
| df6d2db327 | |||
| 49285c1865 | |||
| 0c15be43b8 | |||
| 9408bd2cdf | |||
| a24e4c5c85 | |||
| c0133fe733 | |||
| 752c3904bf | |||
| bac53ac09a | |||
| b2ef2eca5f | |||
| fb05f71e76 | |||
| 438be196c9 | |||
| f1b4894d6e | |||
| bd281fd749 | |||
| 79edc28334 | |||
| 92c53704f0 | |||
| 7223fa2f10 | |||
| dedf951b17 | |||
| aad583defd | |||
| 88b02cf746 | |||
| 1a9833d820 | |||
| a904cda629 | |||
| c755c03f0e | |||
| a8630f3e1b | |||
| 9fb1bd5711 | |||
| 0b3ce0f33e | |||
| f4b7573f0a | |||
| bb801ba826 | |||
| 53634d638d | |||
| b50e7cff00 | |||
| 68973b0bb8 | |||
| 34bbf5a122 | |||
| ed3c5f9c95 | |||
| 59d1a2c069 | |||
| 52e73bfbea | |||
| 4e590401a5 | |||
| 6b6815325d | |||
| f874783b09 | |||
| 292f9cdfe2 | |||
| 1cce46d3fa | |||
| e85c06df19 | |||
| 8b85ca743e | |||
| 1a7b6c7342 | |||
| 4a94158ef2 | |||
| f10ea1ecf2 | |||
| 1a3b69301a | |||
| 6d3eab92fd | |||
| f6920a87ad | |||
| 5f9d903987 | |||
| ea916d27f4 | |||
| 970b9bcd9d | |||
| a5ee6890f5 | |||
| 41dc3292bb | |||
| 3766f8b464 | |||
| 0c85ecc85c | |||
| 2c29a4d2b8 | |||
| 454d694d24 | |||
| 96bedd70dc | |||
| fffdd5c5ea | |||
| 4805598932 | |||
| 3d55e2fcc6 | |||
| 96b31d1a48 | |||
| 11168fa426 | |||
| c2c2d65889 | |||
| 5c8c4b7ff3 | |||
| fbab93f493 | |||
| 78ff6d104e |
@@ -8,6 +8,7 @@
|
|||||||
# App specific (root-level; storkit subdirectory patterns live in .storkit/.gitignore)
|
# App specific (root-level; storkit subdirectory patterns live in .storkit/.gitignore)
|
||||||
store.json
|
store.json
|
||||||
.storkit_port
|
.storkit_port
|
||||||
|
.storkit/bot.toml.bak
|
||||||
|
|
||||||
# Rust stuff
|
# Rust stuff
|
||||||
target
|
target
|
||||||
|
|||||||
@@ -20,3 +20,6 @@ coverage/
|
|||||||
|
|
||||||
# Token usage log (generated at runtime, contains cost data)
|
# Token usage log (generated at runtime, contains cost data)
|
||||||
token_usage.jsonl
|
token_usage.jsonl
|
||||||
|
|
||||||
|
# Chat service logs
|
||||||
|
whatsapp_history.json
|
||||||
|
|||||||
+23
-1
@@ -228,7 +228,29 @@ If a user hands you this document and says "Apply this process to my project":
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 6. Code Quality
|
## 6. Chat Bot Configuration
|
||||||
|
|
||||||
|
Story Kit includes a chat bot that can be connected to one messaging platform at a time. The bot handles commands, LLM conversations, and pipeline notifications.
|
||||||
|
|
||||||
|
**Only one transport can be active at a time.** To configure the bot, copy the appropriate example file to `.storkit/bot.toml`:
|
||||||
|
|
||||||
|
| Transport | Example file | Webhook endpoint |
|
||||||
|
|-----------|-------------|-----------------|
|
||||||
|
| Matrix | `bot.toml.matrix.example` | *(uses Matrix sync, no webhook)* |
|
||||||
|
| WhatsApp (Meta Cloud API) | `bot.toml.whatsapp-meta.example` | `/webhook/whatsapp` |
|
||||||
|
| WhatsApp (Twilio) | `bot.toml.whatsapp-twilio.example` | `/webhook/whatsapp` |
|
||||||
|
| Slack | `bot.toml.slack.example` | `/webhook/slack` |
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cp .storkit/bot.toml.matrix.example .storkit/bot.toml
|
||||||
|
# Edit bot.toml with your credentials
|
||||||
|
```
|
||||||
|
|
||||||
|
The `bot.toml` file is gitignored (it contains secrets). The example files are checked in for reference.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 7. Code Quality
|
||||||
|
|
||||||
**MANDATORY:** Before completing Step 3 (Verification) of any story, you MUST run all applicable linters, formatters, and test suites and fix ALL errors and warnings. Zero tolerance for warnings or errors.
|
**MANDATORY:** Before completing Step 3 (Verification) of any story, you MUST run all applicable linters, formatters, and test suites and fix ALL errors and warnings. Zero tolerance for warnings or errors.
|
||||||
|
|
||||||
|
|||||||
@@ -1,61 +0,0 @@
|
|||||||
homeserver = "https://matrix.example.com"
|
|
||||||
username = "@botname:example.com"
|
|
||||||
password = "your-bot-password"
|
|
||||||
|
|
||||||
# List one or more rooms to listen in. Use a single-element list for one room.
|
|
||||||
room_ids = ["!roomid:example.com"]
|
|
||||||
|
|
||||||
# Optional: the deprecated single-room key is still accepted for backwards compat.
|
|
||||||
# room_id = "!roomid:example.com"
|
|
||||||
|
|
||||||
allowed_users = ["@youruser:example.com"]
|
|
||||||
enabled = false
|
|
||||||
|
|
||||||
# Maximum conversation turns to remember per room (default: 20).
|
|
||||||
# history_size = 20
|
|
||||||
|
|
||||||
# Rooms where the bot responds to all messages (not just addressed ones).
|
|
||||||
# This list is updated automatically when users toggle ambient mode at runtime.
|
|
||||||
# ambient_rooms = ["!roomid:example.com"]
|
|
||||||
|
|
||||||
# ── WhatsApp Business API ──────────────────────────────────────────────
|
|
||||||
# Set transport = "whatsapp" to use WhatsApp instead of Matrix.
|
|
||||||
# The webhook endpoint will be available at /webhook/whatsapp.
|
|
||||||
# You must configure this URL in the Meta Developer Dashboard.
|
|
||||||
#
|
|
||||||
# transport = "whatsapp"
|
|
||||||
# whatsapp_phone_number_id = "123456789012345"
|
|
||||||
# whatsapp_access_token = "EAAx..."
|
|
||||||
# whatsapp_verify_token = "my-secret-verify-token"
|
|
||||||
#
|
|
||||||
# ── 24-hour messaging window & notification templates ─────────────────
|
|
||||||
# WhatsApp only allows free-form text messages within 24 hours of the last
|
|
||||||
# inbound message from a user. For proactive pipeline notifications sent
|
|
||||||
# after the window expires, an approved Meta message template is used.
|
|
||||||
#
|
|
||||||
# Register the template in the Meta Business Manager:
|
|
||||||
# 1. Go to Business Settings → WhatsApp → Message Templates → Create.
|
|
||||||
# 2. Category: UTILITY
|
|
||||||
# 3. Template name: pipeline_notification (or your chosen name below)
|
|
||||||
# 4. Language: English (en_US)
|
|
||||||
# 5. Body text (example):
|
|
||||||
# Story *{{1}}* has moved to *{{2}}*.
|
|
||||||
# Where {{1}} = story name, {{2}} = pipeline stage.
|
|
||||||
# 6. Submit for review. Meta typically approves utility templates within
|
|
||||||
# minutes; transactional categories may take longer.
|
|
||||||
#
|
|
||||||
# Once approved, set the name below (default: "pipeline_notification"):
|
|
||||||
# whatsapp_notification_template = "pipeline_notification"
|
|
||||||
|
|
||||||
# ── Slack Bot API ─────────────────────────────────────────────────────
|
|
||||||
# Set transport = "slack" to use Slack instead of Matrix.
|
|
||||||
# The webhook endpoint will be available at /webhook/slack.
|
|
||||||
# Configure this URL in the Slack App → Event Subscriptions → Request URL.
|
|
||||||
#
|
|
||||||
# Required Slack App scopes: chat:write, chat:update
|
|
||||||
# Subscribe to bot events: message.channels, message.groups, message.im
|
|
||||||
#
|
|
||||||
# transport = "slack"
|
|
||||||
# slack_bot_token = "xoxb-..."
|
|
||||||
# slack_signing_secret = "your-signing-secret"
|
|
||||||
# slack_channel_ids = ["C01ABCDEF"]
|
|
||||||
@@ -0,0 +1,26 @@
|
|||||||
|
# Matrix Transport
|
||||||
|
# Copy this file to bot.toml and fill in your values.
|
||||||
|
# Only one transport can be active at a time.
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
transport = "matrix"
|
||||||
|
|
||||||
|
homeserver = "https://matrix.example.com"
|
||||||
|
username = "@botname:example.com"
|
||||||
|
password = "your-bot-password"
|
||||||
|
|
||||||
|
# List one or more rooms to listen in.
|
||||||
|
room_ids = ["!roomid:example.com"]
|
||||||
|
|
||||||
|
# Users allowed to interact with the bot (fail-closed: empty = nobody).
|
||||||
|
allowed_users = ["@youruser:example.com"]
|
||||||
|
|
||||||
|
# Bot display name in chat.
|
||||||
|
# display_name = "Assistant"
|
||||||
|
|
||||||
|
# Maximum conversation turns to remember per room (default: 20).
|
||||||
|
# history_size = 20
|
||||||
|
|
||||||
|
# Rooms where the bot responds to all messages (not just addressed ones).
|
||||||
|
# This list is updated automatically when users toggle ambient mode at runtime.
|
||||||
|
# ambient_rooms = ["!roomid:example.com"]
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
# Slack Transport
|
||||||
|
# Copy this file to bot.toml and fill in your values.
|
||||||
|
# Only one transport can be active at a time.
|
||||||
|
#
|
||||||
|
# Setup:
|
||||||
|
# 1. Create a Slack App at api.slack.com/apps
|
||||||
|
# 2. Add OAuth scopes: chat:write, chat:update
|
||||||
|
# 3. Subscribe to bot events: message.channels, message.groups, message.im
|
||||||
|
# 4. Install the app to your workspace
|
||||||
|
# 5. Set your webhook URL in Event Subscriptions: https://your-server/webhook/slack
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
transport = "slack"
|
||||||
|
|
||||||
|
slack_bot_token = "xoxb-..."
|
||||||
|
slack_signing_secret = "your-signing-secret"
|
||||||
|
slack_channel_ids = ["C01ABCDEF"]
|
||||||
|
|
||||||
|
# Bot display name (used in formatted messages).
|
||||||
|
# display_name = "Assistant"
|
||||||
|
|
||||||
|
# Maximum conversation turns to remember per channel (default: 20).
|
||||||
|
# history_size = 20
|
||||||
@@ -0,0 +1,33 @@
|
|||||||
|
# WhatsApp Transport (Meta Cloud API)
|
||||||
|
# Copy this file to bot.toml and fill in your values.
|
||||||
|
# Only one transport can be active at a time.
|
||||||
|
#
|
||||||
|
# Setup:
|
||||||
|
# 1. Create a Meta Business App at developers.facebook.com
|
||||||
|
# 2. Add the WhatsApp product
|
||||||
|
# 3. Copy your Phone Number ID and generate a permanent access token
|
||||||
|
# 4. Register your webhook URL: https://your-server/webhook/whatsapp
|
||||||
|
# 5. Set the verify token below to match what you configure in Meta's dashboard
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
transport = "whatsapp"
|
||||||
|
whatsapp_provider = "meta"
|
||||||
|
|
||||||
|
whatsapp_phone_number_id = "123456789012345"
|
||||||
|
whatsapp_access_token = "EAAx..."
|
||||||
|
whatsapp_verify_token = "my-secret-verify-token"
|
||||||
|
|
||||||
|
# Optional: name of the approved Meta message template used for notifications
|
||||||
|
# sent outside the 24-hour messaging window (default: "pipeline_notification").
|
||||||
|
# whatsapp_notification_template = "pipeline_notification"
|
||||||
|
|
||||||
|
# Bot display name (used in formatted messages).
|
||||||
|
# display_name = "Assistant"
|
||||||
|
|
||||||
|
# Maximum conversation turns to remember per user (default: 20).
|
||||||
|
# history_size = 20
|
||||||
|
|
||||||
|
# Optional: restrict which phone numbers can interact with the bot.
|
||||||
|
# When set, only listed numbers are processed; all others are silently ignored.
|
||||||
|
# When absent or empty, all numbers are allowed (open by default).
|
||||||
|
# whatsapp_allowed_phones = ["+15551234567", "+15559876543"]
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
# WhatsApp Transport (Twilio)
|
||||||
|
# Copy this file to bot.toml and fill in your values.
|
||||||
|
# Only one transport can be active at a time.
|
||||||
|
#
|
||||||
|
# Setup:
|
||||||
|
# 1. Sign up at twilio.com
|
||||||
|
# 2. Activate the WhatsApp sandbox (Messaging > Try it out > Send a WhatsApp message)
|
||||||
|
# 3. Send the sandbox join code from your WhatsApp to the sandbox number
|
||||||
|
# 4. Copy your Account SID, Auth Token, and sandbox number below
|
||||||
|
# 5. Set your webhook URL in the Twilio console: https://your-server/webhook/whatsapp
|
||||||
|
|
||||||
|
enabled = true
|
||||||
|
transport = "whatsapp"
|
||||||
|
whatsapp_provider = "twilio"
|
||||||
|
|
||||||
|
twilio_account_sid = "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
|
||||||
|
twilio_auth_token = "your_auth_token"
|
||||||
|
twilio_whatsapp_number = "+14155238886"
|
||||||
|
|
||||||
|
# Bot display name (used in formatted messages).
|
||||||
|
# display_name = "Assistant"
|
||||||
|
|
||||||
|
# Maximum conversation turns to remember per user (default: 20).
|
||||||
|
# history_size = 20
|
||||||
|
|
||||||
|
# Optional: restrict which phone numbers can interact with the bot.
|
||||||
|
# When set, only listed numbers are processed; all others are silently ignored.
|
||||||
|
# When absent or empty, all numbers are allowed (open by default).
|
||||||
|
# whatsapp_allowed_phones = ["+15551234567", "+15559876543"]
|
||||||
@@ -11,12 +11,17 @@ max_coders = 3
|
|||||||
|
|
||||||
# Maximum retries per story per pipeline stage before marking as blocked.
|
# Maximum retries per story per pipeline stage before marking as blocked.
|
||||||
# Set to 0 to disable retry limits.
|
# Set to 0 to disable retry limits.
|
||||||
max_retries = 2
|
max_retries = 3
|
||||||
|
|
||||||
|
# Base branch name for this project. Worktree creation, merges, and agent prompts
|
||||||
|
# use this value for {{base_branch}}. When not set, falls back to auto-detection
|
||||||
|
# (reads current HEAD branch).
|
||||||
|
base_branch = "master"
|
||||||
|
|
||||||
[[component]]
|
[[component]]
|
||||||
name = "frontend"
|
name = "frontend"
|
||||||
path = "frontend"
|
path = "frontend"
|
||||||
setup = ["npm install", "npm run build"]
|
setup = ["npm ci", "npm run build"]
|
||||||
teardown = []
|
teardown = []
|
||||||
|
|
||||||
[[component]]
|
[[component]]
|
||||||
|
|||||||
@@ -0,0 +1,43 @@
|
|||||||
|
# Example project.toml — copy to .storkit/project.toml and customise.
|
||||||
|
# This file is checked in; project.toml itself is gitignored (it may contain
|
||||||
|
# instance-specific settings).
|
||||||
|
|
||||||
|
# Project-wide default QA mode: "server", "agent", or "human".
|
||||||
|
# Per-story `qa` front matter overrides this setting.
|
||||||
|
default_qa = "server"
|
||||||
|
|
||||||
|
# Default model for coder agents. Only agents with this model are auto-assigned.
|
||||||
|
# Opus coders are reserved for explicit per-story `agent:` front matter requests.
|
||||||
|
default_coder_model = "sonnet"
|
||||||
|
|
||||||
|
# Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full.
|
||||||
|
max_coders = 3
|
||||||
|
|
||||||
|
# Maximum retries per story per pipeline stage before marking as blocked.
|
||||||
|
# Set to 0 to disable retry limits.
|
||||||
|
max_retries = 2
|
||||||
|
|
||||||
|
# Base branch name for this project. Worktree creation, merges, and agent prompts
|
||||||
|
# use this value for {{base_branch}}. When not set, falls back to auto-detection
|
||||||
|
# (reads current HEAD branch).
|
||||||
|
base_branch = "main"
|
||||||
|
|
||||||
|
[[component]]
|
||||||
|
name = "server"
|
||||||
|
path = "."
|
||||||
|
setup = ["cargo build"]
|
||||||
|
teardown = []
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "coder-1"
|
||||||
|
role = "Full-stack engineer"
|
||||||
|
stage = "coder"
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 50
|
||||||
|
max_budget_usd = 5.00
|
||||||
|
prompt = """
|
||||||
|
You are working in a git worktree on story {{story_id}}.
|
||||||
|
Read CLAUDE.md first, then .storkit/README.md to understand the dev process.
|
||||||
|
Run: cd "{{worktree_path}}" && git difftool {{base_branch}}...HEAD
|
||||||
|
Commit all your work before your process exits.
|
||||||
|
"""
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp webhook HMAC signature verification"
|
||||||
|
retry_count: 3
|
||||||
|
blocked: true
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 388: WhatsApp webhook HMAC signature verification
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a bot operator, I want incoming WhatsApp webhook requests to be cryptographically verified, so that forged requests from unauthorized sources are rejected.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Meta webhooks: validate X-Hub-Signature-256 HMAC-SHA256 header using the app secret before processing
|
||||||
|
- [ ] Twilio webhooks: validate request signature using the auth token before processing
|
||||||
|
- [ ] Requests with missing or invalid signatures are rejected with 403 Forbidden
|
||||||
|
- [ ] Verification is fail-closed: if signature checking is configured, unsigned requests are rejected
|
||||||
|
- [ ] Existing bot.toml config is extended with any needed secrets (e.g. Meta app_secret for HMAC verification)
|
||||||
|
- [ ] MUST use audited crypto crates (hmac, sha2, sha1, base64) — no hand-rolled cryptographic primitives
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+40
@@ -0,0 +1,40 @@
|
|||||||
|
---
|
||||||
|
name: "Fly.io Machines API integration for multi-tenant storkit SaaS"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Spike 408: Fly.io Machines API integration for multi-tenant storkit SaaS
|
||||||
|
|
||||||
|
## Question
|
||||||
|
|
||||||
|
Can we build a working Rust integration that creates and manages per-tenant Fly.io Machines, attaches volumes, injects Claude credentials, and proxies JWT-authenticated HTTP/WebSocket traffic to the right machine?
|
||||||
|
|
||||||
|
## Hypothesis
|
||||||
|
|
||||||
|
A thin Rust service using `reqwest` for the Machines API and `axum` for the reverse proxy is sufficient. No heavyweight orchestration framework needed.
|
||||||
|
|
||||||
|
## Prerequisites
|
||||||
|
|
||||||
|
- Fly.io account with API token (set `FLY_API_TOKEN` env var)
|
||||||
|
- Spike 407 findings reviewed
|
||||||
|
|
||||||
|
## Timebox
|
||||||
|
|
||||||
|
4 hours
|
||||||
|
|
||||||
|
## Investigation Plan
|
||||||
|
|
||||||
|
- [ ] Create a minimal Rust crate in `spikes/fly_machines/` — do not touch production code
|
||||||
|
- [ ] Implement machine lifecycle: create, start, stop, destroy via Fly Machines REST API using `reqwest`
|
||||||
|
- [ ] Test attaching a persistent volume to a machine and verify it persists across stop/start
|
||||||
|
- [ ] Test secret injection — pass a dummy `credentials.json` as a Fly secret and verify it's readable inside the machine
|
||||||
|
- [ ] Sketch the auth proxy: JWT validation → machine lookup → reverse proxy to machine's private IP; verify WebSocket proxying works
|
||||||
|
- [ ] Measure actual cold start time for a minimal storkit container image
|
||||||
|
- [ ] Document any API quirks, rate limits, or sharp edges discovered during testing
|
||||||
|
|
||||||
|
## Findings
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Recommendation
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "Multi-account OAuth token rotation on rate limit"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 411: Multi-account OAuth token rotation on rate limit
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a storkit user with multiple Claude Max subscriptions, I want the system to automatically rotate to a different account when one gets rate limited, so that agents and chat don't stall out waiting for limits to reset.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] OAuth login flow stores credentials per-account (keyed by email), not overwriting previous accounts
|
||||||
|
- [ ] GET /oauth/status returns all stored accounts and their status (active, rate-limited, expired)
|
||||||
|
- [ ] When the active account hits a rate limit, storkit automatically swaps to the next available account's refresh token, refreshes, and retries
|
||||||
|
- [ ] The bot sends a notification in Matrix/WhatsApp when it swaps accounts
|
||||||
|
- [ ] If all accounts are rate limited, the bot surfaces a clear message with the time until the earliest reset
|
||||||
|
- [ ] A new /oauth/authorize login adds to the account pool rather than replacing the current credentials
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+24
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
name: "Recheck bot command to re-run gates without restarting agent"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 412: Recheck bot command to re-run gates without restarting agent
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user, I want to send `recheck <number>` to the bot so that it re-runs acceptance gates on an existing worktree without spawning a new agent, so I can unblock stories that failed due to environment issues without wasting agent turns.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] recheck command is registered in chat/commands/mod.rs and appears in help output
|
||||||
|
- [ ] `recheck <number>` runs run_acceptance_gates on the story's existing worktree
|
||||||
|
- [ ] If gates pass, the story advances through the pipeline (same as if a coder completed successfully)
|
||||||
|
- [ ] If gates fail, the error output is returned to the user (not silently retried)
|
||||||
|
- [ ] If no worktree exists for the story, returns a clear error
|
||||||
|
- [ ] Does not spawn a new agent or increment retry_count
|
||||||
|
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||||
|
- [ ] Works from web UI slash commands
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+27
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
name: "Status command traffic light dots not coloured in Matrix"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 430: Status command traffic light dots not coloured in Matrix
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The traffic light dots in the status command use plain Unicode characters (○ ● ◑ ✗), which render without colour in Matrix. The HTML formatted_body should use data-mx-color to colour them green/yellow/red.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
Send the status command to the bot in Matrix. Observe the dots are monochrome.
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Dots render as plain monochrome Unicode characters.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
Dots render in colour: green (● running), yellow (◑ throttled), red (✗ blocked), grey (○ idle). Use font tag with data-mx-color attribute for Matrix HTML formatted_body.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] HTML formatted_body uses <font data-mx-color="#colour">dot</font> for each traffic light state
|
||||||
|
- [ ] Green (#00cc00) for running, yellow (#ffaa00) for throttled, red (#cc0000) for blocked, grey (#888888) for idle
|
||||||
|
- [ ] Plain text fallback remains unchanged (Unicode dots for non-HTML transports)
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
name: "Split matrix/bot.rs into focused modules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 417: Split matrix/bot.rs into focused modules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/chat/transport/matrix/bot.rs (1926 lines) into focused submodules.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] history.rs contains ConversationRole, ConversationEntry, RoomConversation, PersistedHistory, load_history, save_history and their unit tests
|
||||||
|
- [ ] context.rs contains BotContext struct
|
||||||
|
- [ ] run.rs contains run_bot main event loop
|
||||||
|
- [ ] messages.rs contains on_room_message, handle_message, format_user_prompt, is_permission_approval and their unit tests
|
||||||
|
- [ ] mentions.rs contains mentions_bot, contains_word, is_reply_to_bot and their unit tests
|
||||||
|
- [ ] verification.rs contains check_sender_verified, on_to_device_verification_request, handle_sas_verification and their unit tests
|
||||||
|
- [ ] format.rs contains markdown_to_html, format_startup_announcement and their unit tests
|
||||||
|
- [ ] mod.rs re-exports all public types
|
||||||
|
- [ ] Unit tests live in their respective module files
|
||||||
|
- [ ] No public API changes — all existing imports continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: "Split pool/auto_assign.rs into submodules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 418: Split pool/auto_assign.rs into submodules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/agents/pool/auto_assign.rs (1813 lines) into focused submodules.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] auto_assign.rs contains auto_assign_available_work and its unit tests
|
||||||
|
- [ ] reconcile.rs contains reconcile_on_startup and its unit tests
|
||||||
|
- [ ] watchdog.rs contains run_watchdog_once, spawn_watchdog, check_orphaned_agents and their unit tests
|
||||||
|
- [ ] scan.rs contains scan_stage_items, is_story_assigned_for_stage, count_active_agents_for_stage, find_free_agent_for_stage, is_agent_free and their unit tests
|
||||||
|
- [ ] story_checks.rs contains read_story_front_matter_agent, has_review_hold, is_story_blocked, has_merge_failure and their unit tests
|
||||||
|
- [ ] mod.rs wires the submodules and re-exports all public items
|
||||||
|
- [ ] Unit tests live in their respective module files
|
||||||
|
- [ ] No public API changes — all existing imports continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+29
@@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
name: "Matrix bot crashes on transient network error instead of retrying"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 419: Matrix bot crashes on transient network error instead of retrying
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The Matrix bot treats a transient sync error as fatal and stops entirely. A single failed HTTP request to the homeserver kills the bot, requiring a full server rebuild to recover.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Run storkit with Matrix bot enabled
2. Homeserver becomes temporarily unreachable (network blip, DNS hiccup, server restart)
3. Bot hits sync error and crashes
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Bot logs "Fatal error: Matrix sync error: error sending request for url (...)" and stops responding. No retry, no recovery.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
Bot logs a warning, backs off with exponential delay, and retries the sync. Only crash on unrecoverable errors (invalid credentials, banned, etc).
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Transient network errors (connection refused, timeout, DNS failure) trigger a retry with exponential backoff
|
||||||
|
- [ ] Bot logs a warning on each failed retry attempt
|
||||||
|
- [ ] Bot resumes normal operation once the homeserver is reachable again
|
||||||
|
- [ ] Unrecoverable errors (401, 403) still cause a clean shutdown with a clear error message
|
||||||
|
- [ ] Bot sends a notification after recovering from a network outage
|
||||||
+23
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "loc for a specified file — bot command and web UI slash command"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 420: loc for a specified file — bot command and web UI slash command
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a developer, I want to send `loc <filepath>` to the bot or use it as a slash command in the web UI to see the line count for a specific file, so I can quickly check how large a file is without leaving my workflow.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] loc <filepath> returns the line count for the specified file
|
||||||
|
- [ ] Relative paths are resolved against the project root
|
||||||
|
- [ ] If the file does not exist, returns a clear error
|
||||||
|
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||||
|
- [ ] Works as a slash command in the web UI
|
||||||
|
- [ ] loc with no argument retains existing behavior (top files by line count)
|
||||||
|
- [ ] Exposed as an MCP tool so agents can query file line counts programmatically
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
name: "Timer command for deferred agent start"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 421: Timer command for deferred agent start
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a ..., I want ..., so that ...
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Bot command `timer <story_id> <HH:MM>` schedules a one-shot deferred start for the given story at the next occurrence of that time (server-local timezone)
|
||||||
|
- [ ] Bot command `timer list` shows all pending timers with story ID and scheduled time
|
||||||
|
- [ ] Bot command `timer cancel <story_id>` removes the pending timer for that story
|
||||||
|
- [ ] Timers are persisted to .storkit/timers.json so they survive server restarts
|
||||||
|
- [ ] A 30s tick loop (tokio task, same pattern as watchdog) checks for due timers and calls start_agent when triggered
|
||||||
|
- [ ] When a timer fires, the story must already be in current — timer does not move stories between stages
|
||||||
|
- [ ] Fired timers are removed after execution (one-shot, not recurring)
|
||||||
|
- [ ] Multiple timers for the same time are supported and respect agent slot contention via auto-assign
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "Unblock command to reset blocked stories"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 422: Unblock command to reset blocked stories
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a ..., I want ..., so that ...
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Bot command `unblock <story_id>` clears blocked flag and resets retry_count to 0 on the story front matter
|
||||||
|
- [ ] Replies with confirmation including story ID and name
|
||||||
|
- [ ] Returns clear error if story is not found or not blocked
|
||||||
|
- [ ] Works from all transports (Matrix, WhatsApp, Slack)
|
||||||
|
- [ ] Exposed as an MCP tool so agents can unblock stories programmatically
|
||||||
|
- [ ] Works as a slash command in the web UI
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+22
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "Auto-schedule timer on rate limit to resume after reset"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 423: Auto-schedule timer on rate limit to resume after reset
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a ..., I want ..., so that ...
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] When a rate_limit_event with a hard block (not just allowed_warning) is received from the PTY stream, parse the reset time from rate_limit_info
|
||||||
|
- [ ] Automatically create a timer (via TimerStore from story 421) for the blocked story at the parsed reset time
|
||||||
|
- [ ] If a timer already exists for that story, update it to the later reset time rather than creating a duplicate
|
||||||
|
- [ ] Log the auto-scheduled timer with story ID, agent name, and scheduled resume time
|
||||||
|
- [ ] Notify chat transports that the story was rate-limited and will auto-resume at the scheduled time
|
||||||
|
- [ ] When the timer fires and restarts the agent, the existing worktree and committed work are preserved
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+23
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "Rate limit traffic light status and hard block alerts"
|
||||||
|
agent: coder-opus
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 424: Rate limit traffic light status and hard block alerts
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a ..., I want ..., so that ...
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Remove repetitive per-message throttle warnings (allowed_warning) from chat transports entirely
|
||||||
|
- [ ] Pipeline status messages show a coloured dot next to each work item: green for running normally, yellow for throttled, red for hard blocked, white/grey for idle/no agent
|
||||||
|
- [ ] Hard block events (429 / rate_limit_exceeded) still send an individual chat notification with a red icon, including the reset time
|
||||||
|
- [ ] Throttle and block state tracked per-agent so the status dot updates in real time
|
||||||
|
- [ ] Server-side logging of throttle warnings is preserved for debugging
|
||||||
|
- [ ] Traffic light dots in status report should be small/compact, not large emoji
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: "Chat notification when a story blocks with reason"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 425: Chat notification when a story blocks with reason
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a project owner monitoring agent progress via chat, I want to receive a notification when a story gets blocked, including the reason, so that I can decide whether to unblock it or investigate the failure.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] When a story transitions to blocked state, send a chat notification to all configured transports
|
||||||
|
- [ ] Notification includes the story ID, story name, and the reason for blocking (e.g. gate failure output, max retries exceeded, empty diff)
|
||||||
|
- [ ] Notification uses a red or warning icon to distinguish from normal status messages
|
||||||
|
- [ ] Works across Matrix, WhatsApp, and Slack transports
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+77
@@ -0,0 +1,77 @@
|
|||||||
|
---
|
||||||
|
name: "Mergemaster pipeline marks story done without verifying code landed on master"
|
||||||
|
retry_count: 1
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 426: Mergemaster pipeline marks story done without verifying code landed on master
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The mergemaster pipeline can mark a story as done even when the feature code never makes it to master. The cherry-pick step in merge.rs may fail or be skipped, but the pipeline still advances the story to done via the filesystem watcher. There is no post-merge verification that the code actually exists on master before marking done.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
Observed on stories 422 and 403. For 422: mergemaster created merge-queue branch, resolved 2 conflicts in chat/commands/mod.rs and http/mcp/mod.rs, passed quality gates, created merge-queue commit cb2ef6b (4 files, 333 insertions including unblock.rs). But the done commit on master (05db012) only moves the story file — zero code changes. There is no 'storkit: merge 422' commit on master at all. The feature branch (db3157f) still has the code but it was never cherry-picked onto master.
|
||||||
|
|
||||||
|
## Manual Merge Notes
|
||||||
|
|
||||||
|
When manually cherry-picking 422 onto master, two conflicts arose:
|
||||||
|
|
||||||
|
1. `server/src/chat/commands/mod.rs` — both 421 (timer) and 422 (unblock) added entries to the same BotCommand registry. Resolution: keep both.
|
||||||
|
2. `server/src/http/mcp/mod.rs` — 420 (loc_file) and 422 (unblock) both bumped the tool count assertion from 49→50. Resolution: keep loc_file assertion, bump count to 51.
|
||||||
|
|
||||||
|
Additionally, the cherry-pick could not proceed at all because master was on the `merge-queue/424` branch with 3 unresolved files (notifications.rs, ws.rs, watcher.rs). A concurrent in-progress merge left the working tree dirty, which likely caused the original cherry-pick to fail silently. This suggests a race condition: the filesystem watcher commits (story file moves) can leave master in a state where the cherry-pick step in merge.rs fails.
|
||||||
|
|
||||||
|
## Full Audit of Done Stories (2026-03-28)
|
||||||
|
|
||||||
|
Audited all 10 stories in `5_done/` to check whether their code actually landed on master:
|
||||||
|
|
||||||
|
| Story | Merge Commit | Code on Master |
|
||||||
|
|-------|-------------|----------------|
|
||||||
|
| 417 — Split matrix/bot.rs | `665c036` (9 files, +1973/-1926) | YES |
|
||||||
|
| 418 — Split pool/auto_assign.rs | `d375c4b` (7 files, +1901/-1813) | YES |
|
||||||
|
| 419 — Matrix bot network error | `1193b7a` (1 file, +121/-3) | YES |
|
||||||
|
| 420 — loc file command | `d6f8239` (5 files, +112/-32) | YES |
|
||||||
|
| 421 — Timer command | `cf5424f` (7 files, +836) | YES |
|
||||||
|
| 422 — Unblock command | `6c6bc35` (4 files, +336) — manual cherry-pick | YES |
|
||||||
|
| 423 — Auto-schedule timer on rate limit | `b44f3a3` + `8ab2e19` (6 files, +375/-8) — manual cherry-pick | YES |
|
||||||
|
| **424 — Rate limit traffic light** | **None** | **NO — moved back to backlog for redo** |
|
||||||
|
| 425 — Chat notification on story block | `98b5475` (5 files, +184/-15) | YES |
|
||||||
|
| **427 — Text normalization for line breaks** | **None** | **NO — phantom done, code never landed** |
|
||||||
|
|
||||||
|
**4 out of 10 stories (422, 423, 424, 427) had broken merges.** 422 and 423 were fixed via manual cherry-pick. 424 was moved back to backlog for a fresh run. 427 also hit the same bug — marked done without code on master.
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Story moved to done with no code on master. The merge-queue commit exists on a detached branch but was never applied to master. No merge commit appears in git log on master.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
Pipeline should verify that the cherry-pick produced a merge commit on master before advancing to done. If cherry-pick fails or is missing, the story should remain in merge stage with a merge_failure flag.
|
||||||
|
|
||||||
|
## Suggested Fix
|
||||||
|
|
||||||
|
The code path is: `merge.rs::run_squash_merge` → `pipeline/merge.rs::start_merge_agent_work` → `lifecycle.rs::move_story_to_archived`.
|
||||||
|
|
||||||
|
`run_squash_merge` (merge.rs:354) cherry-picks the merge-queue commit onto `project_root` and checks `cp.status.success()`. If it returns `success: true`, `start_merge_agent_work` (pipeline/merge.rs:106) immediately calls `move_story_to_archived`, which moves the story file to `5_done/`. The watcher then commits "storkit: done".
|
||||||
|
|
||||||
|
The gap: between the cherry-pick returning success and the story moving to done, nobody verifies the cherry-pick actually produced a code commit on master. Possible failure modes:
|
||||||
|
|
||||||
|
1. `project_root` is not on master (e.g. checked out to a merge-queue branch from a concurrent merge)
|
||||||
|
2. Cherry-pick exits 0 but produces an empty commit (no code diff)
|
||||||
|
3. Cherry-pick succeeds on the wrong branch
|
||||||
|
|
||||||
|
**Fix:** After the cherry-pick in `run_squash_merge` succeeds (line 384), before returning `success: true`:
|
||||||
|
|
||||||
|
1. Verify `project_root` is on master: `git rev-parse --abbrev-ref HEAD` must equal the base branch
|
||||||
|
2. Verify the HEAD commit on master contains the expected merge message (e.g. matches `storkit: merge <story_id>`) or has a non-empty diff
|
||||||
|
3. If either check fails, abort the cherry-pick and return `success: false`
|
||||||
|
|
||||||
|
This keeps the fix entirely within `run_squash_merge` — no changes needed to the pipeline advance or lifecycle code.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Pipeline must not move a story to done unless a merge commit containing the feature code exists on master
|
||||||
|
- [ ] If cherry-pick fails or produces no code diff on master, the merge must be reported as failed
|
||||||
|
- [ ] Add a post-merge verification step that checks git log on master for the expected merge commit before advancing to done
|
||||||
|
- [ ] When verification fails, emit a merge_failure and leave the story in the merge stage for retry
|
||||||
+20
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: "Server-side text normalization for chat message line breaks"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 427: Server-side text normalization for chat message line breaks
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user reading bot messages in Matrix, I want single newlines between sentences to render correctly, so that messages don't show up with words joined together like "sentence one.Sentence two".
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Add a text normalization step before markdown-to-HTML conversion in the Matrix transport that converts single newlines between non-empty prose lines into double newlines
|
||||||
|
- [ ] Preserve intentional single-newline formatting in bullet lists, headings, table rows, and code fences
|
||||||
|
- [ ] Apply the same normalization in WhatsApp and Slack transports
|
||||||
|
- [ ] Unit tests covering prose paragraphs, bullet lists, code blocks, and mixed content
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
name: "Split pool/pipeline.rs into submodules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 428: Split pool/pipeline.rs into submodules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/agents/pool/pipeline.rs (1789 lines) into focused submodules.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] advance.rs contains run_pipeline_advance, spawn_pipeline_advance, should_block_story and their unit tests
|
||||||
|
- [ ] completion.rs contains run_server_owned_completion, report_completion and their unit tests
|
||||||
|
- [ ] merge.rs contains start_merge_agent_work, run_merge_pipeline, get_merge_status, set_merge_failure_reported and their unit tests
|
||||||
|
- [ ] mod.rs re-exports all public items and wires the submodules
|
||||||
|
- [ ] Unit tests live in their respective module files
|
||||||
|
- [ ] No public API changes — all existing imports continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+27
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
name: "Interactive project setup wizard for new storkit projects"
|
||||||
|
agent: coder-opus
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 429: Interactive project setup wizard for new storkit projects
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a developer adopting storkit on an existing project, I want a guided setup process that scaffolds the .storkit directory and has an agent generate project-specific configuration files, so that I can get up and running without manually writing specs and scripts.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] storkit init scaffolds .storkit/ directory structure, project.toml, and .mcp.json without clobbering any existing files (especially CLAUDE.md)
|
||||||
|
- [ ] Setup wizard tracks progress through ordered steps, resumable if interrupted
|
||||||
|
- [ ] Step 1: scaffold .storkit/ directory structure and project.toml
|
||||||
|
- [ ] Step 2: agent reads codebase and generates specs/00_CONTEXT.md, user confirms or requests revision
|
||||||
|
- [ ] Step 3: agent reads tech stack and generates specs/tech/STACK.md, user confirms or requests revision
|
||||||
|
- [ ] Step 4: agent creates script/test that runs the project's actual test suite, user runs it to verify, then confirms
|
||||||
|
- [ ] Step 5: agent creates script/release tailored to the project's deployment, user confirms
|
||||||
|
- [ ] Step 6: agent creates script/test_coverage if the stack supports it, user confirms
|
||||||
|
- [ ] Each step gates on user confirmation before advancing to the next
|
||||||
|
- [ ] Existing CLAUDE.md is preserved — storkit appends its content or leaves it untouched
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
name: "Work item titles render too large in expanded view"
|
name: "Work item titles render too large in expanded view"
|
||||||
merge_failure: "Merge pipeline infrastructure failure: squash merge committed successfully on merge-queue branch, but cherry-pick onto master failed with 'fatal: bad revision merge-queue/237_bug_work_item_titles_render_too_large_in_expanded_view'. The merge worktree setup also failed (ENOENT for .story_kit/merge_workspace — pnpm install, pnpm build, cargo check all skipped). The merge-queue branch appears to have been cleaned up before the cherry-pick step could reference it. Master is untouched."
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Bug 237: Work item titles render too large in expanded view
|
# Bug 237: Work item titles render too large in expanded view
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
name: "Add refactor work item type"
|
name: "Add refactor work item type"
|
||||||
merge_failure: "merge_agent_work tool returned empty output on two attempts. The merge-queue branch (merge-queue/254_story_add_refactor_work_item_type) was created with squash merge commit 27d24b2, and the merge workspace worktree exists at .story_kit/merge_workspace, but the pipeline never completed (no success/failure logged after MERGE-DEBUG calls). The stale merge workspace worktree may be blocking completion. Possibly related to bug 250 (merge pipeline cherry-pick fails with bad revision on merge-queue branch). Human intervention needed to: 1) clean up the merge-queue worktree and branch, 2) investigate why the merge pipeline hangs after creating the squash merge commit, 3) retry the merge."
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Story 254: Add refactor work item type
|
# Story 254: Add refactor work item type
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
---
|
---
|
||||||
name: "Show agent logs in expanded story popup"
|
name: "Show agent logs in expanded story popup"
|
||||||
merge_failure: "merge_agent_work tool returned empty output. The merge pipeline created the merge-queue branch (merge-queue/255_story_show_agent_logs_in_expanded_story_popup) and merge workspace worktree at .story_kit/merge_workspace, but hung without completing. This is the same issue that affected story 254 — likely related to bug 250 (merge pipeline cherry-pick fails with bad revision on merge-queue branch). The stale merge workspace worktree on the merge-queue branch may be blocking completion. Human intervention needed to: 1) clean up the merge workspace worktree and merge-queue branch, 2) investigate the root cause in the merge pipeline (possibly the cherry-pick/fast-forward step after squash merge), 3) retry the merge."
|
|
||||||
---
|
---
|
||||||
|
|
||||||
# Story 255: Show agent logs in expanded story popup
|
# Story 255: Show agent logs in expanded story popup
|
||||||
|
|||||||
+1
@@ -1,5 +1,6 @@
|
|||||||
---
|
---
|
||||||
name: "Web UI OAuth flow for Claude authentication"
|
name: "Web UI OAuth flow for Claude authentication"
|
||||||
|
agent: "coder-opus"
|
||||||
---
|
---
|
||||||
|
|
||||||
# Story 368: Web UI OAuth flow for Claude authentication
|
# Story 368: Web UI OAuth flow for Claude authentication
|
||||||
@@ -0,0 +1,32 @@
|
|||||||
|
---
|
||||||
|
name: "No-arg storkit in empty directory skips scaffold"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 371: No-arg storkit in empty directory skips scaffold
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
When running `storkit` with no path argument from an empty directory (no `.storkit/`), the server starts but never calls `open_project` or the scaffold. The `find_story_kit_root` check fails to find `.storkit/`, so the fallback at main.rs:179-186 just sets `project_root = cwd` without scaffolding. This means no `.storkit/`, no `project.toml`, no `.mcp.json`, no `CLAUDE.md` — the project is non-functional.
|
||||||
|
|
||||||
|
The explicit path branch (`storkit .`) works correctly because it calls `open_project` → `ensure_project_root_with_story_kit` → `scaffold_story_kit`. The no-arg branch should do the same.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Create a new empty directory
|
||||||
|
2. cd into it
|
||||||
|
3. Run `storkit` (no path argument)
|
||||||
|
4. Observe that no scaffold is created — `.storkit/`, `CLAUDE.md`, `.mcp.json`, etc. are all missing
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Server starts with project_root set to cwd but no scaffold runs. The project is non-functional — no agent config, no MCP endpoint, no work pipeline directories.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
Running `storkit` with no arguments from a directory without `.storkit/` should scaffold the project the same as `storkit .` does — calling `open_project` and triggering `ensure_project_root_with_story_kit`.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Running `storkit` with no args from a dir without `.storkit/` calls `open_project` and triggers the full scaffold
|
||||||
|
- [ ] The no-arg fallback path in main.rs calls `open_project(cwd)` instead of just setting project_root directly
|
||||||
|
- [ ] After `storkit` completes startup, `.storkit/project.toml`, `.mcp.json`, `CLAUDE.md`, and `script/test` all exist
|
||||||
+24
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
name: "Scaffold auto-detects tech stack and configures script/test"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 372: Scaffold auto-detects tech stack and configures script/test
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user setting up a new project with storkit, I want the scaffold to detect my project's tech stack and generate a working `script/test` automatically, so that agents can run tests immediately without manual configuration.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Scaffold detects Go projects (go.mod) and adds `go test ./...` to script/test
|
||||||
|
- [ ] Scaffold detects Node.js projects (package.json) and adds `npm test` to script/test
|
||||||
|
- [ ] Scaffold detects Rust projects (Cargo.toml) and adds `cargo test` to script/test
|
||||||
|
- [ ] Scaffold detects Python projects (pyproject.toml or requirements.txt) and adds `pytest` to script/test
|
||||||
|
- [ ] Scaffold handles multi-stack projects (e.g. Go + Next.js) by combining the relevant test commands
|
||||||
|
- [ ] project.toml component entries are generated to match detected tech stack
|
||||||
|
- [ ] Falls back to the generic 'No tests configured' stub if no known stack is detected
|
||||||
|
- [ ] Coder agent prompt includes instruction to configure `script/test` for the project's test framework if it still contains the generic stub
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+28
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: "Scaffold gitignore missing transient pipeline stage directories"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 373: Scaffold gitignore missing transient pipeline stage directories
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The `write_story_kit_gitignore` function in `server/src/io/fs.rs` does not include the transient pipeline stages (`work/2_current/`, `work/3_qa/`, `work/4_merge/`) in the `.storkit/.gitignore` entries list. These stages are not committed to git (only `1_backlog`, `5_done`, and `6_archived` are commit-worthy per spike 92), so they should be ignored for new projects.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Scaffold a new project with storkit
|
||||||
|
2. Check `.storkit/.gitignore`
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
`.storkit/.gitignore` only contains `bot.toml`, `matrix_store/`, `matrix_device_id`, `worktrees/`, `merge_workspace/`, `coverage/`. The transient pipeline directories are missing.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
`.storkit/.gitignore` also includes `work/2_current/`, `work/3_qa/`, `work/4_merge/`.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Scaffold writes work/2_current/, work/3_qa/, work/4_merge/ to .storkit/.gitignore
|
||||||
|
- [ ] Idempotent — running scaffold again does not duplicate entries
|
||||||
|
- [ ] Existing .storkit/.gitignore files get the new entries appended on next scaffold run
|
||||||
+30
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
name: "Web UI implements all bot commands as slash commands"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 374: Web UI implements all bot commands as slash commands
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user working in the storkit web UI, I want to type slash commands (e.g. `/status`, `/start 42`, `/cost`) in the chat input to trigger the same deterministic bot commands available in Matrix, so that I can manage my project entirely from the browser without needing a chat bot.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] /status — shows pipeline status and agent availability; /status <number> shows story triage dump
|
||||||
|
- [ ] /assign <number> <model> — pre-assign a model to a story
|
||||||
|
- [ ] /start <number> — start a coder on a story; /start <number> opus for specific model
|
||||||
|
- [ ] /show <number> — display full text of a work item
|
||||||
|
- [ ] /move <number> <stage> — move a work item to a pipeline stage
|
||||||
|
- [ ] /delete <number> — remove a work item from the pipeline
|
||||||
|
- [ ] /cost — show token spend (24h total, top stories, by agent type, all-time)
|
||||||
|
- [ ] /git — show git status (branch, uncommitted changes, ahead/behind)
|
||||||
|
- [ ] /overview <number> — show implementation summary for a merged story
|
||||||
|
- [ ] /rebuild — rebuild the server binary and restart
|
||||||
|
- [ ] /reset — clear the current Claude Code session
|
||||||
|
- [ ] /help — list all available slash commands
|
||||||
|
- [ ] Slash commands are handled at the frontend/backend level without LLM invocation
|
||||||
|
- [ ] Unrecognised slash commands show a helpful error message
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+43
@@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
name: "Default project.toml contains Rust-specific setup commands for non-Rust projects"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 375: Default project.toml contains Rust-specific setup commands for non-Rust projects
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
When scaffolding a new project where no tech stack is detected, the generated `project.toml` contains Rust-specific setup commands (`cargo check`) as example fallback components. This causes coder agents to try to satisfy Rust gates on non-Rust projects.
|
||||||
|
|
||||||
|
## Fix
|
||||||
|
|
||||||
|
1. In `detect_components_toml()` fallback (when no stack markers found): replace the Rust/pnpm example components with a single generic `app` component with empty `setup = []`
|
||||||
|
2. In the onboarding prompt Step 4: simplify to configure `[[component]]` entries based on what the user told the LLM in Step 2 (tech stack), rather than re-scanning the filesystem independently
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Default project.toml does not contain language-specific setup commands when that language is not detected in the project
|
||||||
|
- [ ] If go.mod is present, setup commands use Go tooling
|
||||||
|
- [ ] If package.json is present, setup commands use npm/node tooling
|
||||||
|
- [ ] If no known stack is detected, setup commands are empty or just echo a placeholder
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Create a new Go + Next.js project directory with `go.mod` and `package.json`
|
||||||
|
2. Run `storkit .` to scaffold
|
||||||
|
3. Check `.storkit/project.toml` — the component setup commands reference cargo/Rust
|
||||||
|
4. Start a coder agent — it creates a `Cargo.toml` trying to satisfy the Rust setup commands
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
The scaffolded `project.toml` has Rust-specific setup commands (`cargo check`) even for non-Rust projects. Agents try to satisfy these and create spurious files.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The scaffolded `project.toml` should have generic or stack-appropriate setup commands. If no known stack is detected, setup commands should be empty or minimal (not Rust-specific).
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Default project.toml does not contain language-specific setup commands when that language is not detected in the project
|
||||||
|
- [ ] If go.mod is present, setup commands use Go tooling
|
||||||
|
- [ ] If package.json is present, setup commands use npm/node tooling
|
||||||
|
- [ ] If no known stack is detected, setup commands are empty or just echo a placeholder
|
||||||
+22
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "Rename MCP whatsup tool to status for consistency"
|
||||||
|
agent: coder-opus
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 376: Rename MCP whatsup tool to status for consistency
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a developer using storkit's MCP tools, I want the MCP tool to be called `status` instead of `whatsup`, so that the naming is consistent between the bot command (`status`), the web UI slash command (`/status`), and the MCP tool.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] MCP tool is renamed from 'whatsup' to 'status'
|
||||||
|
- [ ] MCP tool is discoverable as 'status' via tools/list
|
||||||
|
- [ ] The tool still accepts a story_id parameter and returns the same triage data
|
||||||
|
- [ ] Old 'whatsup' tool name is removed from the MCP registry
|
||||||
|
- [ ] Any internal references to the whatsup tool name are updated
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+30
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
name: "update_story MCP tool writes front matter values as YAML strings instead of native types"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 377: update_story MCP tool writes front matter values as YAML strings instead of native types
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The `update_story` MCP tool accepts `front_matter` as a `Map<String, String>`, so all values are written as quoted YAML strings. Fields like `retry_count` (expected `u32`) and `blocked` (expected `bool`) end up as `"0"` and `"false"` in the YAML. This causes `parse_front_matter()` to fail because serde_yaml cannot deserialize a quoted string into `u32` or `bool`. When parsing fails, the story `name` comes back as `None`, so the status command shows no title for the story.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Call `update_story` with `front_matter: {"blocked": "false", "retry_count": "0"}`
|
||||||
|
2. Read the story file — front matter contains `blocked: "false"` and `retry_count: "0"` (quoted strings)
|
||||||
|
3. Call `get_pipeline_status` or the bot `status` command
|
||||||
|
4. The story shows with no title/name
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Front matter values are written as quoted YAML strings. `parse_front_matter()` fails to deserialize `"false"` as `bool` and `"0"` as `u32`, returning an error. The story name is lost and the status command shows no title.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The `update_story` tool should write `blocked` and `retry_count` as native YAML types (unquoted `false` and `0`), or `parse_front_matter()` should accept both string and native representations. The story name should always be displayed correctly in the status command.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] update_story with front_matter {"blocked": "false"} writes `blocked: false` (unquoted) in the YAML
|
||||||
|
- [ ] update_story with front_matter {"retry_count": "0"} writes `retry_count: 0` (unquoted) in the YAML
|
||||||
|
- [ ] Story name is displayed correctly in the status command after update_story modifies front matter fields
|
||||||
+20
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: "Status command shows work item type (story, bug, spike, refactor) next to each item"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 378: Status command shows work item type (story, bug, spike, refactor) next to each item
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user viewing the pipeline status, I want to see the type of each work item (story, bug, spike, refactor) so that I can quickly understand what kind of work is in progress without having to open individual files.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] The status command displays the work item type (story, bug, spike, refactor) as a label next to each item — e.g. "375 [bug] — Default project.toml contains Rust-specific setup commands"
|
||||||
|
- [ ] The type is extracted from the story_id filename convention ({id}_{type}_{slug})
|
||||||
|
- [ ] All known types are supported: story, bug, spike, refactor
|
||||||
|
- [ ] Unknown or missing types are omitted gracefully (no crash, no placeholder)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+34
@@ -0,0 +1,34 @@
|
|||||||
|
---
|
||||||
|
name: "start_agent ignores story front matter agent assignment"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 379: start_agent ignores story front matter agent assignment
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
When a model is pre-assigned to a story via the `assign` command (which writes `agent: coder-opus` to the story's YAML front matter), the MCP `start_agent` tool ignores this field. It only looks at the `agent_name` argument passed directly in the tool call. If none is passed, it auto-selects the first idle coder (usually sonnet), bypassing the user's assignment.
|
||||||
|
|
||||||
|
The auto-assign pipeline (`auto_assign.rs`) correctly reads and respects the front matter `agent` field, but the direct `tool_start_agent` path in `agent_tools.rs` does not.
|
||||||
|
|
||||||
|
Additionally, the `show` (whatsup/triage) command should display the assigned agent from the story's front matter so users can verify their assignment took effect.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Run `assign 368 opus` — this writes `agent: coder-opus` to story 368's front matter
|
||||||
|
2. Run `start 368` (without specifying a model)
|
||||||
|
3. Observe that a sonnet coder is assigned, not coder-opus
|
||||||
|
4. Run `show 368` — the assigned agent is not displayed
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
The `start_agent` MCP tool ignores the `agent` field in the story's front matter and picks the first idle coder. The `show` command does not display the pre-assigned agent.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
When no explicit `agent_name` is passed to `start_agent`, it should read the story's front matter `agent` field and use that agent if it's available. The `show` command should display the assigned agent from front matter.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] start_agent without an explicit agent_name reads the story's front matter `agent` field and uses it if the agent is idle
|
||||||
|
- [ ] If the preferred agent from front matter is busy, start_agent either waits or falls back to auto-selection (matching auto_assign behavior)
|
||||||
|
- [ ] The show/triage command displays the assigned agent from story front matter when present
|
||||||
+20
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: "Assign command restarts coder when story is already in progress"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 380: Assign command restarts coder when story is already in progress
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user, I want `assign X opus` on a running story to stop the current coder, update the front matter, and start the newly assigned agent, so that I can switch models mid-flight without manually stopping and restarting.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] When assign is called on a story with a running coder, the current coder agent is stopped
|
||||||
|
- [ ] The story's front matter `agent` field is updated to the new agent name
|
||||||
|
- [ ] The newly assigned agent is started on the story automatically
|
||||||
|
- [ ] When assign is called on a story with no running coder, it behaves as before (just updates front matter)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,20 @@
|
|||||||
|
---
|
||||||
|
name: "Bot command to delete a worktree"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 381: Bot command to delete a worktree
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user, I want a bot command to delete a worktree so that I can clean up orphaned or unwanted worktrees without SSHing into the server.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] A new bot command (e.g. `rmtree <story_number>`) deletes the worktree for the given story
|
||||||
|
- [ ] The command stops any running agent on that story before removing the worktree
|
||||||
|
- [ ] The command returns a confirmation message on success
|
||||||
|
- [ ] The command returns a helpful error if no worktree exists for the given story
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+22
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp transport supports Twilio API as alternative to Meta Cloud API"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 382: WhatsApp transport supports Twilio API as alternative to Meta Cloud API
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user, I want to use Twilio's WhatsApp API instead of Meta's Cloud API directly, so that I can avoid Meta's painful developer onboarding and use Twilio's simpler signup process.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] bot.toml supports a `whatsapp_provider` field with values `meta` (default, current behavior) or `twilio`
|
||||||
|
- [ ] When provider is `twilio`, messages are sent via Twilio's REST API (`api.twilio.com`) using Account SID + Auth Token
|
||||||
|
- [ ] When provider is `twilio`, inbound webhooks parse Twilio's form-encoded format instead of Meta's JSON
|
||||||
|
- [ ] Twilio config requires `twilio_account_sid`, `twilio_auth_token`, and `twilio_whatsapp_number` in bot.toml
|
||||||
|
- [ ] All existing bot commands and LLM passthrough work identically regardless of provider
|
||||||
|
- [ ] 24-hour messaging window logic still applies (Twilio enforces this server-side too)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+41
@@ -0,0 +1,41 @@
|
|||||||
|
---
|
||||||
|
name: "Reorganize chat system into chat module with transport submodules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 383: Reorganize chat system into chat module with transport submodules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Currently chat-related code is scattered at the top level of `src/`: `transport.rs`, `whatsapp.rs`, `slack.rs`, plus `matrix/` as a directory module. This should be reorganized into a clean module hierarchy:
|
||||||
|
|
||||||
|
```
|
||||||
|
src/
|
||||||
|
chat/
|
||||||
|
mod.rs # Generic chat traits, types, ChatTransport etc.
|
||||||
|
transport/
|
||||||
|
mod.rs
|
||||||
|
matrix/ # Existing matrix module moved here
|
||||||
|
whatsapp.rs # Existing whatsapp.rs moved here
|
||||||
|
slack.rs # Existing slack.rs moved here
|
||||||
|
twilio.rs # Future Twilio transport
|
||||||
|
```
|
||||||
|
|
||||||
|
The `ChatTransport` trait and shared chat types should live in `chat/mod.rs`. Each transport implementation becomes a submodule of `chat::transport`.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] ChatTransport trait and shared chat types live in `chat/mod.rs`
|
||||||
|
- [ ] Matrix transport lives in `chat/transport/matrix/`
|
||||||
|
- [ ] WhatsApp transport lives in `chat/transport/whatsapp.rs`
|
||||||
|
- [ ] Slack transport lives in `chat/transport/slack.rs`
|
||||||
|
- [ ] Top-level `transport.rs`, `whatsapp.rs`, `slack.rs`, and `matrix/` are removed
|
||||||
|
- [ ] All existing tests pass without modification (or with only import path changes)
|
||||||
|
- [ ] No functional changes — pure file reorganization and re-exports
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+23
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp markdown-to-WhatsApp formatting conversion"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 384: WhatsApp markdown-to-WhatsApp formatting conversion
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a WhatsApp user, I want bot messages to use WhatsApp-native formatting instead of raw markdown, so that headers, bold text, and links render properly.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Headers (# ## ### etc.) are converted to bold text (*Header*) in WhatsApp messages
|
||||||
|
- [ ] Markdown bold (**text**) is converted to WhatsApp bold (*text*)
|
||||||
|
- [ ] Markdown strikethrough (~~text~~) is converted to WhatsApp strikethrough (~text~)
|
||||||
|
- [ ] Markdown links [text](url) are converted to readable format: text (url)
|
||||||
|
- [ ] Code blocks and inline code are preserved as-is (already compatible)
|
||||||
|
- [ ] Matrix bot formatting is completely unaffected (conversion only applied in WhatsApp send paths)
|
||||||
|
- [ ] Existing WhatsApp chunking (4096 char limit) still works correctly after conversion
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "Slack markdown-to-mrkdwn formatting conversion"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 385: Slack markdown-to-mrkdwn formatting conversion
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a Slack user, I want bot messages to use Slack-native mrkdwn formatting instead of raw markdown, so that headers, bold text, and links render properly.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Headers (# ## ### etc.) are converted to bold text (*Header*) in Slack messages
|
||||||
|
- [ ] Markdown bold (**text**) is converted to Slack bold (*text*)
|
||||||
|
- [ ] Markdown strikethrough (~~text~~) is converted to Slack strikethrough (~text~)
|
||||||
|
- [ ] Markdown links [text](url) are converted to Slack format: <url|text>
|
||||||
|
- [ ] Code blocks and inline code are preserved as-is (already compatible)
|
||||||
|
- [ ] WhatsApp and Matrix bot formatting are completely unaffected (conversion only applied in Slack send paths)
|
||||||
|
- [ ] Conversion is applied to all Slack send paths: command responses, LLM streaming, htop snapshots, delete responses, and slash command responses
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+22
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "Unreleased command shows list of stories since last release"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 386: Unreleased command shows list of stories since last release
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a user, I want a bot command and web UI slash command called "unreleased" that shows a list of stories completed since the last release, so that I can see what's ready to ship.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Bot command `unreleased` returns a list of stories merged to master since the last release tag
|
||||||
|
- [ ] Web UI slash command /unreleased returns the same list
|
||||||
|
- [ ] Each entry shows story number and name
|
||||||
|
- [ ] If there are no unreleased stories, a clear message is shown
|
||||||
|
- [ ] Command is registered in the help command output
|
||||||
|
- [ ] WhatsApp, Slack, and Matrix transports all support the command via the shared command dispatcher
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "Configurable base branch name in project.toml"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 387: Configurable base branch name in project.toml
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a project owner, I want to configure the main branch name in project.toml (e.g. "main", "master", "develop"), so that the system doesn't hardcode "master" and works with any branching convention.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] New optional `base_branch` setting in project.toml (e.g. base_branch = "main")
|
||||||
|
- [ ] When set, all worktree creation, merge operations, and agent prompts use the configured branch name
|
||||||
|
- [ ] When not set, falls back to the existing auto-detection logic (detect_base_branch) which reads the current git branch
|
||||||
|
- [ ] The hardcoded "master" fallback in detect_base_branch is replaced by the project.toml setting when available
|
||||||
|
- [ ] Agent prompt template {{base_branch}} resolves to the configured value
|
||||||
|
- [ ] Existing projects without the setting continue to work unchanged (backwards compatible)
|
||||||
|
- [ ] project.toml.example uses base_branch = "main" as the example value; the actual project.toml uses base_branch = "master"
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp phone number allowlist authorization"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 389: WhatsApp phone number allowlist authorization
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a bot operator, I want to restrict which phone numbers can interact with the bot, so that only authorized users can send commands.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] New optional allowed_phones list in bot.toml for WhatsApp (similar to Matrix allowed_users)
|
||||||
|
- [ ] When configured, only messages from listed phone numbers are processed; all others are silently ignored
|
||||||
|
- [ ] When not configured (empty or absent), all phone numbers are allowed (backwards compatible)
|
||||||
|
- [ ] Unauthorized senders are logged but receive no response
|
||||||
|
- [ ] The allowlist applies to all message types: commands, LLM conversations, and async commands (htop, delete)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+31
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp missing async command handlers for start, rebuild, reset, rmtree, assign"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 390: WhatsApp missing async command handlers for start, rebuild, reset, rmtree, assign
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Five bot commands listed in help don't work in WhatsApp. Matrix's on_room_message pre-dispatches these via extract_*_command() functions before calling try_handle_command(), but WhatsApp's handle_incoming_message only pre-dispatches htop and delete. The missing commands have fallback handlers that return None, so they silently fall through to the LLM instead of executing.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Send "rebuild" (or "start 386", "reset", "rmtree 386", "assign 386 opus") to the WhatsApp bot
2. Observe the message is forwarded to the LLM instead of executing the command
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
The 5 commands (start, rebuild, reset, rmtree, assign) fall through to the LLM and generate a conversational response instead of executing the bot command.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
All commands listed in help should work in WhatsApp, matching Matrix behavior. start should spawn an agent, rebuild should rebuild the server, reset should clear the session, rmtree should remove a worktree, assign should pre-assign a model.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] start command works in WhatsApp (extract_start_command dispatch)
|
||||||
|
- [ ] rebuild command works in WhatsApp (extract_rebuild_command dispatch)
|
||||||
|
- [ ] reset command works in WhatsApp (extract_reset_command dispatch)
|
||||||
|
- [ ] rmtree command works in WhatsApp (extract_rmtree_command dispatch)
|
||||||
|
- [ ] assign command works in WhatsApp (extract_assign_command dispatch)
|
||||||
|
- [ ] Same 5 commands also work in Slack transport if similarly missing
|
||||||
|
- [ ] RETRY: Previous attempt was marked done without any code changes — the mergemaster moved the story to done but no async command handlers were actually added to whatsapp.rs. The fix must add extract_start_command, extract_rebuild_command, extract_reset_command, extract_rmtree_command, and extract_assign_command dispatch blocks to handle_incoming_message in whatsapp.rs, following the existing pattern used for htop and delete. Also check and fix Slack if similarly missing.
|
||||||
+27
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
name: "strip_prefix_ci panics on multi-byte UTF-8 characters"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 391: strip_prefix_ci panics on multi-byte UTF-8 characters
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
strip_prefix_ci in commands/mod.rs slices text by byte offset using prefix.len(), which panics when the slice boundary falls inside a multi-byte UTF-8 character (e.g. right single quote U+2019, emojis). The function assumes ASCII-safe byte boundaries but real WhatsApp/Matrix messages contain Unicode.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Send a message to the bot containing a smart quote or emoji within the first N bytes (where N = bot name length)
2. e.g. "For now let’s just deal with it" where the bot name prefix check slices at byte 12, inside the 3-byte ’ (U+2019) character
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Thread panics: "byte index 12 is not a char boundary; it is inside '’' (bytes 11..14)"
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The function should safely handle multi-byte UTF-8 without panicking. If the slice boundary isn't a char boundary, the prefix doesn't match — return None.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] strip_prefix_ci does not panic on messages containing multi-byte UTF-8 characters (smart quotes, emojis, CJK, etc.)
|
||||||
|
- [ ] Use text.get(..prefix.len()) or text.is_char_boundary() instead of direct indexing
|
||||||
|
- [ ] Add test cases for messages with emojis and smart quotes
|
||||||
+27
@@ -0,0 +1,27 @@
|
|||||||
|
---
|
||||||
|
name: "Extract shared transport utilities from matrix module into chat submodule"
|
||||||
|
agent: "coder-opus"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 392: Extract shared transport utilities from matrix module into chat submodule
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Several functions currently living in the matrix transport module are used by all transports (WhatsApp, Slack, Matrix). These should be pulled up into a shared location under the chat module. Candidates include: strip_prefix_ci, strip_bot_mention, try_handle_command, drain_complete_paragraphs, markdown_to_whatsapp (pattern could generalize), chunk_for_whatsapp, and the command dispatch infrastructure. A chat::util or chat::text submodule would be a natural home for string utilities like strip_prefix_ci. The command dispatch (try_handle_command, CommandDispatch, BotCommand registry) could live in chat::commands.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Shared string utilities (strip_prefix_ci, strip_bot_mention, drain_complete_paragraphs) moved to a chat::util or chat::text submodule
|
||||||
|
- [ ] Command dispatch infrastructure (try_handle_command, CommandDispatch, BotCommand, command registry) moved to chat::commands
|
||||||
|
- [ ] Per-transport formatting functions (markdown_to_whatsapp, markdown_to_slack) remain in their respective transport modules
|
||||||
|
- [ ] All transports import from the new shared location instead of reaching into matrix::
|
||||||
|
- [ ] No functional changes — purely structural refactor
|
||||||
|
- [ ] All existing tests pass and move with their code
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+23
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "Pipeline stage notifications for WhatsApp and Slack transports"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 393: Pipeline stage notifications for WhatsApp and Slack transports
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a WhatsApp or Slack user, I want to receive pipeline stage transition notifications (e.g. "story moved from Current to QA") just like Matrix users do, so I can track story progress from any transport.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport spawns a notification listener at startup using the existing spawn_notification_listener infrastructure
|
||||||
|
- [ ] Slack transport spawns a notification listener at startup using the same infrastructure
|
||||||
|
- [ ] Notifications are sent to all active ambient senders/channels for the respective transport
|
||||||
|
- [ ] Stage transition notifications (story moved between pipeline stages) are delivered
|
||||||
|
- [ ] Error notifications (story failures) are delivered
|
||||||
|
- [ ] Rate limit warnings are delivered with debouncing
|
||||||
|
- [ ] Matrix notification behavior is completely unaffected
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,23 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack permission prompt forwarding"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 394: WhatsApp and Slack permission prompt forwarding
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a WhatsApp or Slack user, I want permission requests from Claude Code to be forwarded to my chat so I can approve or deny them, rather than having them silently fail.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Permission requests are sent as messages to the WhatsApp sender with tool name and input details
|
||||||
|
- [ ] User can reply yes/y/approve or no/n/deny to approve or deny the permission
|
||||||
|
- [ ] Permission requests time out and auto-deny (fail-closed) if not answered within the configured timeout
|
||||||
|
- [ ] Slack receives the same permission forwarding treatment
|
||||||
|
- [ ] Reuses the existing permission channel infrastructure (perm_rx, PermissionForward, PermissionDecision)
|
||||||
|
- [ ] Matrix permission handling is completely unaffected
|
||||||
|
- [ ] handle_llm_message uses a tokio::select! loop (like Matrix bot.rs) to listen for both LLM output and permission requests concurrently
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
name: "Fix npm deprecated module warnings"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 395: Fix npm deprecated module warnings
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Address npm warnings about deprecated modules in the frontend dependencies. Update or replace deprecated packages to eliminate warnings during npm install.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] npm install runs with zero deprecation warnings
|
||||||
|
- [ ] All existing frontend tests (npm test) still pass
|
||||||
|
- [ ] npm run build succeeds without errors
|
||||||
|
- [ ] No functional regressions in the frontend
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp bot startup announcement after restart"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 396: WhatsApp bot startup announcement after restart
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a WhatsApp user, I want the bot to announce its presence when it starts up or restarts, like it does in Matrix, so I know it's back online and ready.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] Bot sends a startup message to all known WhatsApp senders (from conversation history or ambient rooms) when the server starts
|
||||||
|
- [ ] Startup message includes the bot name and indicates it is online/ready
|
||||||
|
- [ ] Slack transport gets the same startup announcement treatment
|
||||||
|
- [ ] Matrix startup announcement behavior is unaffected
|
||||||
|
- [ ] After a rebuild command, the new process sends the announcement on startup
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
+30
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
name: "Selection screen directory picker unreadable in dark mode"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 397: Selection screen directory picker unreadable in dark mode
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The ProjectPathInput component in the selection screen uses hardcoded light-theme inline styles (white backgrounds, dark borders, dark text highlights) that don't adapt to dark mode. When the browser/OS uses dark mode, the global CSS sets text color to #f6f6f6 (white) but the dropdown keeps background: #fff — resulting in white text on a white background, making the directory picker completely unreadable.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Run storkit under Docker (or locally) with a browser set to dark mode (prefers-color-scheme: dark).
|
||||||
|
2. Open http://localhost:3001 in the browser.
|
||||||
|
3. Click into the project path input and start typing a path to trigger the autocomplete dropdown.
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
The suggestion dropdown has white background with white/light text inherited from the dark-mode global styles. Match highlights use color: #222 which is barely visible. The close button and header bar also use light-only colors. The entire directory picker is effectively unreadable.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The directory picker dropdown should be readable in both light and dark mode. Colors for background, text, borders, and highlights should adapt to the active color scheme.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] ProjectPathInput dropdown is readable in dark mode (prefers-color-scheme: dark)
|
||||||
|
- [ ] ProjectPathInput dropdown remains readable in light mode
|
||||||
|
- [ ] Suggestion highlight text is visible against the dropdown background in both themes
|
||||||
|
- [ ] No hardcoded light-only colors remain in ProjectPathInput inline styles
|
||||||
@@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
name: "CLI --port flag with project.toml persistence"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 399: CLI --port flag with project.toml persistence
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a developer, I want to set the server port via a --port CLI flag that persists to project.toml, so that I don't have to remember an environment variable on every run.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] `storkit --help` shows a `--port` option
|
||||||
|
- [ ] `storkit --port 4000` starts the server on port 4000
|
||||||
|
- [ ] After first run with `--port`, the port is saved to `project.toml`
|
||||||
|
- [ ] On subsequent runs without `--port`, the port from `project.toml` is used
|
||||||
|
- [ ] CLI `--port` overrides the value in `project.toml`
|
||||||
|
- [ ] Default port is 3001 when neither `--port` nor `project.toml` port is set
|
||||||
|
- [ ] `STORKIT_PORT` env var is removed — no longer read or respected
|
||||||
|
- [ ] `.storkit_port` lock file mechanism is removed (`write_port_file` / `remove_port_file`)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- Docker compose changes (can update `STORKIT_PORT` references separately)
|
||||||
|
- Adding other CLI flags beyond `--port`
|
||||||
|
|
||||||
|
## Technical Notes
|
||||||
|
|
||||||
|
Port resolution priority: `--port` flag > `project.toml` `port` field > default 3001
|
||||||
|
|
||||||
|
The port should be written to `project.toml` on startup so subsequent runs remember it. Use the existing `config.rs` / `ProjectConfig` struct — add a `port` field.
|
||||||
@@ -0,0 +1,45 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack missing reset command handler"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 400: WhatsApp and Slack missing reset command handler
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The reset command has a fallback handler in chat/commands/mod.rs that returns None with a comment saying it's handled before try_handle_command. This is only true for Matrix. WhatsApp and Slack don't have pre-dispatch handling, so None causes fallthrough to LLM. This caused a real outage when stale session IDs couldn't be cleared via the bot after switching from Docker to bare-metal.
|
||||||
|
|
||||||
|
## Implementation Note
|
||||||
|
|
||||||
|
Follow the **rebuild pattern** established in story 402, with one complication: `handle_reset` in `server/src/chat/transport/matrix/reset.rs` takes a Matrix-specific `ConversationHistory` (`Arc<TokioMutex<HashMap<OwnedRoomId, RoomConversation>>>`), so it cannot be called directly from WhatsApp or Slack.
|
||||||
|
|
||||||
|
**WhatsApp session storage** (`server/src/chat/transport/whatsapp.rs`):
|
||||||
|
- Type: `WhatsAppConversationHistory = Arc<TokioMutex<HashMap<String, RoomConversation>>>` (key = sender phone number)
|
||||||
|
- Persisted to `.storkit/whatsapp_history.json` via `save_whatsapp_history`
|
||||||
|
|
||||||
|
**Slack session storage** (`server/src/chat/transport/slack.rs`):
|
||||||
|
- Type: `SlackConversationHistory = Arc<TokioMutex<HashMap<String, RoomConversation>>>` (key = channel ID)
|
||||||
|
- Persisted to `.storkit/slack_history.json` via `save_slack_history`
|
||||||
|
|
||||||
|
**Approach:**
|
||||||
|
- Use `extract_reset_command` from `server/src/chat/transport/matrix/reset.rs` to detect the command (it works transport-agnostically)
|
||||||
|
- Implement the reset inline in each transport's async message handler: clear `session_id` and `entries` for the sender/channel key, call the transport's own `save_*_history`, reply with confirmation
|
||||||
|
- Add async intercepts in `whatsapp.rs` (~line 1107, after the rebuild intercept) and `slack.rs` (~line 845, after the rebuild intercept)
|
||||||
|
- The fallback handler in `chat/commands/mod.rs` (`handle_reset_fallback`) stays as-is
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "reset" to the bot
3. Check server logs
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Log shows "No command matched, forwarding to LLM" — reset is sent to the LLM as a conversational message instead of clearing the session.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The bot clears the sender's session_id from conversation history and replies with confirmation like "Session cleared."
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport handles reset command: clears sender session_id and replies with confirmation
|
||||||
|
- [ ] Slack transport handles reset command: clears channel session_id and replies with confirmation
|
||||||
|
- [ ] Fallback handler in chat/commands/mod.rs no longer silently swallows the reset command
|
||||||
@@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack missing start command handler"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 401: WhatsApp and Slack missing start command handler
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The start command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||||
|
|
||||||
|
## Implementation Note
|
||||||
|
|
||||||
|
Follow the **rebuild pattern** established in story 402.
|
||||||
|
|
||||||
|
- `extract_start_command` and `handle_start` already exist in `server/src/chat/transport/matrix/start.rs`
|
||||||
|
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||||
|
- Call `crate::chat::transport::matrix::start::extract_start_command` to detect the command, then `crate::chat::transport::matrix::start::handle_start` to execute it
|
||||||
|
- The fallback handler in `chat/commands/mod.rs` (`handle_start_fallback`) stays as-is — it exists only so `help` lists the command
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "start <story_id>" to the bot
3. Check server logs
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Command falls through to LLM instead of starting an agent.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The bot starts an agent for the specified story and replies with confirmation.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport handles start command: starts agent and replies with confirmation
|
||||||
|
- [ ] Slack transport handles start command: starts agent and replies with confirmation
|
||||||
+26
@@ -0,0 +1,26 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack missing rebuild command handler"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 402: WhatsApp and Slack missing rebuild command handler
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The rebuild command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "rebuild" to the bot
3. Check server logs
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Command falls through to LLM instead of triggering a server rebuild.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The bot triggers a server rebuild and replies with confirmation.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport handles rebuild command: triggers rebuild and replies with confirmation
|
||||||
|
- [ ] Slack transport handles rebuild command: triggers rebuild and replies with confirmation
|
||||||
@@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack missing rmtree command handler"
|
||||||
|
retry_count: 2
|
||||||
|
blocked: true
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 403: WhatsApp and Slack missing rmtree command handler
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The rmtree command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||||
|
|
||||||
|
## Implementation Note
|
||||||
|
|
||||||
|
Follow the **rebuild pattern** established in story 402.
|
||||||
|
|
||||||
|
- `extract_rmtree_command` and `handle_rmtree` already exist in `server/src/chat/transport/matrix/rmtree.rs`
|
||||||
|
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||||
|
- Call `crate::chat::transport::matrix::rmtree::extract_rmtree_command` to detect the command, then `crate::chat::transport::matrix::rmtree::handle_rmtree` to execute it
|
||||||
|
- The fallback handler in `chat/commands/mod.rs` (`handle_rmtree_fallback`) stays as-is — it exists only so `help` lists the command
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "rmtree <story_id>" to the bot
3. Check server logs
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Command falls through to LLM instead of removing the worktree.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The bot removes the worktree for the specified story and replies with confirmation.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport handles rmtree command: removes worktree and replies with confirmation
|
||||||
|
- [ ] Slack transport handles rmtree command: removes worktree and replies with confirmation
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
---
|
||||||
|
name: "WhatsApp and Slack missing assign command handler"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Bug 404: WhatsApp and Slack missing assign command handler
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The assign command has a fallback handler in chat/commands/mod.rs that returns None. Only Matrix has pre-dispatch handling for this command. On WhatsApp and Slack, the command falls through to the LLM path.
|
||||||
|
|
||||||
|
## Implementation Note
|
||||||
|
|
||||||
|
Follow the **rebuild pattern** established in story 402.
|
||||||
|
|
||||||
|
- `extract_assign_command` and `handle_assign` already exist in `server/src/chat/transport/matrix/assign.rs`
|
||||||
|
- Add an async intercept in `server/src/chat/transport/whatsapp.rs` (see rebuild intercept ~line 1107) and `server/src/chat/transport/slack.rs` (see rebuild intercept ~line 845)
|
||||||
|
- Call `crate::chat::transport::matrix::assign::extract_assign_command` to detect the command, then `crate::chat::transport::matrix::assign::handle_assign` to execute it
|
||||||
|
- The fallback handler in `chat/commands/mod.rs` (`handle_assign_fallback` — note: the registry entry for `assign` currently calls `assign::handle_assign` synchronously; verify this doesn't conflict) stays as-is for `help` listing
|
||||||
|
- The fallback in `chat/commands/assign.rs` may need to return `None` instead of a real response once the async path handles it
|
||||||
|
|
||||||
|
## How to Reproduce
|
||||||
|
|
||||||
|
1. Configure bot with transport = "whatsapp" or "slack"
2. Send "assign <story_id> <agent>" to the bot
3. Check server logs
|
||||||
|
|
||||||
|
## Actual Result
|
||||||
|
|
||||||
|
Command falls through to LLM instead of assigning the agent.
|
||||||
|
|
||||||
|
## Expected Result
|
||||||
|
|
||||||
|
The bot assigns the specified agent to the story and replies with confirmation.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] WhatsApp transport handles assign command: assigns agent and replies with confirmation
|
||||||
|
- [ ] Slack transport handles assign command: assigns agent and replies with confirmation
|
||||||
+30
@@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
name: "Auto-refresh expired OAuth token for Claude Code PTY"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 405: Auto-refresh expired OAuth token for Claude Code PTY
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a storkit user with a Claude Max subscription, I want the server to automatically refresh my expired OAuth token so that chat, Matrix, and WhatsApp integrations don't stop working when the token expires.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
### Detection
|
||||||
|
- [ ] When the Claude Code PTY returns an `authentication_failed` error, storkit detects it instead of passing the raw 401 JSON to the user
|
||||||
|
|
||||||
|
### Auto-refresh (credentials exist, refresh token valid)
|
||||||
|
- [ ] Storkit reads the OAuth refresh token from `~/.claude/.credentials.json`
|
||||||
|
- [ ] Storkit calls the Anthropic OAuth token refresh endpoint (`https://console.anthropic.com/v1/oauth/token` with `grant_type=refresh_token`) to obtain a new access token
|
||||||
|
- [ ] Storkit writes the refreshed access token (and new expiresAt) back to `~/.claude/.credentials.json`
|
||||||
|
- [ ] After a successful refresh, storkit automatically retries the original chat request
|
||||||
|
- [ ] The refresh+retry is transparent to the user — they see no error
|
||||||
|
|
||||||
|
### Full login required (no credentials, or refresh token also expired)
|
||||||
|
- [ ] If `.credentials.json` doesn't exist or the refresh call itself fails, storkit surfaces a clear error: "OAuth session expired. Please run `claude login` to re-authenticate."
|
||||||
|
- [ ] The error message is surfaced through the normal chat stream (not just server logs)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- Implementing the full interactive `claude login` browser OAuth flow inside storkit
|
||||||
|
- Proactive token refresh before expiry (refreshing on demand when the error occurs is sufficient)
|
||||||
+21
@@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
name: "Browser-based OAuth login flow from web UI and chat integrations"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 406: Browser-based OAuth login flow from web UI and chat integrations
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a new storkit user (or one whose refresh token has expired), I want to complete the full Claude OAuth login flow from the web UI, Matrix, or WhatsApp so that I don't need terminal access to run `claude login`.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] From the web UI, the user can initiate OAuth login — storkit generates the Anthropic authorize URL and opens it in a new tab
|
||||||
|
- [ ] After the user authenticates in the browser, the OAuth callback writes accessToken, refreshToken, and expiresAt to ~/.claude/.credentials.json
|
||||||
|
- [ ] From Matrix or WhatsApp, storkit sends the user a clickable OAuth authorize link when credentials are missing or fully expired
|
||||||
|
- [ ] After successful login, the user can immediately start chatting without restarting storkit
|
||||||
|
- [ ] If the OAuth callback fails or the user cancels, a clear error is shown
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,195 @@
|
|||||||
|
---
|
||||||
|
name: "Fly.io Machines for multi-tenant storkit SaaS — docs, security & pricing"
|
||||||
|
retry_count: 2
|
||||||
|
blocked: true
|
||||||
|
---
|
||||||
|
|
||||||
|
# Spike 407: Fly.io Machines for multi-tenant storkit SaaS — docs, security & pricing
|
||||||
|
|
||||||
|
## Question
|
||||||
|
|
||||||
|
What do Fly.io's published docs, security claims, and pricing say about using Machines as the isolation layer for a multi-tenant storkit SaaS? Is there anything that rules it out before we write code?
|
||||||
|
|
||||||
|
## Hypothesis
|
||||||
|
|
||||||
|
Fly.io Machines (Firecracker-based microVMs) are a viable isolation primitive for tenants running arbitrary shell commands, and the pricing model is workable at early SaaS scale.
|
||||||
|
|
||||||
|
## Timebox
|
||||||
|
|
||||||
|
2 hours
|
||||||
|
|
||||||
|
## Investigation Plan
|
||||||
|
|
||||||
|
- [x] Read Fly.io Machines API docs — what are the core primitives (machine lifecycle, networking, volumes, secrets)?
|
||||||
|
- [x] Research Fly.io's published isolation model — what security guarantees do they document for Firecracker microVMs? Summarise claims and explicitly flag what would require independent security review before production use.
|
||||||
|
- [x] Research cold start time — what do Fly.io docs and community benchmarks claim? Note that real numbers require a test account (covered in spike 408).
|
||||||
|
- [x] Research persistent volume support — can a volume be attached per-tenant? What are the size/count limits?
|
||||||
|
- [x] Research secret injection options — env vars, Fly Secrets API, volume mounts. What's the right approach for per-tenant `~/.claude/.credentials.json`?
|
||||||
|
- [x] Research machine count and org limits — any hard caps that would block SaaS growth?
|
||||||
|
- [x] Research pricing — always-on vs stop-on-idle machine costs at 10, 100, 1000 tenants. Include volume and egress costs.
|
||||||
|
- [x] Identify any documented showstoppers.
|
||||||
|
|
||||||
|
## Findings
|
||||||
|
|
||||||
|
### 1. Core API Primitives
|
||||||
|
|
||||||
|
Base URL: `https://api.machines.dev` (or `http://_api.internal:4280` from within 6PN).
|
||||||
|
Auth: `Authorization: Bearer <fly_api_token>`.
|
||||||
|
|
||||||
|
**Machine lifecycle** — full REST API:
|
||||||
|
- `POST /v1/apps/{app}/machines` — create (+ optionally start via `skip_launch: false`)
|
||||||
|
- `POST /v1/apps/{app}/machines/{id}/start` — start stopped machine (~10ms same-region)
|
||||||
|
- `POST /v1/apps/{app}/machines/{id}/stop` — stop (SIGINT/SIGKILL, retains disk)
|
||||||
|
- `POST /v1/apps/{app}/machines/{id}/suspend` — snapshot RAM to disk for fast resume
|
||||||
|
- `DELETE /v1/apps/{app}/machines/{id}` — destroy (irreversible)
|
||||||
|
- `GET /v1/apps/{app}/machines/{id}/wait?state=started` — synchronize on state transitions
|
||||||
|
|
||||||
|
Machine states: `created → started → stopped/suspended → destroyed`.
|
||||||
|
Leases (`POST .../lease`) provide exclusive mutation locks — useful for orchestration.
|
||||||
|
|
||||||
|
**Rate limits**: 1 req/s per action per machine/app ID (burst to 3). Matters for rapid tenant provisioning.
|
||||||
|
|
||||||
|
### 2. Isolation Model
|
||||||
|
|
||||||
|
Each Fly Machine is a **Firecracker microVM** — a separate Linux kernel, not a container. Defense in depth:
|
||||||
|
1. KVM hardware-enforced memory and CPU isolation
|
||||||
|
2. Minimal device model (5 virtual devices vs QEMU's hundreds)
|
||||||
|
3. Rust VMM implementation (no C memory-safety bugs in VMM)
|
||||||
|
4. `seccomp-bpf` limits Firecracker process to ~40 syscalls with argument filters
|
||||||
|
5. Jailer chroots + namespaces + drops privileges around the Firecracker process
|
||||||
|
|
||||||
|
From official docs: *"MicroVMs provide strong hardware-virtualization-based security and workload isolation, which allows us to safely run applications from different customers on shared hardware."* Full VM isolation prevents kernel sharing between apps.
|
||||||
|
|
||||||
|
Tenants have full root inside their VM by design — the kernel boundary contains blast radius.
|
||||||
|
|
||||||
|
**Claims requiring independent verification before production use:**
|
||||||
|
- Whether SMT/hyperthreading is disabled on hosts (directly relevant to Spectre/MDS side-channel attacks — Firecracker's own docs recommend disabling SMT for strict multi-tenancy, but Fly.io does not publicly document this)
|
||||||
|
- CPU dedication is explicitly described as "best-effort", not a hard guarantee
|
||||||
|
- Pentest scope/dates/findings for three named firms (Atredis Partners, Doyensec, Tetrel) are not published
|
||||||
|
- Whether the SOC 2 Type II report scope covers the Firecracker isolation layer specifically
|
||||||
|
|
||||||
|
**Compliance**: SOC 2 Type II certified (report available on request), ISO 27001 datacenters (Equinix), HIPAA BAA available, GDPR DPA available.
|
||||||
|
|
||||||
|
### 3. Network Isolation
|
||||||
|
|
||||||
|
Each machine gets a private IPv6 (6PN) address. Key isolation controls:
|
||||||
|
- Cross-organization: Fly.io platform blocks all cross-org traffic at the platform level — strong boundary
|
||||||
|
- Intra-organization: **open by default** — any machine in the same org can reach any other
|
||||||
|
|
||||||
|
For multi-tenant SaaS, this means tenant machines in the same Fly.io org are NOT network-isolated from each other unless you use **Custom Private Networks (6PNs)**:
|
||||||
|
- `POST /v1/apps` with a `network` field assigns that app to an isolated 6PN
|
||||||
|
- Apps on different 6PNs cannot reach each other via private networking (only via public IPs)
|
||||||
|
- **Assignment is permanent** — cannot be changed after app creation; plan upfront
|
||||||
|
|
||||||
|
Stable machine addressing: `<machine_id>.vm.<appname>.internal` (6PN addresses change on migration).
|
||||||
|
|
||||||
|
### 4. Cold Start Times
|
||||||
|
|
||||||
|
| Scenario | Documented Latency |
|
||||||
|
|---|---|
|
||||||
|
| Cold boot (create + start, same region) | ~300 ms |
|
||||||
|
| Start existing stopped machine (same region) | ~10 ms |
|
||||||
|
| Start stopped machine (cross-region) | ~150 ms |
|
||||||
|
| Resume from suspend (same region) | Sub-100ms (implied) |
|
||||||
|
|
||||||
|
Community-observed: 400–600ms end-to-end (including app init) for stopped machine cold starts.
|
||||||
|
FLAME workloads report 3–8s in some restart-race conditions.
|
||||||
|
|
||||||
|
Real latency numbers with our actual image size require a test account — covered by spike 408.
|
||||||
|
|
||||||
|
### 5. Persistent Volume Support
|
||||||
|
|
||||||
|
- Volumes are created via `POST /v1/apps/{app}/volumes` with `size_gb` (default 3 GB), region, encryption flag
|
||||||
|
- Attached to machine via `config.mounts[].volume` at create/update time
|
||||||
|
- **1:1 constraint**: one volume per machine, one machine per volume, same region required
|
||||||
|
- Volumes persist across machine stop/start/suspend/destroy — they are a separate resource
|
||||||
|
- Can extend volume online (`PUT .../volumes/{id}/extend`)
|
||||||
|
- Volume snapshots available (billed at $0.08/GB/month as of Jan 2026)
|
||||||
|
- No documented per-org volume count cap (separate from machine cap)
|
||||||
|
|
||||||
|
For per-tenant `~/.claude/` home directories, attach one volume per tenant machine — straightforward.
|
||||||
|
|
||||||
|
### 6. Secret Injection
|
||||||
|
|
||||||
|
Four methods, in order of recommendation for sensitive credentials:
|
||||||
|
|
||||||
|
1. **Fly Secrets** (`fly secrets set KEY=value`) — encrypted at rest, injected as env vars at boot to all machines in the app. **Secrets are per-app, not per-machine** — all machines in an app share the same secret set. For per-tenant isolated secrets, each tenant needs their own app (or use method 3).
|
||||||
|
|
||||||
|
2. **`config.files` with `secret_name`** — writes a named secret to a file path inside the machine at start time:
|
||||||
|
```json
|
||||||
|
{"guest_path": "/root/.claude/.credentials.json", "secret_name": "TENANT_CREDENTIALS"}
|
||||||
|
```
|
||||||
|
This is the right approach for per-tenant `~/.claude/.credentials.json` if tenants share an app — pair with `ignore_app_secrets: true` and per-process secret scoping.
|
||||||
|
|
||||||
|
3. **`config.env`** — plain env vars in machine config, not encrypted at rest. Non-sensitive config only.
|
||||||
|
|
||||||
|
4. **`config.processes[].secrets`** — inject named secrets only to specific process groups; `ignore_app_secrets: true` prevents inheritance of app-level secrets.
|
||||||
|
|
||||||
|
**Recommended architecture**: One app per tenant (isolated 6PN + isolated secrets) is the cleanest security model. Secrets stored per app via Fly Secrets, credentials file written via `config.files` at boot.
|
||||||
|
|
||||||
|
### 7. Machine Count and Org Limits
|
||||||
|
|
||||||
|
| Limit | Default | Hard Cap |
|
||||||
|
|---|---|---|
|
||||||
|
| Machines per org (all states) | 50 | None architectural |
|
||||||
|
|
||||||
|
- The 50-machine default is a **fail-safe**, not an architectural limit. Fly.io runs customers with 100,000+ machines.
|
||||||
|
- To raise: email `billing@fly.io` with requirements.
|
||||||
|
- **This limit will be hit immediately in any real multi-tenant deployment** — must budget for an early limit-raise request before launching.
|
||||||
|
- API rate limit of 1 req/s per action also needs consideration for bulk tenant provisioning scripts.
|
||||||
|
|
||||||
|
### 8. Pricing (as of March 2026)
|
||||||
|
|
||||||
|
**Compute (per second, billed only while running):**
|
||||||
|
|
||||||
|
| Preset | Per Month always-on |
|
||||||
|
|---|---|
|
||||||
|
| shared-cpu-1x (256 MB) | $2.05 |
|
||||||
|
| shared-cpu-2x (512 MB) | $4.10 |
|
||||||
|
| performance-1x (2 GB) | $32.64 |
|
||||||
|
|
||||||
|
**Storage**: $0.15/GB/month (provisioned, regardless of machine state)
|
||||||
|
**Egress**: $0.02/GB (North America/Europe), $0.04/GB (APAC/SA), $0.12/GB (Africa/India)
|
||||||
|
**Dedicated IPv4**: $2.00/month per app (shared IPv6 is free)
|
||||||
|
|
||||||
|
**No free tier** for new orgs (eliminated 2024). No minimum spend, no base fee.
|
||||||
|
|
||||||
|
**Monthly cost estimates** (1x shared-cpu-1x, 1 GB volume, 1 GB egress/tenant, US East):
|
||||||
|
|
||||||
|
| Scenario | Per Tenant | 10 Tenants | 100 Tenants | 1,000 Tenants |
|
||||||
|
|---|---|---|---|---|
|
||||||
|
| Always-on (730h/month) | $2.22 | $22 | $222 | $2,220 |
|
||||||
|
| Autostop, 8h/day active | $0.92 | $9 | $92 | $920 |
|
||||||
|
| Autostop, 2h/day active | $0.53 | $5 | $53 | $530 |
|
||||||
|
|
||||||
|
At scale, volume storage becomes the dominant cost when machines are idle. At 1,000 tenants autostopped, storage is ~$150/month vs compute of $170–$370/month.
|
||||||
|
|
||||||
|
### 9. Showstoppers
|
||||||
|
|
||||||
|
**None identified** that rule it out. The following require action before launch:
|
||||||
|
|
||||||
|
| Risk | Severity | Mitigation |
|
||||||
|
|---|---|---|
|
||||||
|
| Default 50-machine org cap | High (blocks launch) | Email billing@fly.io early; no architectural cap |
|
||||||
|
| SMT/hyperthreading not documented | Medium (security) | Request confirmation from Fly.io support before production; mitigated by VM-level isolation |
|
||||||
|
| Intra-org network open by default | Medium (security) | Use one app per tenant with custom 6PNs |
|
||||||
|
| Secrets are per-app not per-machine | Low | Use one app per tenant or `config.files` with `secret_name` |
|
||||||
|
| Volume and machine must be same region | Low (ops) | Enforce region consistency in provisioning code |
|
||||||
|
| API rate limit 1 req/s per machine | Low | Throttle bulk provisioning loops |
|
||||||
|
|
||||||
|
## Recommendation
|
||||||
|
|
||||||
|
**Proceed.** Fly.io Machines are a viable isolation layer for multi-tenant storkit SaaS.
|
||||||
|
|
||||||
|
**Architecture to validate in spike 408:**
|
||||||
|
- One Fly.io app per tenant (provides 6PN network isolation + isolated secrets)
|
||||||
|
- One Firecracker microVM per tenant app (shared-cpu-1x 256 MB baseline; adjust per observed usage)
|
||||||
|
- One persistent volume per tenant (1 GB baseline for `~/.claude/`, repos, storkit state)
|
||||||
|
- Autostop/autoresume enabled — 70–92% compute cost reduction vs always-on for typical dev tool usage
|
||||||
|
- Tenant credentials injected via `config.files` + Fly Secrets at machine start
|
||||||
|
|
||||||
|
**Pricing verdict**: Workable at early SaaS scale. At 100 tenants with autostop (8h/day), costs ~$92/month; at 1,000 tenants ~$920/month. Margins are viable if per-tenant pricing is $5–$20/month.
|
||||||
|
|
||||||
|
**Before production**: Confirm with Fly.io support whether SMT is disabled on worker hosts. Request org machine limit raised to 200–500 during private beta.
|
||||||
|
|
||||||
|
**Spike 408 scope**: Validate cold start latency, autostop resume behavior, and volume persistence with a real test machine running the storkit container image.
|
||||||
@@ -0,0 +1,69 @@
|
|||||||
|
---
|
||||||
|
name: "Split whatsapp.rs into focused modules"
|
||||||
|
retry_count: 2
|
||||||
|
blocked: true
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 409: Split whatsapp.rs into focused modules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
whatsapp.rs is 2000+ lines making it expensive for agents to navigate and edit. Split into focused modules under chat/transport/whatsapp/.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [x] mod.rs contains webhook handlers, WebhookContext, and re-exports
|
||||||
|
- [x] meta.rs contains WhatsAppTransport, ChatTransport impl, and Graph API structs/calls
|
||||||
|
- [x] twilio.rs contains TwilioWhatsAppTransport, ChatTransport impl, and Twilio structs/calls
|
||||||
|
- [x] history.rs contains WhatsAppConversationHistory, load/save_whatsapp_history, and MessagingWindowTracker
|
||||||
|
- [x] commands.rs contains handle_incoming_message, handle_llm_message, and all async command dispatch
|
||||||
|
- [x] format.rs contains markdown_to_whatsapp and chunk_for_whatsapp
|
||||||
|
- [x] All existing tests pass
|
||||||
|
- [x] No behaviour changes — pure structural refactor
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Test Results
|
||||||
|
|
||||||
|
<!-- storkit-test-results: {"unit":[{"name":"whatsapp::format::tests::chunk_short_message_returns_single_chunk","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_exactly_at_limit_returns_single_chunk","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_splits_on_paragraph_boundary","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_splits_on_line_boundary_when_no_paragraph_break","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_hard_splits_continuous_text","status":"pass","details":null},{"name":"whatsapp::format::tests::chunk_empty_string_returns_single_empty","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_headers_to_bold","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_bold","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_bold_italic","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_strikethrough","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_converts_links","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_removes_horizontal_rules","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_preserves_inline_code","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_preserves_code_blocks","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_mixed_message","status":"pass","details":null},{"name":"whatsapp::format::tests::md_to_wa_passthrough_plain_text","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_basics","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_expiry","status":"pass","details":null},{"name":"whatsapp::history::tests::messaging_window_tracker_reset","status":"pass","details":null},{"name":"whatsapp::history::tests::load_empty_histo
ry","status":"pass","details":null},{"name":"whatsapp::history::tests::save_and_load_history","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_valid","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_missing_body","status":"pass","details":null},{"name":"whatsapp::twilio::tests::parse_twilio_form_missing_from","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_help","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_status","status":"pass","details":null},{"name":"whatsapp::commands::tests::parse_command_unknown","status":"pass","details":null},{"name":"whatsapp::mod::tests::webhook_context_basics","status":"pass","details":null}],"integration":[]} -->
|
||||||
|
|
||||||
|
### Unit Tests (28 passed, 0 failed)
|
||||||
|
|
||||||
|
- ✅ whatsapp::format::tests::chunk_short_message_returns_single_chunk
|
||||||
|
- ✅ whatsapp::format::tests::chunk_exactly_at_limit_returns_single_chunk
|
||||||
|
- ✅ whatsapp::format::tests::chunk_splits_on_paragraph_boundary
|
||||||
|
- ✅ whatsapp::format::tests::chunk_splits_on_line_boundary_when_no_paragraph_break
|
||||||
|
- ✅ whatsapp::format::tests::chunk_hard_splits_continuous_text
|
||||||
|
- ✅ whatsapp::format::tests::chunk_empty_string_returns_single_empty
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_converts_headers_to_bold
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_converts_bold
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_converts_bold_italic
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_converts_strikethrough
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_converts_links
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_removes_horizontal_rules
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_preserves_inline_code
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_preserves_code_blocks
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_mixed_message
|
||||||
|
- ✅ whatsapp::format::tests::md_to_wa_passthrough_plain_text
|
||||||
|
- ✅ whatsapp::history::tests::messaging_window_tracker_basics
|
||||||
|
- ✅ whatsapp::history::tests::messaging_window_tracker_expiry
|
||||||
|
- ✅ whatsapp::history::tests::messaging_window_tracker_reset
|
||||||
|
- ✅ whatsapp::history::tests::load_empty_history
|
||||||
|
- ✅ whatsapp::history::tests::save_and_load_history
|
||||||
|
- ✅ whatsapp::twilio::tests::parse_twilio_form_valid
|
||||||
|
- ✅ whatsapp::twilio::tests::parse_twilio_form_missing_body
|
||||||
|
- ✅ whatsapp::twilio::tests::parse_twilio_form_missing_from
|
||||||
|
- ✅ whatsapp::commands::tests::parse_command_help
|
||||||
|
- ✅ whatsapp::commands::tests::parse_command_status
|
||||||
|
- ✅ whatsapp::commands::tests::parse_command_unknown
|
||||||
|
- ✅ whatsapp::mod::tests::webhook_context_basics
|
||||||
|
|
||||||
|
### Integration Tests (0 passed, 0 failed)
|
||||||
|
|
||||||
|
*No integration tests recorded.*
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
name: "loc bot command — top files by line count"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 410: loc bot command — top files by line count
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a developer, I want to send `loc` to the bot and see the top files by line count, so I can spot files that are getting too large before they become a problem for agents.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] loc command is registered in chat/commands/mod.rs and appears in help output
|
||||||
|
- [ ] `loc` returns the top 10 source files by line count (excluding generated files, node_modules, target/, .storkit/worktrees/)
|
||||||
|
- [ ] `loc 5` returns the top 5 files
|
||||||
|
- [ ] `loc 20` returns the top 20 files
|
||||||
|
- [ ] Output includes file path, line count, and rank
|
||||||
|
- [ ] Command works from all transports (Matrix, WhatsApp, Slack)
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
name: "Split slack.rs into focused modules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 413: Split slack.rs into focused modules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/chat/transport/slack.rs (1902 lines) into a slack/ directory with focused modules, mirroring the whatsapp/ module structure from story 409.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] slack.rs is replaced by a slack/ directory with mod.rs re-exporting all public types
|
||||||
|
- [ ] meta.rs contains SlackTransport struct, ChatTransport trait impl, and Slack API request/response types
|
||||||
|
- [ ] commands.rs contains incoming message dispatch, permission logic, and slash command handling
|
||||||
|
- [ ] format.rs contains markdown_to_slack() conversion
|
||||||
|
- [ ] history.rs contains load_slack_history(), save_slack_history(), and SlackHistoryDump
|
||||||
|
- [ ] verify.rs contains verify_slack_signature(), sha256(), and constant_time_eq()
|
||||||
|
- [ ] mod.rs contains Slack event types, webhook handlers, and SlackWebhookContext
|
||||||
|
- [ ] All existing tests are preserved and pass in their respective modules
|
||||||
|
- [ ] No public API changes — all existing imports from other crates continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
name: "loc command filters out known-huge files"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Story 414: loc command filters out known-huge files
|
||||||
|
|
||||||
|
## User Story
|
||||||
|
|
||||||
|
As a ..., I want ..., so that ...
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] loc command excludes lockfiles and generated files (e.g. package-lock.json, Cargo.lock, frontend/package-lock.json) from results
|
||||||
|
- [ ] Exclusion list is defined as a constant, easy to extend
|
||||||
|
- [ ] Excluded files do not count toward line totals
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,29 @@
|
|||||||
|
---
|
||||||
|
name: "Split agents/pool/mod.rs into submodules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 415: Split agents/pool/mod.rs into submodules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/agents/pool/mod.rs (2407 lines) into focused submodules within the pool/ directory.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] types.rs contains StoryAgent, PendingGuard, AgentInfo, composite_key, and related helper structs
|
||||||
|
- [ ] lifecycle.rs contains start_agent, stop_agent, wait_for_agent and their unit tests
|
||||||
|
- [ ] worktree.rs contains create_worktree, get_project_root, find_active_story_stage and their unit tests
|
||||||
|
- [ ] query.rs contains list_agents, available_agents_for_stage, get_log_info, subscribe, drain_events and their unit tests
|
||||||
|
- [ ] process.rs contains kill_all_children, kill_child_for_key, ChildKiller registry methods and their unit tests
|
||||||
|
- [ ] test_helpers.rs contains inject_test_agent and its variants (4 methods)
|
||||||
|
- [ ] mod.rs contains AgentPool struct, new(), and re-exports all public types
|
||||||
|
- [ ] Unit tests live in their respective module files, not in a separate tests module
|
||||||
|
- [ ] No public API changes — all existing imports continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -0,0 +1,28 @@
|
|||||||
|
---
|
||||||
|
name: "Split io/fs.rs into submodules"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Refactor 416: Split io/fs.rs into submodules
|
||||||
|
|
||||||
|
## Current State
|
||||||
|
|
||||||
|
- TBD
|
||||||
|
|
||||||
|
## Desired State
|
||||||
|
|
||||||
|
Refactor the monolithic server/src/io/fs.rs (2007 lines) into focused submodules within an fs/ directory.
|
||||||
|
|
||||||
|
## Acceptance Criteria
|
||||||
|
|
||||||
|
- [ ] scaffold.rs contains scaffold_story_kit, write_file_if_missing, write_script_if_missing, write_story_kit_gitignore, append_root_gitignore_entries, detect_components_toml, detect_script_test, generate_project_toml and their unit tests
|
||||||
|
- [ ] project.rs contains open_project, close_project, get_current_project, get_known_projects, forget_known_project, ensure_project_root_with_story_kit, validate_project_path and their unit tests
|
||||||
|
- [ ] files.rs contains read_file, write_file, list_directory, list_project_files, FileEntry, create_directory_absolute and their unit tests
|
||||||
|
- [ ] paths.rs contains resolve_cli_path, resolve_path, resolve_path_impl, find_story_kit_root, get_home_directory and their unit tests
|
||||||
|
- [ ] preferences.rs contains get_model_preference, set_model_preference and their unit tests
|
||||||
|
- [ ] mod.rs re-exports all public types and functions
|
||||||
|
- [ ] Unit tests live in their respective module files
|
||||||
|
- [ ] No public API changes — all existing imports continue to work
|
||||||
|
|
||||||
|
## Out of Scope
|
||||||
|
|
||||||
|
- TBD
|
||||||
@@ -1 +0,0 @@
|
|||||||
3001
|
|
||||||
Generated
+55
-54
@@ -209,9 +209,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "aws-lc-sys"
|
name = "aws-lc-sys"
|
||||||
version = "0.39.0"
|
version = "0.39.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
"cmake",
|
"cmake",
|
||||||
@@ -349,9 +349,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cc"
|
name = "cc"
|
||||||
version = "1.2.57"
|
version = "1.2.58"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"find-msvc-tools",
|
"find-msvc-tools",
|
||||||
"jobserver",
|
"jobserver",
|
||||||
@@ -434,9 +434,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cmake"
|
name = "cmake"
|
||||||
version = "0.1.57"
|
version = "0.1.58"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cc",
|
"cc",
|
||||||
]
|
]
|
||||||
@@ -1862,10 +1862,12 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "js-sys"
|
name = "js-sys"
|
||||||
version = "0.3.91"
|
version = "0.3.92"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
checksum = "cc4c90f45aa2e6eacbe8645f77fdea542ac97a494bcd117a67df9ff4d611f995"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"futures-util",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
]
|
]
|
||||||
@@ -1954,9 +1956,9 @@ checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "libredox"
|
name = "libredox"
|
||||||
version = "0.1.14"
|
version = "0.1.15"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
|
checksum = "7ddbf48fd451246b1f8c2610bd3b4ac0cc6e149d89832867093ab69a17194f08"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags 2.11.0",
|
"bitflags 2.11.0",
|
||||||
"libc",
|
"libc",
|
||||||
@@ -2428,9 +2430,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "mio"
|
name = "mio"
|
||||||
version = "1.1.1"
|
version = "1.2.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"libc",
|
"libc",
|
||||||
"log",
|
"log",
|
||||||
@@ -2559,9 +2561,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "num-conv"
|
name = "num-conv"
|
||||||
version = "0.2.0"
|
version = "0.2.1"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "num-traits"
|
name = "num-traits"
|
||||||
@@ -3274,6 +3276,7 @@ dependencies = [
|
|||||||
"rustls-platform-verifier",
|
"rustls-platform-verifier",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"serde_urlencoded",
|
||||||
"sync_wrapper",
|
"sync_wrapper",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-rustls",
|
"tokio-rustls",
|
||||||
@@ -3555,9 +3558,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustc-hash"
|
name = "rustc-hash"
|
||||||
version = "2.1.1"
|
version = "2.1.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustc_version"
|
name = "rustc_version"
|
||||||
@@ -3823,9 +3826,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "serde_spanned"
|
name = "serde_spanned"
|
||||||
version = "1.0.4"
|
version = "1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
|
checksum = "876ac351060d4f882bb1032b6369eb0aef79ad9df1ea8bc404874d8cc3d0cd98"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde_core",
|
"serde_core",
|
||||||
]
|
]
|
||||||
@@ -3940,9 +3943,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "simd-adler32"
|
name = "simd-adler32"
|
||||||
version = "0.3.8"
|
version = "0.3.9"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
|
checksum = "703d5c7ef118737c72f1af64ad2f6f8c5e1921f818cdcb97b8fe6fc69bf66214"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "similar"
|
name = "similar"
|
||||||
@@ -4016,7 +4019,7 @@ checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "storkit"
|
name = "storkit"
|
||||||
version = "0.5.0"
|
version = "0.8.0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"async-stream",
|
"async-stream",
|
||||||
"async-trait",
|
"async-trait",
|
||||||
@@ -4036,17 +4039,19 @@ dependencies = [
|
|||||||
"poem-openapi",
|
"poem-openapi",
|
||||||
"portable-pty",
|
"portable-pty",
|
||||||
"pulldown-cmark",
|
"pulldown-cmark",
|
||||||
|
"regex",
|
||||||
"reqwest 0.13.2",
|
"reqwest 0.13.2",
|
||||||
"rust-embed",
|
"rust-embed",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
"serde_urlencoded",
|
"serde_urlencoded",
|
||||||
"serde_yaml",
|
"serde_yaml",
|
||||||
|
"sha2",
|
||||||
"strip-ansi-escapes",
|
"strip-ansi-escapes",
|
||||||
"tempfile",
|
"tempfile",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tokio-tungstenite 0.29.0",
|
"tokio-tungstenite 0.29.0",
|
||||||
"toml 1.0.7+spec-1.1.0",
|
"toml 1.1.0+spec-1.1.0",
|
||||||
"uuid",
|
"uuid",
|
||||||
"wait-timeout",
|
"wait-timeout",
|
||||||
"walkdir",
|
"walkdir",
|
||||||
@@ -4393,14 +4398,14 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml"
|
name = "toml"
|
||||||
version = "1.0.7+spec-1.1.0"
|
version = "1.1.0+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96"
|
checksum = "f8195ca05e4eb728f4ba94f3e3291661320af739c4e43779cbdfae82ab239fcc"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"serde_core",
|
"serde_core",
|
||||||
"serde_spanned",
|
"serde_spanned",
|
||||||
"toml_datetime 1.0.1+spec-1.1.0",
|
"toml_datetime 1.1.0+spec-1.1.0",
|
||||||
"toml_parser",
|
"toml_parser",
|
||||||
"toml_writer",
|
"toml_writer",
|
||||||
"winnow 1.0.0",
|
"winnow 1.0.0",
|
||||||
@@ -4417,39 +4422,39 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_datetime"
|
name = "toml_datetime"
|
||||||
version = "1.0.1+spec-1.1.0"
|
version = "1.1.0+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
|
checksum = "97251a7c317e03ad83774a8752a7e81fb6067740609f75ea2b585b569a59198f"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"serde_core",
|
"serde_core",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_edit"
|
name = "toml_edit"
|
||||||
version = "0.25.5+spec-1.1.0"
|
version = "0.25.8+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "8ca1a40644a28bce036923f6a431df0b34236949d111cc07cb6dca830c9ef2e1"
|
checksum = "16bff38f1d86c47f9ff0647e6838d7bb362522bdf44006c7068c2b1e606f1f3c"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"toml_datetime 1.0.1+spec-1.1.0",
|
"toml_datetime 1.1.0+spec-1.1.0",
|
||||||
"toml_parser",
|
"toml_parser",
|
||||||
"winnow 1.0.0",
|
"winnow 1.0.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_parser"
|
name = "toml_parser"
|
||||||
version = "1.0.10+spec-1.1.0"
|
version = "1.1.0+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
|
checksum = "2334f11ee363607eb04df9b8fc8a13ca1715a72ba8662a26ac285c98aabb4011"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"winnow 1.0.0",
|
"winnow 1.0.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "toml_writer"
|
name = "toml_writer"
|
||||||
version = "1.0.7+spec-1.1.0"
|
version = "1.1.0+spec-1.1.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
|
checksum = "d282ade6016312faf3e41e57ebbba0c073e4056dab1232ab1cb624199648f8ed"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tower"
|
name = "tower"
|
||||||
@@ -4660,9 +4665,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-segmentation"
|
name = "unicode-segmentation"
|
||||||
version = "1.12.0"
|
version = "1.13.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
|
checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "unicode-xid"
|
name = "unicode-xid"
|
||||||
@@ -4725,9 +4730,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "uuid"
|
name = "uuid"
|
||||||
version = "1.22.0"
|
version = "1.23.0"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"getrandom 0.4.2",
|
"getrandom 0.4.2",
|
||||||
"js-sys",
|
"js-sys",
|
||||||
@@ -4846,9 +4851,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen"
|
name = "wasm-bindgen"
|
||||||
version = "0.2.114"
|
version = "0.2.115"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
checksum = "6523d69017b7633e396a89c5efab138161ed5aafcbc8d3e5c5a42ae38f50495a"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
@@ -4859,23 +4864,19 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-futures"
|
name = "wasm-bindgen-futures"
|
||||||
version = "0.4.64"
|
version = "0.4.65"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
checksum = "2d1faf851e778dfa54db7cd438b70758eba9755cb47403f3496edd7c8fc212f0"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
|
||||||
"futures-util",
|
|
||||||
"js-sys",
|
"js-sys",
|
||||||
"once_cell",
|
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
"web-sys",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro"
|
name = "wasm-bindgen-macro"
|
||||||
version = "0.2.114"
|
version = "0.2.115"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
checksum = "4e3a6c758eb2f701ed3d052ff5737f5bfe6614326ea7f3bbac7156192dc32e67"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"quote",
|
"quote",
|
||||||
"wasm-bindgen-macro-support",
|
"wasm-bindgen-macro-support",
|
||||||
@@ -4883,9 +4884,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-macro-support"
|
name = "wasm-bindgen-macro-support"
|
||||||
version = "0.2.114"
|
version = "0.2.115"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
checksum = "921de2737904886b52bcbb237301552d05969a6f9c40d261eb0533c8b055fedf"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bumpalo",
|
"bumpalo",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
@@ -4896,9 +4897,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "wasm-bindgen-shared"
|
name = "wasm-bindgen-shared"
|
||||||
version = "0.2.114"
|
version = "0.2.115"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
checksum = "a93e946af942b58934c604527337bad9ae33ba1d5c6900bbb41c2c07c2364a93"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"unicode-ident",
|
"unicode-ident",
|
||||||
]
|
]
|
||||||
@@ -4983,9 +4984,9 @@ dependencies = [
|
|||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "web-sys"
|
name = "web-sys"
|
||||||
version = "0.3.91"
|
version = "0.3.92"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
checksum = "84cde8507f4d7cfcb1185b8cb5890c494ffea65edbe1ba82cfd63661c805ed94"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"js-sys",
|
"js-sys",
|
||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
|
|||||||
+4
-2
@@ -21,11 +21,12 @@ rust-embed = "8"
|
|||||||
serde = { version = "1", features = ["derive"] }
|
serde = { version = "1", features = ["derive"] }
|
||||||
serde_json = "1"
|
serde_json = "1"
|
||||||
serde_urlencoded = "0.7"
|
serde_urlencoded = "0.7"
|
||||||
|
sha2 = "0.10"
|
||||||
serde_yaml = "0.9"
|
serde_yaml = "0.9"
|
||||||
strip-ansi-escapes = "0.2"
|
strip-ansi-escapes = "0.2"
|
||||||
tempfile = "3"
|
tempfile = "3"
|
||||||
tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] }
|
tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] }
|
||||||
toml = "1.0.7"
|
toml = "1.1.0"
|
||||||
uuid = { version = "1.22.0", features = ["v4", "serde"] }
|
uuid = { version = "1.22.0", features = ["v4", "serde"] }
|
||||||
tokio-tungstenite = "0.29.0"
|
tokio-tungstenite = "0.29.0"
|
||||||
walkdir = "2.5.0"
|
walkdir = "2.5.0"
|
||||||
@@ -35,6 +36,7 @@ matrix-sdk = { version = "0.16.0", default-features = false, features = [
|
|||||||
"sqlite",
|
"sqlite",
|
||||||
"e2e-encryption",
|
"e2e-encryption",
|
||||||
] }
|
] }
|
||||||
pulldown-cmark = { version = "0.13.1", default-features = false, features = [
|
pulldown-cmark = { version = "0.13.3", default-features = false, features = [
|
||||||
"html",
|
"html",
|
||||||
] }
|
] }
|
||||||
|
regex = "1"
|
||||||
|
|||||||
@@ -0,0 +1,674 @@
|
|||||||
|
GNU GENERAL PUBLIC LICENSE
|
||||||
|
Version 3, 29 June 2007
|
||||||
|
|
||||||
|
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The GNU General Public License is a free, copyleft license for
|
||||||
|
software and other kinds of works.
|
||||||
|
|
||||||
|
The licenses for most software and other practical works are designed
|
||||||
|
to take away your freedom to share and change the works. By contrast,
|
||||||
|
the GNU General Public License is intended to guarantee your freedom to
|
||||||
|
share and change all versions of a program--to make sure it remains free
|
||||||
|
software for all its users. We, the Free Software Foundation, use the
|
||||||
|
GNU General Public License for most of our software; it applies also to
|
||||||
|
any other work released this way by its authors. You can apply it to
|
||||||
|
your programs, too.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom, not
|
||||||
|
price. Our General Public Licenses are designed to make sure that you
|
||||||
|
have the freedom to distribute copies of free software (and charge for
|
||||||
|
them if you wish), that you receive source code or can get it if you
|
||||||
|
want it, that you can change the software or use pieces of it in new
|
||||||
|
free programs, and that you know you can do these things.
|
||||||
|
|
||||||
|
To protect your rights, we need to prevent others from denying you
|
||||||
|
these rights or asking you to surrender the rights. Therefore, you have
|
||||||
|
certain responsibilities if you distribute copies of the software, or if
|
||||||
|
you modify it: responsibilities to respect the freedom of others.
|
||||||
|
|
||||||
|
For example, if you distribute copies of such a program, whether
|
||||||
|
gratis or for a fee, you must pass on to the recipients the same
|
||||||
|
freedoms that you received. You must make sure that they, too, receive
|
||||||
|
or can get the source code. And you must show them these terms so they
|
||||||
|
know their rights.
|
||||||
|
|
||||||
|
Developers that use the GNU GPL protect your rights with two steps:
|
||||||
|
(1) assert copyright on the software, and (2) offer you this License
|
||||||
|
giving you legal permission to copy, distribute and/or modify it.
|
||||||
|
|
||||||
|
For the developers' and authors' protection, the GPL clearly explains
|
||||||
|
that there is no warranty for this free software. For both users' and
|
||||||
|
authors' sake, the GPL requires that modified versions be marked as
|
||||||
|
changed, so that their problems will not be attributed erroneously to
|
||||||
|
authors of previous versions.
|
||||||
|
|
||||||
|
Some devices are designed to deny users access to install or run
|
||||||
|
modified versions of the software inside them, although the manufacturer
|
||||||
|
can do so. This is fundamentally incompatible with the aim of
|
||||||
|
protecting users' freedom to change the software. The systematic
|
||||||
|
pattern of such abuse occurs in the area of products for individuals to
|
||||||
|
use, which is precisely where it is most unacceptable. Therefore, we
|
||||||
|
have designed this version of the GPL to prohibit the practice for those
|
||||||
|
products. If such problems arise substantially in other domains, we
|
||||||
|
stand ready to extend this provision to those domains in future versions
|
||||||
|
of the GPL, as needed to protect the freedom of users.
|
||||||
|
|
||||||
|
Finally, every program is threatened constantly by software patents.
|
||||||
|
States should not allow patents to restrict development and use of
|
||||||
|
software on general-purpose computers, but in those that do, we wish to
|
||||||
|
avoid the special danger that patents applied to a free program could
|
||||||
|
make it effectively proprietary. To prevent this, the GPL assures that
|
||||||
|
patents cannot be used to render the program non-free.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and
|
||||||
|
modification follow.
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
0. Definitions.
|
||||||
|
|
||||||
|
"This License" refers to version 3 of the GNU General Public License.
|
||||||
|
|
||||||
|
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||||
|
works, such as semiconductor masks.
|
||||||
|
|
||||||
|
"The Program" refers to any copyrightable work licensed under this
|
||||||
|
License. Each licensee is addressed as "you". "Licensees" and
|
||||||
|
"recipients" may be individuals or organizations.
|
||||||
|
|
||||||
|
To "modify" a work means to copy from or adapt all or part of the work
|
||||||
|
in a fashion requiring copyright permission, other than the making of an
|
||||||
|
exact copy. The resulting work is called a "modified version" of the
|
||||||
|
earlier work or a work "based on" the earlier work.
|
||||||
|
|
||||||
|
A "covered work" means either the unmodified Program or a work based
|
||||||
|
on the Program.
|
||||||
|
|
||||||
|
To "propagate" a work means to do anything with it that, without
|
||||||
|
permission, would make you directly or secondarily liable for
|
||||||
|
infringement under applicable copyright law, except executing it on a
|
||||||
|
computer or modifying a private copy. Propagation includes copying,
|
||||||
|
distribution (with or without modification), making available to the
|
||||||
|
public, and in some countries other activities as well.
|
||||||
|
|
||||||
|
To "convey" a work means any kind of propagation that enables other
|
||||||
|
parties to make or receive copies. Mere interaction with a user through
|
||||||
|
a computer network, with no transfer of a copy, is not conveying.
|
||||||
|
|
||||||
|
An interactive user interface displays "Appropriate Legal Notices"
|
||||||
|
to the extent that it includes a convenient and prominently visible
|
||||||
|
feature that (1) displays an appropriate copyright notice, and (2)
|
||||||
|
tells the user that there is no warranty for the work (except to the
|
||||||
|
extent that warranties are provided), that licensees may convey the
|
||||||
|
work under this License, and how to view a copy of this License. If
|
||||||
|
the interface presents a list of user commands or options, such as a
|
||||||
|
menu, a prominent item in the list meets this criterion.
|
||||||
|
|
||||||
|
1. Source Code.
|
||||||
|
|
||||||
|
The "source code" for a work means the preferred form of the work
|
||||||
|
for making modifications to it. "Object code" means any non-source
|
||||||
|
form of a work.
|
||||||
|
|
||||||
|
A "Standard Interface" means an interface that either is an official
|
||||||
|
standard defined by a recognized standards body, or, in the case of
|
||||||
|
interfaces specified for a particular programming language, one that
|
||||||
|
is widely used among developers working in that language.
|
||||||
|
|
||||||
|
The "System Libraries" of an executable work include anything, other
|
||||||
|
than the work as a whole, that (a) is included in the normal form of
|
||||||
|
packaging a Major Component, but which is not part of that Major
|
||||||
|
Component, and (b) serves only to enable use of the work with that
|
||||||
|
Major Component, or to implement a Standard Interface for which an
|
||||||
|
implementation is available to the public in source code form. A
|
||||||
|
"Major Component", in this context, means a major essential component
|
||||||
|
(kernel, window system, and so on) of the specific operating system
|
||||||
|
(if any) on which the executable work runs, or a compiler used to
|
||||||
|
produce the work, or an object code interpreter used to run it.
|
||||||
|
|
||||||
|
The "Corresponding Source" for a work in object code form means all
|
||||||
|
the source code needed to generate, install, and (for an executable
|
||||||
|
work) run the object code and to modify the work, including scripts to
|
||||||
|
control those activities. However, it does not include the work's
|
||||||
|
System Libraries, or general-purpose tools or generally available free
|
||||||
|
programs which are used unmodified in performing those activities but
|
||||||
|
which are not part of the work. For example, Corresponding Source
|
||||||
|
includes interface definition files associated with source files for
|
||||||
|
the work, and the source code for shared libraries and dynamically
|
||||||
|
linked subprograms that the work is specifically designed to require,
|
||||||
|
such as by intimate data communication or control flow between those
|
||||||
|
subprograms and other parts of the work.
|
||||||
|
|
||||||
|
The Corresponding Source need not include anything that users
|
||||||
|
can regenerate automatically from other parts of the Corresponding
|
||||||
|
Source.
|
||||||
|
|
||||||
|
The Corresponding Source for a work in source code form is that
|
||||||
|
same work.
|
||||||
|
|
||||||
|
2. Basic Permissions.
|
||||||
|
|
||||||
|
All rights granted under this License are granted for the term of
|
||||||
|
copyright on the Program, and are irrevocable provided the stated
|
||||||
|
conditions are met. This License explicitly affirms your unlimited
|
||||||
|
permission to run the unmodified Program. The output from running a
|
||||||
|
covered work is covered by this License only if the output, given its
|
||||||
|
content, constitutes a covered work. This License acknowledges your
|
||||||
|
rights of fair use or other equivalent, as provided by copyright law.
|
||||||
|
|
||||||
|
You may make, run and propagate covered works that you do not
|
||||||
|
convey, without conditions so long as your license otherwise remains
|
||||||
|
in force. You may convey covered works to others for the sole purpose
|
||||||
|
of having them make modifications exclusively for you, or provide you
|
||||||
|
with facilities for running those works, provided that you comply with
|
||||||
|
the terms of this License in conveying all material for which you do
|
||||||
|
not control copyright. Those thus making or running the covered works
|
||||||
|
for you must do so exclusively on your behalf, under your direction
|
||||||
|
and control, on terms that prohibit them from making any copies of
|
||||||
|
your copyrighted material outside their relationship with you.
|
||||||
|
|
||||||
|
Conveying under any other circumstances is permitted solely under
|
||||||
|
the conditions stated below. Sublicensing is not allowed; section 10
|
||||||
|
makes it unnecessary.
|
||||||
|
|
||||||
|
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||||
|
|
||||||
|
No covered work shall be deemed part of an effective technological
|
||||||
|
measure under any applicable law fulfilling obligations under article
|
||||||
|
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||||
|
similar laws prohibiting or restricting circumvention of such
|
||||||
|
measures.
|
||||||
|
|
||||||
|
When you convey a covered work, you waive any legal power to forbid
|
||||||
|
circumvention of technological measures to the extent such circumvention
|
||||||
|
is effected by exercising rights under this License with respect to
|
||||||
|
the covered work, and you disclaim any intention to limit operation or
|
||||||
|
modification of the work as a means of enforcing, against the work's
|
||||||
|
users, your or third parties' legal rights to forbid circumvention of
|
||||||
|
technological measures.
|
||||||
|
|
||||||
|
4. Conveying Verbatim Copies.
|
||||||
|
|
||||||
|
You may convey verbatim copies of the Program's source code as you
|
||||||
|
receive it, in any medium, provided that you conspicuously and
|
||||||
|
appropriately publish on each copy an appropriate copyright notice;
|
||||||
|
keep intact all notices stating that this License and any
|
||||||
|
non-permissive terms added in accord with section 7 apply to the code;
|
||||||
|
keep intact all notices of the absence of any warranty; and give all
|
||||||
|
recipients a copy of this License along with the Program.
|
||||||
|
|
||||||
|
You may charge any price or no price for each copy that you convey,
|
||||||
|
and you may offer support or warranty protection for a fee.
|
||||||
|
|
||||||
|
5. Conveying Modified Source Versions.
|
||||||
|
|
||||||
|
You may convey a work based on the Program, or the modifications to
|
||||||
|
produce it from the Program, in the form of source code under the
|
||||||
|
terms of section 4, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) The work must carry prominent notices stating that you modified
|
||||||
|
it, and giving a relevant date.
|
||||||
|
|
||||||
|
b) The work must carry prominent notices stating that it is
|
||||||
|
released under this License and any conditions added under section
|
||||||
|
7. This requirement modifies the requirement in section 4 to
|
||||||
|
"keep intact all notices".
|
||||||
|
|
||||||
|
c) You must license the entire work, as a whole, under this
|
||||||
|
License to anyone who comes into possession of a copy. This
|
||||||
|
License will therefore apply, along with any applicable section 7
|
||||||
|
additional terms, to the whole of the work, and all its parts,
|
||||||
|
regardless of how they are packaged. This License gives no
|
||||||
|
permission to license the work in any other way, but it does not
|
||||||
|
invalidate such permission if you have separately received it.
|
||||||
|
|
||||||
|
d) If the work has interactive user interfaces, each must display
|
||||||
|
Appropriate Legal Notices; however, if the Program has interactive
|
||||||
|
interfaces that do not display Appropriate Legal Notices, your
|
||||||
|
work need not make them do so.
|
||||||
|
|
||||||
|
A compilation of a covered work with other separate and independent
|
||||||
|
works, which are not by their nature extensions of the covered work,
|
||||||
|
and which are not combined with it such as to form a larger program,
|
||||||
|
in or on a volume of a storage or distribution medium, is called an
|
||||||
|
"aggregate" if the compilation and its resulting copyright are not
|
||||||
|
used to limit the access or legal rights of the compilation's users
|
||||||
|
beyond what the individual works permit. Inclusion of a covered work
|
||||||
|
in an aggregate does not cause this License to apply to the other
|
||||||
|
parts of the aggregate.
|
||||||
|
|
||||||
|
6. Conveying Non-Source Forms.
|
||||||
|
|
||||||
|
You may convey a covered work in object code form under the terms
|
||||||
|
of sections 4 and 5, provided that you also convey the
|
||||||
|
machine-readable Corresponding Source under the terms of this License,
|
||||||
|
in one of these ways:
|
||||||
|
|
||||||
|
a) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by the
|
||||||
|
Corresponding Source fixed on a durable physical medium
|
||||||
|
customarily used for software interchange.
|
||||||
|
|
||||||
|
b) Convey the object code in, or embodied in, a physical product
|
||||||
|
(including a physical distribution medium), accompanied by a
|
||||||
|
written offer, valid for at least three years and valid for as
|
||||||
|
long as you offer spare parts or customer support for that product
|
||||||
|
model, to give anyone who possesses the object code either (1) a
|
||||||
|
copy of the Corresponding Source for all the software in the
|
||||||
|
product that is covered by this License, on a durable physical
|
||||||
|
medium customarily used for software interchange, for a price no
|
||||||
|
more than your reasonable cost of physically performing this
|
||||||
|
conveying of source, or (2) access to copy the
|
||||||
|
Corresponding Source from a network server at no charge.
|
||||||
|
|
||||||
|
c) Convey individual copies of the object code with a copy of the
|
||||||
|
written offer to provide the Corresponding Source. This
|
||||||
|
alternative is allowed only occasionally and noncommercially, and
|
||||||
|
only if you received the object code with such an offer, in accord
|
||||||
|
with subsection 6b.
|
||||||
|
|
||||||
|
d) Convey the object code by offering access from a designated
|
||||||
|
place (gratis or for a charge), and offer equivalent access to the
|
||||||
|
Corresponding Source in the same way through the same place at no
|
||||||
|
further charge. You need not require recipients to copy the
|
||||||
|
Corresponding Source along with the object code. If the place to
|
||||||
|
copy the object code is a network server, the Corresponding Source
|
||||||
|
may be on a different server (operated by you or a third party)
|
||||||
|
that supports equivalent copying facilities, provided you maintain
|
||||||
|
clear directions next to the object code saying where to find the
|
||||||
|
Corresponding Source. Regardless of what server hosts the
|
||||||
|
Corresponding Source, you remain obligated to ensure that it is
|
||||||
|
available for as long as needed to satisfy these requirements.
|
||||||
|
|
||||||
|
e) Convey the object code using peer-to-peer transmission, provided
|
||||||
|
you inform other peers where the object code and Corresponding
|
||||||
|
Source of the work are being offered to the general public at no
|
||||||
|
charge under subsection 6d.
|
||||||
|
|
||||||
|
A separable portion of the object code, whose source code is excluded
|
||||||
|
from the Corresponding Source as a System Library, need not be
|
||||||
|
included in conveying the object code work.
|
||||||
|
|
||||||
|
A "User Product" is either (1) a "consumer product", which means any
|
||||||
|
tangible personal property which is normally used for personal, family,
|
||||||
|
or household purposes, or (2) anything designed or sold for incorporation
|
||||||
|
into a dwelling. In determining whether a product is a consumer product,
|
||||||
|
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||||
|
product received by a particular user, "normally used" refers to a
|
||||||
|
typical or common use of that class of product, regardless of the status
|
||||||
|
of the particular user or of the way in which the particular user
|
||||||
|
actually uses, or expects or is expected to use, the product. A product
|
||||||
|
is a consumer product regardless of whether the product has substantial
|
||||||
|
commercial, industrial or non-consumer uses, unless such uses represent
|
||||||
|
the only significant mode of use of the product.
|
||||||
|
|
||||||
|
"Installation Information" for a User Product means any methods,
|
||||||
|
procedures, authorization keys, or other information required to install
|
||||||
|
and execute modified versions of a covered work in that User Product from
|
||||||
|
a modified version of its Corresponding Source. The information must
|
||||||
|
suffice to ensure that the continued functioning of the modified object
|
||||||
|
code is in no case prevented or interfered with solely because
|
||||||
|
modification has been made.
|
||||||
|
|
||||||
|
If you convey an object code work under this section in, or with, or
|
||||||
|
specifically for use in, a User Product, and the conveying occurs as
|
||||||
|
part of a transaction in which the right of possession and use of the
|
||||||
|
User Product is transferred to the recipient in perpetuity or for a
|
||||||
|
fixed term (regardless of how the transaction is characterized), the
|
||||||
|
Corresponding Source conveyed under this section must be accompanied
|
||||||
|
by the Installation Information. But this requirement does not apply
|
||||||
|
if neither you nor any third party retains the ability to install
|
||||||
|
modified object code on the User Product (for example, the work has
|
||||||
|
been installed in ROM).
|
||||||
|
|
||||||
|
The requirement to provide Installation Information does not include a
|
||||||
|
requirement to continue to provide support service, warranty, or updates
|
||||||
|
for a work that has been modified or installed by the recipient, or for
|
||||||
|
the User Product in which it has been modified or installed. Access to a
|
||||||
|
network may be denied when the modification itself materially and
|
||||||
|
adversely affects the operation of the network or violates the rules and
|
||||||
|
protocols for communication across the network.
|
||||||
|
|
||||||
|
Corresponding Source conveyed, and Installation Information provided,
|
||||||
|
in accord with this section must be in a format that is publicly
|
||||||
|
documented (and with an implementation available to the public in
|
||||||
|
source code form), and must require no special password or key for
|
||||||
|
unpacking, reading or copying.
|
||||||
|
|
||||||
|
7. Additional Terms.
|
||||||
|
|
||||||
|
"Additional permissions" are terms that supplement the terms of this
|
||||||
|
License by making exceptions from one or more of its conditions.
|
||||||
|
Additional permissions that are applicable to the entire Program shall
|
||||||
|
be treated as though they were included in this License, to the extent
|
||||||
|
that they are valid under applicable law. If additional permissions
|
||||||
|
apply only to part of the Program, that part may be used separately
|
||||||
|
under those permissions, but the entire Program remains governed by
|
||||||
|
this License without regard to the additional permissions.
|
||||||
|
|
||||||
|
When you convey a copy of a covered work, you may at your option
|
||||||
|
remove any additional permissions from that copy, or from any part of
|
||||||
|
it. (Additional permissions may be written to require their own
|
||||||
|
removal in certain cases when you modify the work.) You may place
|
||||||
|
additional permissions on material, added by you to a covered work,
|
||||||
|
for which you have or can give appropriate copyright permission.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, for material you
|
||||||
|
add to a covered work, you may (if authorized by the copyright holders of
|
||||||
|
that material) supplement the terms of this License with terms:
|
||||||
|
|
||||||
|
a) Disclaiming warranty or limiting liability differently from the
|
||||||
|
terms of sections 15 and 16 of this License; or
|
||||||
|
|
||||||
|
b) Requiring preservation of specified reasonable legal notices or
|
||||||
|
author attributions in that material or in the Appropriate Legal
|
||||||
|
Notices displayed by works containing it; or
|
||||||
|
|
||||||
|
c) Prohibiting misrepresentation of the origin of that material, or
|
||||||
|
requiring that modified versions of such material be marked in
|
||||||
|
reasonable ways as different from the original version; or
|
||||||
|
|
||||||
|
d) Limiting the use for publicity purposes of names of licensors or
|
||||||
|
authors of the material; or
|
||||||
|
|
||||||
|
e) Declining to grant rights under trademark law for use of some
|
||||||
|
trade names, trademarks, or service marks; or
|
||||||
|
|
||||||
|
f) Requiring indemnification of licensors and authors of that
|
||||||
|
material by anyone who conveys the material (or modified versions of
|
||||||
|
it) with contractual assumptions of liability to the recipient, for
|
||||||
|
any liability that these contractual assumptions directly impose on
|
||||||
|
those licensors and authors.
|
||||||
|
|
||||||
|
All other non-permissive additional terms are considered "further
|
||||||
|
restrictions" within the meaning of section 10. If the Program as you
|
||||||
|
received it, or any part of it, contains a notice stating that it is
|
||||||
|
governed by this License along with a term that is a further
|
||||||
|
restriction, you may remove that term. If a license document contains
|
||||||
|
a further restriction but permits relicensing or conveying under this
|
||||||
|
License, you may add to a covered work material governed by the terms
|
||||||
|
of that license document, provided that the further restriction does
|
||||||
|
not survive such relicensing or conveying.
|
||||||
|
|
||||||
|
If you add terms to a covered work in accord with this section, you
|
||||||
|
must place, in the relevant source files, a statement of the
|
||||||
|
additional terms that apply to those files, or a notice indicating
|
||||||
|
where to find the applicable terms.
|
||||||
|
|
||||||
|
Additional terms, permissive or non-permissive, may be stated in the
|
||||||
|
form of a separately written license, or stated as exceptions;
|
||||||
|
the above requirements apply either way.
|
||||||
|
|
||||||
|
8. Termination.
|
||||||
|
|
||||||
|
You may not propagate or modify a covered work except as expressly
|
||||||
|
provided under this License. Any attempt otherwise to propagate or
|
||||||
|
modify it is void, and will automatically terminate your rights under
|
||||||
|
this License (including any patent licenses granted under the third
|
||||||
|
paragraph of section 11).
|
||||||
|
|
||||||
|
However, if you cease all violation of this License, then your
|
||||||
|
license from a particular copyright holder is reinstated (a)
|
||||||
|
provisionally, unless and until the copyright holder explicitly and
|
||||||
|
finally terminates your license, and (b) permanently, if the copyright
|
||||||
|
holder fails to notify you of the violation by some reasonable means
|
||||||
|
prior to 60 days after the cessation.
|
||||||
|
|
||||||
|
Moreover, your license from a particular copyright holder is
|
||||||
|
reinstated permanently if the copyright holder notifies you of the
|
||||||
|
violation by some reasonable means, this is the first time you have
|
||||||
|
received notice of violation of this License (for any work) from that
|
||||||
|
copyright holder, and you cure the violation prior to 30 days after
|
||||||
|
your receipt of the notice.
|
||||||
|
|
||||||
|
Termination of your rights under this section does not terminate the
|
||||||
|
licenses of parties who have received copies or rights from you under
|
||||||
|
this License. If your rights have been terminated and not permanently
|
||||||
|
reinstated, you do not qualify to receive new licenses for the same
|
||||||
|
material under section 10.
|
||||||
|
|
||||||
|
9. Acceptance Not Required for Having Copies.
|
||||||
|
|
||||||
|
You are not required to accept this License in order to receive or
|
||||||
|
run a copy of the Program. Ancillary propagation of a covered work
|
||||||
|
occurring solely as a consequence of using peer-to-peer transmission
|
||||||
|
to receive a copy likewise does not require acceptance. However,
|
||||||
|
nothing other than this License grants you permission to propagate or
|
||||||
|
modify any covered work. These actions infringe copyright if you do
|
||||||
|
not accept this License. Therefore, by modifying or propagating a
|
||||||
|
covered work, you indicate your acceptance of this License to do so.
|
||||||
|
|
||||||
|
10. Automatic Licensing of Downstream Recipients.
|
||||||
|
|
||||||
|
Each time you convey a covered work, the recipient automatically
|
||||||
|
receives a license from the original licensors, to run, modify and
|
||||||
|
propagate that work, subject to this License. You are not responsible
|
||||||
|
for enforcing compliance by third parties with this License.
|
||||||
|
|
||||||
|
An "entity transaction" is a transaction transferring control of an
|
||||||
|
organization, or substantially all assets of one, or subdividing an
|
||||||
|
organization, or merging organizations. If propagation of a covered
|
||||||
|
work results from an entity transaction, each party to that
|
||||||
|
transaction who receives a copy of the work also receives whatever
|
||||||
|
licenses to the work the party's predecessor in interest had or could
|
||||||
|
give under the previous paragraph, plus a right to possession of the
|
||||||
|
Corresponding Source of the work from the predecessor in interest, if
|
||||||
|
the predecessor has it or can get it with reasonable efforts.
|
||||||
|
|
||||||
|
You may not impose any further restrictions on the exercise of the
|
||||||
|
rights granted or affirmed under this License. For example, you may
|
||||||
|
not impose a license fee, royalty, or other charge for exercise of
|
||||||
|
rights granted under this License, and you may not initiate litigation
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
any patent claim is infringed by making, using, selling, offering for
|
||||||
|
sale, or importing the Program or any portion of it.
|
||||||
|
|
||||||
|
11. Patents.
|
||||||
|
|
||||||
|
A "contributor" is a copyright holder who authorizes use under this
|
||||||
|
License of the Program or a work on which the Program is based. The
|
||||||
|
work thus licensed is called the contributor's "contributor version".
|
||||||
|
|
||||||
|
A contributor's "essential patent claims" are all patent claims
|
||||||
|
owned or controlled by the contributor, whether already acquired or
|
||||||
|
hereafter acquired, that would be infringed by some manner, permitted
|
||||||
|
by this License, of making, using, or selling its contributor version,
|
||||||
|
but do not include claims that would be infringed only as a
|
||||||
|
consequence of further modification of the contributor version. For
|
||||||
|
purposes of this definition, "control" includes the right to grant
|
||||||
|
patent sublicenses in a manner consistent with the requirements of
|
||||||
|
this License.
|
||||||
|
|
||||||
|
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||||
|
patent license under the contributor's essential patent claims, to
|
||||||
|
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||||
|
propagate the contents of its contributor version.
|
||||||
|
|
||||||
|
In the following three paragraphs, a "patent license" is any express
|
||||||
|
agreement or commitment, however denominated, not to enforce a patent
|
||||||
|
(such as an express permission to practice a patent or covenant not to
|
||||||
|
sue for patent infringement). To "grant" such a patent license to a
|
||||||
|
party means to make such an agreement or commitment not to enforce a
|
||||||
|
patent against the party.
|
||||||
|
|
||||||
|
If you convey a covered work, knowingly relying on a patent license,
|
||||||
|
and the Corresponding Source of the work is not available for anyone
|
||||||
|
to copy, free of charge and under the terms of this License, through a
|
||||||
|
publicly available network server or other readily accessible means,
|
||||||
|
then you must either (1) cause the Corresponding Source to be so
|
||||||
|
available, or (2) arrange to deprive yourself of the benefit of the
|
||||||
|
patent license for this particular work, or (3) arrange, in a manner
|
||||||
|
consistent with the requirements of this License, to extend the patent
|
||||||
|
license to downstream recipients. "Knowingly relying" means you have
|
||||||
|
actual knowledge that, but for the patent license, your conveying the
|
||||||
|
covered work in a country, or your recipient's use of the covered work
|
||||||
|
in a country, would infringe one or more identifiable patents in that
|
||||||
|
country that you have reason to believe are valid.
|
||||||
|
|
||||||
|
If, pursuant to or in connection with a single transaction or
|
||||||
|
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||||
|
covered work, and grant a patent license to some of the parties
|
||||||
|
receiving the covered work authorizing them to use, propagate, modify
|
||||||
|
or convey a specific copy of the covered work, then the patent license
|
||||||
|
you grant is automatically extended to all recipients of the covered
|
||||||
|
work and works based on it.
|
||||||
|
|
||||||
|
A patent license is "discriminatory" if it does not include within
|
||||||
|
the scope of its coverage, prohibits the exercise of, or is
|
||||||
|
conditioned on the non-exercise of one or more of the rights that are
|
||||||
|
specifically granted under this License. You may not convey a covered
|
||||||
|
work if you are a party to an arrangement with a third party that is
|
||||||
|
in the business of distributing software, under which you make payment
|
||||||
|
to the third party based on the extent of your activity of conveying
|
||||||
|
the work, and under which the third party grants, to any of the
|
||||||
|
parties who would receive the covered work from you, a discriminatory
|
||||||
|
patent license (a) in connection with copies of the covered work
|
||||||
|
conveyed by you (or copies made from those copies), or (b) primarily
|
||||||
|
for and in connection with specific products or compilations that
|
||||||
|
contain the covered work, unless you entered into that arrangement,
|
||||||
|
or that patent license was granted, prior to 28 March 2007.
|
||||||
|
|
||||||
|
Nothing in this License shall be construed as excluding or limiting
|
||||||
|
any implied license or other defenses to infringement that may
|
||||||
|
otherwise be available to you under applicable patent law.
|
||||||
|
|
||||||
|
12. No Surrender of Others' Freedom.
|
||||||
|
|
||||||
|
If conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot convey a
|
||||||
|
covered work so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you may
|
||||||
|
not convey it at all. For example, if you agree to terms that obligate you
|
||||||
|
to collect a royalty for further conveying from those to whom you convey
|
||||||
|
the Program, the only way you could satisfy both those terms and this
|
||||||
|
License would be to refrain entirely from conveying the Program.
|
||||||
|
|
||||||
|
13. Use with the GNU Affero General Public License.
|
||||||
|
|
||||||
|
Notwithstanding any other provision of this License, you have
|
||||||
|
permission to link or combine any covered work with a work licensed
|
||||||
|
under version 3 of the GNU Affero General Public License into a single
|
||||||
|
combined work, and to convey the resulting work. The terms of this
|
||||||
|
License will continue to apply to the part which is the covered work,
|
||||||
|
but the special requirements of the GNU Affero General Public License,
|
||||||
|
section 13, concerning interaction through a network will apply to the
|
||||||
|
combination as such.
|
||||||
|
|
||||||
|
14. Revised Versions of this License.
|
||||||
|
|
||||||
|
The Free Software Foundation may publish revised and/or new versions of
|
||||||
|
the GNU General Public License from time to time. Such new versions will
|
||||||
|
be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the
|
||||||
|
Program specifies that a certain numbered version of the GNU General
|
||||||
|
Public License "or any later version" applies to it, you have the
|
||||||
|
option of following the terms and conditions either of that numbered
|
||||||
|
version or of any later version published by the Free Software
|
||||||
|
Foundation. If the Program does not specify a version number of the
|
||||||
|
GNU General Public License, you may choose any version ever published
|
||||||
|
by the Free Software Foundation.
|
||||||
|
|
||||||
|
If the Program specifies that a proxy can decide which future
|
||||||
|
versions of the GNU General Public License can be used, that proxy's
|
||||||
|
public statement of acceptance of a version permanently authorizes you
|
||||||
|
to choose that version for the Program.
|
||||||
|
|
||||||
|
Later license versions may give you additional or different
|
||||||
|
permissions. However, no additional obligations are imposed on any
|
||||||
|
author or copyright holder as a result of your choosing to follow a
|
||||||
|
later version.
|
||||||
|
|
||||||
|
15. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||||
|
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||||
|
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||||
|
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||||
|
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||||
|
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||||
|
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
16. Limitation of Liability.
|
||||||
|
|
||||||
|
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||||
|
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||||
|
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||||
|
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||||
|
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||||
|
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||||
|
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||||
|
SUCH DAMAGES.
|
||||||
|
|
||||||
|
17. Interpretation of Sections 15 and 16.
|
||||||
|
|
||||||
|
If the disclaimer of warranty and limitation of liability provided
|
||||||
|
above cannot be given local legal effect according to their terms,
|
||||||
|
reviewing courts shall apply local law that most closely approximates
|
||||||
|
an absolute waiver of all civil liability in connection with the
|
||||||
|
Program, unless a warranty or assumption of liability accompanies a
|
||||||
|
copy of the Program in return for a fee.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
state the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software: you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation, either version 3 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If the program does terminal interaction, make it output a short
|
||||||
|
notice like this when it starts in an interactive mode:
|
||||||
|
|
||||||
|
<program> Copyright (C) <year> <name of author>
|
||||||
|
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||||
|
This is free software, and you are welcome to redistribute it
|
||||||
|
under certain conditions; type `show c' for details.
|
||||||
|
|
||||||
|
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||||
|
parts of the General Public License. Of course, your program's commands
|
||||||
|
might be different; for a GUI interface, you would use an "about box".
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or school,
|
||||||
|
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||||
|
For more information on this, and how to apply and follow the GNU GPL, see
|
||||||
|
<https://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
The GNU General Public License does not permit incorporating your program
|
||||||
|
into proprietary programs. If your program is a subroutine library, you
|
||||||
|
may consider it more useful to permit linking proprietary applications with
|
||||||
|
the library. If this is what you want to do, use the GNU Lesser General
|
||||||
|
Public License instead of this License. But first, please read
|
||||||
|
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
||||||
@@ -1,182 +1,55 @@
|
|||||||
# Story Kit
|
# Storkit
|
||||||
|
|
||||||
This app runs as a single Rust web server binary that serves the Vite/React frontend and exposes APIs.
|
A story-driven development server that manages work items, spawns coding agents, and runs them through a pipeline from backlog to done. Ships as a single Rust binary with an embedded React frontend. Communicates via Matrix, WhatsApp, and Slack bot transports, and exposes MCP tools for programmatic access.
|
||||||
The frontend lives in the `frontend/` directory.
|
|
||||||
|
|
||||||
You can also run the frontend and backend separately in development (Vite dev server + Rust API).
|
## Prerequisites
|
||||||
|
|
||||||
## Running it in development
|
- Rust (2024 edition)
|
||||||
|
- Node.js and npm
|
||||||
|
- Docker (for Linux cross-compilation and container deployment)
|
||||||
|
- `cross` (`cargo install cross`) for Linux static builds
|
||||||
|
|
||||||
|
## Building for production
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Build the frontend
|
|
||||||
cd frontend
|
|
||||||
npm install
|
|
||||||
npm run dev
|
|
||||||
|
|
||||||
# In another terminal - run the server (serves embedded frontend/dist/)
|
|
||||||
cargo run
|
|
||||||
```
|
|
||||||
|
|
||||||
## Production
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Build the release binary (also builds the frontend via build.rs)
|
|
||||||
cargo build --release
|
cargo build --release
|
||||||
|
|
||||||
# Run the server (serves embedded frontend/dist/)
|
|
||||||
./target/release/storkit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Cross-Platform Distribution
|
The release binary embeds the frontend via `rust-embed`. Output: `target/release/storkit`.
|
||||||
|
|
||||||
Story Kit ships as a **single self-contained binary** with the React frontend embedded via
|
For a static Linux binary (musl, zero dynamic deps):
|
||||||
`rust-embed`. No Rust toolchain, Node.js, or extra libraries are required on the target machine.
|
|
||||||
|
|
||||||
### macOS
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Native build – no extra tools required beyond Rust + npm
|
cross build --release --target x86_64-unknown-linux-musl
|
||||||
make build-macos
|
|
||||||
# Output: target/release/storkit
|
|
||||||
|
|
||||||
# Verify only system frameworks are linked (Security.framework, libSystem.B.dylib, etc.)
|
|
||||||
otool -L target/release/storkit
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Linux (static x86_64, zero dynamic deps)
|
Docker:
|
||||||
|
|
||||||
The Linux build uses the `x86_64-unknown-linux-musl` target to produce a fully static binary.
|
|
||||||
|
|
||||||
**Prerequisites:**
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Install cross – a Rust cross-compilation tool backed by Docker
|
docker compose -f docker/docker-compose.yml build
|
||||||
cargo install cross
|
|
||||||
|
|
||||||
# Ensure Docker Desktop (or Docker Engine) is running
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**Build:**
|
## Running in development
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
make build-linux
|
# Run tests
|
||||||
# Output: target/x86_64-unknown-linux-musl/release/storkit
|
script/test
|
||||||
|
|
||||||
# Verify the binary is statically linked
|
# Run the server
|
||||||
file target/x86_64-unknown-linux-musl/release/storkit
|
cargo run -- --port 3000
|
||||||
# Expected: ELF 64-bit LSB executable, x86-64, statically linked
|
|
||||||
|
|
||||||
ldd target/x86_64-unknown-linux-musl/release/storkit
|
# In another terminal, run the frontend dev server
|
||||||
# Expected: not a dynamic executable
|
cd frontend && npm install && npm run dev
|
||||||
```
|
```
|
||||||
|
|
||||||
**Running on any Linux x86_64 machine:**
|
Configuration lives in `.storkit/project.toml`. See `.storkit/bot.toml.*.example` for transport setup.
|
||||||
|
|
||||||
```bash
|
|
||||||
# No Rust, Node, glibc, or any other library needed – just copy and run
|
|
||||||
./storkit
|
|
||||||
```
|
|
||||||
|
|
||||||
## Releasing
|
## Releasing
|
||||||
|
|
||||||
Builds both macOS and Linux binaries locally, tags the repo, and publishes a Gitea release with a changelog.
|
Requires a Gitea API token in `.env` (`GITEA_TOKEN=your_token`).
|
||||||
|
|
||||||
**One-time setup:**
|
|
||||||
|
|
||||||
1. Create a Gitea API token at `https://code.crashlabs.io/user/settings/applications` (needs repository read/write)
|
|
||||||
2. Add it to `.env` (gitignored): `GITEA_TOKEN=your_token`
|
|
||||||
3. Ensure `cross` is installed (`cargo install cross`) and Docker is running
|
|
||||||
|
|
||||||
**To release:**
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
make release V=0.2.0
|
script/release 0.6.1
|
||||||
```
|
```
|
||||||
|
|
||||||
This will:
|
This bumps version in `Cargo.toml` and `package.json`, builds macOS arm64 and Linux amd64 binaries, tags the repo, and publishes a Gitea release with changelog and binaries attached.
|
||||||
- Build macOS arm64 (native) and Linux amd64 (static musl via cross/Docker)
|
|
||||||
- Generate a changelog from commits since the last tag
|
|
||||||
- Tag the repo as `v0.2.0` and push the tag
|
|
||||||
- Create a Gitea release with both binaries and the changelog attached
|
|
||||||
|
|
||||||
## Testing
|
|
||||||
|
|
||||||
### Frontend Tests
|
|
||||||
|
|
||||||
The frontend uses **Vitest** for unit tests and **Playwright** for end-to-end tests.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cd frontend
|
|
||||||
|
|
||||||
# Run unit tests
|
|
||||||
npm test
|
|
||||||
|
|
||||||
# Run end-to-end tests
|
|
||||||
npm run test:e2e
|
|
||||||
```
|
|
||||||
|
|
||||||
### Backend Tests
|
|
||||||
|
|
||||||
This project uses **nextest** for running tests and **cargo-llvm-cov** for code coverage.
|
|
||||||
|
|
||||||
### Install Tools
|
|
||||||
|
|
||||||
```bash
|
|
||||||
cargo install cargo-nextest cargo-llvm-cov
|
|
||||||
```
|
|
||||||
|
|
||||||
### Run Tests
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Run all tests
|
|
||||||
cargo nextest run
|
|
||||||
|
|
||||||
# Run specific module
|
|
||||||
cargo nextest run search_files
|
|
||||||
|
|
||||||
# Run with verbose output
|
|
||||||
cargo nextest run --no-capture
|
|
||||||
```
|
|
||||||
|
|
||||||
### Generate Coverage
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# HTML report (opens in browser)
|
|
||||||
cargo llvm-cov nextest --html --open
|
|
||||||
|
|
||||||
# Terminal output
|
|
||||||
cargo llvm-cov nextest
|
|
||||||
|
|
||||||
# LCOV format (for CI)
|
|
||||||
cargo llvm-cov nextest --lcov --output-path lcov.info
|
|
||||||
|
|
||||||
# Clean coverage data
|
|
||||||
cargo llvm-cov clean
|
|
||||||
```
|
|
||||||
|
|
||||||
### Configuration
|
|
||||||
|
|
||||||
- **Nextest config**: `.config/nextest.toml`
|
|
||||||
- **Coverage output**: `target/llvm-cov/html/index.html`
|
|
||||||
|
|
||||||
## Current Coverage (search_files module)
|
|
||||||
|
|
||||||
```
|
|
||||||
Module: commands/search.rs
|
|
||||||
├── Region Coverage: 75.36%
|
|
||||||
├── Function Coverage: 69.05%
|
|
||||||
└── Line Coverage: 72.55%
|
|
||||||
```
|
|
||||||
|
|
||||||
### Available Test Profiles
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Development (default)
|
|
||||||
cargo nextest run
|
|
||||||
|
|
||||||
# CI with retries
|
|
||||||
cargo nextest run --profile ci
|
|
||||||
|
|
||||||
# Coverage optimized
|
|
||||||
cargo nextest run --profile coverage
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -27,6 +27,8 @@ services:
|
|||||||
- GIT_USER_EMAIL=${GIT_USER_EMAIL:?Set GIT_USER_EMAIL}
|
- GIT_USER_EMAIL=${GIT_USER_EMAIL:?Set GIT_USER_EMAIL}
|
||||||
# Optional: override the server port (default 3001)
|
# Optional: override the server port (default 3001)
|
||||||
- STORKIT_PORT=3001
|
- STORKIT_PORT=3001
|
||||||
|
# Bind to all interfaces so Docker port forwarding works.
|
||||||
|
- STORKIT_HOST=0.0.0.0
|
||||||
# Optional: Matrix bot credentials (if using Matrix integration)
|
# Optional: Matrix bot credentials (if using Matrix integration)
|
||||||
- MATRIX_HOMESERVER=${MATRIX_HOMESERVER:-}
|
- MATRIX_HOMESERVER=${MATRIX_HOMESERVER:-}
|
||||||
- MATRIX_USER=${MATRIX_USER:-}
|
- MATRIX_USER=${MATRIX_USER:-}
|
||||||
@@ -89,7 +91,6 @@ services:
|
|||||||
- no-new-privileges:true
|
- no-new-privileges:true
|
||||||
|
|
||||||
# Resource limits – cap the whole system.
|
# Resource limits – cap the whole system.
|
||||||
# Adjust based on your machine. These are conservative defaults.
|
|
||||||
deploy:
|
deploy:
|
||||||
resources:
|
resources:
|
||||||
limits:
|
limits:
|
||||||
|
|||||||
@@ -14,8 +14,12 @@ if [ -z "$GIT_USER_EMAIL" ]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Use GIT_AUTHOR/COMMITTER env vars instead of git config --global,
|
# Set git identity globally so it persists for all shells (docker exec, etc.),
|
||||||
# so the root filesystem can stay read-only (no ~/.gitconfig write).
|
# not just the entrypoint process tree.
|
||||||
|
git config --global user.name "$GIT_USER_NAME"
|
||||||
|
git config --global user.email "$GIT_USER_EMAIL"
|
||||||
|
|
||||||
|
# Also set env vars for backwards compatibility.
|
||||||
export GIT_AUTHOR_NAME="$GIT_USER_NAME"
|
export GIT_AUTHOR_NAME="$GIT_USER_NAME"
|
||||||
export GIT_COMMITTER_NAME="$GIT_USER_NAME"
|
export GIT_COMMITTER_NAME="$GIT_USER_NAME"
|
||||||
export GIT_AUTHOR_EMAIL="$GIT_USER_EMAIL"
|
export GIT_AUTHOR_EMAIL="$GIT_USER_EMAIL"
|
||||||
@@ -27,7 +31,7 @@ export GIT_COMMITTER_EMAIL="$GIT_USER_EMAIL"
|
|||||||
# binaries on a Linux container). Reinstall to get the right ones.
|
# binaries on a Linux container). Reinstall to get the right ones.
|
||||||
if [ -d /workspace/frontend ] && [ -f /workspace/frontend/package.json ]; then
|
if [ -d /workspace/frontend ] && [ -f /workspace/frontend/package.json ]; then
|
||||||
echo "Installing frontend dependencies for container platform..."
|
echo "Installing frontend dependencies for container platform..."
|
||||||
cd /workspace/frontend && npm install --prefer-offline 2>/dev/null || true
|
cd /workspace/frontend && npm ci --prefer-offline 2>/dev/null || true
|
||||||
cd /workspace
|
cd /workspace
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|||||||
Generated
+4811
-8406
File diff suppressed because it is too large
Load Diff
+40
-39
@@ -1,41 +1,42 @@
|
|||||||
{
|
{
|
||||||
"name": "living-spec-standalone",
|
"name": "living-spec-standalone",
|
||||||
"private": true,
|
"private": true,
|
||||||
"version": "0.5.0",
|
"version": "0.8.0",
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"dev": "vite",
|
"dev": "vite",
|
||||||
"build": "tsc && vite build",
|
"build": "tsc && vite build",
|
||||||
"preview": "vite preview",
|
"preview": "vite preview",
|
||||||
"server": "cargo run --manifest-path server/Cargo.toml",
|
"server": "cargo run --manifest-path server/Cargo.toml",
|
||||||
"test": "vitest run",
|
"test": "vitest run",
|
||||||
"test:unit": "vitest run",
|
"test:unit": "vitest run",
|
||||||
"test:e2e": "playwright test",
|
"test:e2e": "playwright test",
|
||||||
"test:coverage": "vitest run --coverage"
|
"test:coverage": "vitest run --coverage"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/react-syntax-highlighter": "^15.5.13",
|
"@types/react-syntax-highlighter": "^15.5.13",
|
||||||
"react": "^19.1.0",
|
"react": "^19.1.0",
|
||||||
"react-dom": "^19.1.0",
|
"react-dom": "^19.1.0",
|
||||||
"react-markdown": "^10.1.0",
|
"react-markdown": "^10.1.0",
|
||||||
"react-syntax-highlighter": "^16.1.0"
|
"react-syntax-highlighter": "^16.1.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"overrides": {
|
||||||
"@biomejs/biome": "^2.4.2",
|
"glob": "^13.0.0"
|
||||||
"@playwright/test": "^1.47.2",
|
},
|
||||||
"@testing-library/jest-dom": "^6.0.0",
|
"devDependencies": {
|
||||||
"@testing-library/react": "^16.0.0",
|
"@biomejs/biome": "^2.4.2",
|
||||||
"@testing-library/user-event": "^14.4.3",
|
"@playwright/test": "^1.47.2",
|
||||||
"@types/node": "^25.0.0",
|
"@testing-library/jest-dom": "^6.0.0",
|
||||||
"@types/react": "^19.1.8",
|
"@testing-library/react": "^16.0.0",
|
||||||
"@types/react-dom": "^19.1.6",
|
"@testing-library/user-event": "^14.4.3",
|
||||||
"@vitejs/plugin-react": "^4.6.0",
|
"@types/node": "^25.0.0",
|
||||||
"@vitest/coverage-v8": "^2.1.9",
|
"@types/react": "^19.1.8",
|
||||||
"jest": "^29.0.0",
|
"@types/react-dom": "^19.1.6",
|
||||||
"jsdom": "^28.1.0",
|
"@vitejs/plugin-react": "^4.6.0",
|
||||||
"ts-jest": "^29.0.0",
|
"@vitest/coverage-v8": "^2.1.9",
|
||||||
"typescript": "~5.8.3",
|
"jsdom": "^28.1.0",
|
||||||
"vite": "^5.4.21",
|
"typescript": "~5.8.3",
|
||||||
"vitest": "^2.1.4"
|
"vite": "^5.4.21",
|
||||||
}
|
"vitest": "^2.1.4"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,27 +1,27 @@
|
|||||||
import { defineConfig } from "@playwright/test";
|
|
||||||
import { dirname, resolve } from "node:path";
|
import { dirname, resolve } from "node:path";
|
||||||
import { fileURLToPath } from "node:url";
|
import { fileURLToPath } from "node:url";
|
||||||
|
import { defineConfig } from "@playwright/test";
|
||||||
|
|
||||||
const configDir = dirname(fileURLToPath(new URL(import.meta.url)));
|
const configDir = dirname(fileURLToPath(new URL(import.meta.url)));
|
||||||
const frontendRoot = resolve(configDir, ".");
|
const frontendRoot = resolve(configDir, ".");
|
||||||
|
|
||||||
export default defineConfig({
|
export default defineConfig({
|
||||||
testDir: "./tests/e2e",
|
testDir: "./tests/e2e",
|
||||||
fullyParallel: true,
|
fullyParallel: true,
|
||||||
timeout: 30_000,
|
timeout: 30_000,
|
||||||
expect: {
|
expect: {
|
||||||
timeout: 5_000,
|
timeout: 5_000,
|
||||||
},
|
},
|
||||||
use: {
|
use: {
|
||||||
baseURL: "http://127.0.0.1:41700",
|
baseURL: "http://127.0.0.1:41700",
|
||||||
trace: "on-first-retry",
|
trace: "on-first-retry",
|
||||||
},
|
},
|
||||||
webServer: {
|
webServer: {
|
||||||
command:
|
command:
|
||||||
"pnpm exec vite --config vite.config.ts --host 127.0.0.1 --port 41700 --strictPort",
|
"pnpm exec vite --config vite.config.ts --host 127.0.0.1 --port 41700 --strictPort",
|
||||||
url: "http://127.0.0.1:41700/@vite/client",
|
url: "http://127.0.0.1:41700/@vite/client",
|
||||||
reuseExistingServer: true,
|
reuseExistingServer: true,
|
||||||
timeout: 120_000,
|
timeout: 120_000,
|
||||||
cwd: frontendRoot,
|
cwd: frontendRoot,
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|||||||
+50
-4
@@ -127,13 +127,13 @@ details summary::-webkit-details-marker {
|
|||||||
display: none;
|
display: none;
|
||||||
}
|
}
|
||||||
|
|
||||||
details[open] summary span:first-child {
|
details summary span:first-child {
|
||||||
transform: rotate(90deg);
|
|
||||||
display: inline-block;
|
|
||||||
transition: transform 0.2s ease;
|
transition: transform 0.2s ease;
|
||||||
}
|
}
|
||||||
|
|
||||||
details summary span:first-child {
|
details[open] summary span:first-child {
|
||||||
|
transform: rotate(90deg);
|
||||||
|
display: inline-block;
|
||||||
transition: transform 0.2s ease;
|
transition: transform 0.2s ease;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -236,3 +236,49 @@ body,
|
|||||||
opacity: 0;
|
opacity: 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ProjectPathInput dropdown theming */
|
||||||
|
.path-dropdown {
|
||||||
|
border: 1px solid #ddd;
|
||||||
|
background: #fff;
|
||||||
|
color: #0f0f0f;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-dropdown-header {
|
||||||
|
border-bottom: 1px solid #eee;
|
||||||
|
background: #fafafa;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-dropdown-item {
|
||||||
|
background: transparent;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-dropdown-item--selected {
|
||||||
|
background: #f0f0f0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-match-highlight {
|
||||||
|
font-weight: 600;
|
||||||
|
color: #222;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (prefers-color-scheme: dark) {
|
||||||
|
.path-dropdown {
|
||||||
|
border-color: #555;
|
||||||
|
background: #1e1e1e;
|
||||||
|
color: #f6f6f6;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-dropdown-header {
|
||||||
|
border-bottom-color: #444;
|
||||||
|
background: #2a2a2a;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-dropdown-item--selected {
|
||||||
|
background: #3a3a3a;
|
||||||
|
}
|
||||||
|
|
||||||
|
.path-match-highlight {
|
||||||
|
color: #f6f6f6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -21,6 +21,19 @@ export type WsRequest =
|
|||||||
config: ProviderConfig;
|
config: ProviderConfig;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
export interface WizardStepInfo {
|
||||||
|
step: string;
|
||||||
|
label: string;
|
||||||
|
status: string;
|
||||||
|
content?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface WizardStateData {
|
||||||
|
steps: WizardStepInfo[];
|
||||||
|
current_step_index: number;
|
||||||
|
completed: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
export interface AgentAssignment {
|
export interface AgentAssignment {
|
||||||
agent_name: string;
|
agent_name: string;
|
||||||
model: string | null;
|
model: string | null;
|
||||||
@@ -80,6 +93,13 @@ export type WsResponse =
|
|||||||
| { type: "pong" }
|
| { type: "pong" }
|
||||||
/** Sent on connect when the project still needs onboarding (specs are placeholders). */
|
/** Sent on connect when the project still needs onboarding (specs are placeholders). */
|
||||||
| { type: "onboarding_status"; needs_onboarding: boolean }
|
| { type: "onboarding_status"; needs_onboarding: boolean }
|
||||||
|
/** Sent on connect when a setup wizard is active. */
|
||||||
|
| {
|
||||||
|
type: "wizard_state";
|
||||||
|
steps: WizardStepInfo[];
|
||||||
|
current_step_index: number;
|
||||||
|
completed: boolean;
|
||||||
|
}
|
||||||
/** Streaming thinking token from an extended-thinking block, separate from regular text. */
|
/** Streaming thinking token from an extended-thinking block, separate from regular text. */
|
||||||
| { type: "thinking_token"; content: string }
|
| { type: "thinking_token"; content: string }
|
||||||
/** Streaming token from a /btw side question response. */
|
/** Streaming token from a /btw side question response. */
|
||||||
@@ -382,6 +402,14 @@ export const api = {
|
|||||||
deleteStory(storyId: string) {
|
deleteStory(storyId: string) {
|
||||||
return callMcpTool("delete_story", { story_id: storyId });
|
return callMcpTool("delete_story", { story_id: storyId });
|
||||||
},
|
},
|
||||||
|
/** Execute a bot slash command without LLM invocation. Returns markdown response text. */
|
||||||
|
botCommand(command: string, args: string, baseUrl?: string) {
|
||||||
|
return requestJson<{ response: string }>(
|
||||||
|
"/bot/command",
|
||||||
|
{ method: "POST", body: JSON.stringify({ command, args }) },
|
||||||
|
baseUrl,
|
||||||
|
);
|
||||||
|
},
|
||||||
};
|
};
|
||||||
|
|
||||||
async function callMcpTool(
|
async function callMcpTool(
|
||||||
@@ -430,6 +458,7 @@ export class ChatWebSocket {
|
|||||||
private onAgentConfigChanged?: () => void;
|
private onAgentConfigChanged?: () => void;
|
||||||
private onAgentStateChanged?: () => void;
|
private onAgentStateChanged?: () => void;
|
||||||
private onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
private onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
||||||
|
private onWizardState?: (state: WizardStateData) => void;
|
||||||
private onSideQuestionToken?: (content: string) => void;
|
private onSideQuestionToken?: (content: string) => void;
|
||||||
private onSideQuestionDone?: (response: string) => void;
|
private onSideQuestionDone?: (response: string) => void;
|
||||||
private onLogEntry?: (
|
private onLogEntry?: (
|
||||||
@@ -520,6 +549,12 @@ export class ChatWebSocket {
|
|||||||
if (data.type === "agent_state_changed") this.onAgentStateChanged?.();
|
if (data.type === "agent_state_changed") this.onAgentStateChanged?.();
|
||||||
if (data.type === "onboarding_status")
|
if (data.type === "onboarding_status")
|
||||||
this.onOnboardingStatus?.(data.needs_onboarding);
|
this.onOnboardingStatus?.(data.needs_onboarding);
|
||||||
|
if (data.type === "wizard_state")
|
||||||
|
this.onWizardState?.({
|
||||||
|
steps: data.steps,
|
||||||
|
current_step_index: data.current_step_index,
|
||||||
|
completed: data.completed,
|
||||||
|
});
|
||||||
if (data.type === "side_question_token")
|
if (data.type === "side_question_token")
|
||||||
this.onSideQuestionToken?.(data.content);
|
this.onSideQuestionToken?.(data.content);
|
||||||
if (data.type === "side_question_done")
|
if (data.type === "side_question_done")
|
||||||
@@ -579,6 +614,7 @@ export class ChatWebSocket {
|
|||||||
onAgentConfigChanged?: () => void;
|
onAgentConfigChanged?: () => void;
|
||||||
onAgentStateChanged?: () => void;
|
onAgentStateChanged?: () => void;
|
||||||
onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
onOnboardingStatus?: (needsOnboarding: boolean) => void;
|
||||||
|
onWizardState?: (state: WizardStateData) => void;
|
||||||
onSideQuestionToken?: (content: string) => void;
|
onSideQuestionToken?: (content: string) => void;
|
||||||
onSideQuestionDone?: (response: string) => void;
|
onSideQuestionDone?: (response: string) => void;
|
||||||
onLogEntry?: (timestamp: string, level: string, message: string) => void;
|
onLogEntry?: (timestamp: string, level: string, message: string) => void;
|
||||||
@@ -598,6 +634,7 @@ export class ChatWebSocket {
|
|||||||
this.onAgentConfigChanged = handlers.onAgentConfigChanged;
|
this.onAgentConfigChanged = handlers.onAgentConfigChanged;
|
||||||
this.onAgentStateChanged = handlers.onAgentStateChanged;
|
this.onAgentStateChanged = handlers.onAgentStateChanged;
|
||||||
this.onOnboardingStatus = handlers.onOnboardingStatus;
|
this.onOnboardingStatus = handlers.onOnboardingStatus;
|
||||||
|
this.onWizardState = handlers.onWizardState;
|
||||||
this.onSideQuestionToken = handlers.onSideQuestionToken;
|
this.onSideQuestionToken = handlers.onSideQuestionToken;
|
||||||
this.onSideQuestionDone = handlers.onSideQuestionDone;
|
this.onSideQuestionDone = handlers.onSideQuestionDone;
|
||||||
this.onLogEntry = handlers.onLogEntry;
|
this.onLogEntry = handlers.onLogEntry;
|
||||||
|
|||||||
@@ -40,6 +40,7 @@ vi.mock("../api/client", () => {
|
|||||||
setAnthropicApiKey: vi.fn(),
|
setAnthropicApiKey: vi.fn(),
|
||||||
readFile: vi.fn(),
|
readFile: vi.fn(),
|
||||||
listProjectFiles: vi.fn(),
|
listProjectFiles: vi.fn(),
|
||||||
|
botCommand: vi.fn(),
|
||||||
};
|
};
|
||||||
class ChatWebSocket {
|
class ChatWebSocket {
|
||||||
connect(handlers: WsHandlers) {
|
connect(handlers: WsHandlers) {
|
||||||
@@ -64,6 +65,7 @@ const mockedApi = {
|
|||||||
setAnthropicApiKey: vi.mocked(api.setAnthropicApiKey),
|
setAnthropicApiKey: vi.mocked(api.setAnthropicApiKey),
|
||||||
readFile: vi.mocked(api.readFile),
|
readFile: vi.mocked(api.readFile),
|
||||||
listProjectFiles: vi.mocked(api.listProjectFiles),
|
listProjectFiles: vi.mocked(api.listProjectFiles),
|
||||||
|
botCommand: vi.mocked(api.botCommand),
|
||||||
};
|
};
|
||||||
|
|
||||||
function setupMocks() {
|
function setupMocks() {
|
||||||
@@ -76,6 +78,7 @@ function setupMocks() {
|
|||||||
mockedApi.listProjectFiles.mockResolvedValue([]);
|
mockedApi.listProjectFiles.mockResolvedValue([]);
|
||||||
mockedApi.cancelChat.mockResolvedValue(true);
|
mockedApi.cancelChat.mockResolvedValue(true);
|
||||||
mockedApi.setAnthropicApiKey.mockResolvedValue(true);
|
mockedApi.setAnthropicApiKey.mockResolvedValue(true);
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "Bot response" });
|
||||||
}
|
}
|
||||||
|
|
||||||
describe("Default provider selection (Story 206)", () => {
|
describe("Default provider selection (Story 206)", () => {
|
||||||
@@ -1457,3 +1460,204 @@ describe("File reference expansion (Story 269 AC4)", () => {
|
|||||||
expect(mockedApi.readFile).not.toHaveBeenCalled();
|
expect(mockedApi.readFile).not.toHaveBeenCalled();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
|
describe("Slash command handling (Story 374)", () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
capturedWsHandlers = null;
|
||||||
|
lastSendChatArgs = null;
|
||||||
|
setupMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.clearAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /status calls botCommand and displays response", async () => {
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "Pipeline: 3 active" });
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/status" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.botCommand).toHaveBeenCalledWith(
|
||||||
|
"status",
|
||||||
|
"",
|
||||||
|
undefined,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
expect(await screen.findByText("Pipeline: 3 active")).toBeInTheDocument();
|
||||||
|
// Should NOT go to LLM
|
||||||
|
expect(lastSendChatArgs).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /status <number> passes args to botCommand", async () => {
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "Story 42 details" });
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/status 42" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.botCommand).toHaveBeenCalledWith(
|
||||||
|
"status",
|
||||||
|
"42",
|
||||||
|
undefined,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /start <number> calls botCommand", async () => {
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "Started agent" });
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/start 42 opus" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.botCommand).toHaveBeenCalledWith(
|
||||||
|
"start",
|
||||||
|
"42 opus",
|
||||||
|
undefined,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
expect(await screen.findByText("Started agent")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /git calls botCommand", async () => {
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "On branch main" });
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/git" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.botCommand).toHaveBeenCalledWith("git", "", undefined);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /cost calls botCommand", async () => {
|
||||||
|
mockedApi.botCommand.mockResolvedValue({ response: "$1.23 today" });
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/cost" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.botCommand).toHaveBeenCalledWith("cost", "", undefined);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /reset clears messages and session without LLM", async () => {
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
// First add a message so there is history to clear
|
||||||
|
act(() => {
|
||||||
|
capturedWsHandlers?.onUpdate([
|
||||||
|
{ role: "user", content: "hello" },
|
||||||
|
{ role: "assistant", content: "world" },
|
||||||
|
]);
|
||||||
|
});
|
||||||
|
expect(await screen.findByText("world")).toBeInTheDocument();
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/reset" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
// LLM must NOT be invoked
|
||||||
|
expect(lastSendChatArgs).toBeNull();
|
||||||
|
// botCommand must NOT be invoked (reset is frontend-only)
|
||||||
|
expect(mockedApi.botCommand).not.toHaveBeenCalled();
|
||||||
|
// Confirmation message should appear
|
||||||
|
expect(await screen.findByText(/Session reset/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: unrecognised slash command shows error message", async () => {
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/foobar" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(await screen.findByText(/Unknown command/)).toBeInTheDocument();
|
||||||
|
// Should NOT go to LLM
|
||||||
|
expect(lastSendChatArgs).toBeNull();
|
||||||
|
// Should NOT call botCommand
|
||||||
|
expect(mockedApi.botCommand).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: /help shows help overlay", async () => {
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/help" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(await screen.findByTestId("help-overlay")).toBeInTheDocument();
|
||||||
|
expect(lastSendChatArgs).toBeNull();
|
||||||
|
expect(mockedApi.botCommand).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("AC: botCommand API error shows error message in chat", async () => {
|
||||||
|
mockedApi.botCommand.mockRejectedValue(new Error("Server error"));
|
||||||
|
render(<Chat projectPath="/tmp/project" onCloseProject={vi.fn()} />);
|
||||||
|
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText("Send a message...");
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(input, { target: { value: "/git" } });
|
||||||
|
});
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(
|
||||||
|
await screen.findByText(/Error running command/),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|||||||
@@ -4,7 +4,11 @@ import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
|
|||||||
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism";
|
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism";
|
||||||
import type { AgentConfigInfo } from "../api/agents";
|
import type { AgentConfigInfo } from "../api/agents";
|
||||||
import { agentsApi } from "../api/agents";
|
import { agentsApi } from "../api/agents";
|
||||||
import type { AnthropicModelInfo, PipelineState } from "../api/client";
|
import type {
|
||||||
|
AnthropicModelInfo,
|
||||||
|
PipelineState,
|
||||||
|
WizardStateData,
|
||||||
|
} from "../api/client";
|
||||||
import { api, ChatWebSocket } from "../api/client";
|
import { api, ChatWebSocket } from "../api/client";
|
||||||
import { useChatHistory } from "../hooks/useChatHistory";
|
import { useChatHistory } from "../hooks/useChatHistory";
|
||||||
import type { Message, ProviderConfig } from "../types";
|
import type { Message, ProviderConfig } from "../types";
|
||||||
@@ -17,6 +21,7 @@ import { LozengeFlyProvider } from "./LozengeFlyContext";
|
|||||||
import { MessageItem } from "./MessageItem";
|
import { MessageItem } from "./MessageItem";
|
||||||
import type { LogEntry } from "./ServerLogsPanel";
|
import type { LogEntry } from "./ServerLogsPanel";
|
||||||
import { ServerLogsPanel } from "./ServerLogsPanel";
|
import { ServerLogsPanel } from "./ServerLogsPanel";
|
||||||
|
import SetupWizard from "./SetupWizard";
|
||||||
import { SideQuestionOverlay } from "./SideQuestionOverlay";
|
import { SideQuestionOverlay } from "./SideQuestionOverlay";
|
||||||
import { StagePanel } from "./StagePanel";
|
import { StagePanel } from "./StagePanel";
|
||||||
import { WorkItemDetailPanel } from "./WorkItemDetailPanel";
|
import { WorkItemDetailPanel } from "./WorkItemDetailPanel";
|
||||||
@@ -217,6 +222,7 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
|||||||
new Map(),
|
new Map(),
|
||||||
);
|
);
|
||||||
const [needsOnboarding, setNeedsOnboarding] = useState(false);
|
const [needsOnboarding, setNeedsOnboarding] = useState(false);
|
||||||
|
const [wizardState, setWizardState] = useState<WizardStateData | null>(null);
|
||||||
const onboardingTriggeredRef = useRef(false);
|
const onboardingTriggeredRef = useRef(false);
|
||||||
const [selectedWorkItemId, setSelectedWorkItemId] = useState<string | null>(
|
const [selectedWorkItemId, setSelectedWorkItemId] = useState<string | null>(
|
||||||
null,
|
null,
|
||||||
@@ -466,6 +472,9 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
|||||||
onOnboardingStatus: (onboarding: boolean) => {
|
onOnboardingStatus: (onboarding: boolean) => {
|
||||||
setNeedsOnboarding(onboarding);
|
setNeedsOnboarding(onboarding);
|
||||||
},
|
},
|
||||||
|
onWizardState: (state: WizardStateData) => {
|
||||||
|
setWizardState(state);
|
||||||
|
},
|
||||||
onSideQuestionToken: (content) => {
|
onSideQuestionToken: (content) => {
|
||||||
setSideQuestion((prev) =>
|
setSideQuestion((prev) =>
|
||||||
prev ? { ...prev, response: prev.response + content } : prev,
|
prev ? { ...prev, response: prev.response + content } : prev,
|
||||||
@@ -612,6 +621,81 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// /reset — clear session and message history without LLM
|
||||||
|
if (/^\/reset\s*$/i.test(messageText)) {
|
||||||
|
setMessages([]);
|
||||||
|
setClaudeSessionId(null);
|
||||||
|
setStreamingContent("");
|
||||||
|
setStreamingThinking("");
|
||||||
|
setActivityStatus(null);
|
||||||
|
setMessages([
|
||||||
|
{
|
||||||
|
role: "assistant",
|
||||||
|
content: "Session reset. Starting a fresh conversation.",
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Slash commands forwarded to the backend bot command endpoint
|
||||||
|
const slashMatch = messageText.match(/^\/(\S+)(?:\s+([\s\S]*))?$/);
|
||||||
|
if (slashMatch) {
|
||||||
|
const cmd = slashMatch[1].toLowerCase();
|
||||||
|
const args = (slashMatch[2] ?? "").trim();
|
||||||
|
|
||||||
|
// Ignore commands handled elsewhere
|
||||||
|
if (cmd !== "btw") {
|
||||||
|
const knownCommands = new Set([
|
||||||
|
"status",
|
||||||
|
"assign",
|
||||||
|
"start",
|
||||||
|
"show",
|
||||||
|
"move",
|
||||||
|
"delete",
|
||||||
|
"cost",
|
||||||
|
"git",
|
||||||
|
"overview",
|
||||||
|
"rebuild",
|
||||||
|
"loc",
|
||||||
|
]);
|
||||||
|
|
||||||
|
if (knownCommands.has(cmd)) {
|
||||||
|
// Show the slash command in chat as a user message (display only)
|
||||||
|
setMessages((prev: Message[]) => [
|
||||||
|
...prev,
|
||||||
|
{ role: "user", content: messageText },
|
||||||
|
]);
|
||||||
|
try {
|
||||||
|
const result = await api.botCommand(cmd, args, undefined);
|
||||||
|
setMessages((prev: Message[]) => [
|
||||||
|
...prev,
|
||||||
|
{ role: "assistant", content: result.response },
|
||||||
|
]);
|
||||||
|
} catch (e) {
|
||||||
|
setMessages((prev: Message[]) => [
|
||||||
|
...prev,
|
||||||
|
{
|
||||||
|
role: "assistant",
|
||||||
|
content: `**Error running command:** ${e}`,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unknown slash command
|
||||||
|
setMessages((prev: Message[]) => [
|
||||||
|
...prev,
|
||||||
|
{ role: "user", content: messageText },
|
||||||
|
{
|
||||||
|
role: "assistant",
|
||||||
|
content: `Unknown command: \`/${cmd}\`. Type \`/help\` to see available commands.`,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// /btw <question> — answered from context without disrupting main chat
|
// /btw <question> — answered from context without disrupting main chat
|
||||||
const btwMatch = messageText.match(/^\/btw\s+(.+)/s);
|
const btwMatch = messageText.match(/^\/btw\s+(.+)/s);
|
||||||
if (btwMatch) {
|
if (btwMatch) {
|
||||||
@@ -903,63 +987,76 @@ export function Chat({ projectPath, onCloseProject }: ChatProps) {
|
|||||||
gap: "24px",
|
gap: "24px",
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
{needsOnboarding && messages.length === 0 && !loading && (
|
{wizardState &&
|
||||||
<div
|
!wizardState.completed &&
|
||||||
data-testid="onboarding-welcome"
|
messages.length === 0 &&
|
||||||
style={{
|
!loading && (
|
||||||
padding: "24px",
|
<SetupWizard
|
||||||
borderRadius: "12px",
|
wizardState={wizardState}
|
||||||
background: "#1c2a1c",
|
onWizardUpdate={setWizardState}
|
||||||
border: "1px solid #2d4a2d",
|
sendMessage={sendMessage}
|
||||||
marginBottom: "8px",
|
/>
|
||||||
}}
|
)}
|
||||||
>
|
{needsOnboarding &&
|
||||||
<h3
|
!wizardState &&
|
||||||
|
messages.length === 0 &&
|
||||||
|
!loading && (
|
||||||
|
<div
|
||||||
|
data-testid="onboarding-welcome"
|
||||||
style={{
|
style={{
|
||||||
margin: "0 0 8px 0",
|
padding: "24px",
|
||||||
color: "#a0d4a0",
|
borderRadius: "12px",
|
||||||
fontSize: "1.1rem",
|
background: "#1c2a1c",
|
||||||
|
border: "1px solid #2d4a2d",
|
||||||
|
marginBottom: "8px",
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
Welcome to Storkit
|
<h3
|
||||||
</h3>
|
style={{
|
||||||
<p
|
margin: "0 0 8px 0",
|
||||||
style={{
|
color: "#a0d4a0",
|
||||||
margin: "0 0 16px 0",
|
fontSize: "1.1rem",
|
||||||
color: "#ccc",
|
}}
|
||||||
lineHeight: 1.5,
|
>
|
||||||
}}
|
Welcome to Storkit
|
||||||
>
|
</h3>
|
||||||
This project needs to be set up before you can start writing
|
<p
|
||||||
stories. The agent will guide you through configuring your
|
style={{
|
||||||
project goals and tech stack.
|
margin: "0 0 16px 0",
|
||||||
</p>
|
color: "#ccc",
|
||||||
<button
|
lineHeight: 1.5,
|
||||||
type="button"
|
}}
|
||||||
data-testid="onboarding-start-button"
|
>
|
||||||
onClick={() => {
|
This project needs to be set up before you can start
|
||||||
if (onboardingTriggeredRef.current) return;
|
writing stories. The agent will guide you through
|
||||||
onboardingTriggeredRef.current = true;
|
configuring your project goals and tech stack.
|
||||||
setNeedsOnboarding(false);
|
</p>
|
||||||
sendMessage(
|
<button
|
||||||
"I just created a new project. Help me set it up.",
|
type="button"
|
||||||
);
|
data-testid="onboarding-start-button"
|
||||||
}}
|
onClick={() => {
|
||||||
style={{
|
if (onboardingTriggeredRef.current) return;
|
||||||
padding: "10px 20px",
|
onboardingTriggeredRef.current = true;
|
||||||
borderRadius: "8px",
|
setNeedsOnboarding(false);
|
||||||
border: "none",
|
sendMessage(
|
||||||
backgroundColor: "#a0d4a0",
|
"I just created a new project. Help me set it up.",
|
||||||
color: "#1a1a1a",
|
);
|
||||||
cursor: "pointer",
|
}}
|
||||||
fontSize: "0.95rem",
|
style={{
|
||||||
fontWeight: 600,
|
padding: "10px 20px",
|
||||||
}}
|
borderRadius: "8px",
|
||||||
>
|
border: "none",
|
||||||
Start Project Setup
|
backgroundColor: "#a0d4a0",
|
||||||
</button>
|
color: "#1a1a1a",
|
||||||
</div>
|
cursor: "pointer",
|
||||||
)}
|
fontSize: "0.95rem",
|
||||||
|
fontWeight: 600,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Start Project Setup
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
{messages.map((msg: Message, idx: number) => (
|
{messages.map((msg: Message, idx: number) => (
|
||||||
<MessageItem
|
<MessageItem
|
||||||
key={`msg-${idx}-${msg.role}-${msg.content.substring(0, 20)}`}
|
key={`msg-${idx}-${msg.role}-${msg.content.substring(0, 20)}`}
|
||||||
|
|||||||
@@ -12,6 +12,57 @@ const SLASH_COMMANDS: SlashCommand[] = [
|
|||||||
name: "/help",
|
name: "/help",
|
||||||
description: "Show this list of available slash commands.",
|
description: "Show this list of available slash commands.",
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
name: "/status",
|
||||||
|
description:
|
||||||
|
"Show pipeline status and agent availability. `/status <number>` shows a story triage dump.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/assign <number> <model>",
|
||||||
|
description: "Pre-assign a model to a story (e.g. `/assign 42 opus`).",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/start <number>",
|
||||||
|
description:
|
||||||
|
"Start a coder on a story. Optionally specify a model: `/start <number> opus`.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/show <number>",
|
||||||
|
description: "Display the full text of a work item.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/move <number> <stage>",
|
||||||
|
description:
|
||||||
|
"Move a work item to a pipeline stage (backlog, current, qa, merge, done).",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/delete <number>",
|
||||||
|
description:
|
||||||
|
"Remove a work item from the pipeline and stop any running agent.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/cost",
|
||||||
|
description:
|
||||||
|
"Show token spend: 24h total, top stories, breakdown by agent type, and all-time total.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/git",
|
||||||
|
description:
|
||||||
|
"Show git status: branch, uncommitted changes, and ahead/behind remote.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/overview <number>",
|
||||||
|
description: "Show the implementation summary for a merged story.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/rebuild",
|
||||||
|
description: "Rebuild the server binary and restart.",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "/reset",
|
||||||
|
description:
|
||||||
|
"Clear the current Claude Code session and start fresh (messages and session ID are cleared locally).",
|
||||||
|
},
|
||||||
{
|
{
|
||||||
name: "/btw <question>",
|
name: "/btw <question>",
|
||||||
description:
|
description:
|
||||||
|
|||||||
@@ -0,0 +1,354 @@
|
|||||||
|
import { useCallback, useState } from "react";
|
||||||
|
import type { WizardStateData, WizardStepInfo } from "../api/client";
|
||||||
|
|
||||||
|
const API_BASE = "/api";
|
||||||
|
|
||||||
|
interface SetupWizardProps {
|
||||||
|
wizardState: WizardStateData;
|
||||||
|
onWizardUpdate: (state: WizardStateData) => void;
|
||||||
|
sendMessage: (message: string) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Style constants for the wizard UI. */
|
||||||
|
const STEP_BG_PENDING = "#1a1f2e";
|
||||||
|
const STEP_BG_ACTIVE = "#1c2a1c";
|
||||||
|
const STEP_BG_DONE = "#1a2a1a";
|
||||||
|
const STEP_BORDER_PENDING = "#2a2f3e";
|
||||||
|
const STEP_BORDER_ACTIVE = "#2d4a2d";
|
||||||
|
const STEP_BORDER_DONE = "#2d4a2d";
|
||||||
|
const COLOR_LABEL = "#ccc";
|
||||||
|
const COLOR_LABEL_DONE = "#a0d4a0";
|
||||||
|
const COLOR_ACCENT = "#a0d4a0";
|
||||||
|
|
||||||
|
function statusIcon(status: string): string {
|
||||||
|
switch (status) {
|
||||||
|
case "confirmed":
|
||||||
|
return "\u2713";
|
||||||
|
case "skipped":
|
||||||
|
return "\u2013";
|
||||||
|
case "generating":
|
||||||
|
return "\u2026";
|
||||||
|
case "awaiting_confirmation":
|
||||||
|
return "?";
|
||||||
|
default:
|
||||||
|
return "\u00B7";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function stepBackground(status: string, isActive: boolean): string {
|
||||||
|
if (status === "confirmed" || status === "skipped") return STEP_BG_DONE;
|
||||||
|
if (isActive) return STEP_BG_ACTIVE;
|
||||||
|
return STEP_BG_PENDING;
|
||||||
|
}
|
||||||
|
|
||||||
|
function stepBorder(status: string, isActive: boolean): string {
|
||||||
|
if (status === "confirmed" || status === "skipped") return STEP_BORDER_DONE;
|
||||||
|
if (isActive) return STEP_BORDER_ACTIVE;
|
||||||
|
return STEP_BORDER_PENDING;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Messages sent to the chat to trigger agent generation for each step. */
|
||||||
|
const STEP_PROMPTS: Record<string, string> = {
|
||||||
|
context:
|
||||||
|
"Read the codebase and generate .storkit/specs/00_CONTEXT.md with a project context spec. Include High-Level Goal, Core Features, Domain Definition, and Glossary sections. Then call the wizard API to store the content: PUT /api/wizard/step/context/content",
|
||||||
|
stack:
|
||||||
|
"Read the tech stack and generate .storkit/specs/tech/STACK.md with a tech stack spec. Include Core Stack, Coding Standards, Quality Gates, and Libraries sections. Then call the wizard API to store the content: PUT /api/wizard/step/stack/content",
|
||||||
|
test_script:
|
||||||
|
"Read the project structure and create script/test — a bash script that runs the project's actual test suite. Then call the wizard API: PUT /api/wizard/step/test_script/content",
|
||||||
|
release_script:
|
||||||
|
"Read the project's deployment setup and create script/release tailored to the project. Then call the wizard API: PUT /api/wizard/step/release_script/content",
|
||||||
|
test_coverage:
|
||||||
|
"If the stack supports coverage reporting, create script/test_coverage. Then call the wizard API: PUT /api/wizard/step/test_coverage/content",
|
||||||
|
};
|
||||||
|
|
||||||
|
async function apiPost(path: string): Promise<WizardStateData | null> {
|
||||||
|
try {
|
||||||
|
const resp = await fetch(`${API_BASE}${path}`, { method: "POST" });
|
||||||
|
if (!resp.ok) return null;
|
||||||
|
return (await resp.json()) as WizardStateData;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function StepCard({
|
||||||
|
step,
|
||||||
|
isActive,
|
||||||
|
onGenerate,
|
||||||
|
onConfirm,
|
||||||
|
onSkip,
|
||||||
|
}: {
|
||||||
|
step: WizardStepInfo;
|
||||||
|
isActive: boolean;
|
||||||
|
onGenerate: () => void;
|
||||||
|
onConfirm: () => void;
|
||||||
|
onSkip: () => void;
|
||||||
|
}) {
|
||||||
|
const isDone = step.status === "confirmed" || step.status === "skipped";
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
data-testid={`wizard-step-${step.step}`}
|
||||||
|
style={{
|
||||||
|
padding: "16px",
|
||||||
|
borderRadius: "8px",
|
||||||
|
background: stepBackground(step.status, isActive),
|
||||||
|
border: `1px solid ${stepBorder(step.status, isActive)}`,
|
||||||
|
opacity: !isActive && !isDone ? 0.5 : 1,
|
||||||
|
transition: "all 0.2s ease",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "12px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
width: "24px",
|
||||||
|
height: "24px",
|
||||||
|
borderRadius: "50%",
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "center",
|
||||||
|
fontSize: "14px",
|
||||||
|
fontWeight: 600,
|
||||||
|
background: isDone ? COLOR_ACCENT : "transparent",
|
||||||
|
border: isDone ? "none" : `1px solid ${COLOR_LABEL}`,
|
||||||
|
color: isDone ? "#1a1a1a" : COLOR_LABEL,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{statusIcon(step.status)}
|
||||||
|
</span>
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
flex: 1,
|
||||||
|
color: isDone ? COLOR_LABEL_DONE : COLOR_LABEL,
|
||||||
|
fontWeight: isActive ? 600 : 400,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{step.label}
|
||||||
|
</span>
|
||||||
|
{isActive && step.status === "pending" && (
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid={`wizard-generate-${step.step}`}
|
||||||
|
onClick={onGenerate}
|
||||||
|
style={{
|
||||||
|
padding: "6px 14px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
border: "none",
|
||||||
|
backgroundColor: COLOR_ACCENT,
|
||||||
|
color: "#1a1a1a",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "0.85rem",
|
||||||
|
fontWeight: 600,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Generate
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
{isActive && step.status === "generating" && (
|
||||||
|
<span style={{ color: "#aaa", fontSize: "0.85rem" }}>
|
||||||
|
Generating...
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{step.content && step.status === "awaiting_confirmation" && (
|
||||||
|
<div style={{ marginTop: "12px" }}>
|
||||||
|
<pre
|
||||||
|
data-testid={`wizard-preview-${step.step}`}
|
||||||
|
style={{
|
||||||
|
background: "#111",
|
||||||
|
padding: "12px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
fontSize: "0.8rem",
|
||||||
|
color: "#ddd",
|
||||||
|
whiteSpace: "pre-wrap",
|
||||||
|
maxHeight: "200px",
|
||||||
|
overflow: "auto",
|
||||||
|
margin: "0 0 12px 0",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{step.content}
|
||||||
|
</pre>
|
||||||
|
<div style={{ display: "flex", gap: "8px" }}>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid={`wizard-confirm-${step.step}`}
|
||||||
|
onClick={onConfirm}
|
||||||
|
style={{
|
||||||
|
padding: "6px 14px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
border: "none",
|
||||||
|
backgroundColor: COLOR_ACCENT,
|
||||||
|
color: "#1a1a1a",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "0.85rem",
|
||||||
|
fontWeight: 600,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Confirm
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid={`wizard-revise-${step.step}`}
|
||||||
|
onClick={onGenerate}
|
||||||
|
style={{
|
||||||
|
padding: "6px 14px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
border: "1px solid #555",
|
||||||
|
backgroundColor: "transparent",
|
||||||
|
color: "#ccc",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "0.85rem",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Revise
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid={`wizard-skip-${step.step}`}
|
||||||
|
onClick={onSkip}
|
||||||
|
style={{
|
||||||
|
padding: "6px 14px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
border: "1px solid #555",
|
||||||
|
backgroundColor: "transparent",
|
||||||
|
color: "#888",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "0.85rem",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Skip
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{isActive && step.status === "pending" && !step.content && (
|
||||||
|
<div style={{ marginTop: "8px", display: "flex", gap: "8px" }}>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid={`wizard-skip-${step.step}`}
|
||||||
|
onClick={onSkip}
|
||||||
|
style={{
|
||||||
|
padding: "4px 10px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
border: "1px solid #444",
|
||||||
|
backgroundColor: "transparent",
|
||||||
|
color: "#888",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "0.8rem",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Skip this step
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export default function SetupWizard({
|
||||||
|
wizardState,
|
||||||
|
onWizardUpdate,
|
||||||
|
sendMessage,
|
||||||
|
}: SetupWizardProps) {
|
||||||
|
const [, setRefreshKey] = useState(0);
|
||||||
|
|
||||||
|
const handleGenerate = useCallback(
|
||||||
|
(step: WizardStepInfo) => {
|
||||||
|
const prompt = STEP_PROMPTS[step.step];
|
||||||
|
if (prompt) {
|
||||||
|
sendMessage(prompt);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[sendMessage],
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleConfirm = useCallback(
|
||||||
|
async (step: WizardStepInfo) => {
|
||||||
|
const result = await apiPost(`/wizard/step/${step.step}/confirm`);
|
||||||
|
if (result) {
|
||||||
|
onWizardUpdate(result);
|
||||||
|
setRefreshKey((k) => k + 1);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[onWizardUpdate],
|
||||||
|
);
|
||||||
|
|
||||||
|
const handleSkip = useCallback(
|
||||||
|
async (step: WizardStepInfo) => {
|
||||||
|
const result = await apiPost(`/wizard/step/${step.step}/skip`);
|
||||||
|
if (result) {
|
||||||
|
onWizardUpdate(result);
|
||||||
|
setRefreshKey((k) => k + 1);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[onWizardUpdate],
|
||||||
|
);
|
||||||
|
|
||||||
|
if (wizardState.completed) {
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
data-testid="wizard-complete"
|
||||||
|
style={{
|
||||||
|
padding: "24px",
|
||||||
|
borderRadius: "12px",
|
||||||
|
background: STEP_BG_DONE,
|
||||||
|
border: `1px solid ${STEP_BORDER_DONE}`,
|
||||||
|
textAlign: "center",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<h3 style={{ margin: "0 0 8px 0", color: COLOR_ACCENT }}>
|
||||||
|
Setup Complete
|
||||||
|
</h3>
|
||||||
|
<p style={{ margin: 0, color: COLOR_LABEL }}>
|
||||||
|
Your project is configured. You can start writing stories.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
data-testid="setup-wizard"
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "12px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div style={{ marginBottom: "8px" }}>
|
||||||
|
<h3
|
||||||
|
style={{
|
||||||
|
margin: "0 0 4px 0",
|
||||||
|
color: COLOR_ACCENT,
|
||||||
|
fontSize: "1.1rem",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Project Setup Wizard
|
||||||
|
</h3>
|
||||||
|
<p style={{ margin: 0, color: "#999", fontSize: "0.85rem" }}>
|
||||||
|
Step {wizardState.current_step_index + 1} of{" "}
|
||||||
|
{wizardState.steps.length}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{wizardState.steps.map((step, idx) => (
|
||||||
|
<StepCard
|
||||||
|
key={step.step}
|
||||||
|
step={step}
|
||||||
|
isActive={idx === wizardState.current_step_index}
|
||||||
|
onGenerate={() => handleGenerate(step)}
|
||||||
|
onConfirm={() => handleConfirm(step)}
|
||||||
|
onSkip={() => handleSkip(step)}
|
||||||
|
/>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
@@ -32,7 +32,7 @@ function renderHighlightedMatch(text: string, query: string) {
|
|||||||
return (
|
return (
|
||||||
<span
|
<span
|
||||||
key={`${char}-${count}`}
|
key={`${char}-${count}`}
|
||||||
style={isMatch ? { fontWeight: 600, color: "#222" } : undefined}
|
className={isMatch ? "path-match-highlight" : undefined}
|
||||||
>
|
>
|
||||||
{char}
|
{char}
|
||||||
</span>
|
</span>
|
||||||
@@ -93,16 +93,15 @@ export function ProjectPathInput({
|
|||||||
/>
|
/>
|
||||||
{matchList.length > 0 && (
|
{matchList.length > 0 && (
|
||||||
<div
|
<div
|
||||||
|
className="path-dropdown"
|
||||||
style={{
|
style={{
|
||||||
position: "absolute",
|
position: "absolute",
|
||||||
top: "100%",
|
top: "100%",
|
||||||
left: 0,
|
left: 0,
|
||||||
right: 0,
|
right: 0,
|
||||||
marginTop: "6px",
|
marginTop: "6px",
|
||||||
border: "1px solid #ddd",
|
|
||||||
borderRadius: "6px",
|
borderRadius: "6px",
|
||||||
overflow: "hidden",
|
overflow: "hidden",
|
||||||
background: "#fff",
|
|
||||||
fontFamily: "monospace",
|
fontFamily: "monospace",
|
||||||
height: "160px",
|
height: "160px",
|
||||||
overflowY: "auto",
|
overflowY: "auto",
|
||||||
@@ -111,13 +110,12 @@ export function ProjectPathInput({
|
|||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
<div
|
<div
|
||||||
|
className="path-dropdown-header"
|
||||||
style={{
|
style={{
|
||||||
display: "flex",
|
display: "flex",
|
||||||
justifyContent: "flex-end",
|
justifyContent: "flex-end",
|
||||||
alignItems: "center",
|
alignItems: "center",
|
||||||
padding: "4px 6px",
|
padding: "4px 6px",
|
||||||
borderBottom: "1px solid #eee",
|
|
||||||
background: "#fafafa",
|
|
||||||
}}
|
}}
|
||||||
>
|
>
|
||||||
<button
|
<button
|
||||||
@@ -128,8 +126,6 @@ export function ProjectPathInput({
|
|||||||
width: "24px",
|
width: "24px",
|
||||||
height: "24px",
|
height: "24px",
|
||||||
borderRadius: "4px",
|
borderRadius: "4px",
|
||||||
border: "1px solid #ddd",
|
|
||||||
background: "#fff",
|
|
||||||
cursor: "pointer",
|
cursor: "pointer",
|
||||||
lineHeight: 1,
|
lineHeight: 1,
|
||||||
}}
|
}}
|
||||||
@@ -143,6 +139,7 @@ export function ProjectPathInput({
|
|||||||
<button
|
<button
|
||||||
key={match.path}
|
key={match.path}
|
||||||
type="button"
|
type="button"
|
||||||
|
className={`path-dropdown-item${isSelected ? " path-dropdown-item--selected" : ""}`}
|
||||||
onMouseEnter={() => onSelectMatch(index)}
|
onMouseEnter={() => onSelectMatch(index)}
|
||||||
onMouseDown={(event) => {
|
onMouseDown={(event) => {
|
||||||
event.preventDefault();
|
event.preventDefault();
|
||||||
@@ -154,7 +151,6 @@ export function ProjectPathInput({
|
|||||||
textAlign: "left",
|
textAlign: "left",
|
||||||
padding: "6px 8px",
|
padding: "6px 8px",
|
||||||
border: "none",
|
border: "none",
|
||||||
background: isSelected ? "#f0f0f0" : "transparent",
|
|
||||||
cursor: "pointer",
|
cursor: "pointer",
|
||||||
fontFamily: "inherit",
|
fontFamily: "inherit",
|
||||||
}}
|
}}
|
||||||
|
|||||||
@@ -70,11 +70,11 @@ export type WsResponse =
|
|||||||
|
|
||||||
// Re-export API client types for convenience
|
// Re-export API client types for convenience
|
||||||
export type {
|
export type {
|
||||||
|
CommandOutput as ApiCommandOutput,
|
||||||
|
FileEntry as ApiFileEntry,
|
||||||
Message as ApiMessage,
|
Message as ApiMessage,
|
||||||
ProviderConfig as ApiProviderConfig,
|
ProviderConfig as ApiProviderConfig,
|
||||||
FileEntry as ApiFileEntry,
|
|
||||||
SearchResult as ApiSearchResult,
|
SearchResult as ApiSearchResult,
|
||||||
CommandOutput as ApiCommandOutput,
|
|
||||||
WsRequest as ApiWsRequest,
|
WsRequest as ApiWsRequest,
|
||||||
WsResponse as ApiWsResponse,
|
WsResponse as ApiWsResponse,
|
||||||
};
|
};
|
||||||
|
|||||||
+20
-20
@@ -1,24 +1,24 @@
|
|||||||
{
|
{
|
||||||
"compilerOptions": {
|
"compilerOptions": {
|
||||||
"target": "ES2020",
|
"target": "ES2020",
|
||||||
"useDefineForClassFields": true,
|
"useDefineForClassFields": true,
|
||||||
"lib": ["ES2020", "DOM", "DOM.Iterable"],
|
"lib": ["ES2020", "DOM", "DOM.Iterable"],
|
||||||
"module": "ESNext",
|
"module": "ESNext",
|
||||||
"skipLibCheck": true,
|
"skipLibCheck": true,
|
||||||
|
|
||||||
/* Bundler mode */
|
/* Bundler mode */
|
||||||
"moduleResolution": "bundler",
|
"moduleResolution": "bundler",
|
||||||
"allowImportingTsExtensions": true,
|
"allowImportingTsExtensions": true,
|
||||||
"resolveJsonModule": true,
|
"resolveJsonModule": true,
|
||||||
"isolatedModules": true,
|
"isolatedModules": true,
|
||||||
"noEmit": true,
|
"noEmit": true,
|
||||||
"jsx": "react-jsx",
|
"jsx": "react-jsx",
|
||||||
|
|
||||||
/* Linting */
|
/* Linting */
|
||||||
"strict": true,
|
"strict": true,
|
||||||
"noUnusedLocals": true,
|
"noUnusedLocals": true,
|
||||||
"noUnusedParameters": true,
|
"noUnusedParameters": true,
|
||||||
"noFallthroughCasesInSwitch": true
|
"noFallthroughCasesInSwitch": true
|
||||||
},
|
},
|
||||||
"include": ["src"]
|
"include": ["src"]
|
||||||
}
|
}
|
||||||
|
|||||||
+45
-45
@@ -3,49 +3,49 @@ import { defineConfig } from "vite";
|
|||||||
|
|
||||||
// https://vite.dev/config/
|
// https://vite.dev/config/
|
||||||
export default defineConfig(() => {
|
export default defineConfig(() => {
|
||||||
const backendPort = Number(process.env.STORKIT_PORT || "3001");
|
const backendPort = Number(process.env.STORKIT_PORT || "3001");
|
||||||
return {
|
return {
|
||||||
plugins: [react()],
|
plugins: [react()],
|
||||||
define: {
|
define: {
|
||||||
__STORKIT_PORT__: JSON.stringify(String(backendPort)),
|
__STORKIT_PORT__: JSON.stringify(String(backendPort)),
|
||||||
__BUILD_TIME__: JSON.stringify(new Date().toISOString()),
|
__BUILD_TIME__: JSON.stringify(new Date().toISOString()),
|
||||||
},
|
},
|
||||||
server: {
|
server: {
|
||||||
port: backendPort + 2172,
|
port: backendPort + 2172,
|
||||||
proxy: {
|
proxy: {
|
||||||
"/api": {
|
"/api": {
|
||||||
target: `http://127.0.0.1:${String(backendPort)}`,
|
target: `http://127.0.0.1:${String(backendPort)}`,
|
||||||
timeout: 120000,
|
timeout: 120000,
|
||||||
configure: (proxy) => {
|
configure: (proxy) => {
|
||||||
proxy.on("error", (_err) => {
|
proxy.on("error", (_err) => {
|
||||||
// Swallow proxy errors (e.g. ECONNREFUSED during backend restart)
|
// Swallow proxy errors (e.g. ECONNREFUSED during backend restart)
|
||||||
// so the vite dev server doesn't crash.
|
// so the vite dev server doesn't crash.
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"/agents": {
|
"/agents": {
|
||||||
target: `http://127.0.0.1:${String(backendPort)}`,
|
target: `http://127.0.0.1:${String(backendPort)}`,
|
||||||
timeout: 120000,
|
timeout: 120000,
|
||||||
configure: (proxy) => {
|
configure: (proxy) => {
|
||||||
proxy.on("error", (_err) => {});
|
proxy.on("error", (_err) => {});
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
watch: {
|
watch: {
|
||||||
ignored: [
|
ignored: [
|
||||||
"**/.story_kit/**",
|
"**/.story_kit/**",
|
||||||
"**/target/**",
|
"**/target/**",
|
||||||
"**/.git/**",
|
"**/.git/**",
|
||||||
"**/server/**",
|
"**/server/**",
|
||||||
"**/Cargo.*",
|
"**/Cargo.*",
|
||||||
"**/vendor/**",
|
"**/vendor/**",
|
||||||
"**/node_modules/**",
|
"**/node_modules/**",
|
||||||
],
|
],
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
build: {
|
build: {
|
||||||
outDir: "dist",
|
outDir: "dist",
|
||||||
emptyOutDir: true,
|
emptyOutDir: true,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
});
|
});
|
||||||
|
|||||||
+22
-22
@@ -2,26 +2,26 @@ import react from "@vitejs/plugin-react";
|
|||||||
import { defineConfig } from "vitest/config";
|
import { defineConfig } from "vitest/config";
|
||||||
|
|
||||||
export default defineConfig({
|
export default defineConfig({
|
||||||
plugins: [react()],
|
plugins: [react()],
|
||||||
define: {
|
define: {
|
||||||
__BUILD_TIME__: JSON.stringify("2026-01-01T00:00:00.000Z"),
|
__BUILD_TIME__: JSON.stringify("2026-01-01T00:00:00.000Z"),
|
||||||
},
|
},
|
||||||
test: {
|
test: {
|
||||||
environment: "jsdom",
|
environment: "jsdom",
|
||||||
environmentOptions: {
|
environmentOptions: {
|
||||||
jsdom: {
|
jsdom: {
|
||||||
url: "http://localhost:3000",
|
url: "http://localhost:3000",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
globals: true,
|
globals: true,
|
||||||
testTimeout: 10_000,
|
testTimeout: 10_000,
|
||||||
setupFiles: ["./src/setupTests.ts"],
|
setupFiles: ["./src/setupTests.ts"],
|
||||||
css: true,
|
css: true,
|
||||||
exclude: ["tests/e2e/**", "node_modules/**"],
|
exclude: ["tests/e2e/**", "node_modules/**"],
|
||||||
coverage: {
|
coverage: {
|
||||||
provider: "v8",
|
provider: "v8",
|
||||||
reporter: ["text", "json-summary"],
|
reporter: ["text", "json-summary"],
|
||||||
reportsDirectory: "./coverage",
|
reportsDirectory: "./coverage",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
});
|
});
|
||||||
|
|||||||
+9
-5
@@ -59,7 +59,11 @@ PACKAGE_LOCK="${SCRIPT_DIR}/frontend/package-lock.json"
|
|||||||
echo "==> Regenerated package-lock.json"
|
echo "==> Regenerated package-lock.json"
|
||||||
|
|
||||||
git add "$CARGO_TOML" "$CARGO_LOCK" "$PACKAGE_JSON" "$PACKAGE_LOCK"
|
git add "$CARGO_TOML" "$CARGO_LOCK" "$PACKAGE_JSON" "$PACKAGE_LOCK"
|
||||||
git commit -m "Bump version to ${VERSION}"
|
if git diff --cached --quiet; then
|
||||||
|
echo "==> Version already at ${VERSION}, skipping commit"
|
||||||
|
else
|
||||||
|
git commit -m "Bump version to ${VERSION}"
|
||||||
|
fi
|
||||||
|
|
||||||
if ! command -v cross >/dev/null 2>&1; then
|
if ! command -v cross >/dev/null 2>&1; then
|
||||||
echo "Error: 'cross' is not installed. Run: cargo install cross"
|
echo "Error: 'cross' is not installed. Run: cargo install cross"
|
||||||
@@ -109,10 +113,10 @@ fi
|
|||||||
MERGE_RE="^(storkit|story-kit): merge "
|
MERGE_RE="^(storkit|story-kit): merge "
|
||||||
if [ -n "$LOG_RANGE" ]; then
|
if [ -n "$LOG_RANGE" ]; then
|
||||||
MERGED_RAW=$(git log "$LOG_RANGE" --pretty=format:"%s" --no-merges \
|
MERGED_RAW=$(git log "$LOG_RANGE" --pretty=format:"%s" --no-merges \
|
||||||
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u)
|
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u || true)
|
||||||
else
|
else
|
||||||
MERGED_RAW=$(git log --pretty=format:"%s" --no-merges \
|
MERGED_RAW=$(git log --pretty=format:"%s" --no-merges \
|
||||||
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u)
|
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u || true)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Categorise merged work items and format names.
|
# Categorise merged work items and format names.
|
||||||
@@ -138,13 +142,13 @@ if [ -n "$LOG_RANGE" ]; then
|
|||||||
| grep -Ev "^(storkit|story-kit): " \
|
| grep -Ev "^(storkit|story-kit): " \
|
||||||
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
||||||
| grep -v "^Bump version" \
|
| grep -v "^Bump version" \
|
||||||
| sed 's/^/- /')
|
| sed 's/^/- /' || true)
|
||||||
else
|
else
|
||||||
MANUAL=$(git log --pretty=format:"%s" --no-merges \
|
MANUAL=$(git log --pretty=format:"%s" --no-merges \
|
||||||
| grep -Ev "^(storkit|story-kit): " \
|
| grep -Ev "^(storkit|story-kit): " \
|
||||||
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
||||||
| grep -v "^Bump version" \
|
| grep -v "^Bump version" \
|
||||||
| sed 's/^/- /')
|
| sed 's/^/- /' || true)
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# ── Generate summary overview ─────────────────────────────────
|
# ── Generate summary overview ─────────────────────────────────
|
||||||
|
|||||||
+4
-2
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "storkit"
|
name = "storkit"
|
||||||
version = "0.5.0"
|
version = "0.8.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
build = "build.rs"
|
build = "build.rs"
|
||||||
|
|
||||||
@@ -18,11 +18,12 @@ notify = { workspace = true }
|
|||||||
poem = { workspace = true, features = ["websocket"] }
|
poem = { workspace = true, features = ["websocket"] }
|
||||||
poem-openapi = { workspace = true, features = ["swagger-ui"] }
|
poem-openapi = { workspace = true, features = ["swagger-ui"] }
|
||||||
portable-pty = { workspace = true }
|
portable-pty = { workspace = true }
|
||||||
reqwest = { workspace = true, features = ["json", "stream"] }
|
reqwest = { workspace = true, features = ["json", "stream", "form"] }
|
||||||
rust-embed = { workspace = true }
|
rust-embed = { workspace = true }
|
||||||
serde = { workspace = true, features = ["derive"] }
|
serde = { workspace = true, features = ["derive"] }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
serde_urlencoded = { workspace = true }
|
serde_urlencoded = { workspace = true }
|
||||||
|
sha2 = { workspace = true }
|
||||||
serde_yaml = { workspace = true }
|
serde_yaml = { workspace = true }
|
||||||
strip-ansi-escapes = { workspace = true }
|
strip-ansi-escapes = { workspace = true }
|
||||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync", "process"] }
|
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync", "process"] }
|
||||||
@@ -31,6 +32,7 @@ uuid = { workspace = true, features = ["v4", "serde"] }
|
|||||||
walkdir = { workspace = true }
|
walkdir = { workspace = true }
|
||||||
matrix-sdk = { workspace = true }
|
matrix-sdk = { workspace = true }
|
||||||
pulldown-cmark = { workspace = true }
|
pulldown-cmark = { workspace = true }
|
||||||
|
regex = { workspace = true }
|
||||||
|
|
||||||
# Force bundled SQLite so static musl builds don't need a system libsqlite3
|
# Force bundled SQLite so static musl builds don't need a system libsqlite3
|
||||||
libsqlite3-sys = { version = "0.35.0", features = ["bundled"] }
|
libsqlite3-sys = { version = "0.35.0", features = ["bundled"] }
|
||||||
|
|||||||
@@ -102,13 +102,29 @@ fn run_command_with_timeout(
|
|||||||
args: &[&str],
|
args: &[&str],
|
||||||
dir: &Path,
|
dir: &Path,
|
||||||
) -> Result<(bool, String), String> {
|
) -> Result<(bool, String), String> {
|
||||||
let mut child = Command::new(program)
|
// On Linux, execve can return ETXTBSY (26) briefly after a file is written
|
||||||
.args(args)
|
// before the kernel releases its "write open" state. Retry once after a
|
||||||
|
// short pause to handle this race condition.
|
||||||
|
let mut last_err = None;
|
||||||
|
let mut cmd = Command::new(&program);
|
||||||
|
cmd.args(args)
|
||||||
.current_dir(dir)
|
.current_dir(dir)
|
||||||
.stdout(std::process::Stdio::piped())
|
.stdout(std::process::Stdio::piped())
|
||||||
.stderr(std::process::Stdio::piped())
|
.stderr(std::process::Stdio::piped());
|
||||||
.spawn()
|
let mut child = loop {
|
||||||
.map_err(|e| format!("Failed to spawn command: {e}"))?;
|
match cmd.spawn() {
|
||||||
|
Ok(c) => break c,
|
||||||
|
Err(e) if e.raw_os_error() == Some(26) => {
|
||||||
|
// ETXTBSY — wait briefly and retry once
|
||||||
|
if last_err.is_some() {
|
||||||
|
return Err(format!("Failed to spawn command: {e}"));
|
||||||
|
}
|
||||||
|
last_err = Some(e);
|
||||||
|
std::thread::sleep(std::time::Duration::from_millis(50));
|
||||||
|
}
|
||||||
|
Err(e) => return Err(format!("Failed to spawn command: {e}")),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
// Drain stdout/stderr in background threads so the pipe buffers never fill.
|
// Drain stdout/stderr in background threads so the pipe buffers never fill.
|
||||||
let stdout_handle = child.stdout.take().map(|r| {
|
let stdout_handle = child.stdout.take().map(|r| {
|
||||||
|
|||||||
@@ -102,7 +102,7 @@ pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str)
|
|||||||
/// * If the story is in `4_merge/`, it is moved to `5_done/` and committed.
|
/// * If the story is in `4_merge/`, it is moved to `5_done/` and committed.
|
||||||
/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent).
|
/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent).
|
||||||
/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned.
|
/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned.
|
||||||
pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), String> {
|
pub fn move_story_to_done(project_root: &Path, story_id: &str) -> Result<(), String> {
|
||||||
let sk = project_root.join(".storkit").join("work");
|
let sk = project_root.join(".storkit").join("work");
|
||||||
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
||||||
let merge_path = sk.join("4_merge").join(format!("{story_id}.md"));
|
let merge_path = sk.join("4_merge").join(format!("{story_id}.md"));
|
||||||
@@ -584,10 +584,10 @@ mod tests {
|
|||||||
assert!(result.unwrap_err().contains("not found in work/2_current/"));
|
assert!(result.unwrap_err().contains("not found in work/2_current/"));
|
||||||
}
|
}
|
||||||
|
|
||||||
// ── move_story_to_archived tests ──────────────────────────────────────────
|
// ── move_story_to_done tests ──────────────────────────────────────────
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn move_story_to_archived_finds_in_merge_dir() {
|
fn move_story_to_done_finds_in_merge_dir() {
|
||||||
use std::fs;
|
use std::fs;
|
||||||
let tmp = tempfile::tempdir().unwrap();
|
let tmp = tempfile::tempdir().unwrap();
|
||||||
let root = tmp.path();
|
let root = tmp.path();
|
||||||
@@ -595,16 +595,16 @@ mod tests {
|
|||||||
fs::create_dir_all(&merge_dir).unwrap();
|
fs::create_dir_all(&merge_dir).unwrap();
|
||||||
fs::write(merge_dir.join("22_story_test.md"), "test").unwrap();
|
fs::write(merge_dir.join("22_story_test.md"), "test").unwrap();
|
||||||
|
|
||||||
move_story_to_archived(root, "22_story_test").unwrap();
|
move_story_to_done(root, "22_story_test").unwrap();
|
||||||
|
|
||||||
assert!(!merge_dir.join("22_story_test.md").exists());
|
assert!(!merge_dir.join("22_story_test.md").exists());
|
||||||
assert!(root.join(".storkit/work/5_done/22_story_test.md").exists());
|
assert!(root.join(".storkit/work/5_done/22_story_test.md").exists());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn move_story_to_archived_error_when_not_in_current_or_merge() {
|
fn move_story_to_done_error_when_not_in_current_or_merge() {
|
||||||
let tmp = tempfile::tempdir().unwrap();
|
let tmp = tempfile::tempdir().unwrap();
|
||||||
let result = move_story_to_archived(tmp.path(), "99_nonexistent");
|
let result = move_story_to_done(tmp.path(), "99_nonexistent");
|
||||||
assert!(result.unwrap_err().contains("4_merge"));
|
assert!(result.unwrap_err().contains("4_merge"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user