Compare commits

..

25 Commits

Author SHA1 Message Date
Timmy fd7698f0e7 Bump version to 0.10.3 2026-04-16 18:08:23 +01:00
dave 4b710b02f2 huskies: merge 591_story_gateway_chat_commands_use_active_project_root_instead_of_gateway_config_dir 2026-04-16 16:14:05 +00:00
dave e734e80da5 huskies: merge 590_story_gateway_native_mcp_tools_return_json_rpc_responses_missing_request_id 2026-04-16 11:41:52 +00:00
dave 4ddf2a4367 fix: strip front matter from show command, display useful metadata inline
Strips the YAML front matter block and shows useful fields
(depends_on, agent, blocked, retries) as a summary line at the top.
Eliminates the duplicate title problem.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-16 09:01:54 +00:00
dave 2b95388efd fix: convert markdown headings to bold in show command for Matrix rendering
Element X doesn't style <h2> tags distinctly. Convert ## headings to
**bold** text with a blank line above for consistent rendering across
all Matrix clients.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-16 08:47:41 +00:00
dave 9f0274417d huskies: merge 579_bug_matrix_bot_messages_render_markdown_headings_without_line_breaks_or_formatting 2026-04-16 08:22:34 +00:00
dave df2f20a5e5 huskies: merge 589_story_wizard_auto_detects_project_components_and_configures_scripts_accordingly 2026-04-16 00:22:53 +00:00
dave 61502f51d9 huskies: merge 588_bug_wizard_generated_script_test_misses_frontend_tests_for_projects_with_a_frontend 2026-04-15 23:57:12 +00:00
dave 4553d7215a huskies: merge 586_bug_wizard_skips_context_and_stack_generation_when_files_already_exist_from_scaffold 2026-04-15 23:52:25 +00:00
dave 4a1c6b4cfa huskies: merge 585_bug_bot_not_aware_of_actual_running_port_defaults_to_3001 2026-04-15 23:47:37 +00:00
dave 2663c5f91f huskies: merge 583_bug_add_test_that_builds_gateway_route_tree_to_catch_duplicate_route_panics 2026-04-15 19:57:12 +00:00
dave 79ee19ca5b huskies: merge 587_bug_pipeline_db_not_in_default_gitignore_novice_users_will_commit_it 2026-04-15 19:49:46 +00:00
dave 871a18f821 huskies: merge 584_bug_bot_asks_user_to_run_huskies_init_instead_of_running_wizard_automatically 2026-04-15 19:28:58 +00:00
Timmy f4a97c1135 Bump version to 0.10.2 2026-04-15 20:07:55 +01:00
dave 0969fb5d51 fix: remove duplicate / route in gateway that causes panic on startup
gateway_index_handler and embedded_index both registered at /. The
embedded React frontend should serve /. Remove the old gateway
index handler.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-15 18:57:35 +00:00
dave 744cc9dca4 huskies: merge 569_story_gateway_ui_cross_project_pipeline_status_view 2026-04-15 18:38:33 +00:00
dave ce37281333 huskies: merge 571_story_expose_agent_remaining_turns_and_budget_via_mcp_tool 2026-04-15 18:30:32 +00:00
dave 149a383447 huskies: merge 568_story_gateway_ui_connected_agents_dashboard 2026-04-15 18:25:17 +00:00
dave d68614e26a huskies: merge 580_story_diff_bot_command_shows_git_diff_from_main_branch_to_worktree_branch 2026-04-15 18:16:26 +00:00
dave a4480fa067 chore: feed CONTEXT and STACK specs to all agents, update STACK with source map
Agents now read specs/00_CONTEXT.md (what the project does) and
specs/tech/STACK.md (tech stack + source map) in addition to the
README. STACK.md rewritten to reflect current state — removes stale
references to biome, tauri-specta, .story_kit.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-15 18:15:09 +00:00
dave beb84ade9f huskies: merge 567_story_gateway_ui_project_management_add_and_remove_projects 2026-04-15 18:06:43 +00:00
dave d235fd41ac huskies: merge 581_story_freeze_command_to_hold_a_story_at_its_current_stage_without_advancing 2026-04-15 18:02:14 +00:00
dave 2246278845 huskies: merge 582_story_bot_configuration_page 2026-04-15 17:37:52 +00:00
dave d80fc143c2 huskies: merge 577_bug_show_command_reads_story_files_from_filesystem_instead_of_crdt 2026-04-15 17:28:05 +00:00
dave 1fe4ca2b7a Revert "huskies: merge 566_story_gateway_ui_bot_configuration_page"
This reverts commit c28c86dbc6.
2026-04-15 17:13:01 +00:00
39 changed files with 3620 additions and 642 deletions
+7 -7
View File
@@ -5,7 +5,7 @@ role = "Full-stack engineer. Implements features across all components."
model = "sonnet"
max_turns = 50
max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md to understand the dev process. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Always run the run_tests MCP tool before committing — do not commit until tests pass. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done. Add //! module-level doc comments to any new modules and /// doc comments to any new public functions, structs, or enums. Do not accept stories, move them between stages, or merge to master — the server handles that. For bugs, trust the story description and make surgical fixes."
[[agent]]
@@ -15,7 +15,7 @@ role = "Full-stack engineer. Implements features across all components."
model = "sonnet"
max_turns = 50
max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md to understand the dev process. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Always run the run_tests MCP tool before committing — do not commit until tests pass. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done. Add //! module-level doc comments to any new modules and /// doc comments to any new public functions, structs, or enums. Do not accept stories, move them between stages, or merge to master — the server handles that. For bugs, trust the story description and make surgical fixes."
[[agent]]
@@ -25,7 +25,7 @@ role = "Full-stack engineer. Implements features across all components."
model = "sonnet"
max_turns = 50
max_budget_usd = 5.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md to understand the dev process. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Always run the run_tests MCP tool before committing — do not commit until tests pass. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done. Add //! module-level doc comments to any new modules and /// doc comments to any new public functions, structs, or enums. Do not accept stories, move them between stages, or merge to master — the server handles that. For bugs, trust the story description and make surgical fixes."
[[agent]]
@@ -37,7 +37,7 @@ max_turns = 40
max_budget_usd = 4.00
prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
Read CLAUDE.md first, then .huskies/README.md to understand the dev process.
Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map.
## Your Workflow
@@ -126,7 +126,7 @@ role = "Senior full-stack engineer for complex tasks. Implements features across
model = "opus"
max_turns = 80
max_budget_usd = 20.00
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md to understand the dev process. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map. The story details are in your prompt above. The worktree and feature branch already exist - do not create them.\n\n## Your workflow\n1. Read the story and understand the acceptance criteria.\n2. Implement the changes.\n3. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done.\n4. Run the run_tests MCP tool. It blocks until tests complete and returns the results.\n5. If tests fail, fix the failures and run run_tests again. Do not commit until tests pass.\n6. Once tests pass, commit your work with a descriptive message and exit.\n\nDo NOT accept stories, move them between stages, or merge to master. The server handles all of that after you exit.\n\n## Bug Workflow: Trust the Story, Act Fast\nWhen working on bugs:\n1. READ THE STORY DESCRIPTION FIRST. If it specifies exact files, functions, and line numbers — go directly there and make the fix.\n2. If the story does NOT specify the exact location, investigate with targeted grep.\n3. Fix with a surgical, minimal change.\n4. Run tests, fix failures, commit and exit.\n5. Write commit messages that explain what broke and why."
system_prompt = "You are a senior full-stack engineer working autonomously in a git worktree. You handle complex tasks requiring deep architectural understanding. Always run the run_tests MCP tool before committing — do not commit until tests pass. As you complete each acceptance criterion, call check_criterion MCP tool to mark it done. Add //! module-level doc comments to any new modules and /// doc comments to any new public functions, structs, or enums. Do not accept stories, move them between stages, or merge to master — the server handles that. For bugs, trust the story description and make surgical fixes."
[[agent]]
@@ -138,7 +138,7 @@ max_turns = 40
max_budget_usd = 4.00
prompt = """You are the QA agent for story {{story_id}}. Your job is to verify the coder's work satisfies the story's acceptance criteria and produce a structured QA report.
Read CLAUDE.md first, then .huskies/README.md to understand the dev process.
Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map.
## Your Workflow
@@ -229,7 +229,7 @@ max_turns = 30
max_budget_usd = 5.00
prompt = """You are the mergemaster agent for story {{story_id}}. Your job is to merge the completed coder work into master.
Read CLAUDE.md first, then .huskies/README.md to understand the project.
Read CLAUDE.md first, then .huskies/README.md for the dev process, .huskies/specs/00_CONTEXT.md for what this project does, and .huskies/specs/tech/STACK.md for the tech stack and source map.
## Your Workflow
1. Call merge_agent_work(story_id='{{story_id}}'). It blocks until the merge completes and returns the full result.
+112 -112
View File
@@ -1,130 +1,130 @@
# Tech Stack & Constraints
# Tech Stack
## Overview
This project is a standalone Rust **web server binary** that serves a Vite/React frontend and exposes a **WebSocket API**. The built frontend assets are packaged with the binary (in a `frontend` directory) and served as static files. It functions as an **Agentic Code Assistant** capable of safely executing tools on the host system.
## Backend
- **Language:** Rust
- **Framework:** Poem (HTTP + WebSocket + OpenAPI)
- **Database:** SQLite via sqlx + rusqlite
- **State:** BFT CRDT replicated document backed by SQLite
- **Agents:** Claude Code CLI spawned in PTY pseudo-terminals
- **Package manager:** cargo
## Core Stack
* **Backend:** Rust (Web Server)
* **MSRV:** Stable (latest)
* **Framework:** Poem HTTP server with WebSocket support for streaming; HTTP APIs should use Poem OpenAPI (Swagger) for non-streaming endpoints.
* **Frontend:** TypeScript + React
* **Build Tool:** Vite
* **Package Manager:** npm
* **Styling:** CSS Modules or Tailwind (TBD - Defaulting to CSS Modules)
* **State Management:** React Context / Hooks
* **Chat UI:** Rendered Markdown with syntax highlighting.
## Frontend
- **Language:** TypeScript + React
- **Build:** Vite
- **Package manager:** npm
- **Testing:** Vitest (unit), Playwright (e2e)
## Agent Architecture
The application follows a **Tool-Use (Function Calling)** architecture:
1. **Frontend:** Collects user input and sends it to the LLM.
2. **LLM:** Decides to generate text OR request a **Tool Call** (e.g., `execute_shell`, `read_file`).
3. **Web Server Backend (The "Hand"):**
* Intercepts Tool Calls.
* Validates the request against the **Safety Policy**.
* Executes the native code (File I/O, Shell Process, Search).
* Returns the output (stdout/stderr/file content) to the LLM.
* **Streaming:** The backend sends real-time updates over WebSocket to keep the UI responsive during long-running Agent tasks.
## Deployment
- Single Rust binary with embedded React frontend (rust-embed)
- Three modes: standard server, headless build agent (`--rendezvous`), multi-project gateway (`--gateway`)
- Docker container with OrbStack recommended on macOS
## LLM Provider Abstraction
To support both Remote and Local models, the system implements a `ModelProvider` abstraction layer.
## Project Layout
```
server/src/ — Rust backend
frontend/src/ — React frontend
crates/bft-json-crdt/ — CRDT library
.huskies/ — Pipeline config, agent config, specs
script/ — test, build, lint scripts
docker/ — Dockerfile and docker-compose
website/ — Static marketing/docs site
```
* **Strategy:**
* Abstract the differences between API formats (OpenAI-compatible vs Anthropic vs Gemini).
* Normalize "Tool Use" definitions, as each provider handles function calling schemas differently.
* **Supported Providers:**
* **Ollama:** Local inference (e.g., Llama 3, DeepSeek Coder) for privacy and offline usage.
* **Anthropic:** Claude 3.5 models (Sonnet, Haiku) via API for coding tasks (Story 12).
* **Provider Selection:**
* Automatic detection based on model name prefix:
* `claude-` → Anthropic API
* Otherwise → Ollama
* Single unified model dropdown with section headers ("Anthropic", "Ollama")
* **API Key Management:**
* Anthropic API key stored server-side and persisted securely
* On first use of Claude model, user prompted to enter API key
* Key persists across sessions (no re-entry needed)
## Source Map
## Tooling Capabilities
### Core
### 1. Filesystem (Native)
* **Scope:** Strictly limited to the user-selected `project_root`.
* **Operations:** Read, Write, List, Delete.
* **Constraint:** Modifications to `.git/` are strictly forbidden via file APIs (use Git tools instead).
| File | Description |
|------|-------------|
| `server/src/main.rs` | Entry point, CLI argument parsing, and server startup |
| `server/src/config.rs` | Parses `project.toml` for agents, components, and server settings |
| `server/src/state.rs` | Global mutable session state (project root, cancellation) |
| `server/src/store.rs` | JSON-backed persistent key-value store for settings |
| `server/src/gateway.rs` | Multi-project gateway mode (MCP proxy, project switching, agent registration) |
### 2. Shell Execution
* **Library:** `tokio::process` for async execution.
* **Constraint:** We do **not** run an interactive shell (repl). We run discrete, stateless commands.
* **Allowlist:** The agent may only execute specific binaries:
* `git`
* `cargo`, `rustc`, `rustfmt`, `clippy`
* `npm`, `node`, `yarn`, `pnpm`, `bun`
* `ls`, `find`, `grep` (if not using internal search)
* `mkdir`, `rm`, `touch`, `mv`, `cp`
### Agents
### 3. Search & Navigation
* **Library:** `ignore` (by BurntSushi) + `grep` logic.
* **Behavior:**
* Must respect `.gitignore` files automatically.
* Must be performant (parallel traversal).
| File | Description |
|------|-------------|
| `server/src/agents/mod.rs` | Types, configuration, and orchestration for coding agents |
| `server/src/agents/gates.rs` | Runs test suites and validation scripts in agent worktrees |
| `server/src/agents/lifecycle.rs` | File creation, archival, and stage transitions for pipeline items |
| `server/src/agents/merge.rs` | Rebases agent work onto master and runs post-merge validation |
| `server/src/agents/pty.rs` | Spawns agent processes in pseudo-terminals and streams output |
| `server/src/agents/token_usage.rs` | Persists per-agent token consumption records to disk |
| `server/src/agent_log.rs` | Reads and writes JSONL agent event logs to disk |
| `server/src/agent_mode.rs` | Headless build-agent mode for distributed story processing |
## Coding Standards
### Agent Pool
### Rust
* **Style:** `rustfmt` standard.
* **Linter:** `clippy` - Must pass with 0 warnings before merging.
* **Error Handling:** Custom `AppError` type deriving `thiserror`. All Commands return `Result<T, AppError>`.
* **Concurrency:** Heavy tools (Search, Shell) must run on `tokio` threads to avoid blocking the UI.
* **Quality Gates:**
* `cargo clippy --all-targets --all-features` must show 0 errors, 0 warnings
* `cargo check` must succeed
* `cargo nextest run` must pass all tests
* **Test Coverage:**
* Generate JSON report: `cargo llvm-cov nextest --no-clean --json --output-path .story_kit/coverage/server.json`
* Generate lcov report: `cargo llvm-cov report --lcov --output-path .story_kit/coverage/server.lcov`
* Reports are written to `.story_kit/coverage/` (excluded from git)
| File | Description |
|------|-------------|
| `server/src/agents/pool/mod.rs` | Manages the set of active agents across all pipeline stages |
| `server/src/agents/pool/start.rs` | Spawns a new agent process in a worktree for a story |
| `server/src/agents/pool/stop.rs` | Terminates a running agent while preserving its worktree |
| `server/src/agents/pool/pipeline/advance.rs` | Moves stories forward through pipeline stages |
| `server/src/agents/pool/pipeline/completion.rs` | Processes exit results and triggers pipeline advancement |
| `server/src/agents/pool/pipeline/merge.rs` | Orchestrates the merge-to-master flow for completed stories |
| `server/src/agents/pool/auto_assign/auto_assign.rs` | Scans pipeline stages and dispatches agents to unassigned stories |
### TypeScript / React
* **Style:** Biome formatter (replaces Prettier/ESLint).
* **Linter:** Biome - Must pass with 0 errors, 0 warnings before merging.
* **Types:** Shared types with Rust (via `tauri-specta` or manual interface matching) are preferred to ensure type safety across the bridge.
* **Testing:** Vitest for unit/component tests; Playwright for end-to-end tests.
* **Quality Gates:**
* `npx @biomejs/biome check src/` must show 0 errors, 0 warnings
* `npm run build` must succeed
* `npm test` must pass
* `npm run test:e2e` must pass
* No `any` types allowed (use proper types or `unknown`)
* React keys must use stable IDs, not array indices
* All buttons must have explicit `type` attribute
### CRDT & Database
## Libraries (Approved)
* **Rust:**
* `serde`, `serde_json`: Serialization.
* `ignore`: Fast recursive directory iteration respecting gitignore.
* `walkdir`: Simple directory traversal.
* `tokio`: Async runtime.
* `reqwest`: For LLM API calls (Anthropic, Ollama).
* `eventsource-stream`: For Server-Sent Events (Anthropic streaming).
* `uuid`: For unique message IDs.
* `chrono`: For timestamps.
* `poem`: HTTP server framework.
* `poem-openapi`: OpenAPI (Swagger) for non-streaming HTTP APIs.
* **JavaScript:**
* `react-markdown`: For rendering chat responses.
* `vitest`: Unit/component testing.
* `playwright`: End-to-end testing.
| File | Description |
|------|-------------|
| `server/src/crdt_state.rs` | Pipeline state as a conflict-free replicated document backed by SQLite |
| `server/src/crdt_sync.rs` | WebSocket-based replication of pipeline state between nodes |
| `server/src/pipeline_state.rs` | Typed pipeline state machine |
| `server/src/db/mod.rs` | Content store, shadow writes, and CRDT op persistence |
## Running the App (Worktrees & Ports)
### HTTP — MCP Tools (the tools agents call)
Multiple instances can run simultaneously in different worktrees. To avoid port conflicts:
| File | Description |
|------|-------------|
| `server/src/http/mcp/mod.rs` | MCP endpoint dispatching tool calls |
| `server/src/http/mcp/agent_tools.rs` | Start, stop, wait, list, and inspect agents |
| `server/src/http/mcp/git_tools.rs` | Status, diff, add, commit, and log on agent worktrees |
| `server/src/http/mcp/merge_tools.rs` | Merge agent work to master and report failures |
| `server/src/http/mcp/shell_tools.rs` | Run commands, execute tests, and stream output |
| `server/src/http/mcp/story_tools.rs` | Create, update, move, and manage stories/bugs/refactors |
| `server/src/http/mcp/diagnostics.rs` | Server logs, CRDT dump, version, and story movement helpers |
- **Backend:** Set `HUSKIES_PORT` to a unique port (default is 3001). Example: `HUSKIES_PORT=3002 cargo run`
- **Frontend:** Run `npm run dev` from `frontend/`. It auto-selects the next unused port. It reads `HUSKIES_PORT` to know which backend to talk to, so export it before running: `export HUSKIES_PORT=3002 && cd frontend && npm run dev`
### Chat — Bot Commands
When running in a worktree, use a port that won't conflict with the main instance (3001). Ports 3002+ are good choices.
| File | Description |
|------|-------------|
| `server/src/chat/commands/mod.rs` | Bot-level command registry shared by all transports |
| `server/src/chat/commands/status.rs` | `status` command and pipeline status helpers |
| `server/src/chat/commands/backlog.rs` | `backlog` command — shows only backlog-stage items |
| `server/src/chat/commands/run_tests.rs` | `run_tests` command — run the project's test suite |
## Safety & Sandbox
1. **Project Scope:** The application must strictly enforce that it does not read/write outside the `project_root` selected by the user.
2. **Human in the Loop:**
* Shell commands that modify state (non-readonly) should ideally require a UI confirmation (configurable).
* File writes must be confirmed or revertible.
### Chat — Transports
| File | Description |
|------|-------------|
| `server/src/chat/transport/matrix/` | Matrix bot integration |
| `server/src/chat/transport/slack/` | Slack bot integration |
| `server/src/chat/transport/whatsapp/` | WhatsApp Business API integration |
| `server/src/chat/transport/discord/` | Discord bot integration |
### Frontend
| Directory | Description |
|-----------|-------------|
| `frontend/src/components/` | React UI components |
| `frontend/src/api/` | API client code (gateway, agents, etc.) |
### Utilities
| File | Description |
|------|-------------|
| `server/src/rebuild.rs` | Server rebuild and restart logic |
| `server/src/worktree.rs` | Creates, lists, and removes git worktrees for agent isolation |
| `server/src/io/watcher.rs` | Filesystem watcher for `.huskies/work/` and `project.toml` |
## Quality Gates
All enforced by `script/test`:
1. Frontend build (`npm run build`)
2. Rust formatting (`cargo fmt --all --check`)
3. Rust linting (`cargo clippy -- -D warnings`)
4. Rust tests (`cargo test`)
5. Frontend tests (`npm test`)
Generated
+17 -17
View File
@@ -366,9 +366,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "aws-lc-rs"
version = "1.16.2"
version = "1.16.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc"
checksum = "0ec6fb3fe69024a75fa7e1bfb48aa6cf59706a101658ea01bfd33b2b248a038f"
dependencies = [
"aws-lc-sys",
"zeroize",
@@ -376,9 +376,9 @@ dependencies = [
[[package]]
name = "aws-lc-sys"
version = "0.39.1"
version = "0.40.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
checksum = "f50037ee5e1e41e7b8f9d161680a725bd1626cb6f8c7e901f91f942850852fe7"
dependencies = [
"cc",
"cmake",
@@ -2288,7 +2288,7 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "huskies"
version = "0.10.1"
version = "0.10.3"
dependencies = [
"async-stream",
"async-trait",
@@ -2802,9 +2802,9 @@ dependencies = [
[[package]]
name = "konst"
version = "0.3.16"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4381b9b00c55f251f2ebe9473aef7c117e96828def1a7cb3bd3f0f903c6894e9"
checksum = "97feab15b395d1860944abe6a8dd8ed9f8eadfae01750fada8427abda531d887"
dependencies = [
"const_panic",
"konst_kernel",
@@ -5996,9 +5996,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.51.1"
version = "1.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f66bf9585cda4b724d3e78ab34b73fb2bbaba9011b9bfdf69dc836382ea13b8c"
checksum = "a91135f59b1cbf38c91e73cf3386fca9bb77915c45ce2771460c9d92f0f3d776"
dependencies = [
"bytes",
"libc",
@@ -6333,9 +6333,9 @@ checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
[[package]]
name = "typewit"
version = "1.15.1"
version = "1.15.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc19094686c694eb41b3b99dcc2f2975d4b078512fa22ae6c63f7ca318bdcff7"
checksum = "214ca0b2191785cbc06209b9ca1861e048e39b5ba33574b3cedd58363d5bb5f6"
dependencies = [
"typewit_proc_macros",
]
@@ -6465,9 +6465,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
[[package]]
name = "uuid"
version = "1.23.0"
version = "1.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76"
dependencies = [
"getrandom 0.4.2",
"js-sys",
@@ -6770,18 +6770,18 @@ dependencies = [
[[package]]
name = "webpki-root-certs"
version = "1.0.6"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca"
checksum = "f31141ce3fc3e300ae89b78c0dd67f9708061d1d2eda54b8209346fd6be9a92c"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "webpki-roots"
version = "1.0.6"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed"
checksum = "52f5ee44c96cf55f1b349600768e3ece3a8f26010c05265ab73f945bb1a2eb9d"
dependencies = [
"rustls-pki-types",
]
+1 -278
View File
@@ -220,283 +220,6 @@ Both return a JSON document with:
## Source Map
### Core
| File | Description |
|------|-------------|
| `server/src/main.rs` | Entry point, CLI argument parsing, and server startup |
| `server/src/config.rs` | Parses `project.toml` for agents, components, and server settings |
| `server/src/state.rs` | Global mutable session state (project root, cancellation) |
| `server/src/store.rs` | JSON-backed persistent key-value store for settings |
### Agents
| File | Description |
|------|-------------|
| `server/src/agents/mod.rs` | Types, configuration, and orchestration for coding agents |
| `server/src/agents/gates.rs` | Runs test suites and validation scripts in agent worktrees |
| `server/src/agents/lifecycle.rs` | File creation, archival, and stage transitions for pipeline items |
| `server/src/agents/merge.rs` | Rebases agent work onto master and runs post-merge validation |
| `server/src/agents/pty.rs` | Spawns agent processes in pseudo-terminals and streams output |
| `server/src/agents/token_usage.rs` | Persists per-agent token consumption records to disk |
| `server/src/agent_log.rs` | Reads and writes JSONL agent event logs to disk |
| `server/src/agent_mode.rs` | Headless build-agent mode for distributed story processing |
### Agent Pool
| File | Description |
|------|-------------|
| `server/src/agents/pool/mod.rs` | Manages the set of active agents across all pipeline stages |
| `server/src/agents/pool/types.rs` | `AgentPool`, `StoryAgent`, and related data structures |
| `server/src/agents/pool/start.rs` | Spawns a new agent process in a worktree for a story |
| `server/src/agents/pool/stop.rs` | Terminates a running agent while preserving its worktree |
| `server/src/agents/pool/wait.rs` | Blocks until an agent reaches a terminal state |
| `server/src/agents/pool/query.rs` | Lists available/active agents and info lookups |
| `server/src/agents/pool/process.rs` | Kills orphaned PTY child processes on shutdown |
| `server/src/agents/pool/worktree.rs` | Creates and configures git worktrees for agents |
| `server/src/agents/pool/test_helpers.rs` | In-memory pool construction and test assertions |
### Agent Pool — Auto-assign
| File | Description |
|------|-------------|
| `server/src/agents/pool/auto_assign/mod.rs` | Wires sub-files and re-exports public items |
| `server/src/agents/pool/auto_assign/auto_assign.rs` | Scans pipeline stages and dispatches agents to unassigned stories |
| `server/src/agents/pool/auto_assign/reconcile.rs` | Startup reconciliation: detects committed work and advances pipeline |
| `server/src/agents/pool/auto_assign/scan.rs` | Scans pipeline stages for work items and queries pool state |
| `server/src/agents/pool/auto_assign/story_checks.rs` | Front-matter checks: review holds, blocked state, merge failures |
| `server/src/agents/pool/auto_assign/watchdog.rs` | Detects orphaned agents and triggers auto-assign |
### Agent Pool — Pipeline
| File | Description |
|------|-------------|
| `server/src/agents/pool/pipeline/mod.rs` | Stage advancement, completion handling, and merge orchestration |
| `server/src/agents/pool/pipeline/advance.rs` | Moves stories forward through pipeline stages |
| `server/src/agents/pool/pipeline/completion.rs` | Processes exit results and triggers pipeline advancement |
| `server/src/agents/pool/pipeline/merge.rs` | Orchestrates the merge-to-master flow for completed stories |
### Agent Runtimes
| File | Description |
|------|-------------|
| `server/src/agents/runtime/mod.rs` | Pluggable backends (Claude Code, Gemini, OpenAI) for running agents |
| `server/src/agents/runtime/claude_code.rs` | Launches Claude Code CLI sessions as agent backends |
| `server/src/agents/runtime/gemini.rs` | Drives Google Gemini API sessions as agent backends |
| `server/src/agents/runtime/openai.rs` | Drives OpenAI API sessions as agent backends |
### CRDT
| File | Description |
|------|-------------|
| `server/src/crdt_state.rs` | Pipeline state as a conflict-free replicated document backed by SQLite |
| `server/src/crdt_sync.rs` | WebSocket-based replication of pipeline state between nodes |
| `server/src/crdt_wire.rs` | Serialization format for `SignedOp` sync messages |
| `server/src/pipeline_state.rs` | Typed pipeline state machine |
### Database
| File | Description |
|------|-------------|
| `server/src/db/mod.rs` | Content store, shadow writes, and CRDT op persistence |
### HTTP Server
| File | Description |
|------|-------------|
| `server/src/http/mod.rs` | Module declarations for all REST, MCP, WebSocket, and SSE endpoints |
| `server/src/http/context.rs` | Shared `AppContext` threaded through all HTTP handlers |
| `server/src/http/agents.rs` | REST API for listing, starting, stopping, and inspecting agents |
| `server/src/http/agents_sse.rs` | Server-Sent Events endpoint for real-time agent output |
| `server/src/http/anthropic.rs` | Proxy for model listing and key-validation to Anthropic |
| `server/src/http/assets.rs` | Serves the embedded React frontend via `rust-embed` |
| `server/src/http/bot_command.rs` | Bot command HTTP endpoint |
| `server/src/http/chat.rs` | REST API for the LLM-powered chat interface |
| `server/src/http/health.rs` | Returns a static "ok" response |
| `server/src/http/io.rs` | REST API for file and directory operations |
| `server/src/http/model.rs` | REST API for model selection and LLM provider management |
| `server/src/http/oauth.rs` | Anthropic OAuth callback and token exchange flow |
| `server/src/http/project.rs` | REST API for project initialization and context management |
| `server/src/http/settings.rs` | REST API for user preferences and editor configuration |
| `server/src/http/wizard.rs` | REST API for the project setup wizard |
| `server/src/http/ws.rs` | Real-time pipeline updates, chat, and permission prompts |
| `server/src/http/test_helpers.rs` | Shared test utilities for HTTP handler tests |
### HTTP — MCP Tools
| File | Description |
|------|-------------|
| `server/src/http/mcp/mod.rs` | Model Context Protocol endpoint dispatching tool calls |
| `server/src/http/mcp/agent_tools.rs` | Start, stop, wait, list, and inspect agents via MCP |
| `server/src/http/mcp/diagnostics.rs` | Server logs, CRDT dump, and story movement helpers |
| `server/src/http/mcp/git_tools.rs` | Status, diff, add, commit, and log on agent worktrees |
| `server/src/http/mcp/merge_tools.rs` | Merge agent work to master and report failures |
| `server/src/http/mcp/qa_tools.rs` | Request, approve, and reject QA reviews |
| `server/src/http/mcp/shell_tools.rs` | Run commands, execute tests, and stream output |
| `server/src/http/mcp/status_tools.rs` | Pipeline status, story triage, and AC inspection |
| `server/src/http/mcp/story_tools.rs` | Create, update, move, and manage stories/bugs/refactors |
| `server/src/http/mcp/wizard_tools.rs` | Interactive setup wizard tool implementations |
### HTTP — Workflow
| File | Description |
|------|-------------|
| `server/src/http/workflow/mod.rs` | Shared story/bug file operations for HTTP and MCP handlers |
| `server/src/http/workflow/bug_ops.rs` | Creates bug, refactor, and spike files in the pipeline |
| `server/src/http/workflow/story_ops.rs` | Creates, updates, and manages acceptance criteria in stories |
| `server/src/http/workflow/test_results.rs` | Writes structured test results into story markdown |
### I/O
| File | Description |
|------|-------------|
| `server/src/io/mod.rs` | Filesystem, shell, search, onboarding, and story metadata operations |
| `server/src/io/fs/mod.rs` | Module declarations and re-exports for file operations |
| `server/src/io/fs/files.rs` | Read, write, list, and create files and directories |
| `server/src/io/fs/paths.rs` | Resolves CLI and session-relative paths to absolute paths |
| `server/src/io/fs/preferences.rs` | Reads and writes model selection and user settings |
| `server/src/io/fs/project.rs` | Tracks known projects and resolves the active project root |
| `server/src/io/fs/scaffold.rs` | Creates the `.huskies/` directory structure and default files |
| `server/src/io/onboarding.rs` | Checks whether scaffold templates have been customized |
| `server/src/io/search.rs` | Full-text search across project files |
| `server/src/io/shell.rs` | Runs commands in the project directory and captures output |
| `server/src/io/story_metadata.rs` | Parses and modifies YAML front matter in story markdown |
| `server/src/io/watcher.rs` | Filesystem watcher for `.huskies/work/` and `project.toml` |
| `server/src/io/wizard.rs` | Multi-step project onboarding flow with per-step status |
| `server/src/io/test_helpers.rs` | Shared test utilities for I/O module tests |
### Chat
| File | Description |
|------|-------------|
| `server/src/chat/mod.rs` | Transport abstraction for chat platforms |
| `server/src/chat/lookup.rs` | Shared story-lookup helper for chat commands |
| `server/src/chat/timer.rs` | Deferred agent start via one-shot timers |
| `server/src/chat/util.rs` | Shared text utilities used by all transports |
| `server/src/chat/test_helpers.rs` | Shared test utilities for chat handler tests |
### Chat — Commands
| File | Description |
|------|-------------|
| `server/src/chat/commands/mod.rs` | Bot-level command registry shared by all transports |
| `server/src/chat/commands/ambient.rs` | `ambient` command handler |
| `server/src/chat/commands/assign.rs` | `assign` command handler |
| `server/src/chat/commands/backlog.rs` | `backlog` command — shows only backlog-stage items |
| `server/src/chat/commands/cost.rs` | `cost` command handler |
| `server/src/chat/commands/coverage.rs` | `coverage` command — show or refresh test coverage |
| `server/src/chat/commands/depends.rs` | `depends` command handler |
| `server/src/chat/commands/git.rs` | `git` command handler |
| `server/src/chat/commands/help.rs` | `help` command handler |
| `server/src/chat/commands/loc.rs` | `loc` command — top source files by line count |
| `server/src/chat/commands/move_story.rs` | `move` command handler |
| `server/src/chat/commands/overview.rs` | `overview` command handler |
| `server/src/chat/commands/run_tests.rs` | `test` command — run the project's test suite |
| `server/src/chat/commands/setup.rs` | `setup` command handler |
| `server/src/chat/commands/show.rs` | `show` command handler |
| `server/src/chat/commands/status.rs` | `status` command and pipeline status helpers |
| `server/src/chat/commands/timer.rs` | `timer` command handler |
| `server/src/chat/commands/triage.rs` | Story triage dump subcommand of `status` |
| `server/src/chat/commands/unblock.rs` | `unblock` command handler |
| `server/src/chat/commands/unreleased.rs` | `unreleased` command handler |
### Chat — Matrix Transport
| File | Description |
|------|-------------|
| `server/src/chat/transport/matrix/mod.rs` | Matrix bot integration |
| `server/src/chat/transport/matrix/config.rs` | Deserialization of `bot.toml` Matrix settings |
| `server/src/chat/transport/matrix/commands.rs` | Re-exports from `crate::chat::commands` |
| `server/src/chat/transport/matrix/transport_impl.rs` | Matrix `ChatTransport` implementation |
| `server/src/chat/transport/matrix/assign.rs` | Assign/re-assign a coder model to a story |
| `server/src/chat/transport/matrix/delete.rs` | Delete a story/bug/spike from the pipeline |
| `server/src/chat/transport/matrix/htop.rs` | Live-updating system and agent process dashboard |
| `server/src/chat/transport/matrix/notifications.rs` | Stage transition notifications for Matrix rooms |
| `server/src/chat/transport/matrix/rebuild.rs` | Trigger a server rebuild and restart |
| `server/src/chat/transport/matrix/reset.rs` | Clear the current Claude Code session for a room |
| `server/src/chat/transport/matrix/rmtree.rs` | Delete the worktree for a story |
| `server/src/chat/transport/matrix/start.rs` | Start a coder agent on a story |
### Chat — Matrix Bot
| File | Description |
|------|-------------|
| `server/src/chat/transport/matrix/bot/mod.rs` | Sub-modules for the Matrix chat bot |
| `server/src/chat/transport/matrix/bot/context.rs` | Shared state (rooms, history, permissions) |
| `server/src/chat/transport/matrix/bot/format.rs` | Markdown-to-HTML conversion and startup announcements |
| `server/src/chat/transport/matrix/bot/history.rs` | Per-room message history for LLM context |
| `server/src/chat/transport/matrix/bot/mentions.rs` | Checks whether a message mentions the bot |
| `server/src/chat/transport/matrix/bot/messages.rs` | Processes incoming messages and dispatches commands |
| `server/src/chat/transport/matrix/bot/run.rs` | Connects to homeserver and processes sync events |
| `server/src/chat/transport/matrix/bot/verification.rs` | Interactive emoji verification flow for E2EE |
### Chat — Slack Transport
| File | Description |
|------|-------------|
| `server/src/chat/transport/slack/mod.rs` | Slack Bot API integration |
| `server/src/chat/transport/slack/commands.rs` | Incoming message dispatch and slash command handling |
| `server/src/chat/transport/slack/format.rs` | Markdown to Slack mrkdwn conversion |
| `server/src/chat/transport/slack/history.rs` | Conversation history persistence |
| `server/src/chat/transport/slack/meta.rs` | `ChatTransport` implementation for Slack |
| `server/src/chat/transport/slack/verify.rs` | Request signature verification |
### Chat — Discord Transport
| File | Description |
|------|-------------|
| `server/src/chat/transport/discord/mod.rs` | Discord Bot integration |
| `server/src/chat/transport/discord/commands.rs` | Incoming message dispatch and command handling |
| `server/src/chat/transport/discord/format.rs` | Markdown to Discord format conversion |
| `server/src/chat/transport/discord/gateway.rs` | Minimal Discord Gateway WebSocket client |
| `server/src/chat/transport/discord/history.rs` | Conversation history persistence |
| `server/src/chat/transport/discord/meta.rs` | `ChatTransport` implementation for Discord |
### Chat — WhatsApp Transport
| File | Description |
|------|-------------|
| `server/src/chat/transport/whatsapp/mod.rs` | WhatsApp Business API integration |
| `server/src/chat/transport/whatsapp/commands.rs` | Processes incoming messages as bot commands |
| `server/src/chat/transport/whatsapp/format.rs` | Markdown-to-WhatsApp conversion and message chunking |
| `server/src/chat/transport/whatsapp/history.rs` | Per-number history and messaging window tracking |
| `server/src/chat/transport/whatsapp/meta.rs` | Meta Cloud API transport via Graph API |
| `server/src/chat/transport/whatsapp/twilio.rs` | Twilio transport for sending/receiving messages |
### Chat — Transport Abstraction
| File | Description |
|------|-------------|
| `server/src/chat/transport/mod.rs` | Pluggable backends (Matrix, Slack, WhatsApp, Discord) |
### LLM
| File | Description |
|------|-------------|
| `server/src/llm/mod.rs` | Chat orchestration, prompts, OAuth, and provider integrations |
| `server/src/llm/chat.rs` | Multi-turn conversations with tool-calling LLM providers |
| `server/src/llm/oauth.rs` | Token refresh and credential management for Claude API |
| `server/src/llm/prompts.rs` | Static prompt templates for chat and onboarding |
| `server/src/llm/types.rs` | `Message`, `Role`, `ToolCall`, `ModelProvider` types |
### LLM — Providers
| File | Description |
|------|-------------|
| `server/src/llm/providers/mod.rs` | Module declarations for Anthropic, Claude Code, and Ollama |
| `server/src/llm/providers/anthropic.rs` | Streaming completion client for Claude Messages API |
| `server/src/llm/providers/claude_code.rs` | Runs Claude Code CLI in a PTY and parses output |
| `server/src/llm/providers/ollama.rs` | Streaming completion client for Ollama models |
### Utilities
| File | Description |
|------|-------------|
| `server/src/log_buffer.rs` | Bounded in-memory ring buffer for server log output |
| `server/src/rebuild.rs` | Server rebuild and restart logic |
| `server/src/workflow.rs` | Test result tracking and acceptance evaluation |
| `server/src/worktree.rs` | Creates, lists, and removes git worktrees for agent isolation |
## License
See `.huskies/specs/tech/STACK.md` for the full source map.
GPL-3.0. See [LICENSE](LICENSE).
+2 -2
View File
@@ -1,12 +1,12 @@
{
"name": "huskies",
"version": "0.10.1",
"version": "0.10.3",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "huskies",
"version": "0.10.1",
"version": "0.10.3",
"dependencies": {
"@types/react-syntax-highlighter": "^15.5.13",
"react": "^19.1.0",
+1 -1
View File
@@ -1,7 +1,7 @@
{
"name": "huskies",
"private": true,
"version": "0.10.1",
"version": "0.10.3",
"type": "module",
"scripts": {
"dev": "vite",
+43
View File
@@ -0,0 +1,43 @@
/// Bot transport configuration exchanged with `GET`/`PUT /api/bot/config`.
///
/// Every field is nullable: `null` means "not configured" on the server side.
export interface BotConfig {
  /// Selected chat transport ("matrix" or "slack"), if any.
  transport: string | null;
  /// Whether the bot should run at all.
  enabled: boolean | null;
  /// Matrix homeserver URL (Matrix transport only).
  homeserver: string | null;
  /// Matrix bot username (Matrix transport only).
  username: string | null;
  /// Matrix bot password (Matrix transport only).
  password: string | null;
  /// Matrix rooms the bot joins (Matrix transport only).
  room_ids: string[] | null;
  /// Slack bot token, e.g. "xoxb-..." (Slack transport only).
  slack_bot_token: string | null;
  /// Slack request signing secret (Slack transport only).
  slack_signing_secret: string | null;
  /// Slack channels the bot listens in (Slack transport only).
  slack_channel_ids: string[] | null;
}
const DEFAULT_API_BASE = "/api";

/// Perform a JSON request against the HTTP API and parse the response body.
///
/// Always sends `Content-Type: application/json`; any headers supplied via
/// `options.headers` are merged on top of that default. A non-2xx response
/// rejects with the response body text, or a generic message containing the
/// status code when the body is empty.
async function requestJson<T>(
  path: string,
  options: RequestInit = {},
  baseUrl = DEFAULT_API_BASE,
): Promise<T> {
  const res = await fetch(`${baseUrl}${path}`, {
    // Spread `options` FIRST: spreading it after `headers` would let a
    // caller-supplied `options.headers` object replace the merged headers
    // wholesale and silently drop the Content-Type default.
    ...options,
    headers: { "Content-Type": "application/json", ...(options.headers ?? {}) },
  });
  if (!res.ok) {
    const text = await res.text();
    throw new Error(text || `Request failed (${res.status})`);
  }
  return res.json() as Promise<T>;
}
/// Thin client for the bot configuration endpoints.
export const botConfigApi = {
  /// Fetch the current bot configuration from the server.
  getConfig(baseUrl?: string): Promise<BotConfig> {
    return requestJson<BotConfig>("/bot/config", {}, baseUrl);
  },
  /// Persist a bot configuration; resolves with the stored value.
  saveConfig(config: BotConfig, baseUrl?: string): Promise<BotConfig> {
    const options: RequestInit = {
      method: "PUT",
      body: JSON.stringify(config),
    };
    return requestJson<BotConfig>("/bot/config", options, baseUrl);
  },
};
+60
View File
@@ -8,6 +8,8 @@ export interface JoinedAgent {
label: string;
address: string;
registered_at: number;
/// Unix timestamp of the last heartbeat from this agent.
last_seen: number;
/// Project this agent is assigned to, if any.
assigned_project?: string;
}
@@ -22,6 +24,28 @@ export interface GatewayInfo {
projects: GatewayProject[];
}
/// One active story as reported by a project's pipeline endpoint.
export interface PipelineItem {
  story_id: string;
  name: string;
  /// Pipeline stage key (e.g. "current", "qa" — see STAGE_COLORS usage).
  stage: string;
  /// Agent currently attached to this story, if any.
  agent?: { agent_name: string; model: string; status: string } | null;
  blocked?: boolean;
  retry_count?: number;
  /// Present when the story's merge to master failed.
  merge_failure?: string;
}
/// Pipeline snapshot for a single project.
export interface ProjectPipelineStatus {
  active: PipelineItem[];
  backlog: { story_id: string; name: string }[];
  backlog_count: number;
  /// Set when this project's pipeline could not be queried.
  error?: string;
}
/// Pipeline snapshots for every registered project.
export interface AllProjectsPipeline {
  /// The active project — NOTE(review): presumed to match a key of
  /// `projects`; confirm against the gateway handler.
  active: string;
  projects: Record<string, ProjectPipelineStatus>;
}
/// Response from the one-time agent join-token endpoint.
export interface GenerateTokenResponse {
  token: string;
}
@@ -86,4 +110,40 @@ export const gatewayApi = {
getGatewayInfo(): Promise<GatewayInfo> {
return gatewayRequest<GatewayInfo>("/api/gateway");
},
/// Add a new project to the gateway config.
addProject(name: string, url: string): Promise<GatewayProject> {
  return gatewayRequest<GatewayProject>("/api/gateway/projects", {
    method: "POST",
    body: JSON.stringify({ name, url }),
  });
},
/// Remove a project from the gateway config.
removeProject(name: string): Promise<void> {
  return gatewayRequest<void>(
    `/api/gateway/projects/${encodeURIComponent(name)}`,
    { method: "DELETE" },
  );
},
/// Send a heartbeat for an agent to update its last-seen timestamp.
/// NOTE(review): this path lacks the `/api` prefix every other endpoint in
/// this object uses — verify the server actually routes
/// `/gateway/agents/:id/heartbeat` and not `/api/gateway/agents/...`.
heartbeat(id: string): Promise<void> {
  // `id` is interpolated without encodeURIComponent (unlike `name` in
  // removeProject) — assumed to be a server-generated URL-safe id; confirm.
  return gatewayRequest<void>(`/gateway/agents/${id}/heartbeat`, {
    method: "POST",
  });
},
/// Fetch pipeline status from all registered projects.
getAllProjectsPipeline(): Promise<AllProjectsPipeline> {
  return gatewayRequest<AllProjectsPipeline>("/api/gateway/pipeline");
},
/// Switch the active project.
switchProject(project: string): Promise<{ ok: boolean; error?: string }> {
  return gatewayRequest<{ ok: boolean; error?: string }>(
    "/api/gateway/switch",
    { method: "POST", body: JSON.stringify({ project }) },
  );
},
};
+344
View File
@@ -0,0 +1,344 @@
import * as React from "react";
import type { BotConfig } from "../api/bot_config";
import { botConfigApi } from "../api/bot_config";
const { useState, useEffect } = React;
/// Props for `BotConfigPage`.
interface BotConfigPageProps {
  /// Invoked when the user clicks "Back" to return to the previous view.
  onBack: () => void;
}
// Vertical label-above-input layout shared by Field and ListField.
const fieldStyle: React.CSSProperties = {
  display: "flex",
  flexDirection: "column",
  gap: "4px",
};
// Small muted label above each input.
const labelStyle: React.CSSProperties = {
  fontSize: "0.8em",
  color: "#aaa",
  fontWeight: 500,
};
// Dark monospace text input / textarea styling.
const inputStyle: React.CSSProperties = {
  padding: "8px 10px",
  borderRadius: "6px",
  border: "1px solid #333",
  background: "#1e1e1e",
  color: "#ececec",
  fontSize: "0.9em",
  fontFamily: "monospace",
  outline: "none",
};
// Card-like container for each form section (General, Matrix, Slack).
const sectionStyle: React.CSSProperties = {
  background: "#1e1e1e",
  border: "1px solid #333",
  borderRadius: "8px",
  padding: "20px",
  display: "flex",
  flexDirection: "column",
  gap: "14px",
};
// Uppercase section heading inside a section card.
const sectionTitleStyle: React.CSSProperties = {
  fontSize: "0.85em",
  fontWeight: 600,
  color: "#aaa",
  textTransform: "uppercase",
  letterSpacing: "0.06em",
  marginBottom: "2px",
};
function Field({
label,
value,
onChange,
placeholder,
type = "text",
}: {
label: string;
value: string;
onChange: (v: string) => void;
placeholder?: string;
type?: string;
}) {
return (
<div style={fieldStyle}>
<label style={labelStyle}>{label}</label>
<input
type={type}
value={value}
onChange={(e) => onChange(e.target.value)}
placeholder={placeholder}
style={inputStyle}
autoComplete="off"
/>
</div>
);
}
/// Labelled textarea that edits a list of strings, one entry per line.
/// Whitespace-only lines are dropped when changes are propagated.
function ListField(props: {
  label: string;
  value: string[];
  onChange: (v: string[]) => void;
  placeholder?: string;
}) {
  const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
    const lines = e.target.value.split("\n");
    props.onChange(lines.filter((line) => line.trim()));
  };
  return (
    <div style={fieldStyle}>
      <label style={labelStyle}>{props.label} (one per line)</label>
      <textarea
        value={props.value.join("\n")}
        onChange={handleChange}
        placeholder={props.placeholder}
        rows={3}
        style={{ ...inputStyle, resize: "vertical" }}
      />
    </div>
  );
}
/// Bot configuration page — form for Matrix and Slack credentials.
///
/// Loads the current config once on mount, lets the user edit it, and saves
/// the whole config back in a single PUT. The Matrix and Slack credential
/// sections are mutually exclusive, driven by the selected transport.
export function BotConfigPage({ onBack }: BotConfigPageProps) {
  // Which credential section is shown; also persisted as `transport`.
  const [transport, setTransport] = useState<"matrix" | "slack">("matrix");
  const [enabled, setEnabled] = useState(false);
  // Matrix credentials.
  const [homeserver, setHomeserver] = useState("");
  const [username, setUsername] = useState("");
  const [password, setPassword] = useState("");
  const [roomIds, setRoomIds] = useState<string[]>([]);
  // Slack credentials.
  const [slackBotToken, setSlackBotToken] = useState("");
  const [slackSigningSecret, setSlackSigningSecret] = useState("");
  const [slackChannelIds, setSlackChannelIds] = useState<string[]>([]);
  // Save-button state machine: idle -> saving -> saved/error -> idle.
  const [status, setStatus] = useState<"idle" | "saving" | "saved" | "error">(
    "idle",
  );
  const [errorMsg, setErrorMsg] = useState<string | null>(null);
  // Populate the form from the server once on mount. A failed load is
  // deliberately swallowed: the form simply starts out empty.
  useEffect(() => {
    botConfigApi
      .getConfig()
      .then((cfg) => {
        // "matrix" is the default, so only a stored "slack" needs handling.
        if (cfg.transport === "slack") setTransport("slack");
        setEnabled(cfg.enabled ?? false);
        setHomeserver(cfg.homeserver ?? "");
        setUsername(cfg.username ?? "");
        setPassword(cfg.password ?? "");
        setRoomIds(cfg.room_ids ?? []);
        setSlackBotToken(cfg.slack_bot_token ?? "");
        setSlackSigningSecret(cfg.slack_signing_secret ?? "");
        setSlackChannelIds(cfg.slack_channel_ids ?? []);
      })
      .catch(() => {});
  }, []);
  // Assemble a BotConfig from form state, mapping empty strings/lists to
  // null so "cleared" fields are stored as unset.
  function buildConfig(): BotConfig {
    return {
      transport,
      enabled,
      homeserver: homeserver || null,
      username: username || null,
      password: password || null,
      room_ids: roomIds.length > 0 ? roomIds : null,
      slack_bot_token: slackBotToken || null,
      slack_signing_secret: slackSigningSecret || null,
      slack_channel_ids: slackChannelIds.length > 0 ? slackChannelIds : null,
    };
  }
  // PUT the current form state; flashes "Saved!" for 2s on success and
  // shows the error message inline on failure.
  async function handleSave() {
    setStatus("saving");
    setErrorMsg(null);
    try {
      await botConfigApi.saveConfig(buildConfig());
      setStatus("saved");
      setTimeout(() => setStatus("idle"), 2000);
    } catch (e) {
      setStatus("error");
      setErrorMsg(e instanceof Error ? e.message : "Save failed");
    }
  }
  return (
    <div
      style={{
        display: "flex",
        flexDirection: "column",
        height: "100%",
        backgroundColor: "#171717",
        color: "#ececec",
        overflow: "auto",
      }}
    >
      <div
        style={{
          padding: "12px 24px",
          borderBottom: "1px solid #333",
          display: "flex",
          alignItems: "center",
          gap: "16px",
          background: "#171717",
          flexShrink: 0,
        }}
      >
        <button
          type="button"
          onClick={onBack}
          style={{
            background: "transparent",
            border: "none",
            cursor: "pointer",
            color: "#888",
            fontSize: "0.9em",
            padding: "4px 8px",
            borderRadius: "4px",
          }}
        >
          Back
        </button>
        <span style={{ fontWeight: 700, fontSize: "1em" }}>
          Bot Configuration
        </span>
      </div>
      <div
        style={{
          flex: 1,
          padding: "24px",
          display: "flex",
          flexDirection: "column",
          gap: "20px",
          maxWidth: "600px",
        }}
      >
        <div style={sectionStyle}>
          <div style={sectionTitleStyle}>General</div>
          <div style={fieldStyle}>
            <label style={labelStyle}>Transport</label>
            <select
              value={transport}
              onChange={(e) =>
                setTransport(e.target.value as "matrix" | "slack")
              }
              style={{ ...inputStyle, cursor: "pointer" }}
            >
              <option value="matrix">Matrix</option>
              <option value="slack">Slack</option>
            </select>
          </div>
          <label
            style={{
              display: "flex",
              alignItems: "center",
              gap: "8px",
              cursor: "pointer",
              fontSize: "0.9em",
              color: "#ccc",
            }}
          >
            <input
              type="checkbox"
              checked={enabled}
              onChange={(e) => setEnabled(e.target.checked)}
            />
            Enabled
          </label>
        </div>
        {transport === "matrix" && (
          <div style={sectionStyle}>
            <div style={sectionTitleStyle}>Matrix Credentials</div>
            <Field
              label="Homeserver"
              value={homeserver}
              onChange={setHomeserver}
              placeholder="https://matrix.example.com"
            />
            <Field
              label="Username"
              value={username}
              onChange={setUsername}
              placeholder="@botname:example.com"
            />
            <Field
              label="Password"
              value={password}
              onChange={setPassword}
              placeholder="bot password"
              type="password"
            />
            <ListField
              label="Room IDs"
              value={roomIds}
              onChange={setRoomIds}
              placeholder="!roomid:example.com"
            />
          </div>
        )}
        {transport === "slack" && (
          <div style={sectionStyle}>
            <div style={sectionTitleStyle}>Slack Credentials</div>
            <Field
              label="Bot Token"
              value={slackBotToken}
              onChange={setSlackBotToken}
              placeholder="xoxb-..."
            />
            <Field
              label="Signing Secret"
              value={slackSigningSecret}
              onChange={setSlackSigningSecret}
              placeholder="signing secret"
              type="password"
            />
            <ListField
              label="Channel IDs"
              value={slackChannelIds}
              onChange={setSlackChannelIds}
              placeholder="C01ABCDEF"
            />
          </div>
        )}
        <div style={{ display: "flex", alignItems: "center", gap: "12px" }}>
          <button
            type="button"
            onClick={handleSave}
            disabled={status === "saving"}
            style={{
              padding: "8px 24px",
              borderRadius: "6px",
              border: "none",
              background: status === "saved" ? "#1a5c2a" : "#2563eb",
              color: "#fff",
              cursor: status === "saving" ? "not-allowed" : "pointer",
              fontSize: "0.9em",
              fontWeight: 600,
              opacity: status === "saving" ? 0.7 : 1,
            }}
          >
            {status === "saving"
              ? "Saving..."
              : status === "saved"
                ? "Saved!"
                : "Save"}
          </button>
          {status === "error" && errorMsg && (
            <span style={{ color: "#f08080", fontSize: "0.85em" }}>
              {errorMsg}
            </span>
          )}
        </div>
      </div>
    </div>
  );
}
+8 -1
View File
@@ -8,6 +8,7 @@ import { useChatSend } from "../hooks/useChatSend";
import { useChatWebSocket } from "../hooks/useChatWebSocket";
import { estimateTokens, getContextWindowSize } from "../utils/chatUtils";
import { ApiKeyDialog } from "./ApiKeyDialog";
import { BotConfigPage } from "./BotConfigPage";
import { ChatHeader } from "./ChatHeader";
import type { ChatInputHandle } from "./ChatInput";
import { ChatInput } from "./ChatInput";
@@ -61,6 +62,7 @@ export function Chat({
null,
);
const [showHelp, setShowHelp] = useState(false);
const [view, setView] = useState<"chat" | "bot-config">("chat");
const [queuedMessages, setQueuedMessages] = useState<
{ id: string; text: string }[]
>([]);
@@ -373,12 +375,17 @@ export function Chat({
onToggleTools={setEnableTools}
wsConnected={wsConnected}
oauthStatus={oauthStatus}
onShowBotConfig={() => setView("bot-config")}
/>
{view === "bot-config" && (
<BotConfigPage onBack={() => setView("chat")} />
)}
<div
data-testid="chat-content-area"
style={{
display: "flex",
display: view === "bot-config" ? "none" : "flex",
flex: 1,
minHeight: 0,
flexDirection: isNarrowScreen ? "column" : "row",
+39
View File
@@ -34,6 +34,7 @@ interface ChatHeaderProps {
onToggleTools: (enabled: boolean) => void;
wsConnected: boolean;
oauthStatus?: OAuthStatus | null;
onShowBotConfig?: () => void;
}
const getContextEmoji = (percentage: number): string => {
@@ -58,6 +59,7 @@ export function ChatHeader({
onToggleTools,
wsConnected,
oauthStatus = null,
onShowBotConfig,
}: ChatHeaderProps) {
const hasModelOptions = availableModels.length > 0 || claudeModels.length > 0;
const [showConfirm, setShowConfirm] = useState(false);
@@ -513,6 +515,43 @@ export function ChatHeader({
🔄 New Session
</button>
{onShowBotConfig && (
<button
type="button"
onClick={onShowBotConfig}
title="Configure bot credentials"
style={{
padding: "6px 12px",
borderRadius: "99px",
border: "none",
fontSize: "0.85em",
backgroundColor: "#2f2f2f",
color: "#888",
cursor: "pointer",
outline: "none",
transition: "all 0.2s",
}}
onMouseOver={(e) => {
e.currentTarget.style.backgroundColor = "#3f3f3f";
e.currentTarget.style.color = "#ccc";
}}
onMouseOut={(e) => {
e.currentTarget.style.backgroundColor = "#2f2f2f";
e.currentTarget.style.color = "#888";
}}
onFocus={(e) => {
e.currentTarget.style.backgroundColor = "#3f3f3f";
e.currentTarget.style.color = "#ccc";
}}
onBlur={(e) => {
e.currentTarget.style.backgroundColor = "#2f2f2f";
e.currentTarget.style.color = "#888";
}}
>
Bot
</button>
)}
{hasModelOptions ? (
<select
value={model}
+427 -7
View File
@@ -1,14 +1,173 @@
/// Gateway management panel shown when huskies runs in `--gateway` mode.
///
/// Provides:
/// - A cross-project pipeline status view showing active stories per project.
/// - Clicking a project card switches to it.
/// - An "Add Agent" button that generates a one-time join token.
/// - Instructions for running a build agent with the token.
/// - A list of connected agents with per-agent project assignment and "Remove" buttons.
/// - A list of connected agents with per-agent status, project assignment, and "Remove" buttons.
/// - Auto-refresh every 5 seconds so new agents and disconnections appear without a page reload.
import * as React from "react";
import { gatewayApi, type JoinedAgent, type GatewayProject } from "../api/gateway";
import {
gatewayApi,
type JoinedAgent,
type GatewayProject,
type AllProjectsPipeline,
type PipelineItem,
} from "../api/gateway";
const { useCallback, useEffect, useState } = React;
const { useCallback, useEffect, useRef, useState } = React;
/// Seconds of silence before an agent is considered disconnected.
const DISCONNECT_THRESHOLD_SECS = 60;
/// Poll the agent list this often (milliseconds).
const POLL_INTERVAL_MS = 5_000;
// Display-only status for an agent row; derived from live data, never stored.
type AgentStatus = "idle" | "working" | "disconnected";
/// Derive an agent's display status from its last-seen timestamp and project assignment.
///
/// An agent that has been silent longer than DISCONNECT_THRESHOLD_SECS is
/// "disconnected"; otherwise it is "working" when assigned to a project and
/// "idle" when unassigned. `last_seen` is a Unix timestamp in seconds.
function agentStatus(agent: JoinedAgent): AgentStatus {
  const secondsSinceSeen = Date.now() / 1000 - agent.last_seen;
  if (secondsSinceSeen > DISCONNECT_THRESHOLD_SECS) {
    return "disconnected";
  }
  return agent.assigned_project ? "working" : "idle";
}
// Indicator-dot / badge color per agent status (GitHub dark palette).
const STATUS_COLORS: Record<AgentStatus, string> = {
  idle: "#6e7681",
  working: "#3fb950",
  disconnected: "#f85149",
};
// Human-readable badge text per agent status.
const STATUS_LABELS: Record<AgentStatus, string> = {
  idle: "Idle",
  working: "Working",
  disconnected: "Disconnected",
};
// Badge color per pipeline stage; unknown stages fall back at the call site.
const STAGE_COLORS: Record<string, string> = {
  current: "#3fb950",
  qa: "#d2a679",
  merge: "#79c0ff",
  done: "#6e7681",
};
// Human-readable badge text per pipeline stage.
const STAGE_LABELS: Record<string, string> = {
  current: "In Progress",
  qa: "QA",
  merge: "Merging",
  done: "Done",
};
/// A single story row inside a project pipeline card.
///
/// Renders a colored stage badge followed by the story name, which is
/// ellipsis-truncated so long titles never wrap the row.
function StoryRow({ item }: { item: PipelineItem }) {
  // Unknown stages get a neutral grey badge showing the raw stage string.
  const stageColor = STAGE_COLORS[item.stage] ?? "#8b949e";
  const stageLabel = STAGE_LABELS[item.stage] ?? item.stage;
  const badgeStyle: React.CSSProperties = {
    padding: "1px 6px",
    borderRadius: "10px",
    background: `${stageColor}22`,
    color: stageColor,
    border: `1px solid ${stageColor}44`,
    whiteSpace: "nowrap",
    flexShrink: 0,
  };
  const nameStyle: React.CSSProperties = {
    color: "#e6edf3",
    overflow: "hidden",
    textOverflow: "ellipsis",
    whiteSpace: "nowrap",
  };
  const rowStyle: React.CSSProperties = {
    display: "flex",
    alignItems: "center",
    gap: "8px",
    padding: "4px 0",
    fontSize: "0.82em",
  };
  return (
    <div style={rowStyle}>
      <span style={badgeStyle}>{stageLabel}</span>
      <span style={nameStyle}>{item.name}</span>
    </div>
  );
}
/// Pipeline status card for a single project.
///
/// Clicking anywhere on the card calls `onSwitch(name)` to make this the
/// active project. The active project is highlighted with a green border and
/// an "active" badge. Shows either the project's error, a "no active stories"
/// placeholder, or one StoryRow per active story.
function ProjectPipelineCard({
  name,
  pipeline,
  isActive,
  onSwitch,
}: {
  name: string;
  pipeline: AllProjectsPipeline["projects"][string];
  isActive: boolean;
  onSwitch: (name: string) => void;
}) {
  // Defensive defaults: the server may omit these fields for a project whose
  // pipeline could not be read (in which case `pipeline.error` is set).
  const activeItems = pipeline.active ?? [];
  const backlogCount = pipeline.backlog_count ?? 0;
  const hasError = Boolean(pipeline.error);
  return (
    <div
      data-testid={`pipeline-card-${name}`}
      onClick={() => onSwitch(name)}
      style={{
        padding: "12px 16px",
        background: "#161b22",
        // Green border marks the currently active project.
        border: `1px solid ${isActive ? "#238636" : "#30363d"}`,
        borderRadius: "8px",
        marginBottom: "8px",
        cursor: "pointer",
      }}
    >
      {/* Header row: project name, optional "active" badge, backlog count. */}
      <div
        style={{
          display: "flex",
          alignItems: "center",
          gap: "8px",
          marginBottom: activeItems.length > 0 ? "8px" : 0,
        }}
      >
        <span style={{ fontWeight: 600, color: "#e6edf3" }}>{name}</span>
        {isActive && (
          <span
            style={{
              fontSize: "0.7em",
              padding: "1px 6px",
              borderRadius: "10px",
              background: "#23863622",
              color: "#3fb950",
              border: "1px solid #23863644",
            }}
          >
            active
          </span>
        )}
        <span style={{ marginLeft: "auto", fontSize: "0.75em", color: "#6e7681" }}>
          {backlogCount > 0 ? `${backlogCount} in backlog` : ""}
        </span>
      </div>
      {/* Body: error takes priority, then empty placeholder, then story rows. */}
      {hasError ? (
        <div style={{ fontSize: "0.8em", color: "#f85149" }}>{pipeline.error}</div>
      ) : activeItems.length === 0 ? (
        <div style={{ fontSize: "0.8em", color: "#6e7681" }}>No active stories</div>
      ) : (
        <div>
          {activeItems.map((item) => (
            <StoryRow key={item.story_id} item={item} />
          ))}
        </div>
      )}
    </div>
  );
}
function TokenDisplay({ token }: { token: string }) {
const [copied, setCopied] = useState(false);
@@ -100,7 +259,9 @@ function AgentRow({
onAssign: (id: string, project: string | null) => void;
}) {
const registeredAt = new Date(agent.registered_at * 1000).toLocaleString();
const isAssigned = Boolean(agent.assigned_project);
const status = agentStatus(agent);
const statusColor = STATUS_COLORS[status];
const statusLabel = STATUS_LABELS[status];
return (
<div
@@ -121,18 +282,38 @@ function AgentRow({
width: "8px",
height: "8px",
borderRadius: "50%",
background: isAssigned ? "#3fb950" : "#6e7681",
background: statusColor,
flexShrink: 0,
}}
title={isAssigned ? "Assigned" : "Idle (unassigned)"}
title={statusLabel}
/>
<div style={{ flex: 1 }}>
<div style={{ fontWeight: 600, color: "#e6edf3" }}>{agent.label}</div>
<div style={{ display: "flex", alignItems: "center", gap: "8px" }}>
<span style={{ fontWeight: 600, color: "#e6edf3" }}>{agent.label}</span>
<span
data-testid={`agent-status-${agent.id}`}
style={{
fontSize: "0.75em",
padding: "1px 6px",
borderRadius: "10px",
background: `${statusColor}22`,
color: statusColor,
border: `1px solid ${statusColor}44`,
}}
>
{statusLabel}
</span>
</div>
<div style={{ fontSize: "0.8em", color: "#8b949e" }}>
{agent.address}
</div>
<div style={{ fontSize: "0.75em", color: "#6e7681" }}>
Registered {registeredAt}
{agent.assigned_project && (
<span style={{ marginLeft: "8px", color: "#8b949e" }}>
· Project: {agent.assigned_project}
</span>
)}
</div>
</div>
<select
@@ -185,8 +366,21 @@ export function GatewayPanel() {
const [token, setToken] = useState<string | null>(null);
const [generating, setGenerating] = useState(false);
const [error, setError] = useState<string | null>(null);
const [pipeline, setPipeline] = useState<AllProjectsPipeline | null>(null);
// Add-project form state
const [newProjectName, setNewProjectName] = useState("");
const [newProjectUrl, setNewProjectUrl] = useState("");
const [addingProject, setAddingProject] = useState(false);
// Keep stable refs so polling intervals don't recreate on state changes.
const setAgentsRef = useRef(setAgents);
setAgentsRef.current = setAgents;
const setPipelineRef = useRef(setPipeline);
setPipelineRef.current = setPipeline;
useEffect(() => {
// Initial load.
gatewayApi
.listAgents()
.then(setAgents)
@@ -195,6 +389,25 @@ export function GatewayPanel() {
.getGatewayInfo()
.then((info) => setProjects(info.projects))
.catch(() => setProjects([]));
gatewayApi
.getAllProjectsPipeline()
.then(setPipeline)
.catch(() => setPipeline(null));
// Poll so the dashboard auto-updates as agents connect/disconnect and
// stories move through pipelines.
const timer = setInterval(() => {
gatewayApi
.listAgents()
.then((updated) => setAgentsRef.current(updated))
.catch(() => {});
gatewayApi
.getAllProjectsPipeline()
.then((updated) => setPipelineRef.current(updated))
.catch(() => {});
}, POLL_INTERVAL_MS);
return () => clearInterval(timer);
}, []);
const handleAddAgent = useCallback(async () => {
@@ -234,6 +447,53 @@ export function GatewayPanel() {
[],
);
const handleAddProject = useCallback(async () => {
const name = newProjectName.trim();
const url = newProjectUrl.trim();
if (!name || !url) return;
setAddingProject(true);
setError(null);
try {
const created = await gatewayApi.addProject(name, url);
setProjects((prev) => [...prev, created]);
setNewProjectName("");
setNewProjectUrl("");
} catch (e) {
setError(e instanceof Error ? e.message : String(e));
} finally {
setAddingProject(false);
}
}, [newProjectName, newProjectUrl]);
const handleSwitchProject = useCallback(async (name: string) => {
setError(null);
try {
const result = await gatewayApi.switchProject(name);
if (!result.ok) {
setError(result.error ?? "Failed to switch project");
return;
}
// Refresh pipeline to reflect new active project.
const updated = await gatewayApi.getAllProjectsPipeline();
setPipeline(updated);
} catch (e) {
setError(e instanceof Error ? e.message : String(e));
}
}, []);
const handleRemoveProject = useCallback(async (name: string) => {
if (!window.confirm(`Remove project "${name}"? This cannot be undone.`)) {
return;
}
setError(null);
try {
await gatewayApi.removeProject(name);
setProjects((prev) => prev.filter((p) => p.name !== name));
} catch (e) {
setError(e instanceof Error ? e.message : String(e));
}
}, []);
return (
<div
style={{
@@ -252,6 +512,34 @@ export function GatewayPanel() {
Manage build agents connected to this gateway.
</p>
{/* Cross-project pipeline status */}
<section style={{ marginBottom: "32px" }}>
<h2
style={{
fontSize: "1.1em",
fontWeight: 600,
marginBottom: "12px",
borderBottom: "1px solid #21262d",
paddingBottom: "8px",
}}
>
Pipeline Status
</h2>
{pipeline ? (
Object.entries(pipeline.projects).map(([name, status]) => (
<ProjectPipelineCard
key={name}
name={name}
pipeline={status}
isActive={name === pipeline.active}
onSwitch={handleSwitchProject}
/>
))
) : (
<p style={{ color: "#6e7681" }}>Loading pipeline status</p>
)}
</section>
{/* Add Agent */}
<section style={{ marginBottom: "32px" }}>
<h2
@@ -330,6 +618,138 @@ export function GatewayPanel() {
)}
</section>
{/* Project management */}
<section style={{ marginTop: "32px" }}>
<h2
style={{
fontSize: "1.1em",
fontWeight: 600,
marginBottom: "12px",
borderBottom: "1px solid #21262d",
paddingBottom: "8px",
}}
>
Projects{" "}
{projects.length > 0 && (
<span style={{ fontSize: "0.8em", color: "#8b949e", fontWeight: 400 }}>
({projects.length})
</span>
)}
</h2>
{/* Existing projects list */}
{projects.map((p) => (
<div
key={p.name}
data-testid={`project-row-${p.name}`}
style={{
display: "flex",
alignItems: "center",
gap: "12px",
padding: "10px 14px",
background: "#161b22",
border: "1px solid #30363d",
borderRadius: "8px",
marginBottom: "8px",
}}
>
<div style={{ flex: 1 }}>
<div style={{ fontWeight: 600, color: "#e6edf3" }}>{p.name}</div>
<div style={{ fontSize: "0.8em", color: "#8b949e" }}>{p.url}</div>
</div>
<button
type="button"
data-testid={`remove-project-${p.name}`}
onClick={() => handleRemoveProject(p.name)}
style={{
fontSize: "0.8em",
padding: "4px 10px",
borderRadius: "4px",
border: "1px solid #f85149",
background: "none",
color: "#f85149",
cursor: "pointer",
}}
>
Remove
</button>
</div>
))}
{/* Add project form */}
<div
style={{
marginTop: "12px",
display: "flex",
gap: "8px",
alignItems: "flex-end",
flexWrap: "wrap",
}}
>
<div style={{ flex: "1 1 140px" }}>
<div style={{ fontSize: "0.75em", color: "#8b949e", marginBottom: "4px" }}>
Name
</div>
<input
data-testid="new-project-name"
type="text"
placeholder="my-project"
value={newProjectName}
onChange={(e) => setNewProjectName(e.target.value)}
style={{
width: "100%",
padding: "6px 10px",
borderRadius: "4px",
border: "1px solid #30363d",
background: "#0d1117",
color: "#e6edf3",
fontSize: "0.85em",
}}
/>
</div>
<div style={{ flex: "2 1 200px" }}>
<div style={{ fontSize: "0.75em", color: "#8b949e", marginBottom: "4px" }}>
Container URL
</div>
<input
data-testid="new-project-url"
type="text"
placeholder="http://localhost:3001"
value={newProjectUrl}
onChange={(e) => setNewProjectUrl(e.target.value)}
style={{
width: "100%",
padding: "6px 10px",
borderRadius: "4px",
border: "1px solid #30363d",
background: "#0d1117",
color: "#e6edf3",
fontSize: "0.85em",
}}
/>
</div>
<button
type="button"
data-testid="add-project-button"
onClick={handleAddProject}
disabled={addingProject || !newProjectName.trim() || !newProjectUrl.trim()}
style={{
padding: "6px 14px",
borderRadius: "4px",
border: "1px solid #238636",
background: addingProject ? "#1a2f1a" : "#238636",
color: "#fff",
cursor: addingProject ? "not-allowed" : "pointer",
fontWeight: 600,
fontSize: "0.85em",
whiteSpace: "nowrap",
}}
>
{addingProject ? "Adding…" : "Add Project"}
</button>
</div>
</section>
{error && (
<div
style={{
+1 -1
View File
@@ -1,6 +1,6 @@
[package]
name = "huskies"
version = "0.10.1"
version = "0.10.3"
edition = "2024"
build = "build.rs"
@@ -15,7 +15,7 @@ use super::scan::{
};
use super::story_checks::{
check_archived_dependencies, has_merge_failure, has_review_hold, has_unmet_dependencies,
is_story_blocked, read_story_front_matter_agent,
is_story_blocked, is_story_frozen, read_story_front_matter_agent,
};
impl AgentPool {
@@ -103,6 +103,12 @@ impl AgentPool {
continue;
}
// Skip frozen stories — pipeline advancement is suspended.
if is_story_frozen(project_root, stage_dir, story_id) {
slog!("[auto-assign] Story '{story_id}' is frozen; skipping until unfrozen.");
continue;
}
// Skip blocked stories (retry limit exceeded).
if is_story_blocked(project_root, stage_dir, story_id) {
continue;
@@ -93,6 +93,19 @@ pub(super) fn check_archived_dependencies(
crate::io::story_metadata::check_archived_deps(project_root, stage_dir, story_id)
}
/// Return `true` if the story file has `frozen: true` in its front matter.
///
/// Returns `false` when the story cannot be read or its front matter fails to
/// parse — an unreadable story is treated as not frozen rather than an error.
pub(super) fn is_story_frozen(project_root: &Path, _stage_dir: &str, story_id: &str) -> bool {
    use crate::io::story_metadata::parse_front_matter;

    let Some(contents) = read_story_contents(project_root, story_id) else {
        return false;
    };
    matches!(parse_front_matter(&contents), Ok(meta) if meta.frozen == Some(true))
}
/// Return `true` if the story file has a `merge_failure` field in its front matter.
pub(super) fn has_merge_failure(project_root: &Path, _stage_dir: &str, story_id: &str) -> bool {
use crate::io::story_metadata::parse_front_matter;
@@ -40,6 +40,13 @@ impl AgentPool {
.map(agent_config_stage)
.unwrap_or_else(|| pipeline_stage(agent_name));
// If the story is frozen, do not advance the pipeline. The agent's work
// is done but the story stays at its current stage.
if crate::io::story_metadata::is_story_frozen_in_store(story_id) {
slog!("[pipeline] Story '{story_id}' is frozen; pipeline advancement suppressed.");
return;
}
match stage {
PipelineStage::Other => {
// Supervisors and unknown agents do not advance the pipeline.
+259
View File
@@ -0,0 +1,259 @@
//! Handler for the `diff` command.
//!
//! Shows the git diff from the configured main branch to the story's worktree
//! HEAD, formatted for readability in chat.
use super::CommandContext;
use std::path::Path;
use std::process::Command;
/// Display the git diff from the configured main branch to a story's worktree HEAD.
///
/// Usage: `diff <number>`
///
/// Returns a Markdown reply in all cases: a usage message for missing or
/// non-numeric arguments, a friendly error when the story or its worktree
/// does not exist, and otherwise a `--stat` summary plus the (possibly
/// truncated) diff body fenced as ```diff``` for chat rendering.
pub(super) fn handle_diff(ctx: &CommandContext) -> Option<String> {
    /// Cap on the inline diff body so chat messages stay within transport limits.
    /// The truncation notice below is derived from this value — keep them together.
    const MAX_DIFF_BYTES: usize = 8_000;

    let num_str = ctx.args.trim();
    if num_str.is_empty() {
        return Some(format!(
            "Usage: `{} diff <number>`\n\nShows the git diff from the main branch to the story's worktree HEAD.",
            ctx.bot_name
        ));
    }
    if !num_str.chars().all(|c| c.is_ascii_digit()) {
        return Some(format!(
            "Invalid story number: `{num_str}`. Usage: `{} diff <number>`",
            ctx.bot_name
        ));
    }
    // Resolve the bare number to a full story id in the pipeline.
    let story_id = match find_story_id(num_str) {
        Some(id) => id,
        None => {
            return Some(format!(
                "No story with number **{num_str}** found in the pipeline."
            ));
        }
    };
    let wt_path = crate::worktree::worktree_path(ctx.project_root, &story_id);
    if !wt_path.is_dir() {
        return Some(format!(
            "Story **{num_str}** has no worktree. The diff is only available once a coder has started working on it."
        ));
    }
    let base_branch = resolve_base_branch(ctx.project_root);
    // Three-dot range: diff from the merge base, so only the story's own
    // commits show up, not unrelated changes that landed on the base branch.
    let range = format!("{base_branch}...HEAD");
    let stat = run_git(&wt_path, &["diff", "--stat", &range]);
    let diff = run_git(&wt_path, &["diff", &range]);

    let mut out = format!("## Diff — story {num_str} vs `{base_branch}`\n\n");
    if stat.is_empty() && diff.is_empty() {
        out.push_str("*(no changes relative to main branch)*\n");
        return Some(out);
    }
    if !stat.is_empty() {
        out.push_str("**Changed files:**\n```\n");
        out.push_str(&stat);
        out.push_str("\n```\n\n");
    }
    if !diff.is_empty() {
        if diff.len() > MAX_DIFF_BYTES {
            let truncated = truncate_at_char_boundary(&diff, MAX_DIFF_BYTES);
            // Derive the "N KB" figure from MAX_DIFF_BYTES rather than
            // hard-coding it, so the notice can't drift from the actual limit.
            out.push_str(&format!(
                "**Diff** *(truncated — showing first {} KB)*:\n```diff\n",
                MAX_DIFF_BYTES / 1_000
            ));
            out.push_str(truncated);
            out.push_str("\n... (truncated)\n```\n");
        } else {
            out.push_str("**Diff:**\n```diff\n");
            out.push_str(&diff);
            out.push_str("\n```\n");
        }
    }
    Some(out)
}
/// Find the story_id in the pipeline whose numeric prefix matches `num_str`.
///
/// Story ids look like `1234_story_slug`; the segment before the first `_`
/// is the number. Ids whose first segment is empty or non-numeric are
/// normalized to an empty prefix and therefore never match a numeric query.
fn find_story_id(num_str: &str) -> Option<String> {
    for item in crate::pipeline_state::read_all_typed() {
        let prefix = item
            .story_id
            .0
            .split('_')
            .next()
            .filter(|seg| !seg.is_empty() && seg.chars().all(|c| c.is_ascii_digit()))
            .unwrap_or("");
        if prefix == num_str {
            return Some(item.story_id.0);
        }
    }
    None
}
/// Return the configured base branch, or auto-detect it from the project root HEAD.
///
/// Preference order: `base_branch` from the project config, then the branch
/// currently checked out in `project_root`, then the literal `"master"`.
/// The `git rev-parse` fallback only runs when the config has no branch set.
fn resolve_base_branch(project_root: &Path) -> String {
    if let Some(configured) = crate::config::ProjectConfig::load(project_root)
        .ok()
        .and_then(|cfg| cfg.base_branch)
    {
        return configured;
    }
    let detected = Command::new("git")
        .args(["rev-parse", "--abbrev-ref", "HEAD"])
        .current_dir(project_root)
        .output()
        .ok()
        .filter(|out| out.status.success())
        .map(|out| String::from_utf8_lossy(&out.stdout).trim().to_string());
    detected.unwrap_or_else(|| "master".to_string())
}
/// Run a git command in `dir`, returning trimmed stdout (empty string on failure).
///
/// "Failure" covers both a spawn error (git missing, bad dir) and a non-zero
/// exit status; callers cannot distinguish the two, by design.
fn run_git(dir: &Path, args: &[&str]) -> String {
    let output = match Command::new("git").args(args).current_dir(dir).output() {
        Ok(out) if out.status.success() => out,
        _ => return String::new(),
    };
    String::from_utf8_lossy(&output.stdout).trim().to_string()
}
/// Truncate `s` to at most `max_bytes` bytes without splitting a UTF-8 character.
///
/// If `max_bytes` lands mid-character, the cut moves backwards to the nearest
/// char boundary, so the result may be shorter than `max_bytes`. Index 0 is
/// always a boundary, so the search below cannot fail.
fn truncate_at_char_boundary(s: &str, max_bytes: usize) -> &str {
    if s.len() <= max_bytes {
        return s;
    }
    let cut = (0..=max_bytes)
        .rev()
        .find(|&i| s.is_char_boundary(i))
        .unwrap_or(0);
    &s[..cut]
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;
    use crate::agents::AgentPool;
    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    use super::super::{CommandDispatch, try_handle_command};

    /// Dispatch `@timmy diff <args>` through the real command router with a
    /// throwaway agent pool, returning the bot's Markdown reply.
    fn diff_cmd(root: &std::path::Path, args: &str) -> Option<String> {
        let agents = Arc::new(AgentPool::new_test(3000));
        let ambient_rooms = Arc::new(Mutex::new(HashSet::new()));
        let room_id = "!test:example.com".to_string();
        let dispatch = CommandDispatch {
            bot_name: "Timmy",
            bot_user_id: "@timmy:homeserver.local",
            project_root: root,
            agents: &agents,
            ambient_rooms: &ambient_rooms,
            room_id: &room_id,
        };
        try_handle_command(&dispatch, &format!("@timmy diff {args}"))
    }

    #[test]
    fn diff_command_is_registered() {
        let found = super::super::commands().iter().any(|c| c.name == "diff");
        assert!(found, "diff command must be in the registry");
    }

    #[test]
    fn diff_command_appears_in_help() {
        let result = super::super::tests::try_cmd_addressed(
            "Timmy",
            "@timmy:homeserver.local",
            "@timmy help",
        );
        let output = result.unwrap();
        assert!(
            output.contains("diff"),
            "help should list diff command: {output}"
        );
    }

    #[test]
    fn diff_command_no_args_returns_usage() {
        let tmp = tempfile::TempDir::new().unwrap();
        let output = diff_cmd(tmp.path(), "").unwrap();
        assert!(
            output.contains("Usage"),
            "no args should show usage: {output}"
        );
    }

    #[test]
    fn diff_command_non_numeric_returns_error() {
        let tmp = tempfile::TempDir::new().unwrap();
        let output = diff_cmd(tmp.path(), "abc").unwrap();
        assert!(
            output.contains("Invalid"),
            "non-numeric arg should return error: {output}"
        );
    }

    #[test]
    fn diff_command_story_not_found_returns_friendly_message() {
        // Content store must exist so the pipeline lookup can run at all.
        crate::db::ensure_content_store();
        let tmp = tempfile::TempDir::new().unwrap();
        let output = diff_cmd(tmp.path(), "99993").unwrap();
        assert!(
            output.contains("99993"),
            "message should include story number: {output}"
        );
        assert!(
            output.contains("found") || output.contains("pipeline"),
            "message should explain not found: {output}"
        );
    }

    #[test]
    fn diff_command_no_worktree_returns_clear_error() {
        use crate::chat::test_helpers::write_story_file;
        let tmp = tempfile::TempDir::new().unwrap();
        // Story exists in the pipeline but no coder has created a worktree yet.
        write_story_file(
            tmp.path(),
            "2_current",
            "55551_story_no_worktree.md",
            "---\nname: No Worktree\n---\n",
        );
        let output = diff_cmd(tmp.path(), "55551").unwrap();
        assert!(
            output.contains("worktree")
                || output.contains("no worktree")
                || output.contains("Worktree"),
            "should report missing worktree: {output}"
        );
    }

    #[test]
    fn truncate_at_char_boundary_short_string() {
        let s = "hello";
        assert_eq!(truncate_at_char_boundary(s, 100), "hello");
    }

    #[test]
    fn truncate_at_char_boundary_exact_limit() {
        let s = "hello";
        assert_eq!(truncate_at_char_boundary(s, 5), "hello");
    }

    #[test]
    fn truncate_at_char_boundary_over_limit() {
        let s = "hello world";
        assert_eq!(truncate_at_char_boundary(s, 5), "hello");
    }
}
+300
View File
@@ -0,0 +1,300 @@
//! Handler for the `freeze` and `unfreeze` commands.
//!
//! `freeze <number>` sets `frozen: true` on the story, halting pipeline
//! advancement and auto-assign until `unfreeze <number>` clears the flag.
use super::CommandContext;
use crate::io::story_metadata::{
clear_front_matter_field_in_content, parse_front_matter, set_front_matter_field,
};
use std::path::Path;
/// Handle the `freeze` command.
///
/// Parses `<number>` from `ctx.args`, locates the work item, and sets
/// `frozen: true` in its front matter.
pub(super) fn handle_freeze(ctx: &CommandContext) -> Option<String> {
    let number = ctx.args.trim();
    let is_valid = !number.is_empty() && number.chars().all(|c| c.is_ascii_digit());
    if !is_valid {
        return Some(format!(
            "Usage: `{} freeze <number>` (e.g. `freeze 42`)",
            ctx.bot_name
        ));
    }
    Some(freeze_by_number(ctx.project_root, number))
}
/// Core freeze logic: find story by numeric prefix and set `frozen: true`.
///
/// Returns a Markdown-formatted response string suitable for all transports.
pub(crate) fn freeze_by_number(project_root: &Path, story_number: &str) -> String {
    match crate::chat::lookup::find_story_by_number(project_root, story_number) {
        Some((story_id, _, _, _)) => freeze_by_story_id(&story_id),
        None => format!("No story, bug, or spike with number **{story_number}** found."),
    }
}
/// Set `frozen: true` on a story's front matter and mirror it into the
/// pipeline item record. Idempotent: an already-frozen story is reported,
/// not re-written.
fn freeze_by_story_id(story_id: &str) -> String {
    let Some(contents) = crate::db::read_content(story_id) else {
        return format!("Failed to read story content for **{story_id}**");
    };
    let meta = match parse_front_matter(&contents) {
        Ok(m) => m,
        Err(e) => return format!("Failed to parse front matter for **{story_id}**: {e}"),
    };
    let story_name = meta.name.as_deref().unwrap_or(story_id).to_string();
    if meta.frozen == Some(true) {
        return format!("**{story_name}** ({story_id}) is already frozen.");
    }
    let updated = set_front_matter_field(&contents, "frozen", "true");
    crate::db::write_content(story_id, &updated);
    // Mirror into the pipeline item record; default to the active stage when
    // the story has no pipeline entry yet.
    let stage = match crate::pipeline_state::read_typed(story_id) {
        Ok(Some(item)) => item.stage.dir_name().to_string(),
        _ => "2_current".to_string(),
    };
    crate::db::write_item_with_content(story_id, &stage, &updated);
    format!(
        "Frozen **{story_name}** ({story_id}). Pipeline advancement and auto-assign suppressed until unfrozen."
    )
}
/// Handle the `unfreeze` command.
///
/// Parses `<number>` from `ctx.args`, locates the work item, and clears the
/// `frozen` flag to resume normal pipeline behaviour.
pub(super) fn handle_unfreeze(ctx: &CommandContext) -> Option<String> {
    let number = ctx.args.trim();
    let is_valid = !number.is_empty() && number.chars().all(|c| c.is_ascii_digit());
    if !is_valid {
        return Some(format!(
            "Usage: `{} unfreeze <number>` (e.g. `unfreeze 42`)",
            ctx.bot_name
        ));
    }
    Some(unfreeze_by_number(ctx.project_root, number))
}
/// Core unfreeze logic: find story by numeric prefix and clear `frozen` flag.
pub(crate) fn unfreeze_by_number(project_root: &Path, story_number: &str) -> String {
    match crate::chat::lookup::find_story_by_number(project_root, story_number) {
        Some((story_id, _, _, _)) => unfreeze_by_story_id(&story_id),
        None => format!("No story, bug, or spike with number **{story_number}** found."),
    }
}
/// Remove the `frozen` flag from a story's front matter and mirror the change
/// into the pipeline item record. Reports (without writing) when the story is
/// not frozen in the first place.
fn unfreeze_by_story_id(story_id: &str) -> String {
    let Some(contents) = crate::db::read_content(story_id) else {
        return format!("Failed to read story content for **{story_id}**");
    };
    let meta = match parse_front_matter(&contents) {
        Ok(m) => m,
        Err(e) => return format!("Failed to parse front matter for **{story_id}**: {e}"),
    };
    let story_name = meta.name.as_deref().unwrap_or(story_id).to_string();
    if meta.frozen != Some(true) {
        return format!("**{story_name}** ({story_id}) is not frozen. Nothing to unfreeze.");
    }
    let updated = clear_front_matter_field_in_content(&contents, "frozen");
    crate::db::write_content(story_id, &updated);
    // Mirror into the pipeline item record; default to the active stage when
    // the story has no pipeline entry yet.
    let stage = match crate::pipeline_state::read_typed(story_id) {
        Ok(Some(item)) => item.stage.dir_name().to_string(),
        _ => "2_current".to_string(),
    };
    crate::db::write_item_with_content(story_id, &stage, &updated);
    format!("Unfrozen **{story_name}** ({story_id}). Normal pipeline behaviour resumed.")
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use crate::agents::AgentPool;
    use crate::chat::test_helpers::write_story_file;
    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    use super::super::{CommandDispatch, try_handle_command};

    /// Dispatch `@timmy freeze <args>` through the real command router with a
    /// throwaway agent pool, returning the bot's Markdown reply.
    fn freeze_cmd_with_root(root: &std::path::Path, args: &str) -> Option<String> {
        let agents = Arc::new(AgentPool::new_test(3000));
        let ambient_rooms = Arc::new(Mutex::new(HashSet::new()));
        let room_id = "!test:example.com".to_string();
        let dispatch = CommandDispatch {
            bot_name: "Timmy",
            bot_user_id: "@timmy:homeserver.local",
            project_root: root,
            agents: &agents,
            ambient_rooms: &ambient_rooms,
            room_id: &room_id,
        };
        try_handle_command(&dispatch, &format!("@timmy freeze {args}"))
    }

    /// Same as `freeze_cmd_with_root` but for the `unfreeze` command.
    fn unfreeze_cmd_with_root(root: &std::path::Path, args: &str) -> Option<String> {
        let agents = Arc::new(AgentPool::new_test(3000));
        let ambient_rooms = Arc::new(Mutex::new(HashSet::new()));
        let room_id = "!test:example.com".to_string();
        let dispatch = CommandDispatch {
            bot_name: "Timmy",
            bot_user_id: "@timmy:homeserver.local",
            project_root: root,
            agents: &agents,
            ambient_rooms: &ambient_rooms,
            room_id: &room_id,
        };
        try_handle_command(&dispatch, &format!("@timmy unfreeze {args}"))
    }

    #[test]
    fn freeze_command_is_registered() {
        use super::super::commands;
        assert!(
            commands().iter().any(|c| c.name == "freeze"),
            "freeze command must be in the registry"
        );
    }

    #[test]
    fn unfreeze_command_is_registered() {
        use super::super::commands;
        assert!(
            commands().iter().any(|c| c.name == "unfreeze"),
            "unfreeze command must be in the registry"
        );
    }

    #[test]
    fn freeze_command_no_args_returns_usage() {
        let tmp = tempfile::TempDir::new().unwrap();
        let output = freeze_cmd_with_root(tmp.path(), "").unwrap();
        assert!(
            output.contains("Usage"),
            "no args should show usage: {output}"
        );
    }

    #[test]
    fn unfreeze_command_no_args_returns_usage() {
        let tmp = tempfile::TempDir::new().unwrap();
        let output = unfreeze_cmd_with_root(tmp.path(), "").unwrap();
        assert!(
            output.contains("Usage"),
            "no args should show usage: {output}"
        );
    }

    #[test]
    fn freeze_command_not_found_returns_error() {
        let tmp = tempfile::TempDir::new().unwrap();
        let output = freeze_cmd_with_root(tmp.path(), "9988").unwrap();
        assert!(
            output.contains("9988") && output.contains("found"),
            "not-found message should include number and 'found': {output}"
        );
    }

    #[test]
    fn freeze_command_sets_frozen_flag() {
        let tmp = tempfile::TempDir::new().unwrap();
        // Content store must exist before writing/reading story content.
        crate::db::ensure_content_store();
        write_story_file(
            tmp.path(),
            "2_current",
            "9940_story_freezeme.md",
            "---\nname: Freeze Me\n---\n# Story\n",
        );
        let output = freeze_cmd_with_root(tmp.path(), "9940").unwrap();
        assert!(
            output.contains("Frozen") && output.contains("Freeze Me"),
            "should confirm freeze with story name: {output}"
        );
        // The flag must be persisted, not just reported.
        let contents = crate::db::read_content("9940_story_freezeme")
            .expect("story content should be readable after freeze");
        assert!(
            contents.contains("frozen: true"),
            "frozen flag should be set: {contents}"
        );
    }

    #[test]
    fn unfreeze_command_clears_frozen_flag() {
        let tmp = tempfile::TempDir::new().unwrap();
        crate::db::ensure_content_store();
        write_story_file(
            tmp.path(),
            "2_current",
            "9941_story_frozen.md",
            "---\nname: Frozen Story\nfrozen: true\n---\n# Story\n",
        );
        let output = unfreeze_cmd_with_root(tmp.path(), "9941").unwrap();
        assert!(
            output.contains("Unfrozen") && output.contains("Frozen Story"),
            "should confirm unfreeze with story name: {output}"
        );
        // Unfreeze removes the key entirely rather than setting `frozen: false`.
        let contents = crate::db::read_content("9941_story_frozen")
            .expect("story content should be readable after unfreeze");
        assert!(
            !contents.contains("frozen:"),
            "frozen flag should be removed: {contents}"
        );
    }

    #[test]
    fn unfreeze_command_not_frozen_returns_error() {
        let tmp = tempfile::TempDir::new().unwrap();
        write_story_file(
            tmp.path(),
            "2_current",
            "9942_story_notfrozen.md",
            "---\nname: Not Frozen\n---\n# Story\n",
        );
        let output = unfreeze_cmd_with_root(tmp.path(), "9942").unwrap();
        assert!(
            output.contains("not frozen"),
            "should return not-frozen error: {output}"
        );
    }

    #[test]
    fn freeze_command_already_frozen_returns_message() {
        let tmp = tempfile::TempDir::new().unwrap();
        write_story_file(
            tmp.path(),
            "2_current",
            "9943_story_alreadyfrozen.md",
            "---\nname: Already Frozen\nfrozen: true\n---\n# Story\n",
        );
        let output = freeze_cmd_with_root(tmp.path(), "9943").unwrap();
        assert!(
            output.contains("already frozen"),
            "should say already frozen: {output}"
        );
    }
}
+17
View File
@@ -11,6 +11,8 @@ mod backlog;
mod cost;
mod coverage;
mod depends;
mod diff;
mod freeze;
mod git;
mod help;
pub(crate) mod loc;
@@ -163,6 +165,11 @@ pub fn commands() -> &'static [BotCommand] {
description: "Display the full text of a work item: `show <number>`",
handler: show::handle_show,
},
BotCommand {
name: "diff",
description: "Show git diff from main branch to story worktree HEAD: `diff <number>`",
handler: diff::handle_diff,
},
BotCommand {
name: "overview",
description: "Show implementation summary for a merged story: `overview <number>`",
@@ -203,6 +210,16 @@ pub fn commands() -> &'static [BotCommand] {
description: "Reset a blocked story: `unblock <number>` (clears blocked flag and resets retry count)",
handler: unblock::handle_unblock,
},
BotCommand {
name: "freeze",
description: "Freeze a story at its current stage: `freeze <number>` (suppresses pipeline advancement and auto-assign)",
handler: freeze::handle_freeze,
},
BotCommand {
name: "unfreeze",
description: "Unfreeze a story: `unfreeze <number>` (resumes normal pipeline behaviour)",
handler: freeze::handle_unfreeze,
},
BotCommand {
name: "unreleased",
description: "Show stories merged to master since the last release tag",
+15 -5
View File
@@ -59,12 +59,17 @@ fn wizard_generate_reply(ctx: &CommandContext) -> String {
}
/// Compose a status reply for the `setup` command (no args).
///
/// If no wizard state exists, automatically initializes it so the user does
/// not need to run `huskies init` manually.
fn wizard_status_reply(ctx: &CommandContext) -> String {
if WizardState::load(ctx.project_root).is_none() {
WizardState::init_if_missing(ctx.project_root);
}
match WizardState::load(ctx.project_root) {
Some(state) => format_wizard_state(&state),
None => {
"No setup wizard active. Run `huskies init` in the project root to begin.".to_string()
}
None => "Unable to initialize setup wizard. Ensure the `.huskies/` directory exists."
.to_string(),
}
}
@@ -205,13 +210,18 @@ mod tests {
}
#[test]
fn setup_no_wizard_auto_initializes() {
    // A `setup` call with no pre-existing wizard state must auto-initialize
    // the wizard rather than telling the user to run `huskies init`.
    let dir = TempDir::new().unwrap();
    std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
    let agents = Arc::new(crate::agents::AgentPool::new_test(4000));
    let rooms = Arc::new(Mutex::new(HashSet::new()));
    let ctx = make_ctx("", dir.path(), &agents, &rooms);
    let result = handle_setup(&ctx).unwrap();
    // Bot should auto-initialize and return wizard status, not ask user to run huskies init.
    assert!(result.contains("Setup wizard"));
    assert!(!result.contains("huskies init"));
    // Wizard state file should now exist.
    assert!(WizardState::load(dir.path()).is_some());
}
#[test]
+95 -12
View File
@@ -2,6 +2,65 @@
use super::CommandContext;
/// Strip YAML front matter and return a summary of useful fields + the remaining body.
///
/// Returns `(summary, body)`:
/// * `summary` — selected front-matter fields (`depends_on`, `agent`, `blocked`,
///   `retry_count`, `qa`, `merge_failure`) rendered as bold Markdown fragments
///   joined with " · "; empty when there is no front matter or no notable fields.
/// * `body` — the text after the closing `---` delimiter, or the original text
///   unchanged when no complete front-matter block is present.
fn strip_front_matter(text: &str) -> (String, String) {
    let trimmed = text.trim_start();
    if !trimmed.starts_with("---") {
        return (String::new(), text.to_string());
    }
    // Find the closing delimiter: the next line that starts with `---`.
    if let Some(end) = trimmed[3..].find("\n---") {
        // NOTE: `.trim()` already yields a `&str`; no extra `&` needed.
        let yaml_block = trimmed[3..3 + end].trim();
        let body = &trimmed[3 + end + 4..]; // skip past the "\n---" closing marker
        // Extract useful fields from YAML (simple line-based parsing).
        let mut parts = Vec::new();
        for line in yaml_block.lines() {
            let line = line.trim();
            if line.starts_with("depends_on:") {
                let val = line.trim_start_matches("depends_on:").trim();
                if !val.is_empty() && val != "[]" {
                    parts.push(format!("**Depends on:** {val}"));
                }
            } else if line.starts_with("agent:") {
                let val = line.trim_start_matches("agent:").trim().trim_matches('"');
                if !val.is_empty() {
                    parts.push(format!("**Agent:** {val}"));
                }
            } else if line.starts_with("blocked:") {
                let val = line.trim_start_matches("blocked:").trim();
                if val == "true" {
                    parts.push("**Blocked:** yes".to_string());
                }
            } else if line.starts_with("retry_count:") {
                // Only surface non-zero retry counts — zero is the normal case.
                let val = line.trim_start_matches("retry_count:").trim();
                if val != "0" && !val.is_empty() {
                    parts.push(format!("**Retries:** {val}"));
                }
            } else if line.starts_with("qa:") {
                let val = line.trim_start_matches("qa:").trim().trim_matches('"');
                if val == "human" {
                    parts.push("**QA:** human review required".to_string());
                }
            } else if line.starts_with("merge_failure:") {
                let val = line
                    .trim_start_matches("merge_failure:")
                    .trim()
                    .trim_matches('"');
                if !val.is_empty() {
                    parts.push(format!("**Merge failure:** {val}"));
                }
            }
        }
        (parts.join(" · "), body.to_string())
    } else {
        // No closing --- — treat the whole text as body, with no summary.
        (String::new(), text.to_string())
    }
}
/// Display the full markdown text of a work item identified by its numeric ID.
///
/// Lookup priority: CRDT → content store → filesystem (Story 512).
@@ -21,8 +80,8 @@ pub(super) fn handle_show(ctx: &CommandContext) -> Option<String> {
));
}
// Find the story by numeric prefix: CRDT → content store → filesystem.
let (story_id, _stage_dir, path, content) =
// Find the story by numeric prefix: CRDT → content store.
let (story_id, _stage_dir, _path, content) =
match crate::chat::lookup::find_story_by_number(ctx.project_root, num_str) {
Some(found) => found,
None => {
@@ -32,16 +91,40 @@ pub(super) fn handle_show(ctx: &CommandContext) -> Option<String> {
}
};
// `content` is populated from the content store (CRDT/DB path) or read
// from disk during the filesystem fallback. If it is None (story found in
// CRDT but no content-store entry yet), attempt a direct disk read.
Some(
content
.or_else(|| std::fs::read_to_string(&path).ok())
.unwrap_or_else(|| {
format!("Story {story_id} found in pipeline but its content is unavailable.")
}),
)
// `content` comes from the CRDT / content store. If unavailable, report
// it rather than silently reading a stale on-disk copy.
let text = content.unwrap_or_else(|| {
format!("Story {story_id} found in pipeline but its content is unavailable.")
});
// Strip front matter block and extract useful metadata to show inline.
let (front_matter_summary, body) = strip_front_matter(&text);
// Convert markdown headings to bold text for consistent rendering across
// Matrix clients. Element X doesn't style <h2> tags distinctly, but bold
// text renders consistently everywhere.
let formatted = body
.lines()
.map(|line| {
let trimmed = line.trim_start();
if let Some(rest) = trimmed.strip_prefix("### ") {
format!("\n**{}**", rest)
} else if let Some(rest) = trimmed.strip_prefix("## ") {
format!("\n**{}**", rest)
} else if let Some(rest) = trimmed.strip_prefix("# ") {
format!("\n**{}**", rest)
} else {
line.to_string()
}
})
.collect::<Vec<_>>()
.join("\n");
if front_matter_summary.is_empty() {
Some(formatted.trim().to_string())
} else {
Some(format!("{front_matter_summary}\n{}", formatted.trim()))
}
}
#[cfg(test)]
+7 -1
View File
@@ -228,7 +228,13 @@ fn render_item_line(
} else {
Some(item.name.as_str())
};
let display = story_short_label(story_id, name_opt);
let frozen = crate::io::story_metadata::is_story_frozen_in_store(story_id);
let base_label = story_short_label(story_id, name_opt);
let display = if frozen {
format!("\u{2744}\u{FE0F} {base_label}") // ❄️ prefix
} else {
base_label
};
let cost_suffix = cost_by_story
.get(story_id)
.filter(|&&c| c > 0.0)
@@ -67,6 +67,23 @@ pub struct BotContext {
pub gateway_projects: Vec<String>,
}
impl BotContext {
/// Resolve the effective project root for command dispatch.
///
/// In gateway mode the bot's `project_root` is the gateway config directory.
/// Each project lives in a subdirectory named after the project, so the
/// effective root for commands is `project_root / active_project_name`.
/// In standalone (single-project) mode this returns `project_root` unchanged.
pub async fn effective_project_root(&self) -> PathBuf {
if let Some(ref ap) = self.gateway_active_project {
let name = ap.read().await.clone();
self.project_root.join(&name)
} else {
self.project_root.clone()
}
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
@@ -88,6 +105,126 @@ mod tests {
assert_clone::<BotContext>();
}
#[tokio::test]
async fn effective_project_root_standalone_returns_project_root() {
    // Without a gateway active-project handle the bot runs standalone, and the
    // effective root must be exactly the configured project_root.
    let (_perm_tx, perm_rx) = mpsc::unbounded_channel();
    let bot = BotContext {
        bot_user_id: make_user_id("@bot:example.com"),
        bot_name: "Assistant".to_string(),
        target_room_ids: vec![],
        allowed_users: vec![],
        project_root: PathBuf::from("/projects/myapp"),
        history: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        history_size: 20,
        bot_sent_event_ids: Arc::new(TokioMutex::new(std::collections::HashSet::new())),
        perm_rx: Arc::new(TokioMutex::new(perm_rx)),
        pending_perm_replies: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        permission_timeout_secs: 120,
        ambient_rooms: Arc::new(std::sync::Mutex::new(std::collections::HashSet::new())),
        agents: Arc::new(crate::agents::AgentPool::new_test(3000)),
        htop_sessions: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        transport: Arc::new(crate::chat::transport::whatsapp::WhatsAppTransport::new(
            "test-phone".to_string(),
            "test-token".to_string(),
            "pipeline_notification".to_string(),
        )),
        timer_store: Arc::new(crate::chat::timer::TimerStore::load(
            std::path::PathBuf::from("/tmp/timers.json"),
        )),
        gateway_active_project: None,
        gateway_projects: vec![],
    };
    let root = bot.effective_project_root().await;
    assert_eq!(root, PathBuf::from("/projects/myapp"));
}
#[tokio::test]
async fn effective_project_root_gateway_uses_active_project_subdir() {
    // Gateway mode: the effective root is <config_dir>/<active_project_name>.
    let (_perm_tx, perm_rx) = mpsc::unbounded_channel();
    let active_name = Arc::new(RwLock::new("huskies".to_string()));
    let bot = BotContext {
        bot_user_id: make_user_id("@bot:example.com"),
        bot_name: "Assistant".to_string(),
        target_room_ids: vec![],
        allowed_users: vec![],
        project_root: PathBuf::from("/gateway"),
        history: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        history_size: 20,
        bot_sent_event_ids: Arc::new(TokioMutex::new(std::collections::HashSet::new())),
        perm_rx: Arc::new(TokioMutex::new(perm_rx)),
        pending_perm_replies: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        permission_timeout_secs: 120,
        ambient_rooms: Arc::new(std::sync::Mutex::new(std::collections::HashSet::new())),
        agents: Arc::new(crate::agents::AgentPool::new_test(3000)),
        htop_sessions: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        transport: Arc::new(crate::chat::transport::whatsapp::WhatsAppTransport::new(
            "test-phone".to_string(),
            "test-token".to_string(),
            "pipeline_notification".to_string(),
        )),
        timer_store: Arc::new(crate::chat::timer::TimerStore::load(
            std::path::PathBuf::from("/tmp/timers.json"),
        )),
        gateway_active_project: Some(Arc::clone(&active_name)),
        gateway_projects: vec!["huskies".into(), "robot-studio".into()],
    };
    let root = bot.effective_project_root().await;
    assert_eq!(root, PathBuf::from("/gateway/huskies"));
}
#[tokio::test]
async fn effective_project_root_gateway_reflects_project_switch() {
    // The effective root must track the shared active-project handle as it is
    // updated by a project switch.
    let (_perm_tx, perm_rx) = mpsc::unbounded_channel();
    let active_name = Arc::new(RwLock::new("huskies".to_string()));
    let bot = BotContext {
        bot_user_id: make_user_id("@bot:example.com"),
        bot_name: "Assistant".to_string(),
        target_room_ids: vec![],
        allowed_users: vec![],
        project_root: PathBuf::from("/gateway"),
        history: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        history_size: 20,
        bot_sent_event_ids: Arc::new(TokioMutex::new(std::collections::HashSet::new())),
        perm_rx: Arc::new(TokioMutex::new(perm_rx)),
        pending_perm_replies: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        permission_timeout_secs: 120,
        ambient_rooms: Arc::new(std::sync::Mutex::new(std::collections::HashSet::new())),
        agents: Arc::new(crate::agents::AgentPool::new_test(3000)),
        htop_sessions: Arc::new(TokioMutex::new(std::collections::HashMap::new())),
        transport: Arc::new(crate::chat::transport::whatsapp::WhatsAppTransport::new(
            "test-phone".to_string(),
            "test-token".to_string(),
            "pipeline_notification".to_string(),
        )),
        timer_store: Arc::new(crate::chat::timer::TimerStore::load(
            std::path::PathBuf::from("/tmp/timers.json"),
        )),
        gateway_active_project: Some(Arc::clone(&active_name)),
        gateway_projects: vec!["huskies".into(), "robot-studio".into()],
    };
    assert_eq!(
        bot.effective_project_root().await,
        PathBuf::from("/gateway/huskies")
    );
    // Simulate switch_project changing the active project.
    *active_name.write().await = "robot-studio".to_string();
    assert_eq!(
        bot.effective_project_root().await,
        PathBuf::from("/gateway/robot-studio")
    );
}
#[test]
fn bot_context_has_no_require_verified_devices_field() {
// Verification is always on — BotContext no longer has a toggle field.
@@ -96,6 +96,49 @@ mod tests {
);
}
#[test]
fn markdown_to_html_heading_renders_as_h_tag() {
    // A heading followed by prose renders as an <h2> plus a paragraph.
    let rendered = markdown_to_html("## Section\nContent here.");
    for (needle, why) in [
        ("<h2>Section</h2>", "expected <h2> heading tag"),
        ("<p>Content here.</p>", "expected paragraph after heading"),
    ] {
        assert!(rendered.contains(needle), "{why}: {rendered}");
    }
}
#[test]
fn markdown_to_html_heading_with_preceding_prose_renders_correctly() {
    // Prose before a heading must not break heading detection.
    let rendered = markdown_to_html("Intro text.\n## Section\nBody.");
    for (needle, why) in [
        ("<h2>Section</h2>", "expected <h2> heading tag"),
        ("<p>Intro text.</p>", "expected intro paragraph"),
        ("<p>Body.</p>", "expected body paragraph"),
    ] {
        assert!(rendered.contains(needle), "{why}: {rendered}");
    }
}
#[test]
fn markdown_to_html_multiple_headings_each_render_as_h_tags() {
    // Every heading in a multi-section document gets its own <h2> tag.
    let rendered = markdown_to_html("## Section 1\nContent one.\n\n## Section 2\nContent two.");
    assert!(
        rendered.contains("<h2>Section 1</h2>"),
        "expected first <h2>: {rendered}"
    );
    assert!(
        rendered.contains("<h2>Section 2</h2>"),
        "expected second <h2>: {rendered}"
    );
}
#[test]
fn startup_announcement_uses_bot_name() {
assert_eq!(format_startup_announcement("Timmy"), "Timmy is online.");
@@ -174,13 +174,18 @@ pub(super) async fn on_room_message(
let user_message = body;
slog!("[matrix-bot] Message from {sender}: {user_message}");
// In gateway mode, resolve commands against the active project's root directory.
// The gateway's own project_root is the gateway config dir; each project lives in
// a subdirectory named after the project. Standalone mode is unaffected.
let effective_root = ctx.effective_project_root().await;
// Check for bot-level commands (help, status, ambient, …) before invoking
// the LLM. All commands are registered in commands.rs — no special-casing
// needed here.
let dispatch = super::super::commands::CommandDispatch {
bot_name: &ctx.bot_name,
bot_user_id: ctx.bot_user_id.as_str(),
project_root: &ctx.project_root,
project_root: &effective_root,
agents: &ctx.agents,
ambient_rooms: &ctx.ambient_rooms,
room_id: &room_id_str,
@@ -219,7 +224,7 @@ pub(super) async fn on_room_message(
&ctx.bot_name,
&story_number,
&model,
&ctx.project_root,
&effective_root,
&ctx.agents,
)
.await
@@ -287,7 +292,7 @@ pub(super) async fn on_room_message(
super::super::delete::handle_delete(
&ctx.bot_name,
&story_number,
&ctx.project_root,
&effective_root,
&ctx.agents,
)
.await
@@ -321,7 +326,7 @@ pub(super) async fn on_room_message(
super::super::rmtree::handle_rmtree(
&ctx.bot_name,
&story_number,
&ctx.project_root,
&effective_root,
&ctx.agents,
)
.await
@@ -361,7 +366,7 @@ pub(super) async fn on_room_message(
&ctx.bot_name,
&story_number,
agent_hint.as_deref(),
&ctx.project_root,
&effective_root,
&ctx.agents,
)
.await
@@ -587,7 +592,12 @@ pub(super) async fn handle_message(
let sent_any_chunk = Arc::new(AtomicBool::new(false));
let sent_any_chunk_for_callback = Arc::clone(&sent_any_chunk);
let project_root_str = ctx.project_root.to_string_lossy().to_string();
// In gateway mode, run Claude Code in the active project's directory.
let project_root_str = ctx
.effective_project_root()
.await
.to_string_lossy()
.to_string();
let chat_fut = provider.chat_stream(
&prompt,
&project_root_str,
+50 -6
View File
@@ -223,12 +223,24 @@ pub fn normalize_line_breaks(text: &str) -> String {
let prev_line = lines[i - 1];
// Insert a blank separator when both the current and previous lines
// are non-empty prose (not inside a code fence, not structured Markdown).
// ATX headings (lines starting with one or more `#` characters) always
// need a blank line before and after them so that Matrix clients render
// the heading with visual separation. Without a blank line, a single
// newline between a heading and adjacent text is swallowed by many
// Matrix clients (including Element X), joining the heading text and
// the following content on the same line without any heading formatting.
let is_cur_heading = line.trim_start().starts_with('#');
let is_prev_heading = prev_line.trim_start().starts_with('#');
// Insert a blank separator when:
// 1. Both lines are non-empty prose (standard prose-to-prose rule).
// 2. The current line is an ATX heading (adds blank line *before* it).
// 3. The previous line was an ATX heading (adds blank line *after* it).
let should_double = !line.is_empty()
&& !prev_line.is_empty()
&& !is_structured_line(line)
&& !is_structured_line(prev_line);
&& ((!is_structured_line(line) && !is_structured_line(prev_line))
|| is_cur_heading
|| is_prev_heading);
if should_double {
result.push("");
@@ -599,10 +611,42 @@ mod tests {
}
#[test]
fn normalize_heading_followed_by_prose_gets_blank_line() {
    // A blank line must be inserted after a heading so Matrix clients render
    // the heading with visual separation from the following paragraph.
    let input = "# My Heading\nSome text below.";
    let output = normalize_line_breaks(input);
    assert_eq!(output, "# My Heading\n\nSome text below.");
}
#[test]
fn normalize_prose_before_heading_gets_blank_line() {
    // Prose directly above a heading must gain a blank separator line.
    let normalized = normalize_line_breaks("Some intro text.\n## Section");
    assert_eq!(normalized, "Some intro text.\n\n## Section");
}
#[test]
fn normalize_heading_surrounded_by_prose_gets_blank_lines_both_sides() {
    // A heading sandwiched between prose gains a blank line on each side.
    let normalized = normalize_line_breaks("Intro.\n## Heading\nContent.");
    assert_eq!(normalized, "Intro.\n\n## Heading\n\nContent.");
}
#[test]
fn normalize_consecutive_headings_separated_by_blank_lines() {
    // Back-to-back headings must also be separated by a blank line.
    let normalized = normalize_line_breaks("## Section 1\n## Section 2");
    assert_eq!(normalized, "## Section 1\n\n## Section 2");
}
#[test]
fn normalize_heading_already_separated_by_blank_line_unchanged() {
    // An existing blank line is left alone — no double-insertion.
    let normalized = normalize_line_breaks("# Heading\n\nContent.");
    assert_eq!(normalized, "# Heading\n\nContent.");
}
#[test]
+397 -145
View File
@@ -65,6 +65,10 @@ pub struct JoinedAgent {
pub address: String,
/// Unix timestamp when the agent registered.
pub registered_at: f64,
/// Unix timestamp of the last heartbeat from this agent. Defaults to `registered_at`
/// for agents loaded from persisted state that predate the heartbeat feature.
#[serde(default)]
pub last_seen: f64,
/// Project this agent is assigned to, if any.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub assigned_project: Option<String>,
@@ -97,8 +101,8 @@ struct AssignAgentRequest {
/// Shared gateway state threaded through HTTP handlers.
#[derive(Clone)]
pub struct GatewayState {
/// The parsed gateway config with all registered projects.
pub config: GatewayConfig,
/// The live set of registered projects (initially loaded from `projects.toml`).
pub projects: Arc<RwLock<BTreeMap<String, ProjectEntry>>>,
/// The currently active project name.
pub active_project: Arc<RwLock<String>>,
/// HTTP client for proxying requests to project containers.
@@ -126,6 +130,21 @@ fn load_agents(config_dir: &Path) -> Vec<JoinedAgent> {
}
}
/// Persist the current projects map to `<config_dir>/projects.toml`.
/// Silently ignores write errors or skips when `config_dir` is empty.
async fn save_config(projects: &BTreeMap<String, ProjectEntry>, config_dir: &Path) {
    if config_dir.as_os_str().is_empty() {
        // No config directory configured — nothing to persist.
        return;
    }
    let serialized = toml::to_string_pretty(&GatewayConfig {
        projects: projects.clone(),
    });
    // Best-effort write: serialization or I/O failures are deliberately ignored.
    if let Ok(data) = serialized {
        let target = config_dir.join("projects.toml");
        let _ = tokio::fs::write(&target, data).await;
    }
}
/// Persist the current agent list to `<config_dir>/gateway_agents.json`.
/// Silently ignores write errors (e.g. read-only filesystem or empty path).
async fn save_agents(agents: &[JoinedAgent], config_dir: &Path) {
@@ -151,7 +170,7 @@ impl GatewayState {
let first = config.projects.keys().next().unwrap().clone();
let agents = load_agents(&config_dir);
Ok(Self {
config,
projects: Arc::new(RwLock::new(config.projects)),
active_project: Arc::new(RwLock::new(first)),
client: Client::new(),
joined_agents: Arc::new(RwLock::new(agents)),
@@ -165,8 +184,9 @@ impl GatewayState {
/// Get the URL of the currently active project.
async fn active_url(&self) -> Result<String, String> {
let name = self.active_project.read().await.clone();
self.config
.projects
self.projects
.read()
.await
.get(&name)
.map(|p| p.url.clone())
.ok_or_else(|| format!("active project '{name}' not found in config"))
@@ -300,7 +320,9 @@ pub async fn gateway_mcp_post_handler(
.unwrap_or("");
if GATEWAY_TOOLS.contains(&tool_name) {
to_json_response(handle_gateway_tool(tool_name, &rpc.params, &state).await)
to_json_response(
handle_gateway_tool(tool_name, &rpc.params, &state, rpc.id.clone()).await,
)
} else {
// Proxy to active project's container.
match proxy_mcp_call(&state, &bytes).await {
@@ -462,18 +484,22 @@ async fn handle_gateway_tool(
tool_name: &str,
params: &Value,
state: &GatewayState,
id: Option<Value>,
) -> JsonRpcResponse {
let id = None; // The caller wraps this in a proper response.
match tool_name {
"switch_project" => handle_switch_project(params, state).await,
"gateway_status" => handle_gateway_status(state).await,
"gateway_health" => handle_gateway_health(state).await,
"switch_project" => handle_switch_project(params, state, id).await,
"gateway_status" => handle_gateway_status(state, id).await,
"gateway_health" => handle_gateway_health(state, id).await,
_ => JsonRpcResponse::error(id, -32601, format!("Unknown gateway tool: {tool_name}")),
}
}
/// Switch the active project.
async fn handle_switch_project(params: &Value, state: &GatewayState) -> JsonRpcResponse {
async fn handle_switch_project(
params: &Value,
state: &GatewayState,
id: Option<Value>,
) -> JsonRpcResponse {
let project = params
.get("arguments")
.and_then(|a| a.get("project"))
@@ -482,41 +508,44 @@ async fn handle_switch_project(params: &Value, state: &GatewayState) -> JsonRpcR
.unwrap_or("");
if project.is_empty() {
return JsonRpcResponse::error(None, -32602, "missing required parameter: project".into());
return JsonRpcResponse::error(id, -32602, "missing required parameter: project".into());
}
if !state.config.projects.contains_key(project) {
let available: Vec<&str> = state.config.projects.keys().map(|s| s.as_str()).collect();
return JsonRpcResponse::error(
None,
-32602,
format!(
"unknown project '{project}'. Available: {}",
available.join(", ")
),
);
}
let url = {
let projects = state.projects.read().await;
if !projects.contains_key(project) {
let available: Vec<&str> = projects.keys().map(|s| s.as_str()).collect();
return JsonRpcResponse::error(
id,
-32602,
format!(
"unknown project '{project}'. Available: {}",
available.join(", ")
),
);
}
projects[project].url.clone()
};
*state.active_project.write().await = project.to_string();
let url = &state.config.projects[project].url;
JsonRpcResponse::success(
None,
id,
json!({
"content": [{
"type": "text",
"text": format!("Switched to project '{project}' ({})", url)
"text": format!("Switched to project '{project}' ({url})")
}]
}),
)
}
/// Show pipeline status for the active project by proxying `get_pipeline_status`.
async fn handle_gateway_status(state: &GatewayState) -> JsonRpcResponse {
async fn handle_gateway_status(state: &GatewayState, id: Option<Value>) -> JsonRpcResponse {
let active = state.active_project.read().await.clone();
let url = match state.active_url().await {
Ok(u) => u,
Err(e) => return JsonRpcResponse::error(None, -32603, e),
Err(e) => return JsonRpcResponse::error(id.clone(), -32603, e),
};
let mcp_url = format!("{}/mcp", url.trim_end_matches('/'));
@@ -537,7 +566,7 @@ async fn handle_gateway_status(state: &GatewayState) -> JsonRpcResponse {
// Extract the result from the upstream response and wrap it.
let pipeline = upstream.get("result").cloned().unwrap_or(json!(null));
JsonRpcResponse::success(
None,
id,
json!({
"content": [{
"type": "text",
@@ -550,20 +579,27 @@ async fn handle_gateway_status(state: &GatewayState) -> JsonRpcResponse {
)
}
Err(e) => {
JsonRpcResponse::error(None, -32603, format!("invalid upstream response: {e}"))
JsonRpcResponse::error(id, -32603, format!("invalid upstream response: {e}"))
}
}
}
Err(e) => JsonRpcResponse::error(None, -32603, format!("failed to reach {mcp_url}: {e}")),
Err(e) => JsonRpcResponse::error(id, -32603, format!("failed to reach {mcp_url}: {e}")),
}
}
/// Aggregate health checks across all registered projects.
async fn handle_gateway_health(state: &GatewayState) -> JsonRpcResponse {
async fn handle_gateway_health(state: &GatewayState, id: Option<Value>) -> JsonRpcResponse {
let mut results = BTreeMap::new();
for (name, entry) in &state.config.projects {
let health_url = format!("{}/health", entry.url.trim_end_matches('/'));
let project_entries: Vec<(String, String)> = state
.projects
.read()
.await
.iter()
.map(|(n, e)| (n.clone(), e.url.clone()))
.collect();
for (name, url) in &project_entries {
let health_url = format!("{}/health", url.trim_end_matches('/'));
let status = match state.client.get(&health_url).send().await {
Ok(resp) => {
if resp.status().is_success() {
@@ -579,7 +615,7 @@ async fn handle_gateway_health(state: &GatewayState) -> JsonRpcResponse {
let active = state.active_project.read().await.clone();
JsonRpcResponse::success(
None,
id,
json!({
"content": [{
"type": "text",
@@ -665,11 +701,13 @@ pub async fn gateway_register_agent_handler(
tokens.remove(&req.token);
drop(tokens);
let now = chrono::Utc::now().timestamp() as f64;
let agent = JoinedAgent {
id: Uuid::new_v4().to_string(),
label: req.label,
address: req.address,
registered_at: chrono::Utc::now().timestamp() as f64,
registered_at: now,
last_seen: now,
assigned_project: None,
};
@@ -749,7 +787,7 @@ pub async fn gateway_assign_agent_handler(
.and_then(|p| if p.is_empty() { None } else { Some(p) });
if let Some(ref p) = project
&& !state.config.projects.contains_key(p.as_str())
&& !state.projects.read().await.contains_key(p.as_str())
{
return Response::builder()
.status(StatusCode::BAD_REQUEST)
@@ -789,6 +827,38 @@ pub async fn gateway_assign_agent_handler(
}
}
/// `POST /gateway/agents/:id/heartbeat` — update an agent's last-seen timestamp.
///
/// Build agents should call this periodically (e.g. every 30 s) so the gateway
/// can distinguish live agents from disconnected ones. Returns 204 No Content on
/// success or 404 if the agent ID is not found.
#[handler]
pub async fn gateway_heartbeat_handler(
    PoemPath(id): PoemPath<String>,
    state: Data<&Arc<GatewayState>>,
) -> Response {
    // Scope the write lock to the timestamp update only; the response is
    // built after the lock is released.
    let updated = {
        let mut agents = state.joined_agents.write().await;
        agents.iter_mut().find(|a| a.id == id).map(|agent| {
            agent.last_seen = chrono::Utc::now().timestamp() as f64;
        })
    };
    match updated {
        Some(()) => Response::builder()
            .status(StatusCode::NO_CONTENT)
            .body(Body::empty()),
        None => Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::from("agent not found")),
    }
}
// ── Health aggregation endpoint ──────────────────────────────────────
/// HTTP GET `/health` handler for the gateway — aggregates health from all projects.
@@ -797,8 +867,15 @@ pub async fn gateway_health_handler(state: Data<&Arc<GatewayState>>) -> Response
let mut all_healthy = true;
let mut statuses = BTreeMap::new();
for (name, entry) in &state.config.projects {
let health_url = format!("{}/health", entry.url.trim_end_matches('/'));
let project_entries: Vec<(String, String)> = state
.projects
.read()
.await
.iter()
.map(|(n, e)| (n.clone(), e.url.clone()))
.collect();
for (name, url) in &project_entries {
let health_url = format!("{}/health", url.trim_end_matches('/'));
let healthy = match state.client.get(&health_url).send().await {
Ok(resp) => resp.status().is_success(),
Err(_) => false,
@@ -1000,8 +1077,9 @@ pub async fn gateway_index_handler() -> Response {
pub async fn gateway_api_handler(state: Data<&Arc<GatewayState>>) -> Response {
let active = state.active_project.read().await.clone();
let projects: Vec<Value> = state
.config
.projects
.read()
.await
.iter()
.map(|(name, entry)| {
json!({
@@ -1032,7 +1110,7 @@ pub async fn gateway_switch_handler(
body: Json<SwitchRequest>,
) -> Response {
let params = json!({ "arguments": { "project": body.project } });
let resp = handle_switch_project(&params, &state).await;
let resp = handle_switch_project(&params, &state, None).await;
let (ok, error) = if resp.result.is_some() {
(true, None)
@@ -1065,6 +1143,104 @@ pub async fn gateway_switch_handler(
))
}
// ── Project management API ───────────────────────────────────────────
/// Request body for adding a new project.
#[derive(Deserialize)]
struct AddProjectRequest {
    // Project name; used as the key in the gateway's projects map.
    name: String,
    // Base URL of the project's container (its /health and /mcp endpoints live under it).
    url: String,
}
/// `POST /api/gateway/projects` — add a new project to the gateway config.
///
/// Expects JSON `{ "name": "...", "url": "..." }`. Returns the created project
/// or 409 Conflict if a project with the same name already exists. Returns 400
/// when either field is empty after trimming.
#[handler]
pub async fn gateway_add_project_handler(
    state: Data<&Arc<GatewayState>>,
    body: Json<AddProjectRequest>,
) -> Response {
    let name = body.0.name.trim().to_string();
    let url = body.0.url.trim().to_string();
    if name.is_empty() {
        return Response::builder()
            .status(StatusCode::BAD_REQUEST)
            .body(Body::from("project name must not be empty"));
    }
    if url.is_empty() {
        return Response::builder()
            .status(StatusCode::BAD_REQUEST)
            .body(Body::from("project url must not be empty"));
    }
    // Insert and snapshot within the same critical section so the persisted
    // file reflects exactly the state that included this insert (taking a
    // second read lock after releasing the write lock would race with
    // concurrent add/remove requests).
    let snapshot = {
        let mut projects = state.projects.write().await;
        if projects.contains_key(&name) {
            return Response::builder()
                .status(StatusCode::CONFLICT)
                .body(Body::from(format!("project '{name}' already exists")));
        }
        projects.insert(name.clone(), ProjectEntry { url: url.clone() });
        projects.clone()
    };
    save_config(&snapshot, &state.config_dir).await;
    crate::slog!("[gateway] Added project '{name}' ({url})");
    let body_val = json!({ "name": name, "url": url });
    Response::builder()
        .status(StatusCode::OK)
        .header("Content-Type", "application/json")
        .body(Body::from(
            serde_json::to_vec(&body_val).unwrap_or_default(),
        ))
}
/// `DELETE /api/gateway/projects/:name` — remove a project from the gateway config.
///
/// Returns 204 No Content on success. Returns 400 if this is the last project
/// (the gateway requires at least one project to remain configured) and 404 if
/// the project does not exist. If the removed project was active, the first
/// remaining project becomes active.
#[handler]
pub async fn gateway_remove_project_handler(
    PoemPath(name): PoemPath<String>,
    state: Data<&Arc<GatewayState>>,
) -> Response {
    let active = state.active_project.read().await.clone();
    // Remove, snapshot, and pick the fallback active project in one critical
    // section: re-acquiring read locks after the write lock is dropped would
    // race with concurrent add/remove requests and could persist or activate
    // a state that never actually existed.
    let (snapshot, fallback) = {
        let mut projects = state.projects.write().await;
        if !projects.contains_key(&name) {
            return Response::builder()
                .status(StatusCode::NOT_FOUND)
                .body(Body::from(format!("project '{name}' not found")));
        }
        if projects.len() == 1 {
            return Response::builder()
                .status(StatusCode::BAD_REQUEST)
                .body(Body::from("cannot remove the last project"));
        }
        projects.remove(&name);
        (projects.clone(), projects.keys().next().cloned())
    };
    save_config(&snapshot, &state.config_dir).await;
    // If the removed project was active, switch to the first remaining.
    if active == name {
        if let Some(new_active) = fallback {
            *state.active_project.write().await = new_active;
        }
    }
    crate::slog!("[gateway] Removed project '{name}'");
    Response::builder()
        .status(StatusCode::NO_CONTENT)
        .body(Body::empty())
}
// ── Bot configuration API ────────────────────────────────────────────
/// Request/response body for the bot configuration API.
/// Quote `s` as a TOML basic string, escaping backslashes and double quotes.
fn toml_string(s: &str) -> String {
    // Backslashes must be escaped first so quote-escapes aren't doubled.
    let escaped = s.replace('\\', "\\\\").replace('"', "\\\"");
    format!("\"{escaped}\"")
}
/// `GET /api/gateway/pipeline` — fetch pipeline status from all registered projects.
///
/// Returns `{ "active": "<project>", "projects": { "<name>": { "active": [...], "backlog": [...], "backlog_count": N } | { "error": "..." } } }`.
#[handler]
pub async fn gateway_all_pipeline_handler(state: Data<&Arc<GatewayState>>) -> Response {
let project_entries: Vec<(String, String)> = state
.projects
.read()
.await
.iter()
.map(|(n, e)| (n.clone(), e.url.clone()))
.collect();
let mut results: BTreeMap<String, Value> = BTreeMap::new();
for (name, url) in &project_entries {
let mcp_url = format!("{}/mcp", url.trim_end_matches('/'));
let rpc_body = json!({
"jsonrpc": "2.0",
"id": 1,
"method": "tools/call",
"params": {
"name": "get_pipeline_status",
"arguments": {}
}
});
let status = match state.client.post(&mcp_url).json(&rpc_body).send().await {
Ok(resp) => match resp.json::<Value>().await {
Ok(upstream) => {
// The tool result is a JSON string embedded in content[0].text.
if let Some(text) = upstream
.get("result")
.and_then(|r| r.get("content"))
.and_then(|c| c.get(0))
.and_then(|c| c.get("text"))
.and_then(|t| t.as_str())
{
serde_json::from_str(text)
.unwrap_or_else(|_| json!({ "error": "invalid pipeline json" }))
} else {
json!({ "error": "unexpected response shape" })
}
}
Err(e) => json!({ "error": format!("invalid response: {e}") }),
},
Err(e) => json!({ "error": format!("unreachable: {e}") }),
};
results.insert(name.clone(), status);
}
let active = state.active_project.read().await.clone();
let body = json!({ "active": active, "projects": results });
Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(Body::from(serde_json::to_vec(&body).unwrap_or_default()))
}
/// `GET /api/gateway/bot-config` — return current bot.toml fields as JSON.
#[handler]
pub async fn gateway_bot_config_get_handler(state: Data<&Arc<GatewayState>>) -> Response {
@@ -1173,7 +1409,7 @@ pub async fn gateway_bot_config_save_handler(
if let Some(h) = handle.take() {
h.abort();
}
let gateway_projects: Vec<String> = state.config.projects.keys().cloned().collect();
let gateway_projects: Vec<String> = state.projects.read().await.keys().cloned().collect();
let new_handle = spawn_gateway_bot(
&state.config_dir,
Arc::clone(&state.active_project),
@@ -1404,53 +1640,27 @@ pub async fn gateway_bot_config_page_handler() -> Response {
// ── Gateway server startup ───────────────────────────────────────────
/// Start the gateway HTTP server. This is the entry point when `--gateway` is used.
pub async fn run(config_path: &Path, port: u16) -> Result<(), std::io::Error> {
// Locate the gateway config directory (parent of `projects.toml`).
let config_dir = config_path
.parent()
.unwrap_or(std::path::Path::new("."))
.to_path_buf();
let config = GatewayConfig::load(config_path).map_err(std::io::Error::other)?;
let state =
GatewayState::new(config, config_dir.clone(), port).map_err(std::io::Error::other)?;
let state_arc = Arc::new(state);
let active = state_arc.active_project.read().await.clone();
crate::slog!("[gateway] Starting gateway on port {port}, active project: {active}");
crate::slog!(
"[gateway] Registered projects: {}",
state_arc
.config
.projects
.keys()
.cloned()
.collect::<Vec<_>>()
.join(", ")
);
// Write `.mcp.json` so that the gateway's Matrix bot's Claude Code CLI
// connects to this gateway's MCP endpoint (which proxies to the active project).
if let Err(e) = write_gateway_mcp_json(&config_dir, port) {
crate::slog!("[gateway] Warning: could not write .mcp.json: {e}");
}
// Spawn the Matrix bot if `.huskies/bot.toml` exists in the config directory.
let gateway_projects: Vec<String> = state_arc.config.projects.keys().cloned().collect();
let bot_abort = spawn_gateway_bot(
&config_dir,
Arc::clone(&state_arc.active_project),
gateway_projects,
port,
);
*state_arc.bot_handle.lock().await = bot_abort;
let route = poem::Route::new()
.at("/", poem::get(gateway_index_handler))
/// Build the complete gateway route tree.
///
/// Extracted from `run` so that tests can construct the full route tree and
/// catch duplicate-route panics before they reach production.
pub fn build_gateway_route(state_arc: Arc<GatewayState>) -> impl poem::Endpoint {
poem::Route::new()
.at("/bot-config", poem::get(gateway_bot_config_page_handler))
.at("/api/gateway", poem::get(gateway_api_handler))
.at("/api/gateway/switch", poem::post(gateway_switch_handler))
.at(
"/api/gateway/pipeline",
poem::get(gateway_all_pipeline_handler),
)
.at(
"/api/gateway/projects",
poem::post(gateway_add_project_handler),
)
.at(
"/api/gateway/projects/:name",
poem::delete(gateway_remove_project_handler),
)
.at(
"/api/gateway/bot-config",
poem::get(gateway_bot_config_get_handler).post(gateway_bot_config_save_handler),
@@ -1479,6 +1689,10 @@ pub async fn run(config_path: &Path, port: u16) -> Result<(), std::io::Error> {
"/gateway/agents/:id/assign",
poem::post(gateway_assign_agent_handler),
)
.at(
"/gateway/agents/:id/heartbeat",
poem::post(gateway_heartbeat_handler),
)
// Serve the embedded React frontend so the gateway has a UI.
.at(
"/assets/*path",
@@ -1486,7 +1700,53 @@ pub async fn run(config_path: &Path, port: u16) -> Result<(), std::io::Error> {
)
.at("/*path", poem::get(crate::http::assets::embedded_file))
.at("/", poem::get(crate::http::assets::embedded_index))
.data(state_arc);
.data(state_arc)
}
/// Start the gateway HTTP server. This is the entry point when `--gateway` is used.
pub async fn run(config_path: &Path, port: u16) -> Result<(), std::io::Error> {
// Locate the gateway config directory (parent of `projects.toml`).
let config_dir = config_path
.parent()
.unwrap_or(std::path::Path::new("."))
.to_path_buf();
let config = GatewayConfig::load(config_path).map_err(std::io::Error::other)?;
let state =
GatewayState::new(config, config_dir.clone(), port).map_err(std::io::Error::other)?;
let state_arc = Arc::new(state);
let active = state_arc.active_project.read().await.clone();
crate::slog!("[gateway] Starting gateway on port {port}, active project: {active}");
crate::slog!(
"[gateway] Registered projects: {}",
state_arc
.projects
.read()
.await
.keys()
.cloned()
.collect::<Vec<_>>()
.join(", ")
);
// Write `.mcp.json` so that the gateway's Matrix bot's Claude Code CLI
// connects to this gateway's MCP endpoint (which proxies to the active project).
if let Err(e) = write_gateway_mcp_json(&config_dir, port) {
crate::slog!("[gateway] Warning: could not write .mcp.json: {e}");
}
// Spawn the Matrix bot if `.huskies/bot.toml` exists in the config directory.
let gateway_projects: Vec<String> = state_arc.projects.read().await.keys().cloned().collect();
let bot_abort = spawn_gateway_bot(
&config_dir,
Arc::clone(&state_arc.active_project),
gateway_projects,
port,
);
*state_arc.bot_handle.lock().await = bot_abort;
let route = build_gateway_route(state_arc);
let host = std::env::var("HUSKIES_HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
let addr = format!("{host}:{port}");
@@ -1653,7 +1913,7 @@ url = "http://localhost:3002"
let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
let params = json!({ "arguments": { "project": "beta" } });
let resp = handle_switch_project(&params, &state).await;
let resp = handle_switch_project(&params, &state, None).await;
assert!(resp.result.is_some());
let active = state.active_project.read().await.clone();
@@ -1673,7 +1933,7 @@ url = "http://localhost:3002"
let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
let params = json!({ "arguments": { "project": "nonexistent" } });
let resp = handle_switch_project(&params, &state).await;
let resp = handle_switch_project(&params, &state, None).await;
assert!(resp.error.is_some());
}
@@ -1923,6 +2183,7 @@ enabled = false
label: "agent-1".into(),
address: "ws://a:3001/crdt-sync".into(),
registered_at: 0.0,
last_seen: 0.0,
assigned_project: None,
});
let app = poem::Route::new()
@@ -1944,6 +2205,7 @@ enabled = false
label: "to-delete".into(),
address: "ws://x:3001/crdt-sync".into(),
registered_at: 0.0,
last_seen: 0.0,
assigned_project: None,
});
let app = poem::Route::new()
@@ -1972,66 +2234,56 @@ enabled = false
assert_eq!(resp.0.status(), StatusCode::NOT_FOUND);
}
// ── Bot configuration helper tests ──────────────────────────────────
#[test]
fn toml_string_plain() {
assert_eq!(toml_string("hello"), "\"hello\"");
#[tokio::test]
async fn heartbeat_updates_last_seen() {
    let state = make_test_state();
    // Register one agent whose last_seen sits at the 0.0 sentinel.
    state.joined_agents.write().await.push(JoinedAgent {
        id: "hb-id".into(),
        label: "hb-agent".into(),
        address: "ws://hb:3001/crdt-sync".into(),
        registered_at: 0.0,
        last_seen: 0.0,
        assigned_project: None,
    });
    let routes = poem::Route::new()
        .at(
            "/gateway/agents/:id/heartbeat",
            poem::post(gateway_heartbeat_handler),
        )
        .data(state.clone());
    let client = poem::test::TestClient::new(routes);
    let response = client.post("/gateway/agents/hb-id/heartbeat").send().await;
    assert_eq!(response.0.status(), StatusCode::NO_CONTENT);
    // The handler must have stamped a fresh (non-zero) last_seen time.
    let agents = state.joined_agents.read().await;
    assert!(agents[0].last_seen > 0.0);
}
#[test]
fn toml_string_escapes_quotes_and_backslashes() {
assert_eq!(toml_string(r#"say "hi""#), r#""say \"hi\"""#);
assert_eq!(toml_string(r"a\b"), r#""a\\b""#);
#[tokio::test]
async fn heartbeat_unknown_id_returns_not_found() {
    // No agents registered, so any id should miss with 404.
    let state = make_test_state();
    let routes = poem::Route::new()
        .at(
            "/gateway/agents/:id/heartbeat",
            poem::post(gateway_heartbeat_handler),
        )
        .data(state.clone());
    let client = poem::test::TestClient::new(routes);
    let response = client
        .post("/gateway/agents/no-such-id/heartbeat")
        .send()
        .await;
    assert_eq!(response.0.status(), StatusCode::NOT_FOUND);
}
/// Build the full gateway route tree and verify it does not panic.
///
/// Poem panics at construction time when duplicate routes are registered.
/// This test catches any regression where a duplicate route is re-introduced
/// (e.g. the `/` vs `/*path` duplicate fixed in commit 0969fb5d).
#[test]
fn write_and_read_matrix_bot_config_round_trips() {
    let tmp = tempfile::tempdir().unwrap();
    // Matrix transport carries homeserver/username/password; the Slack
    // fields stay unset for this variant.
    let written = BotConfigPayload {
        transport: "matrix".into(),
        homeserver: Some("https://matrix.example.com".into()),
        username: Some("@bot:example.com".into()),
        password: Some("s3cr3t".into()),
        slack_bot_token: None,
        slack_signing_secret: None,
    };
    write_bot_config(tmp.path(), &written).expect("write should succeed");
    // Reading the file back must reproduce every field verbatim.
    let read_back = read_bot_config_raw(tmp.path());
    assert_eq!(read_back.transport, "matrix");
    assert_eq!(
        read_back.homeserver.as_deref(),
        Some("https://matrix.example.com")
    );
    assert_eq!(read_back.username.as_deref(), Some("@bot:example.com"));
    assert_eq!(read_back.password.as_deref(), Some("s3cr3t"));
}
#[test]
fn write_and_read_slack_bot_config_round_trips() {
    let tmp = tempfile::tempdir().unwrap();
    // Slack transport carries token/signing secret; Matrix fields stay unset.
    let written = BotConfigPayload {
        transport: "slack".into(),
        homeserver: None,
        username: None,
        password: None,
        slack_bot_token: Some("xoxb-abc123".into()),
        slack_signing_secret: Some("sig-secret".into()),
    };
    write_bot_config(tmp.path(), &written).expect("write should succeed");
    // Reading the file back must reproduce every field verbatim.
    let read_back = read_bot_config_raw(tmp.path());
    assert_eq!(read_back.transport, "slack");
    assert_eq!(read_back.slack_bot_token.as_deref(), Some("xoxb-abc123"));
    assert_eq!(read_back.slack_signing_secret.as_deref(), Some("sig-secret"));
}
#[test]
fn read_bot_config_raw_returns_default_when_file_absent() {
let tmp = tempfile::tempdir().unwrap();
let read = read_bot_config_raw(tmp.path());
assert_eq!(read.transport, "");
assert!(read.homeserver.is_none());
fn gateway_route_tree_builds_without_panic() {
let state = make_test_state();
// build_gateway_route will panic if any route is registered more than once.
let _route = build_gateway_route(state);
}
}
+55
View File
@@ -0,0 +1,55 @@
//! Bot configuration endpoints — GET/PUT for .huskies/bot.toml credentials.
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[derive(Tags)]
enum BotConfigTags {
BotConfig,
}
/// Wire payload for GET/PUT `/bot/config` — mirrors `.huskies/bot.toml`.
/// Every field is optional so partially-configured files round-trip cleanly.
#[derive(Object, Serialize, Deserialize, Default)]
struct BotConfigPayload {
    /// Chat transport selector. NOTE(review): values are not validated here;
    /// sibling tests use "matrix" and "slack" — confirm against consumers.
    pub transport: Option<String>,
    /// Whether the bot should run at all.
    pub enabled: Option<bool>,
    /// Matrix credentials: homeserver URL, user id, password, rooms to join.
    pub homeserver: Option<String>,
    pub username: Option<String>,
    pub password: Option<String>,
    pub room_ids: Option<Vec<String>>,
    /// Slack credentials: bot token, request-signing secret, channels.
    pub slack_bot_token: Option<String>,
    pub slack_signing_secret: Option<String>,
    pub slack_channel_ids: Option<Vec<String>>,
}
/// OpenAPI handler group for reading and writing `.huskies/bot.toml`.
pub struct BotConfigApi {
    // Shared application context; used to resolve the active project root.
    pub ctx: Arc<AppContext>,
}
#[OpenApi(tag = "BotConfigTags::BotConfig")]
impl BotConfigApi {
    /// Read current bot credentials from .huskies/bot.toml.
    ///
    /// Returns a default (all-`None`) payload when the file is absent or
    /// fails to parse, so the frontend always receives a well-formed body.
    #[oai(path = "/bot/config", method = "get")]
    async fn get_config(&self) -> OpenApiResult<Json<BotConfigPayload>> {
        let root = self.ctx.state.get_project_root().map_err(bad_request)?;
        let path = root.join(".huskies").join("bot.toml");
        let config: BotConfigPayload = std::fs::read_to_string(&path)
            .ok()
            .and_then(|s| toml::from_str(&s).ok())
            .unwrap_or_default();
        Ok(Json(config))
    }
    /// Persist bot credentials to .huskies/bot.toml.
    ///
    /// Echoes the stored payload back on success.
    #[oai(path = "/bot/config", method = "put")]
    async fn put_config(
        &self,
        payload: Json<BotConfigPayload>,
    ) -> OpenApiResult<Json<BotConfigPayload>> {
        let root = self.ctx.state.get_project_root().map_err(bad_request)?;
        let dir = root.join(".huskies");
        // `std::fs::write` does not create parent directories; without this,
        // a PUT against a project root that has no `.huskies/` yet would
        // fail with NotFound. create_dir_all is idempotent when it exists.
        std::fs::create_dir_all(&dir).map_err(|e| bad_request(e.to_string()))?;
        let path = dir.join("bot.toml");
        let content = toml::to_string(&payload.0).map_err(|e| bad_request(e.to_string()))?;
        std::fs::write(&path, content).map_err(|e| bad_request(e.to_string()))?;
        Ok(payload)
    }
}
+194
View File
@@ -230,6 +230,92 @@ pub(super) fn tool_get_agent_config(ctx: &AppContext) -> Result<String, String>
.map_err(|e| format!("Serialization error: {e}"))
}
/// Get remaining turns and budget for a running agent.
///
/// Returns turns used, max turns, remaining turns, budget used, max budget,
/// and remaining budget for the named agent. Fails if the agent is not
/// currently running or pending.
///
/// Notes:
/// * Turns are counted by scanning the story's log files for `agent_json`
///   events whose inner payload type is `assistant`.
/// * Budget is summed from recorded token-usage entries for this
///   story/agent pair; presumably these cover completed sessions only —
///   TODO confirm against `token_usage::read_all`.
/// * `remaining_turns` / `remaining_budget_usd` are `null` when no limit is
///   configured, and can go negative once a limit has been exceeded.
pub(super) fn tool_get_agent_remaining_turns_and_budget(
    args: &Value,
    ctx: &AppContext,
) -> Result<String, String> {
    let story_id = args
        .get("story_id")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: story_id")?;
    let agent_name = args
        .get("agent_name")
        .and_then(|v| v.as_str())
        .ok_or("Missing required argument: agent_name")?;
    // Verify the agent exists and is running/pending.
    let agents = ctx.agents.list_agents()?;
    let agent_info = agents
        .iter()
        .find(|a| a.story_id == story_id && a.agent_name == agent_name)
        .ok_or_else(|| format!("No agent '{agent_name}' found for story '{story_id}'"))?;
    if !matches!(
        agent_info.status,
        crate::agents::AgentStatus::Running | crate::agents::AgentStatus::Pending
    ) {
        return Err(format!(
            "Agent '{agent_name}' for story '{story_id}' is not running (status: {})",
            agent_info.status
        ));
    }
    let project_root = ctx.agents.get_project_root(&ctx.state)?;
    let config = ProjectConfig::load(&project_root)?;
    // Find the agent config (max_turns, max_budget_usd). Both limits are
    // optional; absent limits propagate as None/null through the output.
    let agent_config = config.agent.iter().find(|a| a.name == agent_name);
    let max_turns = agent_config.and_then(|a| a.max_turns);
    let max_budget_usd = agent_config.and_then(|a| a.max_budget_usd);
    // Count turns by reading log files and counting assistant events.
    let log_files =
        crate::agent_log::list_story_log_files(&project_root, story_id, Some(agent_name));
    let mut turns_used: u64 = 0;
    for path in &log_files {
        // Unreadable log files are skipped silently rather than failing the call.
        if let Ok(entries) = crate::agent_log::read_log(path) {
            for entry in &entries {
                // One "turn" == one assistant message wrapped in an agent_json event.
                if entry.event.get("type").and_then(|v| v.as_str()) == Some("agent_json")
                    && let Some(data) = entry.event.get("data")
                    && data.get("type").and_then(|v| v.as_str()) == Some("assistant")
                {
                    turns_used += 1;
                }
            }
        }
    }
    // Compute budget used from completed-session token usage records.
    let all_records = crate::agents::token_usage::read_all(&project_root).unwrap_or_default();
    let budget_used_usd: f64 = all_records
        .iter()
        .filter(|r| r.story_id == story_id && r.agent_name == agent_name)
        .map(|r| r.usage.total_cost_usd)
        .sum();
    // Signed arithmetic so an over-limit agent reports a negative remainder.
    let remaining_turns = max_turns.map(|max| (max as i64) - (turns_used as i64));
    let remaining_budget_usd = max_budget_usd.map(|max| max - budget_used_usd);
    serde_json::to_string_pretty(&json!({
        "story_id": story_id,
        "agent_name": agent_name,
        "status": agent_info.status.to_string(),
        "turns_used": turns_used,
        "max_turns": max_turns,
        "remaining_turns": remaining_turns,
        "budget_used_usd": budget_used_usd,
        "max_budget_usd": max_budget_usd,
        "remaining_budget_usd": remaining_budget_usd,
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
pub(super) async fn tool_wait_for_agent(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
@@ -840,4 +926,112 @@ stage = "coder"
let pct = read_coverage_percent_from_json(tmp.path());
assert!(pct.is_none());
}
// ── get_agent_remaining_turns_and_budget tests ──────────────────────────
#[test]
fn tool_get_agent_remaining_turns_and_budget_missing_story_id() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // Only agent_name supplied — the tool must reject the call.
    let args = json!({"agent_name": "coder-1"});
    let err = tool_get_agent_remaining_turns_and_budget(&args, &ctx)
        .expect_err("missing story_id must fail");
    assert!(err.contains("story_id"));
}
#[test]
fn tool_get_agent_remaining_turns_and_budget_missing_agent_name() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // Only story_id supplied — the tool must reject the call.
    let args = json!({"story_id": "1_test"});
    let err = tool_get_agent_remaining_turns_and_budget(&args, &ctx)
        .expect_err("missing agent_name must fail");
    assert!(err.contains("agent_name"));
}
#[test]
fn tool_get_agent_remaining_turns_and_budget_no_agent_returns_error() {
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // Both arguments present, but no such agent was ever spawned.
    let args = json!({"story_id": "99_nope", "agent_name": "coder-1"});
    let err = tool_get_agent_remaining_turns_and_budget(&args, &ctx)
        .expect_err("unknown agent must fail");
    assert!(
        err.contains("No agent"),
        "expected 'No agent' error, got: {err}"
    );
}
#[test]
fn tool_get_agent_remaining_turns_and_budget_completed_agent_returns_error() {
    use crate::agents::AgentStatus;
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // A finished agent is known but no longer running — must be rejected.
    ctx.agents
        .inject_test_agent("42_story", "coder-1", AgentStatus::Completed);
    let args = json!({"story_id": "42_story", "agent_name": "coder-1"});
    let err = tool_get_agent_remaining_turns_and_budget(&args, &ctx)
        .expect_err("completed agent must fail");
    assert!(
        err.contains("not running"),
        "expected 'not running' error, got: {err}"
    );
}
#[test]
fn tool_get_agent_remaining_turns_and_budget_running_agent_returns_data() {
    use crate::agents::AgentStatus;
    use crate::store::StoreOps;
    let tmp = tempfile::tempdir().unwrap();
    let ctx = test_ctx(tmp.path());
    // The tool resolves the project root via the store, so set it explicitly.
    ctx.store
        .set("project_root", json!(tmp.path().to_string_lossy().as_ref()));
    ctx.agents
        .inject_test_agent("42_story", "coder-1", AgentStatus::Running);
    let result = tool_get_agent_remaining_turns_and_budget(
        &json!({"story_id": "42_story", "agent_name": "coder-1"}),
        &ctx,
    )
    .unwrap();
    // The tool returns pretty-printed JSON; parse and check every field.
    let parsed: Value = serde_json::from_str(&result).unwrap();
    assert_eq!(parsed["story_id"], "42_story");
    assert_eq!(parsed["agent_name"], "coder-1");
    assert_eq!(parsed["status"], "running");
    assert!(parsed.get("turns_used").is_some());
    assert!(parsed.get("budget_used_usd").is_some());
    // max_turns and max_budget_usd may be null if not configured
    assert!(parsed.get("max_turns").is_some());
    assert!(parsed.get("remaining_turns").is_some());
    assert!(parsed.get("max_budget_usd").is_some());
    assert!(parsed.get("remaining_budget_usd").is_some());
}
#[test]
fn tool_get_agent_remaining_turns_and_budget_in_tools_list() {
    use super::super::handle_tools_list;
    let resp = handle_tools_list(Some(json!(1)));
    let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
    // The new tool must be advertised to MCP clients.
    let tool = tools
        .iter()
        .find(|t| t["name"] == "get_agent_remaining_turns_and_budget");
    assert!(
        tool.is_some(),
        "get_agent_remaining_turns_and_budget missing from tools list"
    );
    // Both arguments must be declared required in the advertised schema.
    let t = tool.unwrap();
    let required = t["inputSchema"]["required"].as_array().unwrap();
    let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
    assert!(req_names.contains(&"story_id"));
    assert!(req_names.contains(&"agent_name"));
}
}
+3 -2
View File
@@ -349,13 +349,14 @@ pub(super) fn tool_dump_crdt(args: &Value) -> Result<String, String> {
.map_err(|e| format!("Serialization error: {e}"))
}
/// MCP tool: return the server version and build hash.
pub(super) fn tool_get_version() -> Result<String, String> {
/// MCP tool: return the server version, build hash, and running port.
pub(super) fn tool_get_version(ctx: &AppContext) -> Result<String, String> {
    // Falls back to "unknown" when the build-hash file is absent (e.g. a dev
    // build that never produced it). NOTE(review): the path is relative to
    // the process CWD, not the project root — confirm that is intended.
    let build_hash =
        std::fs::read_to_string(".huskies/build_hash").unwrap_or_else(|_| "unknown".to_string());
    serde_json::to_string_pretty(&json!({
        "version": env!("CARGO_PKG_VERSION"),
        "build_hash": build_hash.trim(),
        "port": ctx.agents.port(),
    }))
    .map_err(|e| format!("Serialization error: {e}"))
}
+25 -3
View File
@@ -431,6 +431,24 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
"required": ["story_id", "agent_name"]
}
},
{
"name": "get_agent_remaining_turns_and_budget",
"description": "Get remaining turns and budget for a running agent. Returns turns used, max turns, remaining turns, budget used (from completed sessions), max budget, and remaining budget. Only works for agents in running or pending state.",
"inputSchema": {
"type": "object",
"properties": {
"story_id": {
"type": "string",
"description": "Story identifier (e.g. '42_story_my_feature')"
},
"agent_name": {
"type": "string",
"description": "Agent name (e.g. 'coder-1', 'mergemaster', 'qa')"
}
},
"required": ["story_id", "agent_name"]
}
},
{
"name": "create_worktree",
"description": "Create a git worktree for a story under .huskies/worktrees/{story_id} with deterministic naming. Writes .mcp.json and runs component setup. Returns the worktree path.",
@@ -879,7 +897,7 @@ fn handle_tools_list(id: Option<Value>) -> JsonRpcResponse {
},
{
"name": "get_version",
"description": "Return the server version and build hash.",
"description": "Return the server version, build hash, and running port.",
"inputSchema": {
"type": "object",
"properties": {}
@@ -1272,6 +1290,9 @@ async fn handle_tools_call(id: Option<Value>, params: &Value, ctx: &AppContext)
"reload_agent_config" => agent_tools::tool_get_agent_config(ctx),
"get_agent_output" => agent_tools::tool_get_agent_output(&args, ctx).await,
"wait_for_agent" => agent_tools::tool_wait_for_agent(&args, ctx).await,
"get_agent_remaining_turns_and_budget" => {
agent_tools::tool_get_agent_remaining_turns_and_budget(&args, ctx)
}
// Worktree tools
"create_worktree" => agent_tools::tool_create_worktree(&args, ctx).await,
"list_worktrees" => agent_tools::tool_list_worktrees(ctx),
@@ -1309,7 +1330,7 @@ async fn handle_tools_call(id: Option<Value>, params: &Value, ctx: &AppContext)
"get_pipeline_status" => story_tools::tool_get_pipeline_status(ctx),
// Diagnostics
"get_server_logs" => diagnostics::tool_get_server_logs(&args),
"get_version" => diagnostics::tool_get_version(),
"get_version" => diagnostics::tool_get_version(ctx),
// Server lifecycle
"rebuild_and_restart" => diagnostics::tool_rebuild_and_restart(ctx).await,
// Permission bridge (Claude Code → frontend dialog)
@@ -1423,6 +1444,7 @@ mod tests {
assert!(names.contains(&"reload_agent_config"));
assert!(names.contains(&"get_agent_output"));
assert!(names.contains(&"wait_for_agent"));
assert!(names.contains(&"get_agent_remaining_turns_and_budget"));
assert!(names.contains(&"create_worktree"));
assert!(names.contains(&"list_worktrees"));
assert!(names.contains(&"remove_worktree"));
@@ -1469,7 +1491,7 @@ mod tests {
assert!(names.contains(&"dump_crdt"));
assert!(names.contains(&"get_version"));
assert!(names.contains(&"remove_criterion"));
assert_eq!(tools.len(), 65);
assert_eq!(tools.len(), 66);
}
#[test]
+194 -10
View File
@@ -43,6 +43,8 @@ pub(crate) fn step_output_path(
.join("STACK.md"),
),
WizardStep::TestScript => Some(project_root.join("script").join("test")),
WizardStep::BuildScript => Some(project_root.join("script").join("build")),
WizardStep::LintScript => Some(project_root.join("script").join("lint")),
WizardStep::ReleaseScript => Some(project_root.join("script").join("release")),
WizardStep::TestCoverage => Some(project_root.join("script").join("test_coverage")),
WizardStep::Scaffold => None,
@@ -52,22 +54,35 @@ pub(crate) fn step_output_path(
pub(crate) fn is_script_step(step: WizardStep) -> bool {
matches!(
step,
WizardStep::TestScript | WizardStep::ReleaseScript | WizardStep::TestCoverage
WizardStep::TestScript
| WizardStep::BuildScript
| WizardStep::LintScript
| WizardStep::ReleaseScript
| WizardStep::TestCoverage
)
}
/// Write `content` to `path` only when the file does not already exist.
/// Write `content` to `path`, skipping if the file already exists with real
/// (non-template) content.
///
/// Existing files (including `CLAUDE.md`) are never overwritten — the wizard
/// appends or skips per the acceptance criteria. For script steps the file is
/// also made executable after writing.
/// Scaffold template files (those containing [`TEMPLATE_SENTINEL`]) are treated
/// as placeholders and will be overwritten with the wizard-generated content.
/// Files with real user content are never overwritten. For script steps the
/// file is also made executable after writing.
pub(crate) fn write_if_missing(
path: &Path,
content: &str,
executable: bool,
) -> Result<bool, String> {
use crate::io::onboarding::TEMPLATE_SENTINEL;
if path.exists() {
return Ok(false); // already present — skip silently
// Overwrite scaffold template placeholders; preserve real user content.
let is_template = std::fs::read_to_string(path)
.map(|s| s.contains(TEMPLATE_SENTINEL))
.unwrap_or(false);
if !is_template {
return Ok(false); // real content already present — skip
}
}
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
@@ -247,6 +262,90 @@ pub(crate) fn generation_hint(step: WizardStep, project_root: &Path) -> String {
}
}
}
WizardStep::BuildScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate build commands for their chosen language and framework."
.to_string()
} else {
let has_cargo = project_root.join("Cargo.toml").exists();
let has_pkg = project_root.join("package.json").exists();
let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
let has_frontend_subdir =
project_root.join("frontend").join("package.json").exists()
|| project_root.join("client").join("package.json").exists();
let has_go = project_root.join("go.mod").exists();
let mut cmds = Vec::new();
if has_cargo {
cmds.push("cargo build --release");
}
if has_pkg {
cmds.push(if has_pnpm {
"pnpm run build"
} else {
"npm run build"
});
}
if has_frontend_subdir {
cmds.push("(cd frontend && npm run build)");
}
if has_go {
cmds.push("go build ./...");
}
if cmds.is_empty() {
"Generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) that builds the project.".to_string()
} else {
format!(
"Generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
cmds.join(", ")
)
}
}
}
WizardStep::LintScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate lint commands for their chosen language and framework."
.to_string()
} else {
let has_cargo = project_root.join("Cargo.toml").exists();
let has_pkg = project_root.join("package.json").exists();
let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
let has_python = project_root.join("pyproject.toml").exists()
|| project_root.join("requirements.txt").exists();
let has_go = project_root.join("go.mod").exists();
let mut cmds = Vec::new();
if has_cargo {
cmds.push("cargo fmt --all --check");
cmds.push("cargo clippy -- -D warnings");
}
if has_pkg {
cmds.push(if has_pnpm {
"pnpm run lint"
} else {
"npm run lint"
});
}
if has_python {
cmds.push("flake8 . (or ruff check . if ruff is configured)");
}
if has_go {
cmds.push("go vet ./...");
}
if cmds.is_empty() {
"Generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs the project's linters.".to_string()
} else {
format!(
"Generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
cmds.join(", ")
)
}
}
}
WizardStep::ReleaseScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
@@ -473,13 +572,13 @@ mod tests {
fn wizard_confirm_does_not_overwrite_existing_file() {
let dir = TempDir::new().unwrap();
let ctx = setup(&dir);
// Pre-create the specs directory and file.
// Pre-create the specs directory and file with real (non-template) content.
let specs_dir = dir.path().join(".huskies").join("specs");
std::fs::create_dir_all(&specs_dir).unwrap();
let context_path = specs_dir.join("00_CONTEXT.md");
std::fs::write(&context_path, "original content").unwrap();
// Stage and confirm — existing file should NOT be overwritten.
// Stage and confirm — existing real file should NOT be overwritten.
tool_wizard_generate(&serde_json::json!({"content": "new content"}), &ctx).unwrap();
let result = tool_wizard_confirm(&ctx).unwrap();
assert!(result.contains("already exists"));
@@ -489,6 +588,34 @@ mod tests {
);
}
#[test]
fn wizard_confirm_overwrites_scaffold_template_file() {
    let dir = TempDir::new().unwrap();
    let ctx = setup(&dir);
    // Pre-create the file with scaffold template placeholder content.
    // The sentinel comment marks the file as a placeholder eligible for
    // overwrite, unlike files with real user content.
    let specs_dir = dir.path().join(".huskies").join("specs");
    std::fs::create_dir_all(&specs_dir).unwrap();
    let context_path = specs_dir.join("00_CONTEXT.md");
    std::fs::write(
        &context_path,
        "<!-- huskies:scaffold-template -->\n# Project Context\n\nTODO: Describe...",
    )
    .unwrap();
    // Stage and confirm — template placeholder should be overwritten with generated content.
    tool_wizard_generate(
        &serde_json::json!({"content": "# My Real Project\n\nThis is a real project."}),
        &ctx,
    )
    .unwrap();
    let result = tool_wizard_confirm(&ctx).unwrap();
    assert!(result.contains("confirmed"));
    // The generated content fully replaces the placeholder file.
    assert_eq!(
        std::fs::read_to_string(&context_path).unwrap(),
        "# My Real Project\n\nThis is a real project."
    );
}
#[test]
fn wizard_skip_advances_wizard() {
let dir = TempDir::new().unwrap();
@@ -517,8 +644,8 @@ mod tests {
fn wizard_complete_returns_done_message() {
let dir = TempDir::new().unwrap();
let ctx = setup(&dir);
// Skip all remaining steps.
for _ in 0..5 {
// Skip all remaining steps (scaffold is pre-confirmed, so 7 remaining).
for _ in 0..7 {
tool_wizard_skip(&ctx).unwrap();
}
let result = tool_wizard_status(&ctx).unwrap();
@@ -629,4 +756,61 @@ mod tests {
assert!(hint.contains("cargo nextest"));
assert!(!hint.contains("bare project"));
}
#[test]
fn generation_hint_bare_build_script_references_stack() {
    let dir = TempDir::new().unwrap();
    // A directory containing only `.huskies` counts as a "bare" project.
    std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
    let hint = generation_hint(WizardStep::BuildScript, dir.path());
    assert!(hint.contains("bare project"));
    assert!(hint.contains("STACK.md"));
}
#[test]
fn generation_hint_bare_lint_script_references_stack() {
    let dir = TempDir::new().unwrap();
    // A directory containing only `.huskies` counts as a "bare" project.
    std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
    let hint = generation_hint(WizardStep::LintScript, dir.path());
    assert!(hint.contains("bare project"));
    assert!(hint.contains("STACK.md"));
}
#[test]
fn generation_hint_existing_project_build_script_detects_cargo() {
    let dir = TempDir::new().unwrap();
    // A Cargo.toml at the root should steer the hint toward `cargo build`.
    std::fs::write(dir.path().join("Cargo.toml"), "[package]").unwrap();
    let hint = generation_hint(WizardStep::BuildScript, dir.path());
    assert!(hint.contains("cargo build --release"));
    assert!(!hint.contains("bare project"));
}
#[test]
fn generation_hint_existing_project_lint_script_detects_cargo() {
    let dir = TempDir::new().unwrap();
    // A Cargo.toml at the root should steer the hint toward fmt + clippy.
    std::fs::write(dir.path().join("Cargo.toml"), "[package]").unwrap();
    let hint = generation_hint(WizardStep::LintScript, dir.path());
    assert!(hint.contains("cargo fmt --all --check"));
    assert!(hint.contains("cargo clippy -- -D warnings"));
    assert!(!hint.contains("bare project"));
}
#[test]
fn step_output_path_build_script_returns_script_build() {
    let dir = TempDir::new().unwrap();
    // BuildScript must map to the conventional script/build path.
    let path = step_output_path(dir.path(), WizardStep::BuildScript).unwrap();
    assert!(path.ends_with("script/build"));
}
#[test]
fn step_output_path_lint_script_returns_script_lint() {
    let dir = TempDir::new().unwrap();
    // LintScript must map to the conventional script/lint path.
    let path = step_output_path(dir.path(), WizardStep::LintScript).unwrap();
    assert!(path.ends_with("script/lint"));
}
#[test]
fn is_script_step_includes_build_and_lint() {
    // Both new wizard steps must be treated as executable script steps.
    assert!(is_script_step(WizardStep::BuildScript));
    assert!(is_script_step(WizardStep::LintScript));
}
}
+6 -1
View File
@@ -4,6 +4,7 @@ pub mod agents_sse;
pub mod anthropic;
pub mod assets;
pub mod bot_command;
pub mod bot_config;
pub mod chat;
pub mod context;
pub mod health;
@@ -23,6 +24,7 @@ pub mod ws;
use agents::AgentsApi;
use anthropic::AnthropicApi;
use bot_command::BotCommandApi;
use bot_config::BotConfigApi;
use chat::ChatApi;
use context::AppContext;
use health::HealthApi;
@@ -196,6 +198,7 @@ type ApiTuple = (
HealthApi,
BotCommandApi,
wizard::WizardApi,
BotConfigApi,
);
type ApiService = OpenApiService<ApiTuple, ()>;
@@ -213,6 +216,7 @@ pub fn build_openapi_service(ctx: Arc<AppContext>) -> (ApiService, ApiService) {
HealthApi,
BotCommandApi { ctx: ctx.clone() },
wizard::WizardApi { ctx: ctx.clone() },
BotConfigApi { ctx: ctx.clone() },
);
let api_service =
@@ -228,7 +232,8 @@ pub fn build_openapi_service(ctx: Arc<AppContext>) -> (ApiService, ApiService) {
SettingsApi { ctx: ctx.clone() },
HealthApi,
BotCommandApi { ctx: ctx.clone() },
wizard::WizardApi { ctx },
wizard::WizardApi { ctx: ctx.clone() },
BotConfigApi { ctx },
);
let docs_service =
+4 -2
View File
@@ -195,7 +195,7 @@ mod tests {
let body: serde_json::Value = resp.0.into_body().into_json().await.unwrap();
assert_eq!(body["current_step_index"], 1);
assert!(!body["completed"].as_bool().unwrap());
assert_eq!(body["steps"].as_array().unwrap().len(), 6);
assert_eq!(body["steps"].as_array().unwrap().len(), 8);
assert_eq!(body["steps"][0]["status"], "confirmed");
}
@@ -279,11 +279,13 @@ mod tests {
let (dir, client) = setup();
WizardState::init_if_missing(dir.path());
// Steps 2-6 (scaffold is already confirmed)
// Steps 2-8 (scaffold is already confirmed)
let steps = [
"context",
"stack",
"test_script",
"build_script",
"lint_script",
"release_script",
"test_coverage",
];
+21 -11
View File
@@ -37,6 +37,13 @@ pub(crate) async fn ensure_project_root_with_story_kit(
if !path.join(".huskies").is_dir() {
scaffold_story_kit(&path, port)?;
}
// Always update .mcp.json with the current port so the bot connects to
// the right endpoint even when HUSKIES_PORT changes between restarts.
let mcp_content = format!(
"{{\n \"mcpServers\": {{\n \"huskies\": {{\n \"type\": \"http\",\n \"url\": \"http://localhost:{port}/mcp\"\n }}\n }}\n}}\n"
);
fs::write(path.join(".mcp.json"), mcp_content)
.map_err(|e| format!("Failed to write .mcp.json: {}", e))?;
Ok(())
})
.await
@@ -194,16 +201,15 @@ mod tests {
}
#[tokio::test]
async fn open_project_does_not_overwrite_existing_mcp_json() {
// scaffold must NOT overwrite .mcp.json when it already exists — QA
// test servers share the real project root, and re-writing would
// clobber the file with the wrong port.
async fn open_project_updates_mcp_json_with_current_port() {
// .mcp.json must always be updated with the actual running port so the
// bot connects to the right MCP endpoint even when HUSKIES_PORT changes.
let dir = tempdir().unwrap();
let project_dir = dir.path().join("myproject");
fs::create_dir_all(&project_dir).unwrap();
// Pre-write .mcp.json with a different port to simulate an already-configured project.
// Pre-write .mcp.json with a different port to simulate a stale file.
let mcp_path = project_dir.join(".mcp.json");
fs::write(&mcp_path, "{\"existing\": true}").unwrap();
fs::write(&mcp_path, "{\"stale\": true}").unwrap();
let store = make_store(&dir);
let state = SessionState::default();
@@ -211,15 +217,19 @@ mod tests {
project_dir.to_string_lossy().to_string(),
&state,
&store,
3001,
3002,
)
.await
.unwrap();
assert_eq!(
fs::read_to_string(&mcp_path).unwrap(),
"{\"existing\": true}",
"open_project must not overwrite an existing .mcp.json"
let content = fs::read_to_string(&mcp_path).unwrap();
assert!(
content.contains("3002"),
"open_project must update .mcp.json with the actual running port"
);
assert!(
content.contains("localhost"),
"mcp.json must reference localhost"
);
}
+661 -7
View File
@@ -199,33 +199,202 @@ pub fn detect_components_toml(root: &Path) -> String {
sections.join("\n")
}
/// Pick the Node.js test command for a directory that holds a `package.json`.
///
/// Known runners (vitest, jest) are detected by scanning the manifest text;
/// when neither appears, the package manager's own `test` script is used.
/// pnpm is preferred whenever a `pnpm-lock.yaml` lock file is present.
fn detect_node_test_cmd(pkg_dir: &Path) -> String {
    let uses_pnpm = pkg_dir.join("pnpm-lock.yaml").exists();
    // Runner invocations go through the package manager's exec front-end.
    let exec_prefix = if uses_pnpm { "pnpm" } else { "npx" };
    let manifest = std::fs::read_to_string(pkg_dir.join("package.json")).unwrap_or_default();
    if manifest.contains("\"vitest\"") {
        format!("{exec_prefix} vitest run")
    } else if manifest.contains("\"jest\"") {
        format!("{exec_prefix} jest")
    } else if uses_pnpm {
        "pnpm test".to_string()
    } else {
        "npm test".to_string()
    }
}
/// Pick the Node.js build command for a directory that holds a `package.json`.
///
/// Uses pnpm when a `pnpm-lock.yaml` lock file is present, npm otherwise.
fn detect_node_build_cmd(pkg_dir: &Path) -> String {
    match pkg_dir.join("pnpm-lock.yaml").exists() {
        true => "pnpm run build".to_string(),
        false => "npm run build".to_string(),
    }
}
/// Pick the Node.js lint command for a directory that holds a `package.json`.
///
/// When eslint appears in the manifest it is invoked directly (via pnpm or
/// npx depending on the lock file); otherwise the package's own `lint`
/// script is run through the detected package manager.
fn detect_node_lint_cmd(pkg_dir: &Path) -> String {
    let uses_pnpm = pkg_dir.join("pnpm-lock.yaml").exists();
    let manifest = std::fs::read_to_string(pkg_dir.join("package.json")).unwrap_or_default();
    if manifest.contains("\"eslint\"") {
        let exec_prefix = if uses_pnpm { "pnpm" } else { "npx" };
        format!("{exec_prefix} eslint .")
    } else if uses_pnpm {
        "pnpm run lint".to_string()
    } else {
        "npm run lint".to_string()
    }
}
/// Generate `script/build` content for a new project at `root`.
///
/// Looks for well-known marker files (`Cargo.toml`, `package.json`,
/// `pyproject.toml`, `go.mod`) plus a frontend under `frontend/` or
/// `client/`, and emits one build command per detected stack, run
/// sequentially. When nothing is recognised a generic stub is returned so
/// the scaffold is always a valid script.
///
/// For a frontend subdirectory, the build command is chosen from the
/// presence of `pnpm-lock.yaml` in that subdirectory.
pub fn detect_script_build(root: &Path) -> String {
    let mut commands: Vec<String> = Vec::new();
    if root.join("Cargo.toml").exists() {
        commands.push("cargo build --release".to_string());
    }
    if root.join("package.json").exists() {
        commands.push(detect_node_build_cmd(root));
    }
    // A frontend living in a conventional subdirectory builds from inside
    // that directory via a subshell, so the cwd of the outer script is kept.
    for subdir in ["frontend", "client"] {
        let sub_path = root.join(subdir);
        if sub_path.join("package.json").exists() {
            let cmd = detect_node_build_cmd(&sub_path);
            commands.push(format!("(cd {subdir} && {cmd})"));
        }
    }
    if root.join("pyproject.toml").exists() {
        commands.push("python -m build".to_string());
    }
    if root.join("go.mod").exists() {
        commands.push("go build ./...".to_string());
    }
    if commands.is_empty() {
        // Generic stub keeps the scaffold runnable when nothing is detected.
        return "#!/usr/bin/env bash\nset -euo pipefail\n\n# Add your project's build commands here.\necho \"No build configured\"\n".to_string();
    }
    let mut script = String::from("#!/usr/bin/env bash\nset -euo pipefail\n\n");
    for cmd in &commands {
        script.push_str(cmd);
        script.push('\n');
    }
    script
}
/// Generate `script/lint` content for a new project at `root`.
///
/// Looks for well-known marker files to identify which linters apply and
/// emits the matching lint commands, run sequentially for multi-stack
/// projects. When nothing is recognised a generic stub is returned so the
/// scaffold is always a valid script.
///
/// For a frontend under `frontend/` or `client/`, the lint command is
/// detected from that subdirectory's `package.json` (eslint, npm, pnpm).
pub fn detect_script_lint(root: &Path) -> String {
    let mut commands: Vec<String> = Vec::new();
    if root.join("Cargo.toml").exists() {
        commands.push("cargo fmt --all --check".to_string());
        commands.push("cargo clippy -- -D warnings".to_string());
    }
    if root.join("package.json").exists() {
        commands.push(detect_node_lint_cmd(root));
    }
    // A frontend living in a conventional subdirectory lints from inside it.
    for subdir in ["frontend", "client"] {
        let sub_path = root.join(subdir);
        if sub_path.join("package.json").exists() {
            let cmd = detect_node_lint_cmd(&sub_path);
            commands.push(format!("(cd {subdir} && {cmd})"));
        }
    }
    if root.join("pyproject.toml").exists() || root.join("requirements.txt").exists() {
        // ruff wins over flake8 when it is mentioned in either manifest.
        let mut manifests =
            std::fs::read_to_string(root.join("pyproject.toml")).unwrap_or_default();
        manifests
            .push_str(&std::fs::read_to_string(root.join("requirements.txt")).unwrap_or_default());
        let py_linter = if manifests.contains("ruff") {
            "ruff check ."
        } else {
            "flake8 ."
        };
        commands.push(py_linter.to_string());
    }
    if root.join("go.mod").exists() {
        commands.push("go vet ./...".to_string());
    }
    if commands.is_empty() {
        // Generic stub keeps the scaffold runnable when nothing is detected.
        return "#!/usr/bin/env bash\nset -euo pipefail\n\n# Add your project's lint commands here.\necho \"No linters configured\"\n".to_string();
    }
    let mut script = String::from("#!/usr/bin/env bash\nset -euo pipefail\n\n");
    for cmd in &commands {
        script.push_str(cmd);
        script.push('\n');
    }
    script
}
/// Generate `script/test` content for a new project at `root`.
///
/// Inspects well-known marker files to identify which tech stacks are present
/// and emits the appropriate test commands. Multi-stack projects get combined
/// commands run sequentially. Falls back to the generic stub when no markers
/// are found so the scaffold is always valid.
///
/// For projects with a frontend in a known subdirectory (`frontend/`, `client/`),
/// the test runner is detected from the `package.json` (vitest, jest, npm, pnpm).
pub fn detect_script_test(root: &Path) -> String {
let mut commands: Vec<&str> = Vec::new();
let mut commands: Vec<String> = Vec::new();
if root.join("Cargo.toml").exists() {
commands.push("cargo test");
commands.push("cargo test".to_string());
}
if root.join("package.json").exists() {
if root.join("pnpm-lock.yaml").exists() {
commands.push("pnpm test");
commands.push("pnpm test".to_string());
} else {
commands.push("npm test");
commands.push("npm test".to_string());
}
}
// Detect frontend in known subdirectories (e.g. frontend/, client/)
for subdir in &["frontend", "client"] {
let sub_path = root.join(subdir);
if sub_path.join("package.json").exists() {
let cmd = detect_node_test_cmd(&sub_path);
commands.push(format!("(cd {} && {})", subdir, cmd));
}
}
if root.join("pyproject.toml").exists() || root.join("requirements.txt").exists() {
commands.push("pytest");
commands.push("pytest".to_string());
}
if root.join("go.mod").exists() {
commands.push("go test ./...");
commands.push("go test ./...".to_string());
}
if commands.is_empty() {
@@ -234,7 +403,7 @@ pub fn detect_script_test(root: &Path) -> String {
let mut script = "#!/usr/bin/env bash\nset -euo pipefail\n\n".to_string();
for cmd in commands {
script.push_str(cmd);
script.push_str(&cmd);
script.push('\n');
}
script
@@ -298,6 +467,8 @@ fn write_story_kit_gitignore(root: &Path) -> Result<(), String> {
"token_usage.jsonl",
"wizard_state.json",
"store.json",
"pipeline.db",
"*.db",
];
let gitignore_path = root.join(".huskies").join(".gitignore");
@@ -411,6 +582,10 @@ pub(crate) fn scaffold_story_kit(root: &Path, port: u16) -> Result<(), String> {
write_file_if_missing(&tech_root.join("STACK.md"), STORY_KIT_STACK)?;
let script_test_content = detect_script_test(root);
write_script_if_missing(&script_root.join("test"), &script_test_content)?;
let script_build_content = detect_script_build(root);
write_script_if_missing(&script_root.join("build"), &script_build_content)?;
let script_lint_content = detect_script_lint(root);
write_script_if_missing(&script_root.join("lint"), &script_lint_content)?;
write_file_if_missing(&root.join("CLAUDE.md"), STORY_KIT_CLAUDE_MD)?;
// Write per-transport bot.toml example files so users can see all options.
@@ -744,6 +919,9 @@ mod tests {
assert!(!root_content.contains(".huskies/coverage/"));
// store.json must be in .huskies/.gitignore instead
assert!(sk_content.contains("store.json"));
// Database files must be ignored so novice users don't accidentally commit them
assert!(sk_content.contains("pipeline.db"));
assert!(sk_content.contains("*.db"));
}
#[test]
@@ -1165,6 +1343,141 @@ mod tests {
);
}
#[test]
fn detect_script_test_frontend_subdir_with_vitest_uses_npx_vitest() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(
        fe_dir.join("package.json"),
        r#"{"devDependencies":{"vitest":"^1.0.0"},"scripts":{"test":"vitest run"}}"#,
    )
    .unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(
        generated.contains("vitest run"),
        "frontend with vitest should emit vitest run"
    );
    assert!(
        generated.contains("cd frontend"),
        "should cd into the frontend directory"
    );
    assert!(
        !generated.contains("No tests configured"),
        "should not use stub when frontend is detected"
    );
}

#[test]
fn detect_script_test_frontend_subdir_with_jest_uses_npx_jest() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(
        fe_dir.join("package.json"),
        r#"{"devDependencies":{"jest":"^29.0.0"},"scripts":{"test":"jest"}}"#,
    )
    .unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(generated.contains("jest"), "frontend with jest should emit jest");
    assert!(
        generated.contains("cd frontend"),
        "should cd into the frontend directory"
    );
}

#[test]
fn detect_script_test_frontend_subdir_no_known_runner_uses_npm_test() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(fe_dir.join("package.json"), r#"{"scripts":{"test":"mocha"}}"#).unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(
        generated.contains("npm test"),
        "frontend without known runner should fall back to npm test"
    );
    assert!(generated.contains("cd frontend"));
}

#[test]
fn detect_script_test_frontend_subdir_pnpm_uses_pnpm_vitest() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(
        fe_dir.join("package.json"),
        r#"{"devDependencies":{"vitest":"^1.0.0"}}"#,
    )
    .unwrap();
    fs::write(fe_dir.join("pnpm-lock.yaml"), "").unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(
        generated.contains("pnpm vitest run"),
        "pnpm frontend with vitest should use pnpm vitest run"
    );
}

#[test]
fn detect_script_test_rust_plus_frontend_subdir_both_included() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"server\"\n").unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(
        fe_dir.join("package.json"),
        r#"{"devDependencies":{"vitest":"^1.0.0"}}"#,
    )
    .unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(
        generated.contains("cargo test"),
        "Rust + frontend should include cargo test"
    );
    assert!(
        generated.contains("vitest run"),
        "Rust + frontend should include vitest run"
    );
    assert!(
        generated.contains("cd frontend"),
        "Rust + frontend should cd into frontend"
    );
}

#[test]
fn detect_script_test_client_subdir_detected() {
    let tmp = tempdir().unwrap();
    let client_dir = tmp.path().join("client");
    fs::create_dir_all(&client_dir).unwrap();
    fs::write(client_dir.join("package.json"), r#"{"scripts":{"test":"jest"}}"#).unwrap();

    let generated = detect_script_test(tmp.path());
    assert!(
        generated.contains("cd client"),
        "client/ subdir should also be detected"
    );
}
#[test]
fn detect_script_test_output_starts_with_shebang() {
let dir = tempdir().unwrap();
@@ -1211,6 +1524,347 @@ mod tests {
);
}
// --- detect_script_build ---

#[test]
fn detect_script_build_no_markers_returns_stub() {
    let tmp = tempdir().unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("No build configured"),
        "fallback should contain the generic stub message"
    );
    assert!(generated.starts_with("#!/usr/bin/env bash"));
}

#[test]
fn detect_script_build_cargo_toml_adds_cargo_build_release() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"x\"\n").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("cargo build --release"),
        "Rust project should run cargo build --release"
    );
    assert!(!generated.contains("No build configured"));
}

#[test]
fn detect_script_build_package_json_npm_adds_npm_run_build() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("package.json"), "{}").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("npm run build"),
        "Node project without pnpm-lock should run npm run build"
    );
}

#[test]
fn detect_script_build_package_json_pnpm_adds_pnpm_run_build() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("package.json"), "{}").unwrap();
    fs::write(tmp.path().join("pnpm-lock.yaml"), "").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("pnpm run build"),
        "Node project with pnpm-lock should run pnpm run build"
    );
    assert!(
        !generated.lines().any(|line| line.trim() == "npm run build"),
        "should not use npm when pnpm-lock.yaml is present"
    );
}

#[test]
fn detect_script_build_go_mod_adds_go_build() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("go.mod"), "module example.com/app\n").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("go build ./..."),
        "Go project should run go build ./..."
    );
}

#[test]
fn detect_script_build_pyproject_toml_adds_python_build() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("pyproject.toml"), "[project]\nname = \"x\"\n").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("python -m build"),
        "Python project should run python -m build"
    );
}

#[test]
fn detect_script_build_frontend_subdir_detected() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(fe_dir.join("package.json"), "{}").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(
        generated.contains("cd frontend"),
        "frontend subdir should be detected for build"
    );
    assert!(generated.contains("npm run build"));
}

#[test]
fn detect_script_build_rust_plus_frontend_subdir_both_included() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"server\"\n").unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(fe_dir.join("package.json"), "{}").unwrap();
    let generated = detect_script_build(tmp.path());
    assert!(generated.contains("cargo build --release"));
    assert!(generated.contains("cd frontend"));
    assert!(generated.contains("npm run build"));
}
// --- detect_script_lint ---

#[test]
fn detect_script_lint_no_markers_returns_stub() {
    let tmp = tempdir().unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("No linters configured"),
        "fallback should contain the generic stub message"
    );
    assert!(generated.starts_with("#!/usr/bin/env bash"));
}

#[test]
fn detect_script_lint_cargo_toml_adds_fmt_and_clippy() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"x\"\n").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("cargo fmt --all --check"),
        "Rust project should check formatting"
    );
    assert!(
        generated.contains("cargo clippy -- -D warnings"),
        "Rust project should run clippy"
    );
    assert!(!generated.contains("No linters configured"));
}

#[test]
fn detect_script_lint_package_json_without_eslint_uses_npm_run_lint() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("package.json"), "{}").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("npm run lint"),
        "Node project without eslint dep should fall back to npm run lint"
    );
}

#[test]
fn detect_script_lint_package_json_with_eslint_uses_npx_eslint() {
    let tmp = tempdir().unwrap();
    fs::write(
        tmp.path().join("package.json"),
        r#"{"devDependencies":{"eslint":"^8.0.0"}}"#,
    )
    .unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("npx eslint ."),
        "Node project with eslint should use npx eslint ."
    );
}

#[test]
fn detect_script_lint_pnpm_with_eslint_uses_pnpm_eslint() {
    let tmp = tempdir().unwrap();
    fs::write(
        tmp.path().join("package.json"),
        r#"{"devDependencies":{"eslint":"^8.0.0"}}"#,
    )
    .unwrap();
    fs::write(tmp.path().join("pnpm-lock.yaml"), "").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("pnpm eslint ."),
        "pnpm project with eslint should use pnpm eslint ."
    );
}

#[test]
fn detect_script_lint_python_requirements_uses_flake8() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("requirements.txt"), "flask\n").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("flake8 ."),
        "Python project without ruff should use flake8"
    );
}

#[test]
fn detect_script_lint_python_with_ruff_uses_ruff() {
    let tmp = tempdir().unwrap();
    fs::write(
        tmp.path().join("pyproject.toml"),
        "[project]\nname = \"x\"\n\n[tool.ruff]\n",
    )
    .unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("ruff check ."),
        "Python project with ruff configured should use ruff"
    );
    assert!(
        !generated.contains("flake8"),
        "should not use flake8 when ruff is configured"
    );
}

#[test]
fn detect_script_lint_go_mod_adds_go_vet() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("go.mod"), "module example.com/app\n").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("go vet ./..."),
        "Go project should run go vet ./..."
    );
}

#[test]
fn detect_script_lint_frontend_subdir_detected() {
    let tmp = tempdir().unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(fe_dir.join("package.json"), "{}").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(
        generated.contains("cd frontend"),
        "frontend subdir should be detected for lint"
    );
}

#[test]
fn detect_script_lint_rust_plus_frontend_subdir_both_included() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"server\"\n").unwrap();
    let fe_dir = tmp.path().join("frontend");
    fs::create_dir_all(&fe_dir).unwrap();
    fs::write(fe_dir.join("package.json"), "{}").unwrap();
    let generated = detect_script_lint(tmp.path());
    assert!(generated.contains("cargo fmt --all --check"));
    assert!(generated.contains("cargo clippy -- -D warnings"));
    assert!(generated.contains("cd frontend"));
}
#[test]
fn scaffold_story_kit_creates_script_build_and_lint() {
    let tmp = tempdir().unwrap();
    scaffold_story_kit(tmp.path(), 3001).unwrap();
    assert!(
        tmp.path().join("script/build").exists(),
        "script/build should be created by scaffold"
    );
    assert!(
        tmp.path().join("script/lint").exists(),
        "script/lint should be created by scaffold"
    );
}

#[cfg(unix)]
#[test]
fn scaffold_story_kit_creates_executable_script_build_and_lint() {
    use std::os::unix::fs::PermissionsExt;

    let tmp = tempdir().unwrap();
    scaffold_story_kit(tmp.path(), 3001).unwrap();
    for name in ["build", "lint"] {
        let script_path = tmp.path().join("script").join(name);
        assert!(script_path.exists(), "script/{name} should be created");
        let mode = fs::metadata(&script_path).unwrap().permissions().mode();
        assert!(mode & 0o111 != 0, "script/{name} should be executable");
    }
}

#[test]
fn scaffold_script_build_contains_detected_commands_for_rust() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"myapp\"\n").unwrap();
    scaffold_story_kit(tmp.path(), 3001).unwrap();
    let script_body = fs::read_to_string(tmp.path().join("script/build")).unwrap();
    assert!(
        script_body.contains("cargo build --release"),
        "Rust project scaffold should set cargo build --release in script/build"
    );
}

#[test]
fn scaffold_script_lint_contains_detected_commands_for_rust() {
    let tmp = tempdir().unwrap();
    fs::write(tmp.path().join("Cargo.toml"), "[package]\nname = \"myapp\"\n").unwrap();
    scaffold_story_kit(tmp.path(), 3001).unwrap();
    let script_body = fs::read_to_string(tmp.path().join("script/lint")).unwrap();
    assert!(
        script_body.contains("cargo fmt --all --check"),
        "Rust project scaffold should include fmt check in script/lint"
    );
    assert!(
        script_body.contains("cargo clippy -- -D warnings"),
        "Rust project scaffold should include clippy in script/lint"
    );
}
// --- generate_project_toml ---
#[test]
+1 -1
View File
@@ -5,7 +5,7 @@ use std::path::Path;
/// Only untouched templates contain this marker — real project content
/// will never include it, so it avoids false positives when the project
/// itself is an "Agentic AI Code Assistant".
const TEMPLATE_SENTINEL: &str = "<!-- huskies:scaffold-template -->";
pub(crate) const TEMPLATE_SENTINEL: &str = "<!-- huskies:scaffold-template -->";
/// Marker found in the default `script/test` scaffold output.
const TEMPLATE_MARKER_SCRIPT: &str = "No tests configured";
+20
View File
@@ -57,6 +57,9 @@ pub struct StoryMetadata {
/// Story numbers this story depends on. Auto-assign will skip this story
/// until all dependencies have reached `5_done` or `6_archived`.
pub depends_on: Option<Vec<u32>>,
/// When `true`, the story is frozen: auto-assign skips it, the pipeline
/// does not advance it, and no mergemaster is spawned.
pub frozen: Option<bool>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -89,6 +92,8 @@ struct FrontMatter {
blocked: Option<bool>,
/// Story numbers this story depends on.
depends_on: Option<Vec<u32>>,
/// When `true`, the story is frozen.
frozen: Option<bool>,
}
pub fn parse_front_matter(contents: &str) -> Result<StoryMetadata, StoryMetaError> {
@@ -129,6 +134,7 @@ fn build_metadata(front: FrontMatter) -> StoryMetadata {
retry_count: front.retry_count,
blocked: front.blocked,
depends_on: front.depends_on,
frozen: front.frozen,
}
}
@@ -439,6 +445,20 @@ pub fn increment_retry_count_in_content(contents: &str) -> (String, u32) {
(updated, new_count)
}
/// Return `true` if the story has `frozen: true` in the content store.
///
/// Used by the pipeline advance code to suppress stage transitions for frozen stories.
pub fn is_story_frozen_in_store(story_id: &str) -> bool {
    let Some(contents) = crate::db::read_content(story_id) else {
        // No stored content means nothing can be frozen.
        return false;
    };
    // Unparseable front matter, or a missing `frozen` field, counts as not frozen.
    parse_front_matter(&contents)
        .map(|meta| meta.frozen.unwrap_or(false))
        .unwrap_or(false)
}
/// Write `blocked: true` to story content (pure function).
pub fn write_blocked_in_content(contents: &str) -> String {
set_front_matter_field(contents, "blocked", "true")
+11 -3
View File
@@ -16,9 +16,13 @@ pub enum WizardStep {
Stack,
/// Step 4: create script/test
TestScript,
/// Step 5: create script/release
/// Step 5: create script/build
BuildScript,
/// Step 6: create script/lint
LintScript,
/// Step 7: create script/release
ReleaseScript,
/// Step 6: create script/test_coverage
/// Step 8: create script/test_coverage
TestCoverage,
}
@@ -29,6 +33,8 @@ impl WizardStep {
WizardStep::Context,
WizardStep::Stack,
WizardStep::TestScript,
WizardStep::BuildScript,
WizardStep::LintScript,
WizardStep::ReleaseScript,
WizardStep::TestCoverage,
];
@@ -40,6 +46,8 @@ impl WizardStep {
WizardStep::Context => "Generate project context (00_CONTEXT.md)",
WizardStep::Stack => "Generate tech stack spec (STACK.md)",
WizardStep::TestScript => "Create test script (script/test)",
WizardStep::BuildScript => "Create build script (script/build)",
WizardStep::LintScript => "Create lint script (script/lint)",
WizardStep::ReleaseScript => "Create release script (script/release)",
WizardStep::TestCoverage => "Create test coverage script (script/test_coverage)",
}
@@ -262,7 +270,7 @@ mod tests {
#[test]
fn default_state_has_all_steps_pending() {
let state = WizardState::default();
assert_eq!(state.steps.len(), 6);
assert_eq!(state.steps.len(), 8);
for step in &state.steps {
assert_eq!(step.status, StepStatus::Pending);
}