Restore codebase deleted by bad auto-commit e4227cf
Commit e4227cf (a story creation auto-commit) erroneously deleted 175
files from master's tree, likely due to a race condition between
concurrent git operations. This commit re-adds all files from the
working directory.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
73
.claude/settings.json
Normal file
73
.claude/settings.json
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
{
|
||||||
|
"enabledMcpjsonServers": ["storkit"],
|
||||||
|
"permissions": {
|
||||||
|
"allow": [
|
||||||
|
"Bash(./server/target/debug/storkit:*)",
|
||||||
|
"Bash(./target/debug/storkit:*)",
|
||||||
|
"Bash(STORKIT_PORT=*)",
|
||||||
|
"Bash(cargo build:*)",
|
||||||
|
"Bash(cargo check:*)",
|
||||||
|
"Bash(cargo clippy:*)",
|
||||||
|
"Bash(cargo doc:*)",
|
||||||
|
"Bash(cargo llvm-cov:*)",
|
||||||
|
"Bash(cargo nextest run:*)",
|
||||||
|
"Bash(cargo run:*)",
|
||||||
|
"Bash(cargo test:*)",
|
||||||
|
"Bash(cargo watch:*)",
|
||||||
|
"Bash(cd *)",
|
||||||
|
"Bash(claude:*)",
|
||||||
|
"Bash(curl:*)",
|
||||||
|
"Bash(echo:*)",
|
||||||
|
"Bash(env:*)",
|
||||||
|
"Bash(git *)",
|
||||||
|
"Bash(grep:*)",
|
||||||
|
"Bash(kill *)",
|
||||||
|
"Bash(ls *)",
|
||||||
|
"Bash(lsof *)",
|
||||||
|
"Bash(mkdir *)",
|
||||||
|
"Bash(mv *)",
|
||||||
|
"Bash(npm run build:*)",
|
||||||
|
"Bash(npx @biomejs/biome check:*)",
|
||||||
|
"Bash(npx @playwright/test test:*)",
|
||||||
|
"Bash(npx biome check:*)",
|
||||||
|
"Bash(npx playwright test:*)",
|
||||||
|
"Bash(npx tsc:*)",
|
||||||
|
"Bash(npx vitest:*)",
|
||||||
|
"Bash(pnpm add:*)",
|
||||||
|
"Bash(pnpm build:*)",
|
||||||
|
"Bash(pnpm dev:*)",
|
||||||
|
"Bash(pnpm install:*)",
|
||||||
|
"Bash(pnpm run build:*)",
|
||||||
|
"Bash(pnpm run test:*)",
|
||||||
|
"Bash(pnpm test:*)",
|
||||||
|
"Bash(printf:*)",
|
||||||
|
"Bash(ps *)",
|
||||||
|
"Bash(python3:*)",
|
||||||
|
"Bash(pwd *)",
|
||||||
|
"Bash(rm *)",
|
||||||
|
"Bash(sleep *)",
|
||||||
|
"Bash(touch *)",
|
||||||
|
"Bash(xargs:*)",
|
||||||
|
"WebFetch(domain:crates.io)",
|
||||||
|
"WebFetch(domain:docs.rs)",
|
||||||
|
"WebFetch(domain:github.com)",
|
||||||
|
"WebFetch(domain:portkey.ai)",
|
||||||
|
"WebFetch(domain:www.shuttle.dev)",
|
||||||
|
"WebSearch",
|
||||||
|
"mcp__storkit__*",
|
||||||
|
"Edit",
|
||||||
|
"Write",
|
||||||
|
"Bash(find *)",
|
||||||
|
"Bash(sqlite3 *)",
|
||||||
|
"Bash(cat <<:*)",
|
||||||
|
"Bash(cat <<'ENDJSON:*)",
|
||||||
|
"Bash(make release:*)",
|
||||||
|
"Bash(npm test:*)",
|
||||||
|
"Bash(head *)",
|
||||||
|
"Bash(tail *)",
|
||||||
|
"Bash(wc *)",
|
||||||
|
"Bash(npx vite:*)",
|
||||||
|
"Bash(npm run dev:*)"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
2
.coverage_baseline
Normal file
2
.coverage_baseline
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
60.00
|
||||||
|
65.21
|
||||||
11
.dockerignore
Normal file
11
.dockerignore
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# Docker build context exclusions
|
||||||
|
**/target/
|
||||||
|
**/node_modules/
|
||||||
|
frontend/dist/
|
||||||
|
.storkit/worktrees/
|
||||||
|
.storkit/logs/
|
||||||
|
.storkit/work/6_archived/
|
||||||
|
.git/
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
.DS_Store
|
||||||
46
.gitignore
vendored
Normal file
46
.gitignore
vendored
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
# Claude Code
|
||||||
|
.claude/settings.local.json
|
||||||
|
.mcp.json
|
||||||
|
|
||||||
|
# Local environment (secrets)
|
||||||
|
.env
|
||||||
|
|
||||||
|
# App specific (root-level; storkit subdirectory patterns live in .storkit/.gitignore)
|
||||||
|
store.json
|
||||||
|
.storkit_port
|
||||||
|
|
||||||
|
# Rust stuff
|
||||||
|
target
|
||||||
|
|
||||||
|
# Logs
|
||||||
|
logs
|
||||||
|
*.log
|
||||||
|
npm-debug.log*
|
||||||
|
yarn-debug.log*
|
||||||
|
yarn-error.log*
|
||||||
|
pnpm-debug.log*
|
||||||
|
lerna-debug.log*
|
||||||
|
|
||||||
|
node_modules
|
||||||
|
frontend/node_modules
|
||||||
|
frontend/dist
|
||||||
|
frontend/dist-ssr
|
||||||
|
frontend/test-results
|
||||||
|
frontend/serve
|
||||||
|
frontend/*.local
|
||||||
|
server/target
|
||||||
|
|
||||||
|
# Editor directories and files
|
||||||
|
.vscode/*
|
||||||
|
!.vscode/extensions.json
|
||||||
|
.idea
|
||||||
|
.DS_Store
|
||||||
|
|
||||||
|
# Vite/Vitest cache
|
||||||
|
.vite/
|
||||||
|
*.suo
|
||||||
|
*.ntvs*
|
||||||
|
*.njsproj
|
||||||
|
*.sln
|
||||||
|
*.sw?
|
||||||
|
/test-results/.last-run.json
|
||||||
8
.ignore
Normal file
8
.ignore
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
# Prevents cargo watch from restarting the server on non-Rust file changes
|
||||||
|
frontend/
|
||||||
|
node_modules/
|
||||||
|
.claude/
|
||||||
|
.git/
|
||||||
|
.storkit/
|
||||||
|
store.json
|
||||||
|
.storkit_port
|
||||||
22
.storkit/.gitignore
vendored
Normal file
22
.storkit/.gitignore
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
# Bot config (contains credentials)
|
||||||
|
bot.toml
|
||||||
|
|
||||||
|
# Matrix SDK state store
|
||||||
|
matrix_store/
|
||||||
|
matrix_device_id
|
||||||
|
matrix_history.json
|
||||||
|
|
||||||
|
# Agent worktrees and merge workspace (managed by the server, not tracked in git)
|
||||||
|
worktrees/
|
||||||
|
merge_workspace/
|
||||||
|
|
||||||
|
# Intermediate pipeline stages (transient, not committed per spike 92)
|
||||||
|
work/2_current/
|
||||||
|
work/3_qa/
|
||||||
|
work/4_merge/
|
||||||
|
|
||||||
|
# Coverage reports (generated by cargo-llvm-cov, not tracked in git)
|
||||||
|
coverage/
|
||||||
|
|
||||||
|
# Token usage log (generated at runtime, contains cost data)
|
||||||
|
token_usage.jsonl
|
||||||
239
.storkit/README.md
Normal file
239
.storkit/README.md
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
# Story Kit: The Story-Driven Test Workflow (SDTW)
|
||||||
|
|
||||||
|
**Target Audience:** Large Language Models (LLMs) acting as Senior Engineers.
|
||||||
|
**Goal:** To maintain long-term project coherence, prevent context window exhaustion, and ensure high-quality, testable code generation in large software projects.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 0. First Steps (For New LLM Sessions)
|
||||||
|
|
||||||
|
When you start a new session with this project:
|
||||||
|
|
||||||
|
1. **Check for MCP Tools:** Read `.mcp.json` to discover the MCP server endpoint. Then list available tools by calling:
|
||||||
|
```bash
|
||||||
|
curl -s "$(jq -r '.mcpServers["storkit"].url' .mcp.json)" \
|
||||||
|
-H 'Content-Type: application/json' \
|
||||||
|
-d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}'
|
||||||
|
```
|
||||||
|
This returns the full tool catalog (create stories, spawn agents, record tests, manage worktrees, etc.). Familiarize yourself with the available tools before proceeding. These tools allow you to directly manipulate the workflow and spawn subsidiary agents without manual file manipulation.
|
||||||
|
2. **Read Context:** Check `.story_kit/specs/00_CONTEXT.md` for high-level project goals.
|
||||||
|
3. **Read Stack:** Check `.story_kit/specs/tech/STACK.md` for technical constraints and patterns.
|
||||||
|
4. **Check Work Items:** Look at `.story_kit/work/1_backlog/` and `.story_kit/work/2_current/` to see what work is pending.
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1. The Philosophy
|
||||||
|
|
||||||
|
We treat the codebase as the implementation of a **"Living Specification."** driven by **User Stories**
|
||||||
|
Instead of ephemeral chat prompts ("Fix this", "Add that"), we work through persistent artifacts.
|
||||||
|
* **Stories** define the *Change*.
|
||||||
|
* **Tests** define the *Truth*.
|
||||||
|
* **Code** defines the *Reality*.
|
||||||
|
|
||||||
|
**The Golden Rule:** You are not allowed to write code until the Acceptance Criteria are captured in the story.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 1.5 MCP Tools
|
||||||
|
|
||||||
|
Agents have programmatic access to the workflow via MCP tools served at `POST /mcp`. The project `.mcp.json` registers this endpoint automatically so Claude Code sessions and spawned agents can call tools like `create_story`, `validate_stories`, `list_upcoming`, `get_story_todos`, `record_tests`, `ensure_acceptance`, `start_agent`, `stop_agent`, `list_agents`, and `get_agent_output` without parsing English instructions.
|
||||||
|
|
||||||
|
**To discover what tools are available:** Check `.mcp.json` for the server endpoint, then use the MCP protocol to list available tools.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 2. Directory Structure
|
||||||
|
|
||||||
|
```text
|
||||||
|
project_root/
|
||||||
|
.mcp.json # MCP server configuration (if MCP tools are available)
|
||||||
|
.story_kit/
|
||||||
|
├── README.md # This document
|
||||||
|
├── project.toml # Agent configuration (roles, models, prompts)
|
||||||
|
├── work/ # Unified work item pipeline (stories, bugs, spikes)
|
||||||
|
│ ├── 1_backlog/ # New work items awaiting implementation
|
||||||
|
│ ├── 2_current/ # Work in progress
|
||||||
|
│ ├── 3_qa/ # QA review
|
||||||
|
│ ├── 4_merge/ # Ready to merge to master
|
||||||
|
│ ├── 5_done/ # Merged and completed (auto-swept to 6_archived after 4 hours)
|
||||||
|
│ └── 6_archived/ # Long-term archive
|
||||||
|
├── worktrees/ # Agent worktrees (managed by the server)
|
||||||
|
├── specs/ # Minimal guardrails (context + stack)
|
||||||
|
│ ├── 00_CONTEXT.md # High-level goals, domain definition, and glossary
|
||||||
|
│ ├── tech/ # Implementation details (Stack, Architecture, Constraints)
|
||||||
|
│ │ └── STACK.md # The "Constitution" (Languages, Libs, Patterns)
|
||||||
|
│ └── functional/ # Domain logic (Platform-agnostic behavior)
|
||||||
|
│ └── ...
|
||||||
|
└── src/ # The Code
|
||||||
|
```
|
||||||
|
|
||||||
|
### Work Items
|
||||||
|
|
||||||
|
All work items (stories, bugs, spikes) live in the same `work/` pipeline. Items are named: `{id}_{type}_{slug}.md`
|
||||||
|
|
||||||
|
* Stories: `57_story_live_test_gate_updates.md`
|
||||||
|
* Bugs: `4_bug_run_button_does_not_start_agent.md`
|
||||||
|
* Spikes: `61_spike_filesystem_watcher_architecture.md`
|
||||||
|
|
||||||
|
Items move through stages by moving the file between directories:
|
||||||
|
|
||||||
|
`1_backlog` → `2_current` → `3_qa` → `4_merge` → `5_done` → `6_archived`
|
||||||
|
|
||||||
|
Items in `5_done` are auto-swept to `6_archived` after 4 hours by the server.
|
||||||
|
|
||||||
|
### Filesystem Watcher
|
||||||
|
|
||||||
|
The server watches `.story_kit/work/` for changes. When a file is created, moved, or modified, the watcher auto-commits with a deterministic message and broadcasts a WebSocket notification to the frontend. This means:
|
||||||
|
|
||||||
|
* MCP tools only need to write/move files — the watcher handles git commits
|
||||||
|
* IDE drag-and-drop works (drag a story from `1_backlog/` to `2_current/`)
|
||||||
|
* The frontend updates automatically without manual refresh
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3. The Cycle (The "Loop")
|
||||||
|
|
||||||
|
When the user asks for a feature, follow this 4-step loop strictly:
|
||||||
|
|
||||||
|
### Step 1: The Story (Ingest)
|
||||||
|
* **User Input:** "I want the robot to dance."
|
||||||
|
* **Action:** Create a story via MCP tool `create_story` (guarantees correct front matter and auto-assigns the story number).
|
||||||
|
* **Front Matter (Required):** Every work item file MUST begin with YAML front matter containing a `name` field:
|
||||||
|
```yaml
|
||||||
|
---
|
||||||
|
name: Short Human-Readable Story Name
|
||||||
|
---
|
||||||
|
```
|
||||||
|
* **Move to Current:** Once the story is validated and ready for coding, move it to `work/2_current/`.
|
||||||
|
* **Tracking:** Mark Acceptance Criteria as tested directly in the story file as tests are completed.
|
||||||
|
* **Content:**
|
||||||
|
* **User Story:** "As a user, I want..."
|
||||||
|
* **Acceptance Criteria:** Bullet points of observable success.
|
||||||
|
* **Out of scope:** Things that are out of scope so that the LLM doesn't go crazy
|
||||||
|
* **Story Quality (INVEST):** Stories should be Independent, Negotiable, Valuable, Estimable, Small, and Testable.
|
||||||
|
* **Git:** The `start_agent` MCP tool automatically creates a worktree under `.story_kit/worktrees/`, checks out a feature branch, moves the story to `work/2_current/`, and spawns the agent. No manual branch or worktree creation is needed.
|
||||||
|
|
||||||
|
### Step 2: The Implementation (Code)
|
||||||
|
* **Action:** Write the code to satisfy the approved tests and Acceptance Criteria.
|
||||||
|
* **Constraint:** adhere strictly to `specs/tech/STACK.md` (e.g., if it forbids certain patterns, you must not use them).
|
||||||
|
* **Full-Stack Completion:** Every story must be completed across all components of the stack. If a feature touches the backend, frontend, and API layer, all three must be fully implemented and working end-to-end before the story can be accepted. Partial implementations (e.g., backend logic with no frontend wiring, or UI scaffolding with no real data) do not satisfy acceptance criteria.
|
||||||
|
|
||||||
|
### Step 3: Verification (Close)
|
||||||
|
* **Action:** For each Acceptance Criterion in the story, write a failing test (red), mark the criterion as tested, make the test pass (green), and refactor if needed. Keep only one failing test at a time.
|
||||||
|
* **Action:** Run compilation and make sure it succeeds without errors. Consult `specs/tech/STACK.md` and run all required linters listed there (treat warnings as errors). Run tests and make sure they all pass before proceeding. Ask questions here if needed.
|
||||||
|
* **Action:** Do not accept stories yourself. Ask the user if they accept the story. If they agree, move the story file to `work/5_done/`.
|
||||||
|
* **Move to Done:** After acceptance, move the story from `work/2_current/` (or `work/4_merge/`) to `work/5_done/`.
|
||||||
|
* **Action:** When the user accepts:
|
||||||
|
1. Move the story file to `work/5_done/`
|
||||||
|
2. Commit both changes to the feature branch
|
||||||
|
3. Perform the squash merge: `git merge --squash feature/story-name`
|
||||||
|
4. Commit to master with a comprehensive commit message
|
||||||
|
5. Delete the feature branch: `git branch -D feature/story-name`
|
||||||
|
* **Important:** Do NOT mark acceptance criteria as complete before user acceptance. Only mark them complete when the user explicitly accepts the story.
|
||||||
|
|
||||||
|
**CRITICAL - NO SUMMARY DOCUMENTS:**
|
||||||
|
* **NEVER** create a separate summary document (e.g., `STORY_XX_SUMMARY.md`, `IMPLEMENTATION_NOTES.md`, etc.)
|
||||||
|
* **NEVER** write terminal output to a markdown file for "documentation purposes"
|
||||||
|
* Tests are the primary source of truth. Keep test coverage and Acceptance Criteria aligned after each story.
|
||||||
|
* If you find yourself typing `cat << 'EOF' > SUMMARY.md` or similar, **STOP IMMEDIATELY**.
|
||||||
|
* The only files that should exist after story completion:
|
||||||
|
* Updated code in `src/`
|
||||||
|
* Updated guardrails in `specs/` (if needed)
|
||||||
|
* Archived work item in `work/5_done/` (server auto-sweeps to `work/6_archived/` after 4 hours)
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
## 3.5. Bug Workflow (Simplified Path)
|
||||||
|
|
||||||
|
Not everything needs to be a full story. Simple bugs can skip the story process:
|
||||||
|
|
||||||
|
### When to Use Bug Workflow
|
||||||
|
* Defects in existing functionality (not new features)
|
||||||
|
* State inconsistencies or data corruption
|
||||||
|
* UI glitches that don't require spec changes
|
||||||
|
* Performance issues with known fixes
|
||||||
|
|
||||||
|
### Bug Process
|
||||||
|
1. **Document Bug:** Create a bug file in `work/1_backlog/` named `{id}_bug_{slug}.md` with:
|
||||||
|
* **Symptom:** What the user observes
|
||||||
|
* **Root Cause:** Technical explanation (if known)
|
||||||
|
* **Reproduction Steps:** How to trigger the bug
|
||||||
|
* **Proposed Fix:** Brief technical approach
|
||||||
|
* **Workaround:** Temporary solution if available
|
||||||
|
2. **Start an Agent:** Use the `start_agent` MCP tool to create a worktree and spawn an agent for the bug fix.
|
||||||
|
3. **Write a Failing Test:** Before fixing the bug, write a test that reproduces it (red). This proves the bug exists and prevents regression.
|
||||||
|
4. **Fix the Bug:** Make minimal code changes to make the test pass (green).
|
||||||
|
5. **User Testing:** Let the user verify the fix in the worktree before merging. Do not proceed until they confirm.
|
||||||
|
6. **Archive & Merge:** Move the bug file to `work/5_done/`, squash merge to master, delete the worktree and branch.
|
||||||
|
7. **No Guardrail Update Needed:** Unless the bug reveals a missing constraint
|
||||||
|
|
||||||
|
### Bug vs Story vs Spike
|
||||||
|
* **Bug:** Existing functionality is broken → Fix it
|
||||||
|
* **Story:** New functionality is needed → Test it, then build it
|
||||||
|
* **Spike:** Uncertainty/feasibility discovery → Run spike workflow
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 3.6. Spike Workflow (Research Path)
|
||||||
|
|
||||||
|
Not everything needs a story or bug fix. Spikes are time-boxed investigations to reduce uncertainty.
|
||||||
|
|
||||||
|
### When to Use a Spike
|
||||||
|
* Unclear root cause or feasibility
|
||||||
|
* Need to compare libraries/encoders/formats
|
||||||
|
* Need to validate performance constraints
|
||||||
|
|
||||||
|
### Spike Process
|
||||||
|
1. **Document Spike:** Create a spike file in `work/1_backlog/` named `{id}_spike_{slug}.md` with:
|
||||||
|
* **Question:** What you need to answer
|
||||||
|
* **Hypothesis:** What you expect to be true
|
||||||
|
* **Timebox:** Strict limit for the research
|
||||||
|
* **Investigation Plan:** Steps/tools to use
|
||||||
|
* **Findings:** Evidence and observations
|
||||||
|
* **Recommendation:** Next step (Story, Bug, or No Action)
|
||||||
|
2. **Execute Research:** Stay within the timebox. No production code changes.
|
||||||
|
3. **Escalate if Needed:** If implementation is required, open a Story or Bug and follow that workflow.
|
||||||
|
4. **Archive:** Move the spike file to `work/5_done/`.
|
||||||
|
|
||||||
|
### Spike Output
|
||||||
|
* Decision and evidence, not production code
|
||||||
|
* Specs updated only if the spike changes system truth
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 4. Context Reset Protocol
|
||||||
|
|
||||||
|
When the LLM context window fills up (or the chat gets slow/confused):
|
||||||
|
1. **Stop Coding.**
|
||||||
|
2. **Instruction:** Tell the user to open a new chat.
|
||||||
|
3. **Handoff:** The only context the new LLM needs is in the `specs/` folder and `.mcp.json`.
|
||||||
|
* *Prompt for New Session:* "I am working on Project X. Read `.mcp.json` to discover available tools, then read `specs/00_CONTEXT.md` and `specs/tech/STACK.md`. Then look at `work/1_backlog/` and `work/2_current/` to see what is pending."
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 5. Setup Instructions (For the LLM)
|
||||||
|
|
||||||
|
If a user hands you this document and says "Apply this process to my project":
|
||||||
|
|
||||||
|
1. **Check for MCP Tools:** Look for `.mcp.json` in the project root. If it exists, you have programmatic access to workflow tools and agent spawning capabilities.
|
||||||
|
2. **Analyze the Request:** Ask for the high-level goal ("What are we building?") and the tech preferences ("Rust or Python?").
|
||||||
|
3. **Git Check:** Check if the directory is a git repository (`git status`). If not, run `git init`.
|
||||||
|
4. **Scaffold:** Run commands to create the `work/` and `specs/` folders with the 6-stage pipeline (`work/1_backlog/` through `work/6_archived/`).
|
||||||
|
5. **Draft Context:** Write `specs/00_CONTEXT.md` based on the user's answer.
|
||||||
|
6. **Draft Stack:** Write `specs/tech/STACK.md` based on best practices for that language.
|
||||||
|
7. **Wait:** Ask the user for "Story #1".
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 6. Code Quality
|
||||||
|
|
||||||
|
**MANDATORY:** Before completing Step 3 (Verification) of any story, you MUST run all applicable linters, formatters, and test suites and fix ALL errors and warnings. Zero tolerance for warnings or errors.
|
||||||
|
|
||||||
|
**AUTO-RUN CHECKS:** Always run the required lint/test/build checks as soon as relevant changes are made. Do not ask for permission to run them—run them automatically and fix any failures.
|
||||||
|
|
||||||
|
**ALWAYS FIX DIAGNOSTICS:** At every stage, you must proactively fix all errors and warnings without waiting for user confirmation. Do not pause to ask whether to fix diagnostics—fix them immediately as part of the workflow.
|
||||||
|
|
||||||
|
**Consult `specs/tech/STACK.md`** for the specific tools, commands, linter configurations, and quality gates for this project. The STACK file is the single source of truth for what must pass before a story can be accepted.
|
||||||
61
.storkit/bot.toml.example
Normal file
61
.storkit/bot.toml.example
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
homeserver = "https://matrix.example.com"
|
||||||
|
username = "@botname:example.com"
|
||||||
|
password = "your-bot-password"
|
||||||
|
|
||||||
|
# List one or more rooms to listen in. Use a single-element list for one room.
|
||||||
|
room_ids = ["!roomid:example.com"]
|
||||||
|
|
||||||
|
# Optional: the deprecated single-room key is still accepted for backwards compat.
|
||||||
|
# room_id = "!roomid:example.com"
|
||||||
|
|
||||||
|
allowed_users = ["@youruser:example.com"]
|
||||||
|
enabled = false
|
||||||
|
|
||||||
|
# Maximum conversation turns to remember per room (default: 20).
|
||||||
|
# history_size = 20
|
||||||
|
|
||||||
|
# Rooms where the bot responds to all messages (not just addressed ones).
|
||||||
|
# This list is updated automatically when users toggle ambient mode at runtime.
|
||||||
|
# ambient_rooms = ["!roomid:example.com"]
|
||||||
|
|
||||||
|
# ── WhatsApp Business API ──────────────────────────────────────────────
|
||||||
|
# Set transport = "whatsapp" to use WhatsApp instead of Matrix.
|
||||||
|
# The webhook endpoint will be available at /webhook/whatsapp.
|
||||||
|
# You must configure this URL in the Meta Developer Dashboard.
|
||||||
|
#
|
||||||
|
# transport = "whatsapp"
|
||||||
|
# whatsapp_phone_number_id = "123456789012345"
|
||||||
|
# whatsapp_access_token = "EAAx..."
|
||||||
|
# whatsapp_verify_token = "my-secret-verify-token"
|
||||||
|
#
|
||||||
|
# ── 24-hour messaging window & notification templates ─────────────────
|
||||||
|
# WhatsApp only allows free-form text messages within 24 hours of the last
|
||||||
|
# inbound message from a user. For proactive pipeline notifications sent
|
||||||
|
# after the window expires, an approved Meta message template is used.
|
||||||
|
#
|
||||||
|
# Register the template in the Meta Business Manager:
|
||||||
|
# 1. Go to Business Settings → WhatsApp → Message Templates → Create.
|
||||||
|
# 2. Category: UTILITY
|
||||||
|
# 3. Template name: pipeline_notification (or your chosen name below)
|
||||||
|
# 4. Language: English (en_US)
|
||||||
|
# 5. Body text (example):
|
||||||
|
# Story *{{1}}* has moved to *{{2}}*.
|
||||||
|
# Where {{1}} = story name, {{2}} = pipeline stage.
|
||||||
|
# 6. Submit for review. Meta typically approves utility templates within
|
||||||
|
# minutes; transactional categories may take longer.
|
||||||
|
#
|
||||||
|
# Once approved, set the name below (default: "pipeline_notification"):
|
||||||
|
# whatsapp_notification_template = "pipeline_notification"
|
||||||
|
|
||||||
|
# ── Slack Bot API ─────────────────────────────────────────────────────
|
||||||
|
# Set transport = "slack" to use Slack instead of Matrix.
|
||||||
|
# The webhook endpoint will be available at /webhook/slack.
|
||||||
|
# Configure this URL in the Slack App → Event Subscriptions → Request URL.
|
||||||
|
#
|
||||||
|
# Required Slack App scopes: chat:write, chat:update
|
||||||
|
# Subscribe to bot events: message.channels, message.groups, message.im
|
||||||
|
#
|
||||||
|
# transport = "slack"
|
||||||
|
# slack_bot_token = "xoxb-..."
|
||||||
|
# slack_signing_secret = "your-signing-secret"
|
||||||
|
# slack_channel_ids = ["C01ABCDEF"]
|
||||||
28
.storkit/problems.md
Normal file
28
.storkit/problems.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# Problems
|
||||||
|
|
||||||
|
Recurring issues observed during pipeline operation. Review periodically and create stories for systemic problems.
|
||||||
|
|
||||||
|
## 2026-03-18: Stories graduating to "done" with empty merges (7 of 10)
|
||||||
|
|
||||||
|
Pipeline allows stories to move through coding → QA → merge → done without any actual code changes landing on master. The squash-merge produces an empty diff but the pipeline still marks the story as done. Affected stories: 247, 273, 274, 278, 279, 280, 92. Only 266, 271, 277, and 281 actually shipped code. Root cause: no check that the merge commit contains a non-empty diff. Filed bug 283 for the manual_qa gate issue specifically, but the empty-merge-to-done problem is broader and needs its own fix.
|
||||||
|
|
||||||
|
## 2026-03-18: Agent committed directly to master instead of worktree
|
||||||
|
|
||||||
|
Multiple agents have committed directly to master instead of their worktree/feature branch:
|
||||||
|
|
||||||
|
- Commit `5f4591f` ("fix: update should_commit_stage test to match 5_done") — likely mergemaster
|
||||||
|
- Commit `a32cfbd` ("Add bot-level command registry with help command") — story 285 coder committed code + Cargo.lock directly to master
|
||||||
|
|
||||||
|
Agents should only commit to their feature branch or merge-queue branch, never to master directly. Suspect agents are running `git commit` in the project root instead of the worktree directory. This can also revert uncommitted fixes on master (e.g. project.toml pkill fix was overwritten). Frequency: at least 2 confirmed cases. This is a recurring and serious problem — needs a guard in the server or agent prompts.
|
||||||
|
|
||||||
|
## 2026-03-19: Auto-assign re-assigns mergemaster to failed merge stories in a loop
|
||||||
|
|
||||||
|
After bug 295 fix (`auto_assign_available_work` after every pipeline advance), mergemaster gets re-assigned to stories that already have a merge failure flag. Story 310 had an empty diff merge failure — mergemaster correctly reported the failure, but auto-assign immediately re-assigned mergemaster to the same story, creating an infinite retry loop. The auto-assign logic needs to check for the `merge_failure` front matter flag before re-assigning agents to stories in `4_merge/`.
|
||||||
|
|
||||||
|
## 2026-03-19: Coder produces no code (complete ghost — story 310)
|
||||||
|
|
||||||
|
Story 310 (Bot delete command) went through the full pipeline — coder session ran, passed QA/gates, moved to merge — but the coder produced zero code. No commits on the feature branch, no commits on master. The entire agent session was a no-op. This is different from the "committed to master instead of worktree" problem — in this case, the coder simply did nothing. Need to investigate the coder logs to understand what happened. The empty-diff merge check would catch this at merge time, but ideally the server should detect "coder finished with no commits on feature branch" at the gate-check stage and fail early.
|
||||||
|
|
||||||
|
## 2026-03-19: Auto-assign assigns mergemaster to coding-stage stories
|
||||||
|
|
||||||
|
Auto-assign picked mergemaster for story 310 which was in `2_current/`. Mergemaster should only work on stories in `4_merge/`. The `auto_assign_available_work` function doesn't enforce that the agent's configured stage matches the pipeline stage of the story it's being assigned to. Story 279 (auto-assign respects agent stage from front matter) was supposed to fix this, but the check may only apply to front-matter preferences, not the fallback assignment path.
|
||||||
272
.storkit/project.toml
Normal file
272
.storkit/project.toml
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
# Project-wide default QA mode: "server", "agent", or "human".
|
||||||
|
# Per-story `qa` front matter overrides this setting.
|
||||||
|
default_qa = "server"
|
||||||
|
|
||||||
|
# Default model for coder agents. Only agents with this model are auto-assigned.
|
||||||
|
# Opus coders are reserved for explicit per-story `agent:` front matter requests.
|
||||||
|
default_coder_model = "sonnet"
|
||||||
|
|
||||||
|
# Maximum concurrent coder agents. Stories wait in 2_current/ when all slots are full.
|
||||||
|
max_coders = 3
|
||||||
|
|
||||||
|
# Maximum retries per story per pipeline stage before marking as blocked.
|
||||||
|
# Set to 0 to disable retry limits.
|
||||||
|
max_retries = 2
|
||||||
|
|
||||||
|
[[component]]
|
||||||
|
name = "frontend"
|
||||||
|
path = "frontend"
|
||||||
|
setup = ["npm install", "npm run build"]
|
||||||
|
teardown = []
|
||||||
|
|
||||||
|
[[component]]
|
||||||
|
name = "server"
|
||||||
|
path = "."
|
||||||
|
setup = ["mkdir -p frontend/dist", "cargo check"]
|
||||||
|
teardown = []
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "coder-1"
|
||||||
|
stage = "coder"
|
||||||
|
role = "Full-stack engineer. Implements features across all components."
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 50
|
||||||
|
max_budget_usd = 5.00
|
||||||
|
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
|
||||||
|
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy --all-targets --all-features and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "coder-2"
|
||||||
|
stage = "coder"
|
||||||
|
role = "Full-stack engineer. Implements features across all components."
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 50
|
||||||
|
max_budget_usd = 5.00
|
||||||
|
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
|
||||||
|
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy --all-targets --all-features and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "coder-3"
|
||||||
|
stage = "coder"
|
||||||
|
role = "Full-stack engineer. Implements features across all components."
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 50
|
||||||
|
max_budget_usd = 5.00
|
||||||
|
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
|
||||||
|
system_prompt = "You are a full-stack engineer working autonomously in a git worktree. Follow the Story-Driven Test Workflow strictly. Run cargo clippy --all-targets --all-features and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "qa-2"
|
||||||
|
stage = "qa"
|
||||||
|
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings."
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 40
|
||||||
|
max_budget_usd = 4.00
|
||||||
|
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report.
|
||||||
|
|
||||||
|
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||||
|
|
||||||
|
## Your Workflow
|
||||||
|
|
||||||
|
### 1. Code Quality Scan
|
||||||
|
- Run `git diff master...HEAD --stat` to see what files changed
|
||||||
|
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values)
|
||||||
|
- Run `cargo clippy --all-targets --all-features` and note any warnings
|
||||||
|
- If a `frontend/` directory exists:
|
||||||
|
- Run `npm run build` and note any TypeScript errors
|
||||||
|
- Run `npx @biomejs/biome check src/` and note any linting issues
|
||||||
|
|
||||||
|
### 2. Test Verification
|
||||||
|
- Run `cargo test` and verify all tests pass
|
||||||
|
- If `frontend/` exists: run `npm test` and verify all frontend tests pass
|
||||||
|
- Review test quality: look for tests that are trivial or don't assert meaningful behavior
|
||||||
|
|
||||||
|
### 3. Manual Testing Support
|
||||||
|
- Build the server: run `cargo build` and note success/failure
|
||||||
|
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server
|
||||||
|
- Generate a testing plan including:
|
||||||
|
- URL to visit in the browser
|
||||||
|
- Things to check in the UI
|
||||||
|
- curl commands to exercise relevant API endpoints
|
||||||
|
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
|
||||||
|
|
||||||
|
### 4. Produce Structured Report
|
||||||
|
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format:
|
||||||
|
|
||||||
|
```
|
||||||
|
## QA Report for {{story_id}}
|
||||||
|
|
||||||
|
### Code Quality
|
||||||
|
- clippy: PASS/FAIL (details)
|
||||||
|
- TypeScript build: PASS/FAIL/SKIP (details)
|
||||||
|
- Biome lint: PASS/FAIL/SKIP (details)
|
||||||
|
- Code review findings: (list any issues found, or "None")
|
||||||
|
|
||||||
|
### Test Verification
|
||||||
|
- cargo test: PASS/FAIL (N tests)
|
||||||
|
- npm test: PASS/FAIL/SKIP (N tests)
|
||||||
|
- Test quality issues: (list any trivial/weak tests, or "None")
|
||||||
|
|
||||||
|
### Manual Testing Plan
|
||||||
|
- Server URL: http://localhost:PORT (or "Build failed")
|
||||||
|
- Pages to visit: (list)
|
||||||
|
- Things to check: (list)
|
||||||
|
- curl commands: (list)
|
||||||
|
|
||||||
|
### Overall: PASS/FAIL
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rules
|
||||||
|
- Do NOT modify any code — read-only review only
|
||||||
|
- If the server fails to start, still provide the testing plan with curl commands
|
||||||
|
- The server automatically runs acceptance gates when your process exits"""
|
||||||
|
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "coder-opus"
|
||||||
|
stage = "coder"
|
||||||
|
role = "Senior full-stack engineer for complex tasks. Implements features across all components."
|
||||||
|
model = "opus"
|
||||||
|
max_turns = 80
|
||||||
|
max_budget_usd = 20.00
|
||||||
|
prompt = "You are working in a git worktree on story {{story_id}}. Read CLAUDE.md first, then .story_kit/README.md to understand the dev process. The story details are in your prompt above. Follow the SDTW process through implementation and verification (Steps 1-3). The worktree and feature branch already exist - do not create them. Check .mcp.json for MCP tools. Do NOT accept the story or merge - commit your work and stop. If the user asks to review your changes, tell them to run: cd \"{{worktree_path}}\" && git difftool {{base_branch}}...HEAD\n\nIMPORTANT: Commit all your work before your process exits. The server will automatically run acceptance gates (cargo clippy + tests) when your process exits and advance the pipeline based on the results.\n\n## Bug Workflow: Root Cause First\nWhen working on bugs:\n1. Investigate the root cause before writing any fix. Use `git bisect` to find the breaking commit or `git log` to trace history. Read the relevant code before touching anything.\n2. Fix the root cause with a surgical, minimal change. Do NOT add new abstractions, wrappers, or workarounds when a targeted fix to the original code is possible.\n3. Write commit messages that explain what broke and why, not just what was changed.\n4. If you cannot determine the root cause after thorough investigation, document what you tried and why it was inconclusive — do not guess and ship a speculative fix."
|
||||||
|
system_prompt = "You are a senior full-stack engineer working autonomously in a git worktree. You handle complex tasks requiring deep architectural understanding. Follow the Story-Driven Test Workflow strictly. Run cargo clippy --all-targets --all-features and biome checks before considering work complete. Commit all your work before finishing - use a descriptive commit message. Do not accept stories, move them to archived, or merge to master - a human will do that. Do not coordinate with other agents - focus on your assigned story. The server automatically runs acceptance gates when your process exits. For bugs, always find and fix the root cause. Use git bisect to find breaking commits. Do not layer new code on top of existing code when a surgical fix is possible. If root cause is unclear after investigation, document what you tried rather than guessing."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "qa"
|
||||||
|
stage = "qa"
|
||||||
|
role = "Reviews coder work in worktrees: runs quality gates, generates testing plans, and reports findings."
|
||||||
|
model = "sonnet"
|
||||||
|
max_turns = 40
|
||||||
|
max_budget_usd = 4.00
|
||||||
|
prompt = """You are the QA agent for story {{story_id}}. Your job is to review the coder's work in the worktree and produce a structured QA report.
|
||||||
|
|
||||||
|
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||||
|
|
||||||
|
## Your Workflow
|
||||||
|
|
||||||
|
### 1. Code Quality Scan
|
||||||
|
- Run `git diff master...HEAD --stat` to see what files changed
|
||||||
|
- Run `git diff master...HEAD` to review the actual changes for obvious coding mistakes (unused imports, dead code, unhandled errors, hardcoded values)
|
||||||
|
- Run `cargo clippy --all-targets --all-features` and note any warnings
|
||||||
|
- If a `frontend/` directory exists:
|
||||||
|
- Run `npm run build` and note any TypeScript errors
|
||||||
|
- Run `npx @biomejs/biome check src/` and note any linting issues
|
||||||
|
|
||||||
|
### 2. Test Verification
|
||||||
|
- Run `cargo test` and verify all tests pass
|
||||||
|
- If `frontend/` exists: run `npm test` and verify all frontend tests pass
|
||||||
|
- Review test quality: look for tests that are trivial or don't assert meaningful behavior
|
||||||
|
|
||||||
|
### 3. Manual Testing Support
|
||||||
|
- Build the server: run `cargo build` and note success/failure
|
||||||
|
- If build succeeds: find a free port (try 3010-3020) and attempt to start the server
|
||||||
|
- Generate a testing plan including:
|
||||||
|
- URL to visit in the browser
|
||||||
|
- Things to check in the UI
|
||||||
|
- curl commands to exercise relevant API endpoints
|
||||||
|
- Kill the test server when done: `pkill -f 'target.*storkit' || true` (NEVER use `pkill -f storkit` — it kills the vite dev server)
|
||||||
|
|
||||||
|
### 4. Produce Structured Report
|
||||||
|
Print your QA report to stdout before your process exits. The server will automatically run acceptance gates. Use this format:
|
||||||
|
|
||||||
|
```
|
||||||
|
## QA Report for {{story_id}}
|
||||||
|
|
||||||
|
### Code Quality
|
||||||
|
- clippy: PASS/FAIL (details)
|
||||||
|
- TypeScript build: PASS/FAIL/SKIP (details)
|
||||||
|
- Biome lint: PASS/FAIL/SKIP (details)
|
||||||
|
- Code review findings: (list any issues found, or "None")
|
||||||
|
|
||||||
|
### Test Verification
|
||||||
|
- cargo test: PASS/FAIL (N tests)
|
||||||
|
- npm test: PASS/FAIL/SKIP (N tests)
|
||||||
|
- Test quality issues: (list any trivial/weak tests, or "None")
|
||||||
|
|
||||||
|
### Manual Testing Plan
|
||||||
|
- Server URL: http://localhost:PORT (or "Build failed")
|
||||||
|
- Pages to visit: (list)
|
||||||
|
- Things to check: (list)
|
||||||
|
- curl commands: (list)
|
||||||
|
|
||||||
|
### Overall: PASS/FAIL
|
||||||
|
```
|
||||||
|
|
||||||
|
## Rules
|
||||||
|
- Do NOT modify any code — read-only review only
|
||||||
|
- If the server fails to start, still provide the testing plan with curl commands
|
||||||
|
- The server automatically runs acceptance gates when your process exits"""
|
||||||
|
system_prompt = "You are a QA agent. Your job is read-only: review code quality, run tests, try to start the server, and produce a structured QA report. Do not modify code. The server automatically runs acceptance gates when your process exits."
|
||||||
|
|
||||||
|
[[agent]]
|
||||||
|
name = "mergemaster"
|
||||||
|
stage = "mergemaster"
|
||||||
|
role = "Merges completed coder work into master, runs quality gates, archives stories, and cleans up worktrees."
|
||||||
|
model = "opus"
|
||||||
|
max_turns = 30
|
||||||
|
max_budget_usd = 5.00
|
||||||
|
prompt = """You are the mergemaster agent for story {{story_id}}. Your job is to merge the completed coder work into master.
|
||||||
|
|
||||||
|
Read CLAUDE.md first, then .story_kit/README.md to understand the dev process.
|
||||||
|
|
||||||
|
## Your Workflow
|
||||||
|
1. Call merge_agent_work(story_id='{{story_id}}') via the MCP tool to trigger the full merge pipeline
|
||||||
|
2. Review the result: check success, had_conflicts, conflicts_resolved, gates_passed, and gate_output
|
||||||
|
3. If merge succeeded and gates passed: report success to the human
|
||||||
|
4. If conflicts were auto-resolved (conflicts_resolved=true) and gates passed: report success, noting which conflicts were resolved
|
||||||
|
5. If conflicts could not be auto-resolved: **resolve them yourself** in the merge worktree (see below)
|
||||||
|
6. If merge failed for any other reason: call report_merge_failure(story_id='{{story_id}}', reason='<details>') and report to the human
|
||||||
|
7. If gates failed after merge: attempt to fix the issues yourself in the merge worktree, then re-trigger merge_agent_work. After 3 fix attempts, call report_merge_failure and stop.
|
||||||
|
|
||||||
|
## Resolving Complex Conflicts Yourself
|
||||||
|
|
||||||
|
When the auto-resolver fails, you have access to the merge worktree at `.story_kit/merge_workspace/`. Go in there and resolve the conflicts manually:
|
||||||
|
|
||||||
|
1. Run `git diff --name-only --diff-filter=U` in the merge worktree to list conflicted files
|
||||||
|
2. **Build context before touching code.** Run `git log --oneline master...HEAD` on the feature branch to see its commits. Then run `git log --oneline --since="$(git log -1 --format=%ci <feature-branch-base-commit>)" master` to see what landed on master since the branch was created. Read the story files in `.story_kit/work/` for any recently merged stories that touch the same files — this tells you WHY master changed and what must be preserved.
|
||||||
|
3. Read each conflicted file and understand both sides of the conflict
|
||||||
|
4. **Understand intent, not just syntax.** The feature branch may be behind master — master's version of shared infrastructure is almost always correct. The feature branch's contribution is the NEW functionality it adds. Your job is to integrate the new into master's structure, not pick one side.
|
||||||
|
5. Resolve by integrating the feature's new functionality into master's code structure
|
||||||
|
6. Stage resolved files with `git add`
|
||||||
|
7. Run `cargo check` (and `npm run build` if frontend changed) to verify compilation
|
||||||
|
8. If it compiles, commit and re-trigger merge_agent_work
|
||||||
|
|
||||||
|
### Common conflict patterns in this project:
|
||||||
|
|
||||||
|
**Story file rename/rename conflicts:** Both branches moved the story .md file to different pipeline directories. Resolution: `git rm` both sides — story files in `work/2_current/`, `work/3_qa/`, `work/4_merge/` are gitignored and don't need to be committed.
|
||||||
|
|
||||||
|
**bot.rs tokio::select! conflicts:** Master has a `tokio::select!` loop in `handle_message()` that handles permission forwarding (story 275). Feature branches created before story 275 have a simpler direct `provider.chat_stream().await` call. Resolution: KEEP master's tokio::select! loop. Integrate only the feature's new logic (e.g. typing indicators, new callbacks) into the existing loop structure. Do NOT replace the loop with the old direct call.
|
||||||
|
|
||||||
|
**Duplicate functions/imports:** The auto-resolver keeps both sides, producing duplicates. Resolution: keep one copy (prefer master's version), delete the duplicate.
|
||||||
|
|
||||||
|
**Formatting-only conflicts:** Both sides reformatted the same code differently. Resolution: pick either side (prefer master).
|
||||||
|
|
||||||
|
## Fixing Gate Failures
|
||||||
|
|
||||||
|
If quality gates fail (cargo clippy, cargo test, npm run build, npm test), attempt to fix issues yourself in the merge worktree.
|
||||||
|
|
||||||
|
**Fix yourself (up to 3 attempts total):**
|
||||||
|
- Syntax errors (missing semicolons, brackets, commas)
|
||||||
|
- Duplicate definitions from merge artifacts
|
||||||
|
- Simple type annotation errors
|
||||||
|
- Unused import warnings flagged by clippy
|
||||||
|
- Mismatched braces from bad conflict resolution
|
||||||
|
- Trivial formatting issues that block compilation or linting
|
||||||
|
|
||||||
|
**Report to human without attempting a fix:**
|
||||||
|
- Logic errors or incorrect business logic
|
||||||
|
- Missing function implementations
|
||||||
|
- Architectural changes required
|
||||||
|
- Non-trivial refactoring needed
|
||||||
|
|
||||||
|
**Max retry limit:** If gates still fail after 3 fix attempts, call report_merge_failure to record the failure, then stop immediately and report the full gate output to the human.
|
||||||
|
|
||||||
|
## CRITICAL Rules
|
||||||
|
- NEVER manually move story files between pipeline stages (e.g. from 4_merge/ to 5_done/)
|
||||||
|
- NEVER call accept_story — only merge_agent_work can move stories to done after a successful merge
|
||||||
|
- When merge fails after exhausting your fix attempts, ALWAYS call report_merge_failure
|
||||||
|
- Report conflict resolution outcomes clearly
|
||||||
|
- Report gate failures with full output so the human can act if needed
|
||||||
|
- The server automatically runs acceptance gates when your process exits"""
|
||||||
|
system_prompt = "You are the mergemaster agent. Your primary job is to merge feature branches to master. First try the merge_agent_work MCP tool. If the auto-resolver fails on complex conflicts, resolve them yourself in the merge worktree — you are an opus-class agent capable of understanding both sides of a conflict and producing correct merged code. Common patterns: keep master's tokio::select! permission loop in bot.rs, discard story file rename conflicts (gitignored), remove duplicate definitions. After resolving, verify compilation before re-triggering merge. CRITICAL: Never manually move story files or call accept_story. After 3 failed fix attempts, call report_merge_failure and stop."
|
||||||
33
.storkit/specs/00_CONTEXT.md
Normal file
33
.storkit/specs/00_CONTEXT.md
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# Project Context
|
||||||
|
|
||||||
|
## High-Level Goal
|
||||||
|
To build a standalone **Agentic AI Code Assistant** application as a single Rust binary that serves a Vite/React web UI and exposes a WebSocket API. The assistant will facilitate a test-driven development (TDD) workflow first, with both unit and integration tests providing the primary guardrails for code changes. Once the single-threaded TDD workflow is stable and usable (including compatibility with lower-cost agents), the project will evolve to a multi-agent orchestration model using Git worktrees and supervisory roles to maximize throughput. Unlike a passive chat interface, this assistant acts as an **Agent**, capable of using tools to read the filesystem, execute shell commands, manage git repositories, and modify code directly to implement features.
|
||||||
|
|
||||||
|
## Core Features
|
||||||
|
1. **Chat Interface:** A conversational UI for the user to interact with the AI assistant.
|
||||||
|
2. **Agentic Tool Bridge:** A robust system mapping LLM "Tool Calls" to native Rust functions.
|
||||||
|
* **Filesystem:** Read/Write access (scoped to the target project).
|
||||||
|
* **Search:** High-performance file searching (ripgrep-style) and content retrieval.
|
||||||
|
* **Shell Integration:** Ability to execute approved commands (e.g., `cargo`, `npm`, `git`) to run tests, linters, and version control.
|
||||||
|
3. **Workflow Management:** Specialized tools to manage a TDD-first lifecycle:
|
||||||
|
* Defining test requirements (unit + integration) before code changes.
|
||||||
|
* Implementing code via red-green-refactor.
|
||||||
|
* Enforcing test and quality gates before acceptance.
|
||||||
|
* Scaling later to multi-agent orchestration with Git worktrees and supervisory checks, after the single-threaded process is stable.
|
||||||
|
4. **LLM Integration:** Connection to an LLM backend to drive the intelligence and tool selection.
|
||||||
|
* **Remote:** Support for major APIs (Anthropic Claude, Google Gemini, OpenAI, etc).
|
||||||
|
* **Local:** Support for local inference via Ollama.
|
||||||
|
|
||||||
|
## Domain Definition
|
||||||
|
* **User:** A software engineer using the assistant to build a project.
|
||||||
|
* **Target Project:** The local software project the user is working on.
|
||||||
|
* **Agent:** The AI entity that receives prompts and decides which **Tools** to invoke to solve the problem.
|
||||||
|
* **Tool:** A discrete function exposed to the Agent (e.g., `run_shell_command`, `write_file`, `search_project`).
|
||||||
|
* **Story:** A unit of work defining a change (Feature Request).
|
||||||
|
* **Spec:** A persistent documentation artifact defining the current truth of the system.
|
||||||
|
|
||||||
|
## Glossary
|
||||||
|
* **SDSW:** Story-Driven Spec Workflow.
|
||||||
|
* **Web Server Binary:** The Rust binary that serves the Vite/React frontend and exposes the WebSocket API.
|
||||||
|
* **Living Spec:** The collection of Markdown files in `.story_kit/` that define the project.
|
||||||
|
* **Tool Call:** A structured request from the LLM to execute a specific native function.
|
||||||
44
.storkit/specs/functional/SLACK_SETUP.md
Normal file
44
.storkit/specs/functional/SLACK_SETUP.md
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
# Slack Integration Setup
|
||||||
|
|
||||||
|
## Bot Configuration
|
||||||
|
|
||||||
|
Slack integration is configured via `bot.toml` in the project's `.story_kit/` directory:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
transport = "slack"
|
||||||
|
display_name = "Storkit"
|
||||||
|
slack_bot_token = "xoxb-..."
|
||||||
|
slack_signing_secret = "..."
|
||||||
|
slack_channel_ids = ["C01ABCDEF"]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Slack App Configuration
|
||||||
|
|
||||||
|
### Event Subscriptions
|
||||||
|
|
||||||
|
1. In your Slack app settings, enable **Event Subscriptions**.
|
||||||
|
2. Set the **Request URL** to: `https://<your-host>/webhook/slack`
|
||||||
|
3. Subscribe to the `message.channels` and `message.im` bot events.
|
||||||
|
|
||||||
|
### Slash Commands
|
||||||
|
|
||||||
|
Slash commands provide quick access to pipeline commands without mentioning the bot.
|
||||||
|
|
||||||
|
1. In your Slack app settings, go to **Slash Commands**.
|
||||||
|
2. Create the following commands, all pointing to the same **Request URL**: `https://<your-host>/webhook/slack/command`
|
||||||
|
|
||||||
|
| Command | Description |
|
||||||
|
|---------|-------------|
|
||||||
|
| `/storkit-status` | Show pipeline status and agent availability |
|
||||||
|
| `/storkit-cost` | Show token spend: 24h total, top stories, and breakdown |
|
||||||
|
| `/storkit-show` | Display the full text of a work item (e.g. `/storkit-show 42`) |
|
||||||
|
| `/storkit-git` | Show git status: branch, changes, ahead/behind |
|
||||||
|
| `/storkit-htop` | Show system and agent process dashboard |
|
||||||
|
|
||||||
|
All slash command responses are **ephemeral** — only the user who invoked the command sees the response.
|
||||||
|
|
||||||
|
### OAuth & Permissions
|
||||||
|
|
||||||
|
Required bot token scopes:
|
||||||
|
- `chat:write` — send messages
|
||||||
|
- `commands` — handle slash commands
|
||||||
33
.storkit/specs/functional/UI_LAYOUT.md
Normal file
33
.storkit/specs/functional/UI_LAYOUT.md
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# Functional Spec: UI Layout
|
||||||
|
|
||||||
|
## 1. Global Structure
|
||||||
|
The application uses a **fixed-layout** strategy to maximize chat visibility.
|
||||||
|
|
||||||
|
```text
|
||||||
|
+-------------------------------------------------------+
|
||||||
|
| HEADER (Fixed Height, e.g., 50px) |
|
||||||
|
| [Project: ~/foo/bar] [Model: llama3] [x] Tools |
|
||||||
|
+-------------------------------------------------------+
|
||||||
|
| |
|
||||||
|
| CHAT AREA (Flex Grow, Scrollable) |
|
||||||
|
| |
|
||||||
|
| (User Message) |
|
||||||
|
| (Agent Message) |
|
||||||
|
| |
|
||||||
|
+-------------------------------------------------------+
|
||||||
|
| INPUT AREA (Fixed Height, Bottom) |
|
||||||
|
| [ Input Field ........................... ] [Send] |
|
||||||
|
+-------------------------------------------------------+
|
||||||
|
```
|
||||||
|
|
||||||
|
## 2. Components
|
||||||
|
* **Header:** Contains global context (Project) and session config (Model/Tools).
|
||||||
|
* *Constraint:* Must not scroll away.
|
||||||
|
* **ChatList:** The scrollable container for messages.
|
||||||
|
* **InputBar:** Pinned to the bottom.
|
||||||
|
|
||||||
|
## 3. Styling
|
||||||
|
* Use Flexbox (`flex-direction: column`) on the main container.
|
||||||
|
* Header: `flex-shrink: 0`.
|
||||||
|
* ChatList: `flex-grow: 1`, `overflow-y: auto`.
|
||||||
|
* InputBar: `flex-shrink: 0`.
|
||||||
474
.storkit/specs/functional/UI_UX.md
Normal file
474
.storkit/specs/functional/UI_UX.md
Normal file
@@ -0,0 +1,474 @@
|
|||||||
|
# Functional Spec: UI/UX Responsiveness
|
||||||
|
|
||||||
|
## Problem
|
||||||
|
Currently, the `chat` command in Rust is an async function that performs a long-running, blocking loop (waiting for LLM, executing tools). While Tauri executes this on a separate thread from the UI, the frontend awaits the *entire* result before re-rendering. This makes the app feel "frozen" because there is no feedback during the 10-60 seconds of generation.
|
||||||
|
|
||||||
|
## Solution: Event-Driven Feedback
|
||||||
|
Instead of waiting for the final array of messages, the Backend should emit **Events** to the Frontend in real-time.
|
||||||
|
|
||||||
|
### 1. Events
|
||||||
|
* `chat:token`: Emitted when a text token is generated (Streaming text).
|
||||||
|
* `chat:tool-start`: Emitted when a tool call begins (e.g., `{ tool: "git status" }`).
|
||||||
|
* `chat:tool-end`: Emitted when a tool call finishes (e.g., `{ output: "..." }`).
|
||||||
|
|
||||||
|
### 2. Implementation Strategy
|
||||||
|
|
||||||
|
#### Token-by-Token Streaming (Story 18)
|
||||||
|
The system now implements full token streaming for real-time response display:
|
||||||
|
|
||||||
|
* **Backend (Rust):**
|
||||||
|
* Set `stream: true` in Ollama API requests
|
||||||
|
* Parse newline-delimited JSON from Ollama's streaming response
|
||||||
|
* Emit `chat:token` events for each token received
|
||||||
|
* Use `reqwest` streaming body with async iteration
|
||||||
|
* After streaming completes, emit `chat:update` with the full message
|
||||||
|
|
||||||
|
* **Frontend (TypeScript):**
|
||||||
|
* Listen for `chat:token` events
|
||||||
|
* Append tokens to the current assistant message in real-time
|
||||||
|
* Maintain smooth auto-scroll as tokens arrive
|
||||||
|
* After streaming completes, process `chat:update` for final state
|
||||||
|
|
||||||
|
* **Event-Driven Updates:**
|
||||||
|
* `chat:token`: Emitted for each token during streaming (payload: `{ content: string }`)
|
||||||
|
* `chat:update`: Emitted after LLM response complete or after Tool Execution (payload: `Message[]`)
|
||||||
|
* Frontend maintains streaming state separate from message history
|
||||||
|
|
||||||
|
### 3. Visuals
|
||||||
|
* **Loading State:** The "Send" button should show a spinner or "Stop" button.
|
||||||
|
* **Auto-Scroll:** The chat view uses smart auto-scroll that respects user scrolling (see Smart Auto-Scroll section below).
|
||||||
|
|
||||||
|
## Smart Auto-Scroll (Story 22)
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Users need to review previous messages while the AI is streaming new content, but aggressive auto-scrolling constantly drags them back to the bottom, making it impossible to read older content.
|
||||||
|
|
||||||
|
### Solution: Scroll-Position-Aware Auto-Scroll
|
||||||
|
|
||||||
|
The chat implements intelligent auto-scroll that:
|
||||||
|
* Automatically scrolls to show new content when the user is at/near the bottom
|
||||||
|
* Pauses auto-scroll when the user scrolls up to review older messages
|
||||||
|
* Resumes auto-scroll when the user scrolls back to the bottom
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Scroll Detection:** Track whether the user is at the bottom of the chat
|
||||||
|
2. **Threshold:** Define "near bottom" as within 25px of the bottom
|
||||||
|
3. **Auto-Scroll Logic:** Only trigger auto-scroll if user is at/near bottom
|
||||||
|
4. **Smooth Operation:** No flickering or jarring behavior during scrolling
|
||||||
|
5. **Universal:** Works during both streaming responses and tool execution
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
|
||||||
|
**Core Components:**
|
||||||
|
* `scrollContainerRef`: Reference to the scrollable messages container
|
||||||
|
* `shouldAutoScrollRef`: Tracks whether auto-scroll should be active (uses ref to avoid re-renders)
|
||||||
|
* `messagesEndRef`: Target element for scroll-to-bottom behavior
|
||||||
|
|
||||||
|
**Detection Function:**
|
||||||
|
```typescript
|
||||||
|
const isScrolledToBottom = () => {
|
||||||
|
const element = scrollContainerRef.current;
|
||||||
|
if (!element) return true;
|
||||||
|
const threshold = 25; // pixels from bottom
|
||||||
|
return (
|
||||||
|
element.scrollHeight - element.scrollTop - element.clientHeight < threshold
|
||||||
|
);
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
**Scroll Handler:**
|
||||||
|
```typescript
|
||||||
|
const handleScroll = () => {
|
||||||
|
// Update auto-scroll state based on scroll position
|
||||||
|
shouldAutoScrollRef.current = isScrolledToBottom();
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
**Conditional Auto-Scroll:**
|
||||||
|
```typescript
|
||||||
|
useEffect(() => {
|
||||||
|
if (shouldAutoScrollRef.current) {
|
||||||
|
scrollToBottom();
|
||||||
|
}
|
||||||
|
}, [messages, streamingContent]);
|
||||||
|
```
|
||||||
|
|
||||||
|
**DOM Setup:**
|
||||||
|
* Attach `ref={scrollContainerRef}` to the messages container
|
||||||
|
* Attach `onScroll={handleScroll}` to detect user scrolling
|
||||||
|
* Initialize `shouldAutoScrollRef` to `true` (enable auto-scroll by default)
|
||||||
|
|
||||||
|
### Edge Cases
|
||||||
|
|
||||||
|
1. **Initial Load:** Auto-scroll is enabled by default
|
||||||
|
2. **Rapid Scrolling:** Uses refs to avoid race conditions and excessive re-renders
|
||||||
|
3. **Manual Scroll to Bottom:** Auto-scroll re-enables when user scrolls near bottom
|
||||||
|
4. **No Container:** Falls back to always allowing auto-scroll if container ref is null
|
||||||
|
|
||||||
|
## Tool Output Display
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Tool outputs (like file contents, search results, or command output) can be very long, making the chat history difficult to read. Users need to see the Agent's reasoning and responses without being overwhelmed by verbose tool output.
|
||||||
|
|
||||||
|
### Solution: Collapsible Tool Outputs
|
||||||
|
Tool outputs should be rendered in a collapsible component that is **closed by default**.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Default State:** Tool outputs are collapsed/closed when first rendered
|
||||||
|
2. **Summary Line:** Shows essential information without expanding:
|
||||||
|
- Tool name (e.g., `read_file`, `exec_shell`)
|
||||||
|
- Key arguments (e.g., file path, command name)
|
||||||
|
- Format: "▶ tool_name(key_arg)"
|
||||||
|
- Example: "▶ read_file(src/main.rs)"
|
||||||
|
- Example: "▶ exec_shell(cargo check)"
|
||||||
|
3. **Expandable:** User can click the summary to toggle expansion
|
||||||
|
4. **Output Display:** When expanded, shows the complete tool output in a readable format:
|
||||||
|
- Use `<pre>` or monospace font for code/terminal output
|
||||||
|
- Preserve whitespace and line breaks
|
||||||
|
- Limit height with scrolling for very long outputs (e.g., max-height: 300px)
|
||||||
|
5. **Visual Indicator:** Clear arrow or icon showing collapsed/expanded state
|
||||||
|
6. **Styling:** Consistent with the dark theme, distinguishable from assistant messages
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Use native `<details>` and `<summary>` HTML elements for accessibility
|
||||||
|
* Or implement custom collapsible component with proper ARIA attributes
|
||||||
|
* Tool outputs should be visually distinct (border, background color, or badge)
|
||||||
|
* Multiple tool calls in sequence should each be independently collapsible
|
||||||
|
|
||||||
|
## Scroll Bar Styling
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Visible scroll bars create visual clutter and make the interface feel less polished. Standard browser scroll bars can be distracting and break the clean aesthetic of the dark theme.
|
||||||
|
|
||||||
|
### Solution: Hidden Scroll Bars with Maintained Functionality
|
||||||
|
Scroll bars should be hidden while maintaining full scroll functionality.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Visual:** Scroll bars should not be visible to the user
|
||||||
|
2. **Functionality:** Scrolling must still work perfectly:
|
||||||
|
- Mouse wheel scrolling
|
||||||
|
- Trackpad scrolling
|
||||||
|
- Keyboard navigation (arrow keys, page up/down)
|
||||||
|
- Auto-scroll to bottom for new messages
|
||||||
|
3. **Cross-browser:** Solution must work on Chrome, Firefox, and Safari
|
||||||
|
4. **Areas affected:**
|
||||||
|
- Main chat message area (vertical scroll)
|
||||||
|
- Tool output content (both vertical and horizontal)
|
||||||
|
- Any other scrollable containers
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Use CSS `scrollbar-width: none` for Firefox
|
||||||
|
* Use `::-webkit-scrollbar { display: none; }` for Chrome/Safari/Edge
|
||||||
|
* Maintain `overflow: auto` or `overflow-y: scroll` to preserve scroll functionality
|
||||||
|
* Ensure `overflow-x: hidden` where horizontal scroll is not needed
|
||||||
|
* Test with very long messages and large tool outputs to ensure no layout breaking
|
||||||
|
|
||||||
|
## Text Alignment and Readability
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Center-aligned text in a chat interface is unconventional and reduces readability, especially for code blocks and long-form content. Standard chat UIs align messages differently based on the sender.
|
||||||
|
|
||||||
|
### Solution: Context-Appropriate Text Alignment
|
||||||
|
Messages should follow standard chat UI conventions with proper alignment based on message type.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **User Messages:** Right-aligned (standard pattern showing messages sent by the user)
|
||||||
|
2. **Assistant Messages:** Left-aligned (standard pattern showing messages received)
|
||||||
|
3. **Tool Outputs:** Left-aligned (part of the system/assistant response flow)
|
||||||
|
4. **Code Blocks:** Always left-aligned regardless of message type (for readability)
|
||||||
|
5. **Container:** Remove any center-alignment from the chat container
|
||||||
|
6. **Max-Width:** Maintain current max-width constraint (e.g., 768px) for optimal readability
|
||||||
|
7. **Spacing:** Maintain proper padding and visual hierarchy between messages
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Check for `textAlign: "center"` in inline styles and remove
|
||||||
|
* Check for `text-align: center` in CSS and remove from chat-related classes
|
||||||
|
* Ensure flexbox alignment is set appropriately:
|
||||||
|
* User messages: `alignItems: "flex-end"`
|
||||||
|
* Assistant/Tool messages: `alignItems: "flex-start"`
|
||||||
|
* Code blocks should have `text-align: left` explicitly set
|
||||||
|
|
||||||
|
## Syntax Highlighting
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Code blocks in assistant responses currently lack syntax highlighting, making them harder to read and understand. Developers expect colored syntax highlighting similar to their code editors.
|
||||||
|
|
||||||
|
### Solution: Syntax Highlighting for Code Blocks
|
||||||
|
Integrate syntax highlighting into markdown code blocks rendered by the assistant.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Languages Supported:** At minimum:
|
||||||
|
- JavaScript/TypeScript
|
||||||
|
- Rust
|
||||||
|
- Python
|
||||||
|
- JSON
|
||||||
|
- Markdown
|
||||||
|
- Shell/Bash
|
||||||
|
- HTML/CSS
|
||||||
|
- SQL
|
||||||
|
2. **Theme:** Use a dark theme that complements the existing dark UI (e.g., `oneDark`, `vsDark`, `dracula`)
|
||||||
|
3. **Integration:** Work seamlessly with `react-markdown` component
|
||||||
|
4. **Performance:** Should not significantly impact rendering performance
|
||||||
|
5. **Fallback:** Plain monospace text for unrecognized languages
|
||||||
|
6. **Inline Code:** Inline code (single backticks) should maintain simple styling without full syntax highlighting
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Use `react-syntax-highlighter` library with `react-markdown`
|
||||||
|
* Or use `rehype-highlight` plugin for `react-markdown`
|
||||||
|
* Configure with a dark theme preset (e.g., `oneDark` from `react-syntax-highlighter/dist/esm/styles/prism`)
|
||||||
|
* Apply to code blocks via `react-markdown` components prop:
|
||||||
|
```tsx
|
||||||
|
<Markdown
|
||||||
|
components={{
|
||||||
|
code: ({node, inline, className, children, ...props}) => {
|
||||||
|
const match = /language-(\w+)/.exec(className || '');
|
||||||
|
return !inline && match ? (
|
||||||
|
<SyntaxHighlighter style={oneDark} language={match[1]} {...props}>
|
||||||
|
{String(children).replace(/\n$/, '')}
|
||||||
|
</SyntaxHighlighter>
|
||||||
|
) : (
|
||||||
|
<code className={className} {...props}>{children}</code>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
```
|
||||||
|
* Ensure syntax highlighted code blocks are left-aligned
|
||||||
|
* Test with various code samples to ensure proper rendering
|
||||||
|
|
||||||
|
## Token Streaming
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Without streaming, users see no feedback during model generation. The response appears all at once after waiting, which feels unresponsive and provides no indication that the system is working.
|
||||||
|
|
||||||
|
### Solution: Token-by-Token Streaming
|
||||||
|
Stream tokens from Ollama in real-time and display them as they arrive, providing immediate feedback and a responsive chat experience similar to ChatGPT.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Real-time Display:** Tokens appear immediately as Ollama generates them
|
||||||
|
2. **Smooth Performance:** No lag or stuttering during high token throughput
|
||||||
|
3. **Tool Compatibility:** Streaming works correctly with tool calls and multi-turn conversations
|
||||||
|
4. **Auto-scroll:** Chat view follows streaming content automatically
|
||||||
|
5. **Error Handling:** Gracefully handle stream interruptions or errors
|
||||||
|
6. **State Management:** Maintain clean separation between streaming state and final message history
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
|
||||||
|
#### Backend (Rust)
|
||||||
|
* Enable streaming in Ollama requests: `stream: true`
|
||||||
|
* Parse newline-delimited JSON from response body
|
||||||
|
* Each line is a separate JSON object: `{"message":{"content":"token"},"done":false}`
|
||||||
|
* Use `futures::StreamExt` or similar for async stream processing
|
||||||
|
* Emit `chat:token` event for each token
|
||||||
|
* Emit `chat:update` when streaming completes
|
||||||
|
* Handle both streaming text and tool call interruptions
|
||||||
|
|
||||||
|
#### Frontend (TypeScript)
|
||||||
|
* Create streaming state separate from message history
|
||||||
|
* Listen for `chat:token` events and append to streaming buffer
|
||||||
|
* Render streaming content in real-time
|
||||||
|
* On `chat:update`, replace streaming content with final message
|
||||||
|
* Maintain scroll position during streaming
|
||||||
|
|
||||||
|
#### Ollama Streaming Format
|
||||||
|
```json
|
||||||
|
{"message":{"role":"assistant","content":"Hello"},"done":false}
|
||||||
|
{"message":{"role":"assistant","content":" world"},"done":false}
|
||||||
|
{"message":{"role":"assistant","content":"!"},"done":true}
|
||||||
|
{"message":{"role":"assistant","tool_calls":[...]},"done":true}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Edge Cases
|
||||||
|
* Tool calls during streaming: Switch from text streaming to tool execution
|
||||||
|
* Cancellation during streaming: Clean up streaming state properly
|
||||||
|
* Network interruptions: Show error and preserve partial content
|
||||||
|
* Very fast streaming: Throttle UI updates if needed for performance
|
||||||
|
|
||||||
|
## Input Focus Management
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
When the app loads with a project selected, users need to click into the chat input box before they can start typing. This adds unnecessary friction to the user experience.
|
||||||
|
|
||||||
|
### Solution: Auto-focus on Component Mount
|
||||||
|
The chat input field should automatically receive focus when the chat component mounts, allowing users to immediately start typing.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Auto-focus:** Input field receives focus automatically when chat component loads
|
||||||
|
2. **Visible Cursor:** Cursor should be visible and blinking in the input field
|
||||||
|
3. **Immediate Typing:** User can start typing without clicking into the field
|
||||||
|
4. **Non-intrusive:** Should not interfere with other UI interactions or accessibility
|
||||||
|
5. **Timing:** Focus should be set after the component fully mounts
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Use React `useRef` to create a reference to the input element
|
||||||
|
* Use `useEffect` with empty dependency array to run once on mount
|
||||||
|
* Call `inputRef.current?.focus()` in the effect
|
||||||
|
* Ensure the ref is properly attached to the input element
|
||||||
|
* Example implementation:
|
||||||
|
```tsx
|
||||||
|
const inputRef = useRef<HTMLInputElement>(null);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
inputRef.current?.focus();
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return <input ref={inputRef} ... />
|
||||||
|
```
|
||||||
|
|
||||||
|
## Response Interruption
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Users may want to interrupt a long-running model response to ask a different question or change direction. Having to wait for the full response to complete creates friction and wastes time.
|
||||||
|
|
||||||
|
### Solution: Interrupt on Typing
|
||||||
|
When the user starts typing in the input field while the model is generating a response, the generation should be cancelled immediately, allowing the user to send a new message.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Input Always Enabled:** The input field should remain enabled and usable even while the model is generating
|
||||||
|
2. **Interrupt Detection:** Detect when user types in the input field while `loading` state is true
|
||||||
|
3. **Immediate Cancellation:** Cancel the ongoing generation as soon as typing is detected
|
||||||
|
4. **Preserve Partial Response:** Any partial response generated before interruption should remain visible in the chat
|
||||||
|
5. **State Reset:** UI should return to normal state (ready to send) after interruption
|
||||||
|
6. **Preserve User Input:** The user's new input should be preserved in the input field
|
||||||
|
7. **Visual Feedback:** "Thinking..." indicator should disappear when generation is interrupted
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
* Do NOT disable the input field during loading
|
||||||
|
* Listen for input changes while `loading` is true
|
||||||
|
* When user types during loading, call backend to cancel generation (if possible) or just stop waiting
|
||||||
|
* Set `loading` state to false immediately when typing detected
|
||||||
|
* Backend may need a `cancel_chat` command or similar
|
||||||
|
* Consider if Ollama requests can be cancelled mid-generation or if we just stop processing the response
|
||||||
|
* Example implementation:
|
||||||
|
```tsx
|
||||||
|
const handleInputChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||||
|
const newValue = e.target.value;
|
||||||
|
setInput(newValue);
|
||||||
|
|
||||||
|
// If user starts typing while model is generating, interrupt
|
||||||
|
if (loading && newValue.length > input.length) {
|
||||||
|
setLoading(false);
|
||||||
|
// Optionally call backend to cancel: invoke("cancel_chat")
|
||||||
|
}
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
## Session Management
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Users may want to start a fresh conversation without restarting the application. Long conversations can become unwieldy, and users need a way to clear context for new tasks while keeping the same project open.
|
||||||
|
|
||||||
|
### Solution: New Session Button
|
||||||
|
Provide a clear, accessible way for users to start a new session by clearing the chat history.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Button Placement:** Located in the header area, near model controls
|
||||||
|
2. **Visual Design:** Secondary/subtle styling to prevent accidental clicks
|
||||||
|
3. **Confirmation Dialog:** Ask "Are you sure? This will clear all messages." before clearing
|
||||||
|
4. **State Management:**
|
||||||
|
- Clear `messages` state array
|
||||||
|
- Clear `streamingContent` if any streaming is in progress
|
||||||
|
- Preserve project path, model selection, and tool settings
|
||||||
|
- Cancel any in-flight backend operations before clearing
|
||||||
|
5. **User Feedback:** Immediate visual response (messages disappear)
|
||||||
|
6. **Empty State:** Show a welcome message or empty state after clearing
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
|
||||||
|
**Frontend:**
|
||||||
|
- Add "New Session" button to header
|
||||||
|
- Implement confirmation modal/dialog
|
||||||
|
- Call `setMessages([])` after confirmation
|
||||||
|
- Cancel any ongoing streaming/tool execution
|
||||||
|
- Consider keyboard shortcut (e.g., Cmd/Ctrl+K)
|
||||||
|
|
||||||
|
**Backend:**
|
||||||
|
- May need to cancel ongoing chat operations
|
||||||
|
- Clear any server-side state if applicable
|
||||||
|
- No persistent session history (sessions are ephemeral)
|
||||||
|
|
||||||
|
**Edge Cases:**
|
||||||
|
- Don't clear while actively streaming (cancel first, then clear)
|
||||||
|
- Handle confirmation dismissal (do nothing)
|
||||||
|
- Ensure button is always accessible (not disabled)
|
||||||
|
|
||||||
|
### Button Label Options
|
||||||
|
- "New Session" (clear and descriptive)
|
||||||
|
- "Clear Chat" (direct but less friendly)
|
||||||
|
- "Start Over" (conversational)
|
||||||
|
- Icon: 🔄 or ⊕ (circled plus)
|
||||||
|
|
||||||
|
## Context Window Usage Display
|
||||||
|
|
||||||
|
### Problem
|
||||||
|
Users have no visibility into how much of the model's context window they're using. This leads to:
|
||||||
|
- Unexpected quality degradation when context limit is reached
|
||||||
|
- Uncertainty about when to start a new session
|
||||||
|
- Inability to gauge conversation length
|
||||||
|
|
||||||
|
### Solution: Real-time Context Usage Indicator
|
||||||
|
Display a persistent indicator showing current token usage vs. model's context window limit.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
|
||||||
|
1. **Visual Indicator:** Always visible in header area
|
||||||
|
2. **Real-time Updates:** Updates as messages are added
|
||||||
|
3. **Model-Aware:** Shows correct limit based on selected model
|
||||||
|
4. **Color Coding:** Visual warning as limit approaches
|
||||||
|
- Green/default: 0-74% usage
|
||||||
|
- Yellow/warning: 75-89% usage
|
||||||
|
- Red/danger: 90-100% usage
|
||||||
|
5. **Clear Format:** "2.5K / 8K tokens (31%)" or similar
|
||||||
|
6. **Token Estimation:** Approximate token count for all messages
|
||||||
|
|
||||||
|
### Implementation Notes
|
||||||
|
|
||||||
|
**Token Estimation:**
|
||||||
|
- Use simple approximation: 1 token ≈ 4 characters
|
||||||
|
- Or integrate `gpt-tokenizer` for more accuracy
|
||||||
|
- Count: system prompts + user messages + assistant responses + tool outputs + tool calls
|
||||||
|
|
||||||
|
**Model Context Windows:**
|
||||||
|
- llama3.1, llama3.2: 8K tokens
|
||||||
|
- qwen2.5-coder: 32K tokens
|
||||||
|
- deepseek-coder: 16K tokens
|
||||||
|
- Default/unknown: 8K tokens
|
||||||
|
|
||||||
|
**Calculation:**
|
||||||
|
```tsx
|
||||||
|
const estimateTokens = (text: string): number => {
|
||||||
|
return Math.ceil(text.length / 4);
|
||||||
|
};
|
||||||
|
|
||||||
|
const calculateContextUsage = (messages: Message[], systemPrompt: string) => {
|
||||||
|
let total = estimateTokens(systemPrompt);
|
||||||
|
messages.forEach(msg => {
|
||||||
|
total += estimateTokens(msg.content);
|
||||||
|
if (msg.tool_calls) {
|
||||||
|
total += estimateTokens(JSON.stringify(msg.tool_calls));
|
||||||
|
}
|
||||||
|
});
|
||||||
|
return total;
|
||||||
|
};
|
||||||
|
```
|
||||||
|
|
||||||
|
**UI Placement:**
|
||||||
|
- Header area, near model selector
|
||||||
|
- Non-intrusive but always visible
|
||||||
|
- Optional tooltip with breakdown on hover
|
||||||
|
|
||||||
|
### Edge Cases
|
||||||
|
- Empty conversation: Show "0 / 8K"
|
||||||
|
- During streaming: Include partial content
|
||||||
|
- After clearing: Reset to 0
|
||||||
|
- Model change: Update context window limit
|
||||||
|
|
||||||
130
.storkit/specs/tech/STACK.md
Normal file
130
.storkit/specs/tech/STACK.md
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
# Tech Stack & Constraints
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
This project is a standalone Rust **web server binary** that serves a Vite/React frontend and exposes a **WebSocket API**. The built frontend assets are packaged with the binary (in a `frontend` directory) and served as static files. It functions as an **Agentic Code Assistant** capable of safely executing tools on the host system.
|
||||||
|
|
||||||
|
## Core Stack
|
||||||
|
* **Backend:** Rust (Web Server)
|
||||||
|
* **MSRV:** Stable (latest)
|
||||||
|
* **Framework:** Poem HTTP server with WebSocket support for streaming; HTTP APIs should use Poem OpenAPI (Swagger) for non-streaming endpoints.
|
||||||
|
* **Frontend:** TypeScript + React
|
||||||
|
* **Build Tool:** Vite
|
||||||
|
* **Package Manager:** npm
|
||||||
|
* **Styling:** CSS Modules or Tailwind (TBD - Defaulting to CSS Modules)
|
||||||
|
* **State Management:** React Context / Hooks
|
||||||
|
* **Chat UI:** Rendered Markdown with syntax highlighting.
|
||||||
|
|
||||||
|
## Agent Architecture
|
||||||
|
The application follows a **Tool-Use (Function Calling)** architecture:
|
||||||
|
1. **Frontend:** Collects user input and sends it to the LLM.
|
||||||
|
2. **LLM:** Decides to generate text OR request a **Tool Call** (e.g., `execute_shell`, `read_file`).
|
||||||
|
3. **Web Server Backend (The "Hand"):**
|
||||||
|
* Intercepts Tool Calls.
|
||||||
|
* Validates the request against the **Safety Policy**.
|
||||||
|
* Executes the native code (File I/O, Shell Process, Search).
|
||||||
|
* Returns the output (stdout/stderr/file content) to the LLM.
|
||||||
|
* **Streaming:** The backend sends real-time updates over WebSocket to keep the UI responsive during long-running Agent tasks.
|
||||||
|
|
||||||
|
## LLM Provider Abstraction
|
||||||
|
To support both Remote and Local models, the system implements a `ModelProvider` abstraction layer.
|
||||||
|
|
||||||
|
* **Strategy:**
|
||||||
|
* Abstract the differences between API formats (OpenAI-compatible vs Anthropic vs Gemini).
|
||||||
|
* Normalize "Tool Use" definitions, as each provider handles function calling schemas differently.
|
||||||
|
* **Supported Providers:**
|
||||||
|
* **Ollama:** Local inference (e.g., Llama 3, DeepSeek Coder) for privacy and offline usage.
|
||||||
|
* **Anthropic:** Claude 3.5 models (Sonnet, Haiku) via API for coding tasks (Story 12).
|
||||||
|
* **Provider Selection:**
|
||||||
|
* Automatic detection based on model name prefix:
|
||||||
|
* `claude-` → Anthropic API
|
||||||
|
* Otherwise → Ollama
|
||||||
|
* Single unified model dropdown with section headers ("Anthropic", "Ollama")
|
||||||
|
* **API Key Management:**
|
||||||
|
* Anthropic API key stored server-side and persisted securely
|
||||||
|
* On first use of Claude model, user prompted to enter API key
|
||||||
|
* Key persists across sessions (no re-entry needed)
|
||||||
|
|
||||||
|
## Tooling Capabilities
|
||||||
|
|
||||||
|
### 1. Filesystem (Native)
|
||||||
|
* **Scope:** Strictly limited to the user-selected `project_root`.
|
||||||
|
* **Operations:** Read, Write, List, Delete.
|
||||||
|
* **Constraint:** Modifications to `.git/` are strictly forbidden via file APIs (use Git tools instead).
|
||||||
|
|
||||||
|
### 2. Shell Execution
|
||||||
|
* **Library:** `tokio::process` for async execution.
|
||||||
|
* **Constraint:** We do **not** run an interactive shell (REPL). We run discrete, stateless commands.
|
||||||
|
* **Allowlist:** The agent may only execute specific binaries:
|
||||||
|
* `git`
|
||||||
|
* `cargo`, `rustc`, `rustfmt`, `clippy`
|
||||||
|
* `npm`, `node`, `yarn`, `pnpm`, `bun`
|
||||||
|
* `ls`, `find`, `grep` (if not using internal search)
|
||||||
|
* `mkdir`, `rm`, `touch`, `mv`, `cp`
|
||||||
|
|
||||||
|
### 3. Search & Navigation
|
||||||
|
* **Library:** `ignore` (by BurntSushi) + `grep` logic.
|
||||||
|
* **Behavior:**
|
||||||
|
* Must respect `.gitignore` files automatically.
|
||||||
|
* Must be performant (parallel traversal).
|
||||||
|
|
||||||
|
## Coding Standards
|
||||||
|
|
||||||
|
### Rust
|
||||||
|
* **Style:** `rustfmt` standard.
|
||||||
|
* **Linter:** `clippy` - Must pass with 0 warnings before merging.
|
||||||
|
* **Error Handling:** Custom `AppError` type deriving `thiserror`. All Commands return `Result<T, AppError>`.
|
||||||
|
* **Concurrency:** Heavy tools (Search, Shell) must run on `tokio` threads to avoid blocking the UI.
|
||||||
|
* **Quality Gates:**
|
||||||
|
* `cargo clippy --all-targets --all-features` must show 0 errors, 0 warnings
|
||||||
|
* `cargo check` must succeed
|
||||||
|
* `cargo nextest run` must pass all tests
|
||||||
|
* **Test Coverage:**
|
||||||
|
* Generate JSON report: `cargo llvm-cov nextest --no-clean --json --output-path .story_kit/coverage/server.json`
|
||||||
|
* Generate lcov report: `cargo llvm-cov report --lcov --output-path .story_kit/coverage/server.lcov`
|
||||||
|
* Reports are written to `.story_kit/coverage/` (excluded from git)
|
||||||
|
|
||||||
|
### TypeScript / React
|
||||||
|
* **Style:** Biome formatter (replaces Prettier/ESLint).
|
||||||
|
* **Linter:** Biome - Must pass with 0 errors, 0 warnings before merging.
|
||||||
|
* **Types:** Shared types with Rust (via code generation or manual interface matching) are preferred to ensure type safety across the WebSocket/HTTP API boundary.
|
||||||
|
* **Testing:** Vitest for unit/component tests; Playwright for end-to-end tests.
|
||||||
|
* **Quality Gates:**
|
||||||
|
* `npx @biomejs/biome check src/` must show 0 errors, 0 warnings
|
||||||
|
* `npm run build` must succeed
|
||||||
|
* `npm test` must pass
|
||||||
|
* `npm run test:e2e` must pass
|
||||||
|
* No `any` types allowed (use proper types or `unknown`)
|
||||||
|
* React keys must use stable IDs, not array indices
|
||||||
|
* All buttons must have explicit `type` attribute
|
||||||
|
|
||||||
|
## Libraries (Approved)
|
||||||
|
* **Rust:**
|
||||||
|
* `serde`, `serde_json`: Serialization.
|
||||||
|
* `ignore`: Fast recursive directory iteration respecting gitignore.
|
||||||
|
* `walkdir`: Simple directory traversal.
|
||||||
|
* `tokio`: Async runtime.
|
||||||
|
* `reqwest`: For LLM API calls (Anthropic, Ollama).
|
||||||
|
* `eventsource-stream`: For Server-Sent Events (Anthropic streaming).
|
||||||
|
* `uuid`: For unique message IDs.
|
||||||
|
* `chrono`: For timestamps.
|
||||||
|
* `poem`: HTTP server framework.
|
||||||
|
* `poem-openapi`: OpenAPI (Swagger) for non-streaming HTTP APIs.
|
||||||
|
* **JavaScript:**
|
||||||
|
* `react-markdown`: For rendering chat responses.
|
||||||
|
* `vitest`: Unit/component testing.
|
||||||
|
* `playwright`: End-to-end testing.
|
||||||
|
|
||||||
|
## Running the App (Worktrees & Ports)
|
||||||
|
|
||||||
|
Multiple instances can run simultaneously in different worktrees. To avoid port conflicts:
|
||||||
|
|
||||||
|
- **Backend:** Set `STORKIT_PORT` to a unique port (default is 3001). Example: `STORKIT_PORT=3002 cargo run`
|
||||||
|
- **Frontend:** Run `npm run dev` from `frontend/`. It auto-selects the next unused port. It reads `STORKIT_PORT` to know which backend to talk to, so export it before running: `export STORKIT_PORT=3002 && cd frontend && npm run dev`
|
||||||
|
|
||||||
|
When running in a worktree, use a port that won't conflict with the main instance (3001). Ports 3002+ are good choices.
|
||||||
|
|
||||||
|
## Safety & Sandbox
|
||||||
|
1. **Project Scope:** The application must strictly enforce that it does not read/write outside the `project_root` selected by the user.
|
||||||
|
2. **Human in the Loop:**
|
||||||
|
* Shell commands that modify state (non-readonly) should ideally require a UI confirmation (configurable).
|
||||||
|
* File writes must be confirmed or revertible.
|
||||||
1
.story_kit_port
Normal file
1
.story_kit_port
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3001
|
||||||
3
CLAUDE.md
Normal file
3
CLAUDE.md
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
Never chain shell commands with `&&`, `||`, or `;` in a single Bash call. The permission system validates the entire command string, and chained commands won't match allow rules like `Bash(git *)`. Use separate Bash calls instead — parallel calls work fine.
|
||||||
|
|
||||||
|
Read .storkit/README.md to see our dev process.
|
||||||
5694
Cargo.lock
generated
Normal file
5694
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
40
Cargo.toml
Normal file
40
Cargo.toml
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
[workspace]
|
||||||
|
members = ["server"]
|
||||||
|
resolver = "3"
|
||||||
|
|
||||||
|
[workspace.dependencies]
|
||||||
|
async-stream = "0.3"
|
||||||
|
async-trait = "0.1.89"
|
||||||
|
bytes = "1"
|
||||||
|
chrono = { version = "0.4.44", features = ["serde"] }
|
||||||
|
eventsource-stream = "0.2.3"
|
||||||
|
futures = "0.3"
|
||||||
|
homedir = "0.3.6"
|
||||||
|
ignore = "0.4.25"
|
||||||
|
mime_guess = "2"
|
||||||
|
notify = "8.2.0"
|
||||||
|
poem = { version = "3", features = ["websocket", "test"] }
|
||||||
|
poem-openapi = { version = "5", features = ["swagger-ui"] }
|
||||||
|
portable-pty = "0.9.0"
|
||||||
|
reqwest = { version = "0.13.2", features = ["json", "stream"] }
|
||||||
|
rust-embed = "8"
|
||||||
|
serde = { version = "1", features = ["derive"] }
|
||||||
|
serde_json = "1"
|
||||||
|
serde_urlencoded = "0.7"
|
||||||
|
serde_yaml = "0.9"
|
||||||
|
strip-ansi-escapes = "0.2"
|
||||||
|
tempfile = "3"
|
||||||
|
tokio = { version = "1", features = ["rt-multi-thread", "macros", "sync"] }
|
||||||
|
toml = "1.0.7"
|
||||||
|
uuid = { version = "1.22.0", features = ["v4", "serde"] }
|
||||||
|
tokio-tungstenite = "0.29.0"
|
||||||
|
walkdir = "2.5.0"
|
||||||
|
filetime = "0.2"
|
||||||
|
matrix-sdk = { version = "0.16.0", default-features = false, features = [
|
||||||
|
"rustls-tls",
|
||||||
|
"sqlite",
|
||||||
|
"e2e-encryption",
|
||||||
|
] }
|
||||||
|
pulldown-cmark = { version = "0.13.1", default-features = false, features = [
|
||||||
|
"html",
|
||||||
|
] }
|
||||||
182
README.md
Normal file
182
README.md
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
# Story Kit
|
||||||
|
|
||||||
|
This app runs as a single Rust web server binary that serves the Vite/React frontend and exposes APIs.
|
||||||
|
The frontend lives in the `frontend/` directory.
|
||||||
|
|
||||||
|
You can also run the frontend and backend separately in development (Vite dev server + Rust API).
|
||||||
|
|
||||||
|
## Running it in development
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the frontend
|
||||||
|
cd frontend
|
||||||
|
npm install
|
||||||
|
npm run dev
|
||||||
|
|
||||||
|
# In another terminal - run the server (serves embedded frontend/dist/)
|
||||||
|
cargo run
|
||||||
|
```
|
||||||
|
|
||||||
|
## Production
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Build the release binary (also builds the frontend via build.rs)
|
||||||
|
cargo build --release
|
||||||
|
|
||||||
|
# Run the server (serves embedded frontend/dist/)
|
||||||
|
./target/release/storkit
|
||||||
|
```
|
||||||
|
|
||||||
|
## Cross-Platform Distribution
|
||||||
|
|
||||||
|
Story Kit ships as a **single self-contained binary** with the React frontend embedded via
|
||||||
|
`rust-embed`. No Rust toolchain, Node.js, or extra libraries are required on the target machine.
|
||||||
|
|
||||||
|
### macOS
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Native build – no extra tools required beyond Rust + npm
|
||||||
|
make build-macos
|
||||||
|
# Output: target/release/storkit
|
||||||
|
|
||||||
|
# Verify only system frameworks are linked (Security.framework, libSystem.B.dylib, etc.)
|
||||||
|
otool -L target/release/storkit
|
||||||
|
```
|
||||||
|
|
||||||
|
### Linux (static x86_64, zero dynamic deps)
|
||||||
|
|
||||||
|
The Linux build uses the `x86_64-unknown-linux-musl` target to produce a fully static binary.
|
||||||
|
|
||||||
|
**Prerequisites:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install cross – a Rust cross-compilation tool backed by Docker
|
||||||
|
cargo install cross
|
||||||
|
|
||||||
|
# Ensure Docker Desktop (or Docker Engine) is running
|
||||||
|
```
|
||||||
|
|
||||||
|
**Build:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make build-linux
|
||||||
|
# Output: target/x86_64-unknown-linux-musl/release/storkit
|
||||||
|
|
||||||
|
# Verify the binary is statically linked
|
||||||
|
file target/x86_64-unknown-linux-musl/release/storkit
|
||||||
|
# Expected: ELF 64-bit LSB executable, x86-64, statically linked
|
||||||
|
|
||||||
|
ldd target/x86_64-unknown-linux-musl/release/storkit
|
||||||
|
# Expected: not a dynamic executable
|
||||||
|
```
|
||||||
|
|
||||||
|
**Running on any Linux x86_64 machine:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# No Rust, Node, glibc, or any other library needed – just copy and run
|
||||||
|
./storkit
|
||||||
|
```
|
||||||
|
|
||||||
|
## Releasing
|
||||||
|
|
||||||
|
Builds both macOS and Linux binaries locally, tags the repo, and publishes a Gitea release with a changelog.
|
||||||
|
|
||||||
|
**One-time setup:**
|
||||||
|
|
||||||
|
1. Create a Gitea API token at `https://code.crashlabs.io/user/settings/applications` (needs repository read/write)
|
||||||
|
2. Add it to `.env` (gitignored): `GITEA_TOKEN=your_token`
|
||||||
|
3. Ensure `cross` is installed (`cargo install cross`) and Docker is running
|
||||||
|
|
||||||
|
**To release:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
make release V=0.2.0
|
||||||
|
```
|
||||||
|
|
||||||
|
This will:
|
||||||
|
- Build macOS arm64 (native) and Linux amd64 (static musl via cross/Docker)
|
||||||
|
- Generate a changelog from commits since the last tag
|
||||||
|
- Tag the repo as `v0.2.0` and push the tag
|
||||||
|
- Create a Gitea release with both binaries and the changelog attached
|
||||||
|
|
||||||
|
## Testing
|
||||||
|
|
||||||
|
### Frontend Tests
|
||||||
|
|
||||||
|
The frontend uses **Vitest** for unit tests and **Playwright** for end-to-end tests.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd frontend
|
||||||
|
|
||||||
|
# Run unit tests
|
||||||
|
npm test
|
||||||
|
|
||||||
|
# Run end-to-end tests
|
||||||
|
npm run test:e2e
|
||||||
|
```
|
||||||
|
|
||||||
|
### Backend Tests
|
||||||
|
|
||||||
|
This project uses **nextest** for running tests and **cargo-llvm-cov** for code coverage.
|
||||||
|
|
||||||
|
### Install Tools
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cargo install cargo-nextest cargo-llvm-cov
|
||||||
|
```
|
||||||
|
|
||||||
|
### Run Tests
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Run all tests
|
||||||
|
cargo nextest run
|
||||||
|
|
||||||
|
# Run specific module
|
||||||
|
cargo nextest run search_files
|
||||||
|
|
||||||
|
# Run with verbose output
|
||||||
|
cargo nextest run --no-capture
|
||||||
|
```
|
||||||
|
|
||||||
|
### Generate Coverage
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# HTML report (opens in browser)
|
||||||
|
cargo llvm-cov nextest --html --open
|
||||||
|
|
||||||
|
# Terminal output
|
||||||
|
cargo llvm-cov nextest
|
||||||
|
|
||||||
|
# LCOV format (for CI)
|
||||||
|
cargo llvm-cov nextest --lcov --output-path lcov.info
|
||||||
|
|
||||||
|
# Clean coverage data
|
||||||
|
cargo llvm-cov clean
|
||||||
|
```
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
- **Nextest config**: `.config/nextest.toml`
|
||||||
|
- **Coverage output**: `target/llvm-cov/html/index.html`
|
||||||
|
|
||||||
|
## Current Coverage (search_files module)
|
||||||
|
|
||||||
|
```
|
||||||
|
Module: commands/search.rs
|
||||||
|
├── Region Coverage: 75.36%
|
||||||
|
├── Function Coverage: 69.05%
|
||||||
|
└── Line Coverage: 72.55%
|
||||||
|
```
|
||||||
|
|
||||||
|
### Available Test Profiles
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Development (default)
|
||||||
|
cargo nextest run
|
||||||
|
|
||||||
|
# CI with retries
|
||||||
|
cargo nextest run --profile ci
|
||||||
|
|
||||||
|
# Coverage optimized
|
||||||
|
cargo nextest run --profile coverage
|
||||||
|
```
|
||||||
34
biome.json
Normal file
34
biome.json
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{
|
||||||
|
"$schema": "https://biomejs.dev/schemas/2.3.15/schema.json",
|
||||||
|
"vcs": {
|
||||||
|
"enabled": true,
|
||||||
|
"clientKind": "git",
|
||||||
|
"useIgnoreFile": true
|
||||||
|
},
|
||||||
|
"files": {
|
||||||
|
"includes": ["frontend/**"]
|
||||||
|
},
|
||||||
|
"formatter": {
|
||||||
|
"enabled": true,
|
||||||
|
"indentStyle": "tab"
|
||||||
|
},
|
||||||
|
"linter": {
|
||||||
|
"enabled": true,
|
||||||
|
"rules": {
|
||||||
|
"recommended": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"javascript": {
|
||||||
|
"formatter": {
|
||||||
|
"quoteStyle": "double"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"assist": {
|
||||||
|
"enabled": true,
|
||||||
|
"actions": {
|
||||||
|
"source": {
|
||||||
|
"organizeImports": "on"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
11
docker/.dockerignore
Normal file
11
docker/.dockerignore
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# Docker build context exclusions
|
||||||
|
**/target/
|
||||||
|
**/node_modules/
|
||||||
|
frontend/dist/
|
||||||
|
.storkit/worktrees/
|
||||||
|
.storkit/logs/
|
||||||
|
.storkit/work/6_archived/
|
||||||
|
.git/
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
.DS_Store
|
||||||
132
docker/Dockerfile
Normal file
132
docker/Dockerfile
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
# Story Kit – single-container runtime
|
||||||
|
# All components (server, agents, web UI) run inside this container.
|
||||||
|
# The target project repo is bind-mounted at /workspace.
|
||||||
|
#
|
||||||
|
# Build: docker build -t storkit -f docker/Dockerfile .
|
||||||
|
# Run: docker compose -f docker/docker-compose.yml up
|
||||||
|
#
|
||||||
|
# Tested with: OrbStack (recommended on macOS), Docker Desktop (slower bind mounts)
|
||||||
|
|
||||||
|
FROM rust:1.90-bookworm AS base
|
||||||
|
|
||||||
|
# Clippy is needed at runtime for acceptance gates (cargo clippy)
|
||||||
|
RUN rustup component add clippy
|
||||||
|
|
||||||
|
# ── System deps ──────────────────────────────────────────────────────
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
git \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
build-essential \
|
||||||
|
pkg-config \
|
||||||
|
libssl-dev \
|
||||||
|
# cargo-nextest is a pre-built binary
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# ── Node.js 22.x (matches host) ─────────────────────────────────────
|
||||||
|
RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
|
||||||
|
&& apt-get install -y --no-install-recommends nodejs \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# ── cargo-nextest (test runner) ──────────────────────────────────────
|
||||||
|
RUN curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C /usr/local/bin
|
||||||
|
|
||||||
|
# ── Claude Code CLI ──────────────────────────────────────────────────
|
||||||
|
# Claude Code is distributed as an npm global package.
|
||||||
|
# The CLI binary is `claude`.
|
||||||
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
|
# ── Working directory ────────────────────────────────────────────────
|
||||||
|
# /app holds the storkit source (copied in at build time for the binary).
|
||||||
|
# /workspace is where the target project repo gets bind-mounted at runtime.
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# ── Build the storkit server binary ─────────────────────────────────
|
||||||
|
# Copy the full project tree so `cargo build` and `npm run build` (via
|
||||||
|
# build.rs) can produce the release binary with embedded frontend assets.
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Build frontend deps first (better layer caching)
|
||||||
|
RUN cd frontend && npm ci
|
||||||
|
|
||||||
|
# Build the release binary (build.rs runs npm run build for the frontend)
|
||||||
|
RUN cargo build --release \
|
||||||
|
&& cp target/release/storkit /usr/local/bin/storkit
|
||||||
|
|
||||||
|
# ── Runtime stage (smaller image) ───────────────────────────────────
|
||||||
|
FROM debian:bookworm-slim AS runtime
|
||||||
|
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
git \
|
||||||
|
curl \
|
||||||
|
ca-certificates \
|
||||||
|
libssl3 \
|
||||||
|
# build-essential (gcc/cc) needed at runtime for:
|
||||||
|
# - rebuild_and_restart (cargo build --release)
|
||||||
|
# - agent-driven cargo commands (clippy, test, build)
|
||||||
|
build-essential \
|
||||||
|
pkg-config \
|
||||||
|
libssl-dev \
|
||||||
|
# procps provides ps, needed by tests and process management
|
||||||
|
procps \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Node.js in runtime
|
||||||
|
RUN curl -fsSL https://deb.nodesource.com/setup_22.x | bash - \
|
||||||
|
&& apt-get install -y --no-install-recommends nodejs \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# Claude Code CLI in runtime
|
||||||
|
RUN npm install -g @anthropic-ai/claude-code
|
||||||
|
|
||||||
|
# Cargo and Rust toolchain needed at runtime for:
|
||||||
|
# - rebuild_and_restart (cargo build inside the container)
|
||||||
|
# - Agent-driven cargo commands (cargo clippy, cargo test, etc.)
|
||||||
|
COPY --from=base /usr/local/cargo /usr/local/cargo
|
||||||
|
COPY --from=base /usr/local/rustup /usr/local/rustup
|
||||||
|
ENV PATH="/usr/local/cargo/bin:${PATH}"
|
||||||
|
ENV RUSTUP_HOME="/usr/local/rustup"
|
||||||
|
ENV CARGO_HOME="/usr/local/cargo"
|
||||||
|
|
||||||
|
# cargo-nextest
|
||||||
|
COPY --from=base /usr/local/bin/cargo-nextest /usr/local/bin/cargo-nextest
|
||||||
|
|
||||||
|
# The storkit binary
|
||||||
|
COPY --from=base /usr/local/bin/storkit /usr/local/bin/storkit
|
||||||
|
|
||||||
|
# Copy the full source tree so rebuild_and_restart can do `cargo build`
|
||||||
|
# from the workspace root (CARGO_MANIFEST_DIR is baked into the binary).
|
||||||
|
# Alternative: mount the source as a volume.
|
||||||
|
COPY --from=base /app /app
|
||||||
|
|
||||||
|
# ── Non-root user ────────────────────────────────────────────────────
|
||||||
|
# Claude Code refuses --dangerously-skip-permissions (bypassPermissions)
|
||||||
|
# when running as root. Create a dedicated user so agents can launch.
|
||||||
|
RUN groupadd -r storkit \
|
||||||
|
&& useradd -r -g storkit -m -d /home/storkit storkit \
|
||||||
|
&& mkdir -p /home/storkit/.claude \
|
||||||
|
&& chown -R storkit:storkit /home/storkit \
|
||||||
|
&& chown -R storkit:storkit /usr/local/cargo /usr/local/rustup \
|
||||||
|
&& chown -R storkit:storkit /app \
|
||||||
|
&& mkdir -p /workspace/target /app/target \
|
||||||
|
&& chown storkit:storkit /workspace/target /app/target
|
||||||
|
|
||||||
|
# ── Entrypoint ───────────────────────────────────────────────────────
|
||||||
|
# Validates required env vars (GIT_USER_NAME, GIT_USER_EMAIL) and
|
||||||
|
# configures git identity before starting the server.
|
||||||
|
COPY docker/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||||
|
|
||||||
|
USER storkit
|
||||||
|
WORKDIR /workspace
|
||||||
|
|
||||||
|
# ── Ports ────────────────────────────────────────────────────────────
|
||||||
|
# Web UI + MCP server
|
||||||
|
EXPOSE 3001
|
||||||
|
|
||||||
|
# ── Volumes (defined in docker-compose.yml) ──────────────────────────
|
||||||
|
# /workspace – bind mount: target project repo
|
||||||
|
# /home/storkit/.claude – named volume: Claude Code sessions/state
|
||||||
|
# /usr/local/cargo/registry – named volume: cargo dependency cache
|
||||||
|
|
||||||
|
ENTRYPOINT ["entrypoint.sh"]
|
||||||
|
CMD ["storkit", "/workspace"]
|
||||||
118
docker/docker-compose.yml
Normal file
118
docker/docker-compose.yml
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
# Story Kit – single-container deployment
|
||||||
|
#
|
||||||
|
# Usage:
|
||||||
|
# # Set your API key and project path, then:
|
||||||
|
# ANTHROPIC_API_KEY=sk-ant-... PROJECT_PATH=/path/to/your/repo \
|
||||||
|
# docker compose -f docker/docker-compose.yml up
|
||||||
|
#
|
||||||
|
# OrbStack users: just install OrbStack and use `docker compose` normally.
|
||||||
|
# OrbStack's VirtioFS bind mount driver is significantly faster than
|
||||||
|
# Docker Desktop's default (see spike findings).
|
||||||
|
|
||||||
|
services:
|
||||||
|
storkit:
|
||||||
|
build:
|
||||||
|
context: ..
|
||||||
|
dockerfile: docker/Dockerfile
|
||||||
|
container_name: storkit
|
||||||
|
ports:
|
||||||
|
# Bind to localhost only — not exposed on all interfaces.
|
||||||
|
- "127.0.0.1:3001:3001"
|
||||||
|
environment:
|
||||||
|
# Optional: Anthropic API key. If unset, Claude Code falls back to
|
||||||
|
# OAuth credentials from `claude login` (e.g. Max subscription).
|
||||||
|
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY:-}
|
||||||
|
# Required: git identity for agent commits
|
||||||
|
- GIT_USER_NAME=${GIT_USER_NAME:?Set GIT_USER_NAME}
|
||||||
|
- GIT_USER_EMAIL=${GIT_USER_EMAIL:?Set GIT_USER_EMAIL}
|
||||||
|
# Optional: override the server port (default 3001)
|
||||||
|
- STORKIT_PORT=3001
|
||||||
|
# Optional: Matrix bot credentials (if using Matrix integration)
|
||||||
|
- MATRIX_HOMESERVER=${MATRIX_HOMESERVER:-}
|
||||||
|
- MATRIX_USER=${MATRIX_USER:-}
|
||||||
|
- MATRIX_PASSWORD=${MATRIX_PASSWORD:-}
|
||||||
|
# Optional: Slack webhook (if using Slack integration)
|
||||||
|
- SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN:-}
|
||||||
|
- SLACK_APP_TOKEN=${SLACK_APP_TOKEN:-}
|
||||||
|
volumes:
|
||||||
|
# The target project repo – bind-mounted from host.
|
||||||
|
# Changes made by agents inside the container are immediately
|
||||||
|
# visible on the host (and vice versa).
|
||||||
|
- ${PROJECT_PATH:?Set PROJECT_PATH}:/workspace
|
||||||
|
|
||||||
|
# Cargo registry cache – persists downloaded crates across
|
||||||
|
# container restarts so `cargo build` doesn't re-download.
|
||||||
|
- cargo-registry:/usr/local/cargo/registry
|
||||||
|
|
||||||
|
# Cargo git checkouts – persists git-based dependencies.
|
||||||
|
- cargo-git:/usr/local/cargo/git
|
||||||
|
|
||||||
|
# Claude Code state – persists session history, projects config,
|
||||||
|
# and conversation transcripts so --resume works across restarts.
|
||||||
|
- claude-state:/home/storkit/.claude
|
||||||
|
|
||||||
|
# Storkit source tree for rebuild_and_restart.
|
||||||
|
# The binary has CARGO_MANIFEST_DIR baked in at compile time
|
||||||
|
# pointing to /app/server, so the source must be at /app.
|
||||||
|
# This is COPY'd in the Dockerfile; mounting over it allows
|
||||||
|
# live source updates without rebuilding the image.
|
||||||
|
# Mount host source so rebuild_and_restart picks up live changes:
|
||||||
|
- ./..:/app
|
||||||
|
|
||||||
|
# Keep cargo build artifacts off the bind mount.
|
||||||
|
# Bind-mount directory traversal is ~23x slower than Docker volumes
|
||||||
|
# (confirmed in spike 329). Cargo stat-checks every file in target/
|
||||||
|
# on incremental builds — leaving it on the bind mount makes builds
|
||||||
|
# catastrophically slow (~12s just to traverse the tree).
|
||||||
|
- workspace-target:/workspace/target
|
||||||
|
- storkit-target:/app/target
|
||||||
|
|
||||||
|
# ── Security hardening ──────────────────────────────────────────
|
||||||
|
# Read-only root filesystem. Only explicitly mounted volumes and
|
||||||
|
# tmpfs paths are writable.
|
||||||
|
read_only: true
|
||||||
|
tmpfs:
|
||||||
|
- /tmp:size=512M,exec
|
||||||
|
- /home/storkit:size=512M,uid=999,gid=999,exec
|
||||||
|
|
||||||
|
# Drop all Linux capabilities, then add back only what's needed.
|
||||||
|
# SETUID/SETGID needed by Claude Code's PTY allocation (openpty).
|
||||||
|
cap_drop:
|
||||||
|
- ALL
|
||||||
|
cap_add:
|
||||||
|
- SETUID
|
||||||
|
- SETGID
|
||||||
|
|
||||||
|
# Prevent child processes from gaining new privileges via setuid,
|
||||||
|
# setgid, or other mechanisms.
|
||||||
|
security_opt:
|
||||||
|
- no-new-privileges:true
|
||||||
|
|
||||||
|
# Resource limits – cap the whole system.
|
||||||
|
# Adjust based on your machine. These are conservative defaults.
|
||||||
|
deploy:
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpus: "8"
|
||||||
|
memory: 24G
|
||||||
|
reservations:
|
||||||
|
cpus: "2"
|
||||||
|
memory: 4G
|
||||||
|
|
||||||
|
# Health check – verify the MCP endpoint responds
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "curl", "-sf", "http://localhost:3001/health"]
|
||||||
|
interval: 30s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 3
|
||||||
|
start_period: 10s
|
||||||
|
|
||||||
|
# Restart policy – restart on crash but not on manual stop
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
cargo-registry:
|
||||||
|
cargo-git:
|
||||||
|
claude-state:
|
||||||
|
workspace-target:
|
||||||
|
storkit-target:
|
||||||
24
docker/entrypoint.sh
Executable file
24
docker/entrypoint.sh
Executable file
@@ -0,0 +1,24 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
set -e
|
||||||
|
|
||||||
|
# ── Git identity ─────────────────────────────────────────────────────
|
||||||
|
# Agents commit code inside the container. Without a git identity,
|
||||||
|
# commits fail or use garbage defaults. Fail loudly at startup so the
|
||||||
|
# operator knows immediately.
|
||||||
|
if [ -z "$GIT_USER_NAME" ]; then
|
||||||
|
echo "FATAL: GIT_USER_NAME is not set. Export it in your environment or docker-compose.yml." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
if [ -z "$GIT_USER_EMAIL" ]; then
|
||||||
|
echo "FATAL: GIT_USER_EMAIL is not set. Export it in your environment or docker-compose.yml." >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Use GIT_AUTHOR/COMMITTER env vars instead of git config --global,
|
||||||
|
# so the root filesystem can stay read-only (no ~/.gitconfig write).
|
||||||
|
export GIT_AUTHOR_NAME="$GIT_USER_NAME"
|
||||||
|
export GIT_COMMITTER_NAME="$GIT_USER_NAME"
|
||||||
|
export GIT_AUTHOR_EMAIL="$GIT_USER_EMAIL"
|
||||||
|
export GIT_COMMITTER_EMAIL="$GIT_USER_EMAIL"
|
||||||
|
|
||||||
|
exec "$@"
|
||||||
14
frontend/.gitignore
vendored
Normal file
14
frontend/.gitignore
vendored
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
# Logs
|
||||||
|
logs
|
||||||
|
*.log
|
||||||
|
npm-debug.log*
|
||||||
|
yarn-debug.log*
|
||||||
|
yarn-error.log*
|
||||||
|
pnpm-debug.log*
|
||||||
|
lerna-debug.log*
|
||||||
|
|
||||||
|
node_modules
|
||||||
|
dist
|
||||||
|
dist-ssr
|
||||||
|
coverage
|
||||||
|
*.local
|
||||||
14
frontend/index.html
Normal file
14
frontend/index.html
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
<!doctype html>
|
||||||
|
<html lang="en">
|
||||||
|
<head>
|
||||||
|
<meta charset="UTF-8" />
|
||||||
|
<link rel="icon" type="image/svg+xml" href="/vite.svg" />
|
||||||
|
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||||
|
<title>Storkit</title>
|
||||||
|
</head>
|
||||||
|
|
||||||
|
<body>
|
||||||
|
<div id="root"></div>
|
||||||
|
<script type="module" src="/src/main.tsx"></script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
8408
frontend/package-lock.json
generated
Normal file
8408
frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
41
frontend/package.json
Normal file
41
frontend/package.json
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
{
|
||||||
|
"name": "living-spec-standalone",
|
||||||
|
"private": true,
|
||||||
|
"version": "0.4.1",
|
||||||
|
"type": "module",
|
||||||
|
"scripts": {
|
||||||
|
"dev": "vite",
|
||||||
|
"build": "tsc && vite build",
|
||||||
|
"preview": "vite preview",
|
||||||
|
"server": "cargo run --manifest-path server/Cargo.toml",
|
||||||
|
"test": "vitest run",
|
||||||
|
"test:unit": "vitest run",
|
||||||
|
"test:e2e": "playwright test",
|
||||||
|
"test:coverage": "vitest run --coverage"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"@types/react-syntax-highlighter": "^15.5.13",
|
||||||
|
"react": "^19.1.0",
|
||||||
|
"react-dom": "^19.1.0",
|
||||||
|
"react-markdown": "^10.1.0",
|
||||||
|
"react-syntax-highlighter": "^16.1.0"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"@biomejs/biome": "^2.4.2",
|
||||||
|
"@playwright/test": "^1.47.2",
|
||||||
|
"@testing-library/jest-dom": "^6.0.0",
|
||||||
|
"@testing-library/react": "^16.0.0",
|
||||||
|
"@testing-library/user-event": "^14.4.3",
|
||||||
|
"@types/node": "^25.0.0",
|
||||||
|
"@types/react": "^19.1.8",
|
||||||
|
"@types/react-dom": "^19.1.6",
|
||||||
|
"@vitejs/plugin-react": "^4.6.0",
|
||||||
|
"@vitest/coverage-v8": "^2.1.9",
|
||||||
|
"jest": "^29.0.0",
|
||||||
|
"jsdom": "^28.1.0",
|
||||||
|
"ts-jest": "^29.0.0",
|
||||||
|
"typescript": "~5.8.3",
|
||||||
|
"vite": "^5.4.21",
|
||||||
|
"vitest": "^2.1.4"
|
||||||
|
}
|
||||||
|
}
|
||||||
27
frontend/playwright.config.ts
Normal file
27
frontend/playwright.config.ts
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
import { defineConfig } from "@playwright/test";
|
||||||
|
import { dirname, resolve } from "node:path";
|
||||||
|
import { fileURLToPath } from "node:url";
|
||||||
|
|
||||||
|
const configDir = dirname(fileURLToPath(new URL(import.meta.url)));
|
||||||
|
const frontendRoot = resolve(configDir, ".");
|
||||||
|
|
||||||
|
export default defineConfig({
|
||||||
|
testDir: "./tests/e2e",
|
||||||
|
fullyParallel: true,
|
||||||
|
timeout: 30_000,
|
||||||
|
expect: {
|
||||||
|
timeout: 5_000,
|
||||||
|
},
|
||||||
|
use: {
|
||||||
|
baseURL: "http://127.0.0.1:41700",
|
||||||
|
trace: "on-first-retry",
|
||||||
|
},
|
||||||
|
webServer: {
|
||||||
|
command:
|
||||||
|
"pnpm exec vite --config vite.config.ts --host 127.0.0.1 --port 41700 --strictPort",
|
||||||
|
url: "http://127.0.0.1:41700/@vite/client",
|
||||||
|
reuseExistingServer: true,
|
||||||
|
timeout: 120_000,
|
||||||
|
cwd: frontendRoot,
|
||||||
|
},
|
||||||
|
});
|
||||||
1
frontend/public/vite.svg
Normal file
1
frontend/public/vite.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
|
||||||
|
After Width: | Height: | Size: 1.5 KiB |
238
frontend/src/App.css
Normal file
238
frontend/src/App.css
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
.logo.vite:hover {
|
||||||
|
filter: drop-shadow(0 0 2em #747bff);
|
||||||
|
}
|
||||||
|
|
||||||
|
.logo.react:hover {
|
||||||
|
filter: drop-shadow(0 0 2em #61dafb);
|
||||||
|
}
|
||||||
|
:root {
|
||||||
|
font-family: Inter, Avenir, Helvetica, Arial, sans-serif;
|
||||||
|
font-size: 16px;
|
||||||
|
line-height: 24px;
|
||||||
|
font-weight: 400;
|
||||||
|
|
||||||
|
color: #0f0f0f;
|
||||||
|
background-color: #f6f6f6;
|
||||||
|
|
||||||
|
font-synthesis: none;
|
||||||
|
text-rendering: optimizeLegibility;
|
||||||
|
-webkit-font-smoothing: antialiased;
|
||||||
|
-moz-osx-font-smoothing: grayscale;
|
||||||
|
-webkit-text-size-adjust: 100%;
|
||||||
|
}
|
||||||
|
|
||||||
|
.container {
|
||||||
|
margin: 0;
|
||||||
|
padding-top: 0;
|
||||||
|
height: 100vh;
|
||||||
|
overflow: hidden;
|
||||||
|
box-sizing: border-box;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.logo {
|
||||||
|
height: 6em;
|
||||||
|
padding: 1.5em;
|
||||||
|
will-change: filter;
|
||||||
|
transition: 0.75s;
|
||||||
|
}
|
||||||
|
|
||||||
|
.logo.tauri:hover {
|
||||||
|
filter: drop-shadow(0 0 2em #24c8db);
|
||||||
|
}
|
||||||
|
|
||||||
|
.row {
|
||||||
|
display: flex;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
a {
|
||||||
|
font-weight: 500;
|
||||||
|
color: #646cff;
|
||||||
|
text-decoration: inherit;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:hover {
|
||||||
|
color: #535bf2;
|
||||||
|
}
|
||||||
|
|
||||||
|
h1 {
|
||||||
|
text-align: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
input,
|
||||||
|
button {
|
||||||
|
border-radius: 8px;
|
||||||
|
border: 1px solid transparent;
|
||||||
|
padding: 0.6em 1.2em;
|
||||||
|
font-size: 1em;
|
||||||
|
font-weight: 500;
|
||||||
|
font-family: inherit;
|
||||||
|
color: #0f0f0f;
|
||||||
|
background-color: #ffffff;
|
||||||
|
transition: border-color 0.25s;
|
||||||
|
box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2);
|
||||||
|
}
|
||||||
|
|
||||||
|
button {
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
button:hover {
|
||||||
|
border-color: #396cd8;
|
||||||
|
}
|
||||||
|
button:active {
|
||||||
|
border-color: #396cd8;
|
||||||
|
background-color: #e8e8e8;
|
||||||
|
}
|
||||||
|
|
||||||
|
input,
|
||||||
|
button {
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
#greet-input {
|
||||||
|
margin-right: 5px;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (prefers-color-scheme: dark) {
|
||||||
|
:root {
|
||||||
|
color: #f6f6f6;
|
||||||
|
background-color: #2f2f2f;
|
||||||
|
}
|
||||||
|
|
||||||
|
a:hover {
|
||||||
|
color: #24c8db;
|
||||||
|
}
|
||||||
|
|
||||||
|
input,
|
||||||
|
button {
|
||||||
|
color: #ffffff;
|
||||||
|
background-color: #0f0f0f98;
|
||||||
|
}
|
||||||
|
button:active {
|
||||||
|
background-color: #0f0f0f69;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Collapsible tool output styling */
|
||||||
|
details summary {
|
||||||
|
cursor: pointer;
|
||||||
|
user-select: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
details summary::-webkit-details-marker {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
details[open] summary span:first-child {
|
||||||
|
transform: rotate(90deg);
|
||||||
|
display: inline-block;
|
||||||
|
transition: transform 0.2s ease;
|
||||||
|
}
|
||||||
|
|
||||||
|
details summary span:first-child {
|
||||||
|
transition: transform 0.2s ease;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Markdown body styling for dark theme */
|
||||||
|
.markdown-body {
|
||||||
|
color: #ececec;
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
.markdown-body code {
|
||||||
|
background: #2f2f2f;
|
||||||
|
padding: 2px 6px;
|
||||||
|
border-radius: 3px;
|
||||||
|
font-family: monospace;
|
||||||
|
}
|
||||||
|
|
||||||
|
.markdown-body pre {
|
||||||
|
background: #1a1a1a;
|
||||||
|
padding: 12px;
|
||||||
|
border-radius: 6px;
|
||||||
|
overflow-x: auto;
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
.markdown-body pre code {
|
||||||
|
background: transparent;
|
||||||
|
padding: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Syntax highlighter styling */
|
||||||
|
.markdown-body div[class*="language-"] {
|
||||||
|
margin: 0;
|
||||||
|
border-radius: 6px;
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
.markdown-body pre[class*="language-"] {
|
||||||
|
margin: 0;
|
||||||
|
padding: 12px;
|
||||||
|
background: #1a1a1a;
|
||||||
|
text-align: left;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hide scroll bars globally while maintaining scroll functionality */
|
||||||
|
/* Firefox */
|
||||||
|
* {
|
||||||
|
scrollbar-width: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Chrome, Safari, Edge */
|
||||||
|
*::-webkit-scrollbar {
|
||||||
|
display: none;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Ensure scroll functionality is maintained */
|
||||||
|
html,
|
||||||
|
body,
|
||||||
|
#root {
|
||||||
|
height: 100%;
|
||||||
|
margin: 0;
|
||||||
|
overflow: hidden;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Agent activity indicator pulse */
|
||||||
|
@keyframes pulse {
|
||||||
|
0%,
|
||||||
|
100% {
|
||||||
|
opacity: 1;
|
||||||
|
transform: scale(1);
|
||||||
|
}
|
||||||
|
50% {
|
||||||
|
opacity: 0.4;
|
||||||
|
transform: scale(0.85);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Agent lozenge appearance animation (simulates arriving from agents panel) */
|
||||||
|
@keyframes agentAppear {
|
||||||
|
from {
|
||||||
|
opacity: 0;
|
||||||
|
transform: translateY(-4px) scale(0.95);
|
||||||
|
}
|
||||||
|
to {
|
||||||
|
opacity: 1;
|
||||||
|
transform: translateY(0) scale(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Thinking/loading pulse for text */
|
||||||
|
.pulse {
|
||||||
|
animation: pulse 1.5s infinite;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Agent entry fade-out for completed/failed agents */
|
||||||
|
@keyframes agentFadeOut {
|
||||||
|
from {
|
||||||
|
opacity: 1;
|
||||||
|
}
|
||||||
|
to {
|
||||||
|
opacity: 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
366
frontend/src/App.test.tsx
Normal file
366
frontend/src/App.test.tsx
Normal file
@@ -0,0 +1,366 @@
|
|||||||
|
import { fireEvent, render, screen, waitFor } from "@testing-library/react";
|
||||||
|
import userEvent from "@testing-library/user-event";
|
||||||
|
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import { api } from "./api/client";
|
||||||
|
|
||||||
|
vi.mock("./api/client", () => {
|
||||||
|
const api = {
|
||||||
|
getCurrentProject: vi.fn(),
|
||||||
|
getKnownProjects: vi.fn(),
|
||||||
|
getHomeDirectory: vi.fn(),
|
||||||
|
openProject: vi.fn(),
|
||||||
|
closeProject: vi.fn(),
|
||||||
|
forgetKnownProject: vi.fn(),
|
||||||
|
listDirectoryAbsolute: vi.fn(),
|
||||||
|
getOllamaModels: vi.fn(),
|
||||||
|
getAnthropicApiKeyExists: vi.fn(),
|
||||||
|
getAnthropicModels: vi.fn(),
|
||||||
|
getModelPreference: vi.fn(),
|
||||||
|
setModelPreference: vi.fn(),
|
||||||
|
cancelChat: vi.fn(),
|
||||||
|
setAnthropicApiKey: vi.fn(),
|
||||||
|
};
|
||||||
|
class ChatWebSocket {
|
||||||
|
connect() {}
|
||||||
|
close() {}
|
||||||
|
sendChat() {}
|
||||||
|
cancel() {}
|
||||||
|
}
|
||||||
|
return { api, ChatWebSocket };
|
||||||
|
});
|
||||||
|
|
||||||
|
vi.mock("./api/workflow", () => {
|
||||||
|
return {
|
||||||
|
workflowApi: {
|
||||||
|
getAcceptance: vi.fn().mockResolvedValue({
|
||||||
|
can_accept: false,
|
||||||
|
reasons: [],
|
||||||
|
warning: null,
|
||||||
|
summary: { total: 0, passed: 0, failed: 0 },
|
||||||
|
missing_categories: [],
|
||||||
|
}),
|
||||||
|
getReviewQueueAll: vi.fn().mockResolvedValue({ stories: [] }),
|
||||||
|
getUpcomingStories: vi.fn().mockResolvedValue({ stories: [] }),
|
||||||
|
recordTests: vi.fn(),
|
||||||
|
ensureAcceptance: vi.fn(),
|
||||||
|
getReviewQueue: vi.fn(),
|
||||||
|
collectCoverage: vi.fn(),
|
||||||
|
recordCoverage: vi.fn(),
|
||||||
|
getStoryTodos: vi.fn().mockResolvedValue({ stories: [] }),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
});
|
||||||
|
|
||||||
|
const mockedApi = vi.mocked(api);
|
||||||
|
|
||||||
|
describe("App", () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.resetModules();
|
||||||
|
vi.clearAllMocks();
|
||||||
|
mockedApi.getCurrentProject.mockResolvedValue(null);
|
||||||
|
mockedApi.getKnownProjects.mockResolvedValue([]);
|
||||||
|
mockedApi.getHomeDirectory.mockResolvedValue("/home/user");
|
||||||
|
mockedApi.listDirectoryAbsolute.mockResolvedValue([]);
|
||||||
|
mockedApi.getOllamaModels.mockResolvedValue([]);
|
||||||
|
mockedApi.getAnthropicApiKeyExists.mockResolvedValue(false);
|
||||||
|
mockedApi.getAnthropicModels.mockResolvedValue([]);
|
||||||
|
mockedApi.getModelPreference.mockResolvedValue(null);
|
||||||
|
});
|
||||||
|
|
||||||
|
async function renderApp() {
|
||||||
|
const { default: App } = await import("./App");
|
||||||
|
return render(<App />);
|
||||||
|
}
|
||||||
|
|
||||||
|
it("calls getCurrentProject() on mount", async () => {
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.getCurrentProject).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("skips selection screen and shows workspace when server already has a project open", async () => {
|
||||||
|
mockedApi.getCurrentProject.mockResolvedValue("/home/user/myproject");
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.queryByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders the selection screen when no project is open", async () => {
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("populates path input with home directory", async () => {
|
||||||
|
mockedApi.getHomeDirectory.mockResolvedValue("/Users/dave");
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
expect(input.value).toBe("/Users/dave/");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("opens project and shows chat view", async () => {
|
||||||
|
mockedApi.openProject.mockResolvedValue("/home/user/myproject");
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
await userEvent.clear(input);
|
||||||
|
await userEvent.type(input, "/home/user/myproject");
|
||||||
|
|
||||||
|
const openButton = screen.getByRole("button", { name: /open project/i });
|
||||||
|
await userEvent.click(openButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.openProject).toHaveBeenCalledWith(
|
||||||
|
"/home/user/myproject",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows error when openProject fails", async () => {
|
||||||
|
mockedApi.openProject.mockRejectedValue(new Error("Path does not exist"));
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
await userEvent.clear(input);
|
||||||
|
await userEvent.type(input, "/bad/path");
|
||||||
|
|
||||||
|
const openButton = screen.getByRole("button", { name: /open project/i });
|
||||||
|
await userEvent.click(openButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByText(/Path does not exist/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows known projects list", async () => {
|
||||||
|
mockedApi.getKnownProjects.mockResolvedValue([
|
||||||
|
"/home/user/project1",
|
||||||
|
"/home/user/project2",
|
||||||
|
]);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTitle("/home/user/project1")).toBeInTheDocument();
|
||||||
|
expect(screen.getByTitle("/home/user/project2")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows error when path input is empty", async () => {
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
await userEvent.clear(input);
|
||||||
|
|
||||||
|
const openButton = screen.getByRole("button", { name: /open project/i });
|
||||||
|
await userEvent.click(openButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByText(/Please enter a project path/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls forgetKnownProject and removes project from list", async () => {
|
||||||
|
mockedApi.getKnownProjects.mockResolvedValue(["/home/user/project1"]);
|
||||||
|
mockedApi.forgetKnownProject.mockResolvedValue(true);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTitle("/home/user/project1")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const forgetButton = screen.getByRole("button", {
|
||||||
|
name: /Forget project1/i,
|
||||||
|
});
|
||||||
|
await userEvent.click(forgetButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.forgetKnownProject).toHaveBeenCalledWith(
|
||||||
|
"/home/user/project1",
|
||||||
|
);
|
||||||
|
expect(
|
||||||
|
screen.queryByTitle("/home/user/project1"),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes project and returns to selection screen", async () => {
|
||||||
|
mockedApi.openProject.mockResolvedValue("/home/user/myproject");
|
||||||
|
mockedApi.closeProject.mockResolvedValue(true);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
await userEvent.clear(input);
|
||||||
|
await userEvent.type(input, "/home/user/myproject");
|
||||||
|
|
||||||
|
const openButton = screen.getByRole("button", { name: /open project/i });
|
||||||
|
await userEvent.click(openButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.openProject).toHaveBeenCalledWith(
|
||||||
|
"/home/user/myproject",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Chat view should appear with close button
|
||||||
|
const closeButton = await waitFor(() => screen.getByText("✕"));
|
||||||
|
await userEvent.click(closeButton);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.closeProject).toHaveBeenCalled();
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles ArrowDown and ArrowUp keyboard navigation when suggestions are visible", async () => {
|
||||||
|
mockedApi.listDirectoryAbsolute.mockResolvedValue([
|
||||||
|
{ name: "projects", kind: "dir" },
|
||||||
|
{ name: "documents", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
// Wait for suggestions to appear after debounce
|
||||||
|
await waitFor(
|
||||||
|
() => {
|
||||||
|
expect(screen.getByText(/projects\//)).toBeInTheDocument();
|
||||||
|
},
|
||||||
|
{ timeout: 2000 },
|
||||||
|
);
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(/\/path\/to\/project/i);
|
||||||
|
|
||||||
|
// ArrowDown with matchList present — moves selection forward
|
||||||
|
fireEvent.keyDown(input, { key: "ArrowDown" });
|
||||||
|
|
||||||
|
// ArrowUp with matchList present — moves selection backward
|
||||||
|
fireEvent.keyDown(input, { key: "ArrowUp" });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles Tab keyboard navigation to accept suggestion", async () => {
|
||||||
|
mockedApi.listDirectoryAbsolute.mockResolvedValue([
|
||||||
|
{ name: "myrepo", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(
|
||||||
|
() => {
|
||||||
|
expect(screen.getByText(/myrepo\//)).toBeInTheDocument();
|
||||||
|
},
|
||||||
|
{ timeout: 2000 },
|
||||||
|
);
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(/\/path\/to\/project/i);
|
||||||
|
|
||||||
|
// Tab with matchList present — accepts the selected match
|
||||||
|
fireEvent.keyDown(input, { key: "Tab" });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles Escape key to close suggestions", async () => {
|
||||||
|
mockedApi.listDirectoryAbsolute.mockResolvedValue([
|
||||||
|
{ name: "workspace", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(
|
||||||
|
() => {
|
||||||
|
expect(screen.getByText(/workspace\//)).toBeInTheDocument();
|
||||||
|
},
|
||||||
|
{ timeout: 2000 },
|
||||||
|
);
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(/\/path\/to\/project/i);
|
||||||
|
|
||||||
|
// Escape closes suggestions
|
||||||
|
fireEvent.keyDown(input, { key: "Escape" });
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.queryByText(/workspace\//)).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles Enter key to trigger project open", async () => {
|
||||||
|
mockedApi.openProject.mockResolvedValue("/home/user/myproject");
|
||||||
|
|
||||||
|
await renderApp();
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByPlaceholderText(/\/path\/to\/project/i),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const input = screen.getByPlaceholderText(
|
||||||
|
/\/path\/to\/project/i,
|
||||||
|
) as HTMLInputElement;
|
||||||
|
await userEvent.clear(input);
|
||||||
|
await userEvent.type(input, "/home/user/myproject");
|
||||||
|
|
||||||
|
fireEvent.keyDown(input, { key: "Enter" });
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedApi.openProject).toHaveBeenCalledWith(
|
||||||
|
"/home/user/myproject",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
201
frontend/src/App.tsx
Normal file
201
frontend/src/App.tsx
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import { api } from "./api/client";
|
||||||
|
import { Chat } from "./components/Chat";
|
||||||
|
import { SelectionScreen } from "./components/selection/SelectionScreen";
|
||||||
|
import { usePathCompletion } from "./components/selection/usePathCompletion";
|
||||||
|
import "./App.css";
|
||||||
|
|
||||||
|
function App() {
|
||||||
|
const [projectPath, setProjectPath] = React.useState<string | null>(null);
|
||||||
|
const [_view, setView] = React.useState<"chat" | "token-usage">("chat");
|
||||||
|
const [isCheckingProject, setIsCheckingProject] = React.useState(true);
|
||||||
|
const [errorMsg, setErrorMsg] = React.useState<string | null>(null);
|
||||||
|
const [pathInput, setPathInput] = React.useState("");
|
||||||
|
const [isOpening, setIsOpening] = React.useState(false);
|
||||||
|
const [knownProjects, setKnownProjects] = React.useState<string[]>([]);
|
||||||
|
const [homeDir, setHomeDir] = React.useState<string | null>(null);
|
||||||
|
|
||||||
|
React.useEffect(() => {
|
||||||
|
api
|
||||||
|
.getCurrentProject()
|
||||||
|
.then((path) => {
|
||||||
|
if (path) {
|
||||||
|
setProjectPath(path);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch((error) => console.error(error))
|
||||||
|
.finally(() => {
|
||||||
|
setIsCheckingProject(false);
|
||||||
|
});
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
React.useEffect(() => {
|
||||||
|
api
|
||||||
|
.getKnownProjects()
|
||||||
|
.then((projects) => setKnownProjects(projects))
|
||||||
|
.catch((error) => console.error(error));
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
React.useEffect(() => {
|
||||||
|
let active = true;
|
||||||
|
api
|
||||||
|
.getHomeDirectory()
|
||||||
|
.then((home) => {
|
||||||
|
if (!active) return;
|
||||||
|
setHomeDir(home);
|
||||||
|
setPathInput((current) => {
|
||||||
|
if (current.trim()) {
|
||||||
|
return current;
|
||||||
|
}
|
||||||
|
const initial = home.endsWith("/") ? home : `${home}/`;
|
||||||
|
return initial;
|
||||||
|
});
|
||||||
|
})
|
||||||
|
.catch((error) => {
|
||||||
|
console.error(error);
|
||||||
|
});
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
active = false;
|
||||||
|
};
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const {
|
||||||
|
matchList,
|
||||||
|
selectedMatch,
|
||||||
|
suggestionTail,
|
||||||
|
completionError,
|
||||||
|
currentPartial,
|
||||||
|
setSelectedMatch,
|
||||||
|
acceptSelectedMatch,
|
||||||
|
acceptMatch,
|
||||||
|
closeSuggestions,
|
||||||
|
} = usePathCompletion({
|
||||||
|
pathInput,
|
||||||
|
setPathInput,
|
||||||
|
homeDir,
|
||||||
|
listDirectoryAbsolute: api.listDirectoryAbsolute,
|
||||||
|
});
|
||||||
|
|
||||||
|
async function openProject(path: string) {
|
||||||
|
const trimmedPath = path.trim();
|
||||||
|
if (!trimmedPath) {
|
||||||
|
setErrorMsg("Please enter a project path.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
setErrorMsg(null);
|
||||||
|
setIsOpening(true);
|
||||||
|
const confirmedPath = await api.openProject(trimmedPath);
|
||||||
|
setProjectPath(confirmedPath);
|
||||||
|
} catch (e) {
|
||||||
|
console.error(e);
|
||||||
|
const message =
|
||||||
|
e instanceof Error
|
||||||
|
? e.message
|
||||||
|
: typeof e === "string"
|
||||||
|
? e
|
||||||
|
: "An error occurred opening the project.";
|
||||||
|
|
||||||
|
setErrorMsg(message);
|
||||||
|
} finally {
|
||||||
|
setIsOpening(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function handleOpen() {
|
||||||
|
void openProject(pathInput);
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleForgetProject(path: string) {
|
||||||
|
try {
|
||||||
|
await api.forgetKnownProject(path);
|
||||||
|
setKnownProjects((prev) => prev.filter((p) => p !== path));
|
||||||
|
} catch (error) {
|
||||||
|
console.error(error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function closeProject() {
|
||||||
|
try {
|
||||||
|
await api.closeProject();
|
||||||
|
setProjectPath(null);
|
||||||
|
setView("chat");
|
||||||
|
} catch (e) {
|
||||||
|
console.error(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function handlePathInputKeyDown(
|
||||||
|
event: React.KeyboardEvent<HTMLInputElement>,
|
||||||
|
) {
|
||||||
|
if (event.key === "ArrowDown") {
|
||||||
|
if (matchList.length > 0) {
|
||||||
|
event.preventDefault();
|
||||||
|
setSelectedMatch((selectedMatch + 1) % matchList.length);
|
||||||
|
}
|
||||||
|
} else if (event.key === "ArrowUp") {
|
||||||
|
if (matchList.length > 0) {
|
||||||
|
event.preventDefault();
|
||||||
|
setSelectedMatch(
|
||||||
|
(selectedMatch - 1 + matchList.length) % matchList.length,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else if (event.key === "Tab") {
|
||||||
|
if (matchList.length > 0) {
|
||||||
|
event.preventDefault();
|
||||||
|
acceptSelectedMatch();
|
||||||
|
}
|
||||||
|
} else if (event.key === "Escape") {
|
||||||
|
event.preventDefault();
|
||||||
|
closeSuggestions();
|
||||||
|
} else if (event.key === "Enter") {
|
||||||
|
handleOpen();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (isCheckingProject) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<main
|
||||||
|
className="container"
|
||||||
|
style={{ height: "100vh", padding: 0, maxWidth: "100%" }}
|
||||||
|
>
|
||||||
|
{!projectPath ? (
|
||||||
|
<SelectionScreen
|
||||||
|
knownProjects={knownProjects}
|
||||||
|
onOpenProject={openProject}
|
||||||
|
onForgetProject={handleForgetProject}
|
||||||
|
pathInput={pathInput}
|
||||||
|
homeDir={homeDir}
|
||||||
|
onPathInputChange={setPathInput}
|
||||||
|
onPathInputKeyDown={handlePathInputKeyDown}
|
||||||
|
isOpening={isOpening}
|
||||||
|
suggestionTail={suggestionTail}
|
||||||
|
matchList={matchList}
|
||||||
|
selectedMatch={selectedMatch}
|
||||||
|
onSelectMatch={setSelectedMatch}
|
||||||
|
onAcceptMatch={acceptMatch}
|
||||||
|
onCloseSuggestions={closeSuggestions}
|
||||||
|
completionError={completionError}
|
||||||
|
currentPartial={currentPartial}
|
||||||
|
/>
|
||||||
|
) : (
|
||||||
|
<div className="workspace" style={{ height: "100%" }}>
|
||||||
|
<Chat projectPath={projectPath} onCloseProject={closeProject} />
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{errorMsg && (
|
||||||
|
<div className="error-message" style={{ marginTop: "20px" }}>
|
||||||
|
<p style={{ color: "red" }}>Error: {errorMsg}</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</main>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export default App;
|
||||||
387
frontend/src/api/agents.test.ts
Normal file
387
frontend/src/api/agents.test.ts
Normal file
@@ -0,0 +1,387 @@
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import type { AgentConfigInfo, AgentEvent, AgentInfo } from "./agents";
|
||||||
|
import { agentsApi, subscribeAgentStream } from "./agents";
|
||||||
|
|
||||||
|
const mockFetch = vi.fn();
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.stubGlobal("fetch", mockFetch);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
function okResponse(body: unknown) {
|
||||||
|
return new Response(JSON.stringify(body), {
|
||||||
|
status: 200,
|
||||||
|
headers: { "Content-Type": "application/json" },
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function errorResponse(status: number, text: string) {
|
||||||
|
return new Response(text, { status });
|
||||||
|
}
|
||||||
|
|
||||||
|
const sampleAgent: AgentInfo = {
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: null,
|
||||||
|
base_branch: null,
|
||||||
|
log_session_id: null,
|
||||||
|
};
|
||||||
|
|
||||||
|
const sampleConfig: AgentConfigInfo = {
|
||||||
|
name: "coder",
|
||||||
|
role: "engineer",
|
||||||
|
stage: "coder",
|
||||||
|
model: "claude-sonnet-4-6",
|
||||||
|
allowed_tools: null,
|
||||||
|
max_turns: null,
|
||||||
|
max_budget_usd: null,
|
||||||
|
};
|
||||||
|
|
||||||
|
// ── agentsApi ────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
describe("agentsApi", () => {
|
||||||
|
describe("startAgent", () => {
|
||||||
|
it("sends POST to /agents/start with story_id", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(sampleAgent));
|
||||||
|
|
||||||
|
const result = await agentsApi.startAgent("42_story_test");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents/start",
|
||||||
|
expect.objectContaining({
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: undefined,
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
expect(result).toEqual(sampleAgent);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("sends POST with optional agent_name", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(sampleAgent));
|
||||||
|
|
||||||
|
await agentsApi.startAgent("42_story_test", "coder");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents/start",
|
||||||
|
expect.objectContaining({
|
||||||
|
body: JSON.stringify({
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder",
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses custom baseUrl when provided", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(sampleAgent));
|
||||||
|
|
||||||
|
await agentsApi.startAgent(
|
||||||
|
"42_story_test",
|
||||||
|
undefined,
|
||||||
|
"http://localhost:3002/api",
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"http://localhost:3002/api/agents/start",
|
||||||
|
expect.objectContaining({ method: "POST" }),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("stopAgent", () => {
|
||||||
|
it("sends POST to /agents/stop with story_id and agent_name", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(true));
|
||||||
|
|
||||||
|
const result = await agentsApi.stopAgent("42_story_test", "coder");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents/stop",
|
||||||
|
expect.objectContaining({
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder",
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
expect(result).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses custom baseUrl when provided", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(false));
|
||||||
|
|
||||||
|
await agentsApi.stopAgent(
|
||||||
|
"42_story_test",
|
||||||
|
"coder",
|
||||||
|
"http://localhost:3002/api",
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"http://localhost:3002/api/agents/stop",
|
||||||
|
expect.objectContaining({ method: "POST" }),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("listAgents", () => {
|
||||||
|
it("sends GET to /agents and returns agent list", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([sampleAgent]));
|
||||||
|
|
||||||
|
const result = await agentsApi.listAgents();
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents",
|
||||||
|
expect.objectContaining({}),
|
||||||
|
);
|
||||||
|
expect(result).toEqual([sampleAgent]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns empty array when no agents running", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([]));
|
||||||
|
|
||||||
|
const result = await agentsApi.listAgents();
|
||||||
|
expect(result).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses custom baseUrl when provided", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([]));
|
||||||
|
|
||||||
|
await agentsApi.listAgents("http://localhost:3002/api");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"http://localhost:3002/api/agents",
|
||||||
|
expect.objectContaining({}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("getAgentConfig", () => {
|
||||||
|
it("sends GET to /agents/config and returns config list", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([sampleConfig]));
|
||||||
|
|
||||||
|
const result = await agentsApi.getAgentConfig();
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents/config",
|
||||||
|
expect.objectContaining({}),
|
||||||
|
);
|
||||||
|
expect(result).toEqual([sampleConfig]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses custom baseUrl when provided", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([sampleConfig]));
|
||||||
|
|
||||||
|
await agentsApi.getAgentConfig("http://localhost:3002/api");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"http://localhost:3002/api/agents/config",
|
||||||
|
expect.objectContaining({}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("reloadConfig", () => {
|
||||||
|
it("sends POST to /agents/config/reload", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([sampleConfig]));
|
||||||
|
|
||||||
|
const result = await agentsApi.reloadConfig();
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/agents/config/reload",
|
||||||
|
expect.objectContaining({ method: "POST" }),
|
||||||
|
);
|
||||||
|
expect(result).toEqual([sampleConfig]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses custom baseUrl when provided", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse([]));
|
||||||
|
|
||||||
|
await agentsApi.reloadConfig("http://localhost:3002/api");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"http://localhost:3002/api/agents/config/reload",
|
||||||
|
expect.objectContaining({ method: "POST" }),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("error handling", () => {
|
||||||
|
it("throws on non-ok response with body text", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(errorResponse(404, "agent not found"));
|
||||||
|
|
||||||
|
await expect(agentsApi.listAgents()).rejects.toThrow("agent not found");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("throws with status code when no body", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(errorResponse(500, ""));
|
||||||
|
|
||||||
|
await expect(agentsApi.listAgents()).rejects.toThrow(
|
||||||
|
"Request failed (500)",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// ── subscribeAgentStream ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
interface MockESInstance {
|
||||||
|
url: string;
|
||||||
|
onmessage: ((e: { data: string }) => void) | null;
|
||||||
|
onerror: ((e: Event) => void) | null;
|
||||||
|
close: ReturnType<typeof vi.fn>;
|
||||||
|
simulateMessage: (data: unknown) => void;
|
||||||
|
simulateError: (e: Event) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeMockEventSource() {
|
||||||
|
const instances: MockESInstance[] = [];
|
||||||
|
|
||||||
|
class MockEventSource {
|
||||||
|
onmessage: ((e: { data: string }) => void) | null = null;
|
||||||
|
onerror: ((e: Event) => void) | null = null;
|
||||||
|
close = vi.fn();
|
||||||
|
|
||||||
|
constructor(public url: string) {
|
||||||
|
instances.push(this as unknown as MockESInstance);
|
||||||
|
}
|
||||||
|
|
||||||
|
simulateMessage(data: unknown) {
|
||||||
|
this.onmessage?.({ data: JSON.stringify(data) });
|
||||||
|
}
|
||||||
|
|
||||||
|
simulateError(e: Event) {
|
||||||
|
this.onerror?.(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { MockEventSource, instances };
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("subscribeAgentStream", () => {
|
||||||
|
let instances: MockESInstance[];
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
const { MockEventSource, instances: inst } = makeMockEventSource();
|
||||||
|
instances = inst;
|
||||||
|
vi.stubGlobal("EventSource", MockEventSource);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("creates an EventSource with encoded story and agent in URL", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
expect(instances).toHaveLength(1);
|
||||||
|
expect(instances[0].url).toContain(
|
||||||
|
`/agents/${encodeURIComponent("42_story_test")}/${encodeURIComponent("coder")}/stream`,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onEvent when a message is received", () => {
|
||||||
|
const onEvent = vi.fn();
|
||||||
|
subscribeAgentStream("42_story_test", "coder", onEvent);
|
||||||
|
|
||||||
|
const event: AgentEvent = { type: "output", text: "hello" };
|
||||||
|
instances[0].simulateMessage(event);
|
||||||
|
|
||||||
|
expect(onEvent).toHaveBeenCalledWith(event);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes EventSource on 'done' type event", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
instances[0].simulateMessage({ type: "done" });
|
||||||
|
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes EventSource on 'error' type event", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
instances[0].simulateMessage({
|
||||||
|
type: "error",
|
||||||
|
message: "something failed",
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes EventSource on status=stopped event", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
instances[0].simulateMessage({ type: "status", status: "stopped" });
|
||||||
|
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not close on status=running event", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
instances[0].simulateMessage({ type: "status", status: "running" });
|
||||||
|
|
||||||
|
expect(instances[0].close).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not close on 'output' event", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
instances[0].simulateMessage({ type: "output", text: "building..." });
|
||||||
|
|
||||||
|
expect(instances[0].close).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onError and closes on EventSource onerror", () => {
|
||||||
|
const onError = vi.fn();
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn(), onError);
|
||||||
|
|
||||||
|
const err = new Event("error");
|
||||||
|
instances[0].simulateError(err);
|
||||||
|
|
||||||
|
expect(onError).toHaveBeenCalledWith(err);
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes EventSource when onError is not provided", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
const err = new Event("error");
|
||||||
|
instances[0].simulateError(err);
|
||||||
|
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes EventSource when cleanup function is called", () => {
|
||||||
|
const cleanup = subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
cleanup();
|
||||||
|
|
||||||
|
expect(instances[0].close).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles malformed JSON without throwing", () => {
|
||||||
|
subscribeAgentStream("42_story_test", "coder", vi.fn());
|
||||||
|
|
||||||
|
expect(() => {
|
||||||
|
instances[0].onmessage?.({ data: "{ not valid json" });
|
||||||
|
}).not.toThrow();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("delivers multiple events before a terminal event", () => {
|
||||||
|
const onEvent = vi.fn();
|
||||||
|
subscribeAgentStream("42_story_test", "coder", onEvent);
|
||||||
|
|
||||||
|
instances[0].simulateMessage({ type: "output", text: "line 1" });
|
||||||
|
instances[0].simulateMessage({ type: "output", text: "line 2" });
|
||||||
|
instances[0].simulateMessage({ type: "done" });
|
||||||
|
|
||||||
|
expect(onEvent).toHaveBeenCalledTimes(3);
|
||||||
|
expect(instances[0].close).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
── frontend/src/api/agents.ts · new file · 162 lines (@@ -0,0 +1,162 @@) ──
|
|||||||
|
export type AgentStatusValue = "pending" | "running" | "completed" | "failed";
|
||||||
|
|
||||||
|
export interface AgentInfo {
|
||||||
|
story_id: string;
|
||||||
|
agent_name: string;
|
||||||
|
status: AgentStatusValue;
|
||||||
|
session_id: string | null;
|
||||||
|
worktree_path: string | null;
|
||||||
|
base_branch: string | null;
|
||||||
|
log_session_id: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface AgentEvent {
|
||||||
|
type:
|
||||||
|
| "status"
|
||||||
|
| "output"
|
||||||
|
| "thinking"
|
||||||
|
| "agent_json"
|
||||||
|
| "done"
|
||||||
|
| "error"
|
||||||
|
| "warning";
|
||||||
|
story_id?: string;
|
||||||
|
agent_name?: string;
|
||||||
|
status?: string;
|
||||||
|
text?: string;
|
||||||
|
data?: unknown;
|
||||||
|
session_id?: string | null;
|
||||||
|
message?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface AgentConfigInfo {
|
||||||
|
name: string;
|
||||||
|
role: string;
|
||||||
|
stage: string | null;
|
||||||
|
model: string | null;
|
||||||
|
allowed_tools: string[] | null;
|
||||||
|
max_turns: number | null;
|
||||||
|
max_budget_usd: number | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
const DEFAULT_API_BASE = "/api";
|
||||||
|
|
||||||
|
function buildApiUrl(path: string, baseUrl = DEFAULT_API_BASE): string {
|
||||||
|
return `${baseUrl}${path}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function requestJson<T>(
|
||||||
|
path: string,
|
||||||
|
options: RequestInit = {},
|
||||||
|
baseUrl = DEFAULT_API_BASE,
|
||||||
|
): Promise<T> {
|
||||||
|
const res = await fetch(buildApiUrl(path, baseUrl), {
|
||||||
|
headers: {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
...(options.headers ?? {}),
|
||||||
|
},
|
||||||
|
...options,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!res.ok) {
|
||||||
|
const text = await res.text();
|
||||||
|
throw new Error(text || `Request failed (${res.status})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.json() as Promise<T>;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const agentsApi = {
|
||||||
|
startAgent(storyId: string, agentName?: string, baseUrl?: string) {
|
||||||
|
return requestJson<AgentInfo>(
|
||||||
|
"/agents/start",
|
||||||
|
{
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({
|
||||||
|
story_id: storyId,
|
||||||
|
agent_name: agentName,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
baseUrl,
|
||||||
|
);
|
||||||
|
},
|
||||||
|
|
||||||
|
stopAgent(storyId: string, agentName: string, baseUrl?: string) {
|
||||||
|
return requestJson<boolean>(
|
||||||
|
"/agents/stop",
|
||||||
|
{
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({
|
||||||
|
story_id: storyId,
|
||||||
|
agent_name: agentName,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
baseUrl,
|
||||||
|
);
|
||||||
|
},
|
||||||
|
|
||||||
|
listAgents(baseUrl?: string) {
|
||||||
|
return requestJson<AgentInfo[]>("/agents", {}, baseUrl);
|
||||||
|
},
|
||||||
|
|
||||||
|
getAgentConfig(baseUrl?: string) {
|
||||||
|
return requestJson<AgentConfigInfo[]>("/agents/config", {}, baseUrl);
|
||||||
|
},
|
||||||
|
|
||||||
|
reloadConfig(baseUrl?: string) {
|
||||||
|
return requestJson<AgentConfigInfo[]>(
|
||||||
|
"/agents/config/reload",
|
||||||
|
{ method: "POST" },
|
||||||
|
baseUrl,
|
||||||
|
);
|
||||||
|
},
|
||||||
|
|
||||||
|
getAgentOutput(storyId: string, agentName: string, baseUrl?: string) {
|
||||||
|
return requestJson<{ output: string }>(
|
||||||
|
`/agents/${encodeURIComponent(storyId)}/${encodeURIComponent(agentName)}/output`,
|
||||||
|
{},
|
||||||
|
baseUrl,
|
||||||
|
);
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Subscribe to SSE events for a running agent.
|
||||||
|
* Returns a cleanup function to close the connection.
|
||||||
|
*/
|
||||||
|
export function subscribeAgentStream(
|
||||||
|
storyId: string,
|
||||||
|
agentName: string,
|
||||||
|
onEvent: (event: AgentEvent) => void,
|
||||||
|
onError?: (error: Event) => void,
|
||||||
|
): () => void {
|
||||||
|
const url = `/agents/${encodeURIComponent(storyId)}/${encodeURIComponent(agentName)}/stream`;
|
||||||
|
|
||||||
|
const eventSource = new EventSource(url);
|
||||||
|
|
||||||
|
eventSource.onmessage = (e) => {
|
||||||
|
try {
|
||||||
|
const data = JSON.parse(e.data) as AgentEvent;
|
||||||
|
onEvent(data);
|
||||||
|
|
||||||
|
// Close on terminal events
|
||||||
|
if (
|
||||||
|
data.type === "done" ||
|
||||||
|
data.type === "error" ||
|
||||||
|
(data.type === "status" && data.status === "stopped")
|
||||||
|
) {
|
||||||
|
eventSource.close();
|
||||||
|
}
|
||||||
|
} catch (err) {
|
||||||
|
console.error("Failed to parse agent event:", err);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
eventSource.onerror = (e) => {
|
||||||
|
onError?.(e);
|
||||||
|
eventSource.close();
|
||||||
|
};
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
eventSource.close();
|
||||||
|
};
|
||||||
|
}
|
||||||
── frontend/src/api/client.test.ts · new file · 433 lines (@@ -0,0 +1,433 @@) ──
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import { api, ChatWebSocket, resolveWsHost } from "./client";
|
||||||
|
|
||||||
|
const mockFetch = vi.fn();
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.stubGlobal("fetch", mockFetch);
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
function okResponse(body: unknown) {
|
||||||
|
return new Response(JSON.stringify(body), {
|
||||||
|
status: 200,
|
||||||
|
headers: { "Content-Type": "application/json" },
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function errorResponse(status: number, text: string) {
|
||||||
|
return new Response(text, { status });
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("api client", () => {
|
||||||
|
describe("getCurrentProject", () => {
|
||||||
|
it("sends GET to /project", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse("/home/user/project"));
|
||||||
|
|
||||||
|
const result = await api.getCurrentProject();
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/project",
|
||||||
|
expect.objectContaining({}),
|
||||||
|
);
|
||||||
|
expect(result).toBe("/home/user/project");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns null when no project open", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(null));
|
||||||
|
|
||||||
|
const result = await api.getCurrentProject();
|
||||||
|
expect(result).toBeNull();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("openProject", () => {
|
||||||
|
it("sends POST with path", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse("/home/user/project"));
|
||||||
|
|
||||||
|
await api.openProject("/home/user/project");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/project",
|
||||||
|
expect.objectContaining({
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({ path: "/home/user/project" }),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("closeProject", () => {
|
||||||
|
it("sends DELETE to /project", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(true));
|
||||||
|
|
||||||
|
await api.closeProject();
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/project",
|
||||||
|
expect.objectContaining({ method: "DELETE" }),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("getKnownProjects", () => {
|
||||||
|
it("returns array of project paths", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(okResponse(["/a", "/b"]));
|
||||||
|
|
||||||
|
const result = await api.getKnownProjects();
|
||||||
|
expect(result).toEqual(["/a", "/b"]);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("error handling", () => {
|
||||||
|
it("throws on non-ok response with body text", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(errorResponse(404, "Not found"));
|
||||||
|
|
||||||
|
await expect(api.getCurrentProject()).rejects.toThrow("Not found");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("throws with status code when no body", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(errorResponse(500, ""));
|
||||||
|
|
||||||
|
await expect(api.getCurrentProject()).rejects.toThrow(
|
||||||
|
"Request failed (500)",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("searchFiles", () => {
|
||||||
|
it("sends POST with query", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(
|
||||||
|
okResponse([{ path: "src/main.rs", matches: 1 }]),
|
||||||
|
);
|
||||||
|
|
||||||
|
const result = await api.searchFiles("hello");
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/fs/search",
|
||||||
|
expect.objectContaining({
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({ query: "hello" }),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
expect(result).toHaveLength(1);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("execShell", () => {
|
||||||
|
it("sends POST with command and args", async () => {
|
||||||
|
mockFetch.mockResolvedValueOnce(
|
||||||
|
okResponse({ stdout: "output", stderr: "", exit_code: 0 }),
|
||||||
|
);
|
||||||
|
|
||||||
|
const result = await api.execShell("ls", ["-la"]);
|
||||||
|
|
||||||
|
expect(mockFetch).toHaveBeenCalledWith(
|
||||||
|
"/api/shell/exec",
|
||||||
|
expect.objectContaining({
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify({ command: "ls", args: ["-la"] }),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
expect(result.exit_code).toBe(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("resolveWsHost", () => {
|
||||||
|
it("uses env port in dev mode", () => {
|
||||||
|
expect(resolveWsHost(true, "4200", "example.com")).toBe("127.0.0.1:4200");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("defaults to 3001 in dev mode when no env port", () => {
|
||||||
|
expect(resolveWsHost(true, undefined, "example.com")).toBe(
|
||||||
|
"127.0.0.1:3001",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses location host in production", () => {
|
||||||
|
expect(resolveWsHost(false, "4200", "myapp.com:8080")).toBe(
|
||||||
|
"myapp.com:8080",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// ── ChatWebSocket reconnect tests ───────────────────────────────────────────
|
||||||
|
|
||||||
|
interface MockWsInstance {
|
||||||
|
onopen: (() => void) | null;
|
||||||
|
onclose: (() => void) | null;
|
||||||
|
onmessage: ((e: { data: string }) => void) | null;
|
||||||
|
onerror: (() => void) | null;
|
||||||
|
readyState: number;
|
||||||
|
sentMessages: string[];
|
||||||
|
send: (data: string) => void;
|
||||||
|
close: () => void;
|
||||||
|
simulateClose: () => void;
|
||||||
|
simulateMessage: (data: Record<string, unknown>) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
function makeMockWebSocket() {
|
||||||
|
const instances: MockWsInstance[] = [];
|
||||||
|
|
||||||
|
class MockWebSocket {
|
||||||
|
static readonly CONNECTING = 0;
|
||||||
|
static readonly OPEN = 1;
|
||||||
|
static readonly CLOSING = 2;
|
||||||
|
static readonly CLOSED = 3;
|
||||||
|
|
||||||
|
onopen: (() => void) | null = null;
|
||||||
|
onclose: (() => void) | null = null;
|
||||||
|
onmessage: ((e: { data: string }) => void) | null = null;
|
||||||
|
onerror: (() => void) | null = null;
|
||||||
|
readyState = 0;
|
||||||
|
sentMessages: string[] = [];
|
||||||
|
|
||||||
|
constructor(_url: string) {
|
||||||
|
instances.push(this as unknown as MockWsInstance);
|
||||||
|
}
|
||||||
|
|
||||||
|
send(data: string) {
|
||||||
|
this.sentMessages.push(data);
|
||||||
|
}
|
||||||
|
|
||||||
|
close() {
|
||||||
|
this.readyState = 3;
|
||||||
|
this.onclose?.();
|
||||||
|
}
|
||||||
|
|
||||||
|
simulateClose() {
|
||||||
|
this.readyState = 3;
|
||||||
|
this.onclose?.();
|
||||||
|
}
|
||||||
|
|
||||||
|
simulateMessage(data: Record<string, unknown>) {
|
||||||
|
this.onmessage?.({ data: JSON.stringify(data) });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return { MockWebSocket, instances };
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("ChatWebSocket", () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.useFakeTimers();
|
||||||
|
const { MockWebSocket } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
// Reset shared static state between tests
|
||||||
|
(ChatWebSocket as unknown as { sharedSocket: null }).sharedSocket = null;
|
||||||
|
(ChatWebSocket as unknown as { refCount: number }).refCount = 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.useRealTimers();
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("schedules reconnect after socket closes unexpectedly", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
|
||||||
|
expect(instances).toHaveLength(1);
|
||||||
|
|
||||||
|
instances[0].simulateClose();
|
||||||
|
|
||||||
|
// No new socket created yet
|
||||||
|
expect(instances).toHaveLength(1);
|
||||||
|
|
||||||
|
// Advance past the initial 1s reconnect delay
|
||||||
|
vi.advanceTimersByTime(1001);
|
||||||
|
|
||||||
|
// A new socket should now have been created
|
||||||
|
expect(instances).toHaveLength(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("delivers pipeline_state after reconnect", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const onPipelineState = vi.fn();
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({ onPipelineState });
|
||||||
|
|
||||||
|
// Simulate server restart
|
||||||
|
instances[0].simulateClose();
|
||||||
|
vi.advanceTimersByTime(1001);
|
||||||
|
|
||||||
|
// Server pushes pipeline_state on fresh connection
|
||||||
|
const freshState = {
|
||||||
|
backlog: [{ story_id: "1_story_test", name: "Test", error: null }],
|
||||||
|
current: [],
|
||||||
|
qa: [],
|
||||||
|
merge: [],
|
||||||
|
done: [],
|
||||||
|
};
|
||||||
|
instances[1].simulateMessage({ type: "pipeline_state", ...freshState });
|
||||||
|
|
||||||
|
expect(onPipelineState).toHaveBeenCalledWith(freshState);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not reconnect after explicit close()", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
|
||||||
|
// Explicit close disables reconnect
|
||||||
|
ws.close();
|
||||||
|
|
||||||
|
// Advance through both the DEV close-defer (250ms) and reconnect window
|
||||||
|
vi.advanceTimersByTime(2000);
|
||||||
|
|
||||||
|
// No new socket should be created
|
||||||
|
expect(instances).toHaveLength(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses exponential backoff on repeated failures", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
|
||||||
|
// First close → reconnects after 1s
|
||||||
|
instances[0].simulateClose();
|
||||||
|
vi.advanceTimersByTime(1001);
|
||||||
|
expect(instances).toHaveLength(2);
|
||||||
|
|
||||||
|
// Second close → reconnects after 2s (doubled)
|
||||||
|
instances[1].simulateClose();
|
||||||
|
vi.advanceTimersByTime(1500);
|
||||||
|
// Not yet (delay is now 2s)
|
||||||
|
expect(instances).toHaveLength(2);
|
||||||
|
vi.advanceTimersByTime(600);
|
||||||
|
expect(instances).toHaveLength(3);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("resets reconnect delay after successful open", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
|
||||||
|
// Disconnect and reconnect twice to raise the delay
|
||||||
|
instances[0].simulateClose();
|
||||||
|
vi.advanceTimersByTime(1001);
|
||||||
|
|
||||||
|
instances[1].simulateClose();
|
||||||
|
vi.advanceTimersByTime(2001);
|
||||||
|
|
||||||
|
// Simulate a successful open on third socket — resets delay to 1s
|
||||||
|
instances[2].onopen?.();
|
||||||
|
|
||||||
|
// Close again — should use the reset 1s delay
|
||||||
|
instances[2].simulateClose();
|
||||||
|
vi.advanceTimersByTime(1001);
|
||||||
|
|
||||||
|
expect(instances).toHaveLength(4);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("ChatWebSocket heartbeat", () => {
|
||||||
|
beforeEach(() => {
|
||||||
|
vi.useFakeTimers();
|
||||||
|
const { MockWebSocket } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
(ChatWebSocket as unknown as { sharedSocket: null }).sharedSocket = null;
|
||||||
|
(ChatWebSocket as unknown as { refCount: number }).refCount = 0;
|
||||||
|
});
|
||||||
|
|
||||||
|
afterEach(() => {
|
||||||
|
vi.useRealTimers();
|
||||||
|
vi.restoreAllMocks();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("sends ping after heartbeat interval", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
instances[0].readyState = 1; // OPEN
|
||||||
|
instances[0].onopen?.(); // starts heartbeat
|
||||||
|
|
||||||
|
vi.advanceTimersByTime(29_999);
|
||||||
|
expect(instances[0].sentMessages).toHaveLength(0);
|
||||||
|
|
||||||
|
vi.advanceTimersByTime(1);
|
||||||
|
expect(instances[0].sentMessages).toHaveLength(1);
|
||||||
|
expect(JSON.parse(instances[0].sentMessages[0])).toEqual({ type: "ping" });
|
||||||
|
|
||||||
|
ws.close();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closes stale connection when pong is not received", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
instances[0].readyState = 1; // OPEN
|
||||||
|
instances[0].onopen?.(); // starts heartbeat
|
||||||
|
|
||||||
|
// Fire heartbeat — sends ping and starts pong timeout
|
||||||
|
vi.advanceTimersByTime(30_000);
|
||||||
|
|
||||||
|
// No pong received; advance past pong timeout → socket closed → reconnect scheduled
|
||||||
|
vi.advanceTimersByTime(5_000);
|
||||||
|
|
||||||
|
// Advance past reconnect delay
|
||||||
|
vi.advanceTimersByTime(1_001);
|
||||||
|
|
||||||
|
expect(instances).toHaveLength(2);
|
||||||
|
ws.close();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not close when pong is received before timeout", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
instances[0].readyState = 1; // OPEN
|
||||||
|
instances[0].onopen?.(); // starts heartbeat
|
||||||
|
|
||||||
|
// Fire heartbeat
|
||||||
|
vi.advanceTimersByTime(30_000);
|
||||||
|
|
||||||
|
// Server responds with pong — clears the pong timeout
|
||||||
|
instances[0].simulateMessage({ type: "pong" });
|
||||||
|
|
||||||
|
// Advance past where pong timeout would have fired
|
||||||
|
vi.advanceTimersByTime(5_001);
|
||||||
|
|
||||||
|
// No reconnect triggered
|
||||||
|
expect(instances).toHaveLength(1);
|
||||||
|
ws.close();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("stops sending pings after explicit close", () => {
|
||||||
|
const { MockWebSocket, instances } = makeMockWebSocket();
|
||||||
|
vi.stubGlobal("WebSocket", MockWebSocket);
|
||||||
|
|
||||||
|
const ws = new ChatWebSocket();
|
||||||
|
ws.connect({});
|
||||||
|
instances[0].readyState = 1; // OPEN
|
||||||
|
instances[0].onopen?.(); // starts heartbeat
|
||||||
|
|
||||||
|
ws.close();
|
||||||
|
|
||||||
|
// Advance well past multiple heartbeat intervals
|
||||||
|
vi.advanceTimersByTime(90_000);
|
||||||
|
|
||||||
|
expect(instances[0].sentMessages).toHaveLength(0);
|
||||||
|
});
|
||||||
|
});
|
||||||
── frontend/src/api/client.ts · new file · 699 lines (@@ -0,0 +1,699 @@) ──
|
|||||||
|
export type WsRequest =
|
||||||
|
| {
|
||||||
|
type: "chat";
|
||||||
|
messages: Message[];
|
||||||
|
config: ProviderConfig;
|
||||||
|
}
|
||||||
|
| {
|
||||||
|
type: "cancel";
|
||||||
|
}
|
||||||
|
| {
|
||||||
|
type: "permission_response";
|
||||||
|
request_id: string;
|
||||||
|
approved: boolean;
|
||||||
|
always_allow: boolean;
|
||||||
|
}
|
||||||
|
| { type: "ping" }
|
||||||
|
| {
|
||||||
|
type: "side_question";
|
||||||
|
question: string;
|
||||||
|
context_messages: Message[];
|
||||||
|
config: ProviderConfig;
|
||||||
|
};
|
||||||
|
|
||||||
|
export interface AgentAssignment {
|
||||||
|
agent_name: string;
|
||||||
|
model: string | null;
|
||||||
|
status: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface PipelineStageItem {
|
||||||
|
story_id: string;
|
||||||
|
name: string | null;
|
||||||
|
error: string | null;
|
||||||
|
merge_failure: string | null;
|
||||||
|
agent: AgentAssignment | null;
|
||||||
|
review_hold: boolean | null;
|
||||||
|
qa: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface PipelineState {
|
||||||
|
backlog: PipelineStageItem[];
|
||||||
|
current: PipelineStageItem[];
|
||||||
|
qa: PipelineStageItem[];
|
||||||
|
merge: PipelineStageItem[];
|
||||||
|
done: PipelineStageItem[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export type WsResponse =
|
||||||
|
| { type: "token"; content: string }
|
||||||
|
| { type: "update"; messages: Message[] }
|
||||||
|
| { type: "session_id"; session_id: string }
|
||||||
|
| { type: "error"; message: string }
|
||||||
|
| {
|
||||||
|
type: "pipeline_state";
|
||||||
|
backlog: PipelineStageItem[];
|
||||||
|
current: PipelineStageItem[];
|
||||||
|
qa: PipelineStageItem[];
|
||||||
|
merge: PipelineStageItem[];
|
||||||
|
done: PipelineStageItem[];
|
||||||
|
}
|
||||||
|
| {
|
||||||
|
type: "permission_request";
|
||||||
|
request_id: string;
|
||||||
|
tool_name: string;
|
||||||
|
tool_input: Record<string, unknown>;
|
||||||
|
}
|
||||||
|
| { type: "tool_activity"; tool_name: string }
|
||||||
|
| {
|
||||||
|
type: "reconciliation_progress";
|
||||||
|
story_id: string;
|
||||||
|
status: string;
|
||||||
|
message: string;
|
||||||
|
}
|
||||||
|
/** `.story_kit/project.toml` was modified; re-fetch the agent roster. */
|
||||||
|
| { type: "agent_config_changed" }
|
||||||
|
/** An agent started, stopped, or changed state; re-fetch agent list. */
|
||||||
|
| { type: "agent_state_changed" }
|
||||||
|
| { type: "tool_activity"; tool_name: string }
|
||||||
|
/** Heartbeat response confirming the connection is alive. */
|
||||||
|
| { type: "pong" }
|
||||||
|
/** Sent on connect when the project still needs onboarding (specs are placeholders). */
|
||||||
|
| { type: "onboarding_status"; needs_onboarding: boolean }
|
||||||
|
/** Streaming thinking token from an extended-thinking block, separate from regular text. */
|
||||||
|
| { type: "thinking_token"; content: string }
|
||||||
|
/** Streaming token from a /btw side question response. */
|
||||||
|
| { type: "side_question_token"; content: string }
|
||||||
|
/** Final signal that the /btw side question has been fully answered. */
|
||||||
|
| { type: "side_question_done"; response: string }
|
||||||
|
/** A single server log entry (bulk on connect, then live). */
|
||||||
|
| { type: "log_entry"; timestamp: string; level: string; message: string };
|
||||||
|
|
||||||
|
export interface ProviderConfig {
|
||||||
|
provider: string;
|
||||||
|
model: string;
|
||||||
|
base_url?: string;
|
||||||
|
enable_tools?: boolean;
|
||||||
|
session_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export type Role = "system" | "user" | "assistant" | "tool";
|
||||||
|
|
||||||
|
export interface ToolCall {
|
||||||
|
id?: string;
|
||||||
|
type: string;
|
||||||
|
function: {
|
||||||
|
name: string;
|
||||||
|
arguments: string;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface Message {
|
||||||
|
role: Role;
|
||||||
|
content: string;
|
||||||
|
tool_calls?: ToolCall[];
|
||||||
|
tool_call_id?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface AnthropicModelInfo {
|
||||||
|
id: string;
|
||||||
|
context_window: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface WorkItemContent {
|
||||||
|
content: string;
|
||||||
|
stage: string;
|
||||||
|
name: string | null;
|
||||||
|
agent: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TestCaseResult {
|
||||||
|
name: string;
|
||||||
|
status: "pass" | "fail";
|
||||||
|
details: string | null;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TestResultsResponse {
|
||||||
|
unit: TestCaseResult[];
|
||||||
|
integration: TestCaseResult[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface FileEntry {
|
||||||
|
name: string;
|
||||||
|
kind: "file" | "dir";
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface SearchResult {
|
||||||
|
path: string;
|
||||||
|
matches: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface AgentCostEntry {
|
||||||
|
agent_name: string;
|
||||||
|
model: string | null;
|
||||||
|
input_tokens: number;
|
||||||
|
output_tokens: number;
|
||||||
|
cache_creation_input_tokens: number;
|
||||||
|
cache_read_input_tokens: number;
|
||||||
|
total_cost_usd: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TokenCostResponse {
|
||||||
|
total_cost_usd: number;
|
||||||
|
agents: AgentCostEntry[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface TokenUsageRecord {
|
||||||
|
story_id: string;
|
||||||
|
agent_name: string;
|
||||||
|
model: string | null;
|
||||||
|
timestamp: string;
|
||||||
|
input_tokens: number;
|
||||||
|
output_tokens: number;
|
||||||
|
cache_creation_input_tokens: number;
|
||||||
|
cache_read_input_tokens: number;
|
||||||
|
total_cost_usd: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface AllTokenUsageResponse {
|
||||||
|
records: TokenUsageRecord[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface CommandOutput {
|
||||||
|
stdout: string;
|
||||||
|
stderr: string;
|
||||||
|
exit_code: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
declare const __STORKIT_PORT__: string;
|
||||||
|
|
||||||
|
const DEFAULT_API_BASE = "/api";
|
||||||
|
const DEFAULT_WS_PATH = "/ws";
|
||||||
|
|
||||||
|
export function resolveWsHost(
|
||||||
|
isDev: boolean,
|
||||||
|
envPort: string | undefined,
|
||||||
|
locationHost: string,
|
||||||
|
): string {
|
||||||
|
return isDev ? `127.0.0.1:${envPort || "3001"}` : locationHost;
|
||||||
|
}
|
||||||
|
|
||||||
|
function buildApiUrl(path: string, baseUrl = DEFAULT_API_BASE): string {
|
||||||
|
return `${baseUrl}${path}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function requestJson<T>(
|
||||||
|
path: string,
|
||||||
|
options: RequestInit = {},
|
||||||
|
baseUrl = DEFAULT_API_BASE,
|
||||||
|
): Promise<T> {
|
||||||
|
const res = await fetch(buildApiUrl(path, baseUrl), {
|
||||||
|
headers: {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
...(options.headers ?? {}),
|
||||||
|
},
|
||||||
|
...options,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!res.ok) {
|
||||||
|
const text = await res.text();
|
||||||
|
throw new Error(text || `Request failed (${res.status})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.json() as Promise<T>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Typed REST client for the storkit backend. Every method accepts an optional
 * `baseUrl` override (defaulting to `/api` via `requestJson`) and returns a
 * promise of the decoded JSON body. Methods delegating to `callMcpTool` go
 * over the `/mcp` JSON-RPC endpoint instead.
 */
export const api = {
  /** GET /project — currently open project path, or null when none is open. */
  getCurrentProject(baseUrl?: string) {
    return requestJson<string | null>("/project", {}, baseUrl);
  },
  /** GET /projects — previously opened project paths. */
  getKnownProjects(baseUrl?: string) {
    return requestJson<string[]>("/projects", {}, baseUrl);
  },
  /** POST /projects/forget — drop `path` from the known-projects list. */
  forgetKnownProject(path: string, baseUrl?: string) {
    return requestJson<boolean>(
      "/projects/forget",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** POST /project — open the project at `path`; returns the opened path. */
  openProject(path: string, baseUrl?: string) {
    return requestJson<string>(
      "/project",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** DELETE /project — close the currently open project. */
  closeProject(baseUrl?: string) {
    return requestJson<boolean>("/project", { method: "DELETE" }, baseUrl);
  },
  /** GET /model — persisted model preference, or null when unset. */
  getModelPreference(baseUrl?: string) {
    return requestJson<string | null>("/model", {}, baseUrl);
  },
  /** POST /model — persist the model preference. */
  setModelPreference(model: string, baseUrl?: string) {
    return requestJson<boolean>(
      "/model",
      { method: "POST", body: JSON.stringify({ model }) },
      baseUrl,
    );
  },
  /**
   * GET /ollama/models — available Ollama model names.
   * `baseUrlParam` is the *Ollama* server base URL, forwarded to the backend
   * as the `base_url` query parameter; `baseUrl` is this client's API prefix.
   * The URL object is only used to assemble path + query; the final request
   * passes `""` as the prefix because `buildApiUrl` already applied it.
   */
  getOllamaModels(baseUrlParam?: string, baseUrl?: string) {
    const url = new URL(
      buildApiUrl("/ollama/models", baseUrl),
      window.location.origin,
    );
    if (baseUrlParam) {
      url.searchParams.set("base_url", baseUrlParam);
    }
    return requestJson<string[]>(url.pathname + url.search, {}, "");
  },
  /** GET /anthropic/key/exists — whether an Anthropic API key is stored. */
  getAnthropicApiKeyExists(baseUrl?: string) {
    return requestJson<boolean>("/anthropic/key/exists", {}, baseUrl);
  },
  /** GET /anthropic/models — Anthropic models available to the stored key. */
  getAnthropicModels(baseUrl?: string) {
    return requestJson<AnthropicModelInfo[]>("/anthropic/models", {}, baseUrl);
  },
  /** POST /anthropic/key — store the Anthropic API key on the server. */
  setAnthropicApiKey(api_key: string, baseUrl?: string) {
    return requestJson<boolean>(
      "/anthropic/key",
      { method: "POST", body: JSON.stringify({ api_key }) },
      baseUrl,
    );
  },
  /** POST /fs/read — read a project-relative file; returns its contents. */
  readFile(path: string, baseUrl?: string) {
    return requestJson<string>(
      "/fs/read",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** POST /fs/write — write `content` to a project-relative file. */
  writeFile(path: string, content: string, baseUrl?: string) {
    return requestJson<boolean>(
      "/fs/write",
      { method: "POST", body: JSON.stringify({ path, content }) },
      baseUrl,
    );
  },
  /** POST /fs/list — list entries of a project-relative directory. */
  listDirectory(path: string, baseUrl?: string) {
    return requestJson<FileEntry[]>(
      "/fs/list",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** POST /io/fs/list/absolute — list entries of an absolute directory path. */
  listDirectoryAbsolute(path: string, baseUrl?: string) {
    return requestJson<FileEntry[]>(
      "/io/fs/list/absolute",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** POST /io/fs/create/absolute — create a directory at an absolute path. */
  createDirectoryAbsolute(path: string, baseUrl?: string) {
    return requestJson<boolean>(
      "/io/fs/create/absolute",
      { method: "POST", body: JSON.stringify({ path }) },
      baseUrl,
    );
  },
  /** GET /io/fs/home — the server's home directory path. */
  getHomeDirectory(baseUrl?: string) {
    return requestJson<string>("/io/fs/home", {}, baseUrl);
  },
  /** GET /io/fs/files — all file paths in the open project. */
  listProjectFiles(baseUrl?: string) {
    return requestJson<string[]>("/io/fs/files", {}, baseUrl);
  },
  /** POST /fs/search — search project files for `query`. */
  searchFiles(query: string, baseUrl?: string) {
    return requestJson<SearchResult[]>(
      "/fs/search",
      { method: "POST", body: JSON.stringify({ query }) },
      baseUrl,
    );
  },
  /** POST /shell/exec — run `command` with `args`; returns captured output. */
  execShell(command: string, args: string[], baseUrl?: string) {
    return requestJson<CommandOutput>(
      "/shell/exec",
      { method: "POST", body: JSON.stringify({ command, args }) },
      baseUrl,
    );
  },
  /** POST /chat/cancel — cancel the in-flight chat generation. */
  cancelChat(baseUrl?: string) {
    return requestJson<boolean>("/chat/cancel", { method: "POST" }, baseUrl);
  },
  /** GET /work-items/{id} — full content of one work item (story). */
  getWorkItemContent(storyId: string, baseUrl?: string) {
    return requestJson<WorkItemContent>(
      `/work-items/${encodeURIComponent(storyId)}`,
      {},
      baseUrl,
    );
  },
  /** GET /work-items/{id}/test-results — test results, or null when absent. */
  getTestResults(storyId: string, baseUrl?: string) {
    return requestJson<TestResultsResponse | null>(
      `/work-items/${encodeURIComponent(storyId)}/test-results`,
      {},
      baseUrl,
    );
  },
  /** GET /work-items/{id}/token-cost — token-cost summary for a story. */
  getTokenCost(storyId: string, baseUrl?: string) {
    return requestJson<TokenCostResponse>(
      `/work-items/${encodeURIComponent(storyId)}/token-cost`,
      {},
      baseUrl,
    );
  },
  /** GET /token-usage — every recorded token-usage sample. */
  getAllTokenUsage(baseUrl?: string) {
    return requestJson<AllTokenUsageResponse>("/token-usage", {}, baseUrl);
  },
  /** Trigger a server rebuild and restart. */
  rebuildAndRestart() {
    return callMcpTool("rebuild_and_restart", {});
  },
  /** Approve a story in QA, moving it to merge. */
  approveQa(storyId: string) {
    return callMcpTool("approve_qa", { story_id: storyId });
  },
  /** Reject a story in QA, moving it back to current with notes. */
  rejectQa(storyId: string, notes: string) {
    return callMcpTool("reject_qa", { story_id: storyId, notes });
  },
  /** Launch the QA app for a story's worktree. */
  launchQaApp(storyId: string) {
    return callMcpTool("launch_qa_app", { story_id: storyId });
  },
  /** Delete a story from the pipeline, stopping any running agent and removing the worktree. */
  deleteStory(storyId: string) {
    return callMcpTool("delete_story", { story_id: storyId });
  },
};
|
||||||
|
|
||||||
|
async function callMcpTool(
|
||||||
|
toolName: string,
|
||||||
|
args: Record<string, unknown>,
|
||||||
|
): Promise<string> {
|
||||||
|
const res = await fetch("/mcp", {
|
||||||
|
method: "POST",
|
||||||
|
headers: { "Content-Type": "application/json" },
|
||||||
|
body: JSON.stringify({
|
||||||
|
jsonrpc: "2.0",
|
||||||
|
id: 1,
|
||||||
|
method: "tools/call",
|
||||||
|
params: { name: toolName, arguments: args },
|
||||||
|
}),
|
||||||
|
});
|
||||||
|
const json = await res.json();
|
||||||
|
if (json.error) {
|
||||||
|
throw new Error(json.error.message);
|
||||||
|
}
|
||||||
|
const text = json.result?.content?.[0]?.text ?? "";
|
||||||
|
return text;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Client for the backend chat WebSocket.
 *
 * All instances multiplex ONE shared browser WebSocket (`sharedSocket`),
 * reference-counted via `refCount` so the socket only closes when the last
 * instance calls `close()`. Each instance installs its own handler set on the
 * shared socket when it connects (last connector wins the `on*` callbacks).
 *
 * Also implements a ping/pong heartbeat and exponential-backoff reconnect.
 */
export class ChatWebSocket {
  // The single socket shared by every ChatWebSocket instance.
  private static sharedSocket: WebSocket | null = null;
  // Number of instances currently connected to the shared socket.
  private static refCount = 0;
  // This instance's view of the shared socket (undefined when closed).
  private socket?: WebSocket;

  // --- Per-message-type callbacks, supplied via connect() ---
  private onToken?: (content: string) => void;
  private onThinkingToken?: (content: string) => void;
  private onUpdate?: (messages: Message[]) => void;
  private onSessionId?: (sessionId: string) => void;
  private onError?: (message: string) => void;
  private onPipelineState?: (state: PipelineState) => void;
  private onPermissionRequest?: (
    requestId: string,
    toolName: string,
    toolInput: Record<string, unknown>,
  ) => void;
  private onActivity?: (toolName: string) => void;
  private onReconciliationProgress?: (
    storyId: string,
    status: string,
    message: string,
  ) => void;
  private onAgentConfigChanged?: () => void;
  private onAgentStateChanged?: () => void;
  private onOnboardingStatus?: (needsOnboarding: boolean) => void;
  private onSideQuestionToken?: (content: string) => void;
  private onSideQuestionDone?: (response: string) => void;
  private onLogEntry?: (
    timestamp: string,
    level: string,
    message: string,
  ) => void;
  private onConnected?: () => void;

  // --- Connection state ---
  // Whether THIS instance is counted in refCount.
  private connected = false;
  // Dev-mode deferred-close timer (see close()).
  private closeTimer?: number;
  private wsPath = DEFAULT_WS_PATH;
  // Pending reconnect timer handle, if a reconnect is scheduled.
  private reconnectTimer?: number;
  // Current backoff delay in ms; doubles per attempt, capped at 30s.
  private reconnectDelay = 1000;
  // Set true by connect(), false by close(); gates auto-reconnect.
  private shouldReconnect = false;
  // Heartbeat timer handles.
  private heartbeatInterval?: number;
  private heartbeatTimeout?: number;
  // Send a ping every 30s; expect a pong within 5s.
  private static readonly HEARTBEAT_INTERVAL = 30_000;
  private static readonly HEARTBEAT_TIMEOUT = 5_000;

  // Start the periodic ping loop. Each ping arms a timeout that force-closes
  // the socket if no pong clears it (the close triggers reconnect logic).
  private _startHeartbeat(): void {
    this._stopHeartbeat();
    this.heartbeatInterval = window.setInterval(() => {
      if (!this.socket || this.socket.readyState !== WebSocket.OPEN) return;
      const ping: WsRequest = { type: "ping" };
      this.socket.send(JSON.stringify(ping));
      this.heartbeatTimeout = window.setTimeout(() => {
        // No pong received within timeout; close socket to trigger reconnect.
        this.socket?.close();
      }, ChatWebSocket.HEARTBEAT_TIMEOUT);
    }, ChatWebSocket.HEARTBEAT_INTERVAL);
  }

  // Cancel both heartbeat timers and clear their handles.
  private _stopHeartbeat(): void {
    window.clearInterval(this.heartbeatInterval);
    window.clearTimeout(this.heartbeatTimeout);
    this.heartbeatInterval = undefined;
    this.heartbeatTimeout = undefined;
  }

  // Build the ws:// or wss:// URL from the page protocol, the resolved
  // dev/prod host, and this instance's configured path.
  private _buildWsUrl(): string {
    const protocol = window.location.protocol === "https:" ? "wss" : "ws";
    const wsHost = resolveWsHost(
      import.meta.env.DEV,
      typeof __STORKIT_PORT__ !== "undefined" ? __STORKIT_PORT__ : undefined,
      window.location.host,
    );
    return `${protocol}://${wsHost}${this.wsPath}`;
  }

  // Install this instance's open/message/error/close handlers on the socket.
  // Incoming messages are JSON WsResponse frames dispatched by `type`.
  private _attachHandlers(): void {
    if (!this.socket) return;
    this.socket.onopen = () => {
      // Successful open resets the reconnect backoff.
      this.reconnectDelay = 1000;
      this._startHeartbeat();
      this.onConnected?.();
    };
    this.socket.onmessage = (event) => {
      try {
        const data = JSON.parse(event.data) as WsResponse;
        if (data.type === "token") this.onToken?.(data.content);
        if (data.type === "thinking_token")
          this.onThinkingToken?.(data.content);
        if (data.type === "update") this.onUpdate?.(data.messages);
        if (data.type === "session_id") this.onSessionId?.(data.session_id);
        if (data.type === "error") this.onError?.(data.message);
        if (data.type === "pipeline_state")
          this.onPipelineState?.({
            backlog: data.backlog,
            current: data.current,
            qa: data.qa,
            merge: data.merge,
            done: data.done,
          });
        if (data.type === "permission_request")
          this.onPermissionRequest?.(
            data.request_id,
            data.tool_name,
            data.tool_input,
          );
        if (data.type === "tool_activity") this.onActivity?.(data.tool_name);
        if (data.type === "reconciliation_progress")
          this.onReconciliationProgress?.(
            data.story_id,
            data.status,
            data.message,
          );
        if (data.type === "agent_config_changed") this.onAgentConfigChanged?.();
        if (data.type === "agent_state_changed") this.onAgentStateChanged?.();
        if (data.type === "onboarding_status")
          this.onOnboardingStatus?.(data.needs_onboarding);
        if (data.type === "side_question_token")
          this.onSideQuestionToken?.(data.content);
        if (data.type === "side_question_done")
          this.onSideQuestionDone?.(data.response);
        if (data.type === "log_entry")
          this.onLogEntry?.(data.timestamp, data.level, data.message);
        if (data.type === "pong") {
          // Pong arrived in time: disarm the heartbeat-failure timeout.
          window.clearTimeout(this.heartbeatTimeout);
          this.heartbeatTimeout = undefined;
        }
      } catch (err) {
        // Malformed frame (JSON parse failure) is surfaced via onError.
        this.onError?.(String(err));
      }
    };
    this.socket.onerror = () => {
      this.onError?.("WebSocket error");
    };
    this.socket.onclose = () => {
      // Only auto-reconnect while this instance is still logically connected.
      if (this.shouldReconnect && this.connected) {
        this._scheduleReconnect();
      }
    };
  }

  // Schedule a reconnect attempt after the current backoff delay,
  // then double the delay (capped at 30s) for the next attempt.
  private _scheduleReconnect(): void {
    window.clearTimeout(this.reconnectTimer);
    const delay = this.reconnectDelay;
    this.reconnectDelay = Math.min(this.reconnectDelay * 2, 30000);
    this.reconnectTimer = window.setTimeout(() => {
      this.reconnectTimer = undefined;
      const wsUrl = this._buildWsUrl();
      ChatWebSocket.sharedSocket = new WebSocket(wsUrl);
      this.socket = ChatWebSocket.sharedSocket;
      this._attachHandlers();
    }, delay);
  }

  /**
   * Register callbacks and join the shared socket, creating it if needed.
   * Idempotent per instance: a second call while connected only refreshes
   * the stored handlers and returns without touching the refcount.
   */
  connect(
    handlers: {
      onToken?: (content: string) => void;
      onThinkingToken?: (content: string) => void;
      onUpdate?: (messages: Message[]) => void;
      onSessionId?: (sessionId: string) => void;
      onError?: (message: string) => void;
      onPipelineState?: (state: PipelineState) => void;
      onPermissionRequest?: (
        requestId: string,
        toolName: string,
        toolInput: Record<string, unknown>,
      ) => void;
      onActivity?: (toolName: string) => void;
      onReconciliationProgress?: (
        storyId: string,
        status: string,
        message: string,
      ) => void;
      onAgentConfigChanged?: () => void;
      onAgentStateChanged?: () => void;
      onOnboardingStatus?: (needsOnboarding: boolean) => void;
      onSideQuestionToken?: (content: string) => void;
      onSideQuestionDone?: (response: string) => void;
      onLogEntry?: (timestamp: string, level: string, message: string) => void;
      onConnected?: () => void;
    },
    wsPath = DEFAULT_WS_PATH,
  ) {
    this.onToken = handlers.onToken;
    this.onThinkingToken = handlers.onThinkingToken;
    this.onUpdate = handlers.onUpdate;
    this.onSessionId = handlers.onSessionId;
    this.onError = handlers.onError;
    this.onPipelineState = handlers.onPipelineState;
    this.onPermissionRequest = handlers.onPermissionRequest;
    this.onActivity = handlers.onActivity;
    this.onReconciliationProgress = handlers.onReconciliationProgress;
    this.onAgentConfigChanged = handlers.onAgentConfigChanged;
    this.onAgentStateChanged = handlers.onAgentStateChanged;
    this.onOnboardingStatus = handlers.onOnboardingStatus;
    this.onSideQuestionToken = handlers.onSideQuestionToken;
    this.onSideQuestionDone = handlers.onSideQuestionDone;
    this.onLogEntry = handlers.onLogEntry;
    this.onConnected = handlers.onConnected;
    this.wsPath = wsPath;
    this.shouldReconnect = true;

    if (this.connected) {
      return;
    }
    this.connected = true;
    ChatWebSocket.refCount += 1;

    // (Re)create the shared socket only when it is missing or going away.
    if (
      !ChatWebSocket.sharedSocket ||
      ChatWebSocket.sharedSocket.readyState === WebSocket.CLOSED ||
      ChatWebSocket.sharedSocket.readyState === WebSocket.CLOSING
    ) {
      const wsUrl = this._buildWsUrl();
      ChatWebSocket.sharedSocket = new WebSocket(wsUrl);
    }
    this.socket = ChatWebSocket.sharedSocket;
    this._attachHandlers();
  }

  /** Send a chat request carrying the conversation and provider config. */
  sendChat(messages: Message[], config: ProviderConfig) {
    this.send({ type: "chat", messages, config });
  }

  /** Send a side question with its context messages and provider config. */
  sendSideQuestion(
    question: string,
    contextMessages: Message[],
    config: ProviderConfig,
  ) {
    this.send({
      type: "side_question",
      question,
      context_messages: contextMessages,
      config,
    });
  }

  /** Ask the server to cancel the in-flight generation. */
  cancel() {
    this.send({ type: "cancel" });
  }

  /** Answer a pending tool-permission request by id. */
  sendPermissionResponse(
    requestId: string,
    approved: boolean,
    alwaysAllow = false,
  ) {
    this.send({
      type: "permission_response",
      request_id: requestId,
      approved,
      always_allow: alwaysAllow,
    });
  }

  /**
   * Detach this instance from the shared socket. The socket itself is only
   * closed when the refcount reaches zero. In dev builds the close is
   * deferred 250 ms so React StrictMode's unmount/remount cycle does not
   * tear down and recreate the socket.
   */
  close() {
    this.shouldReconnect = false;
    this._stopHeartbeat();
    window.clearTimeout(this.reconnectTimer);
    this.reconnectTimer = undefined;

    if (!this.connected) return;
    this.connected = false;
    ChatWebSocket.refCount = Math.max(0, ChatWebSocket.refCount - 1);

    if (import.meta.env.DEV) {
      if (this.closeTimer) {
        window.clearTimeout(this.closeTimer);
      }
      this.closeTimer = window.setTimeout(() => {
        if (ChatWebSocket.refCount === 0) {
          ChatWebSocket.sharedSocket?.close();
          ChatWebSocket.sharedSocket = null;
        }
        this.socket = ChatWebSocket.sharedSocket ?? undefined;
        this.closeTimer = undefined;
      }, 250);
      return;
    }

    if (ChatWebSocket.refCount === 0) {
      ChatWebSocket.sharedSocket?.close();
      ChatWebSocket.sharedSocket = null;
    }
    this.socket = ChatWebSocket.sharedSocket ?? undefined;
  }

  // Serialize and send a frame; reports via onError instead of throwing
  // when the socket is not open.
  private send(payload: WsRequest) {
    if (!this.socket || this.socket.readyState !== WebSocket.OPEN) {
      this.onError?.("WebSocket is not connected");
      return;
    }
    this.socket.send(JSON.stringify(payload));
  }
}
|
||||||
134
frontend/src/api/settings.test.ts
Normal file
134
frontend/src/api/settings.test.ts
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import { settingsApi } from "./settings";
|
||||||
|
|
||||||
|
// Shared fetch stub; each test queues its own response via mockResolvedValueOnce.
const mockFetch = vi.fn();

beforeEach(() => {
  // Replace the global fetch so settingsApi requests hit the stub.
  vi.stubGlobal("fetch", mockFetch);
});

afterEach(() => {
  // Restore globals and clear recorded calls between tests.
  vi.restoreAllMocks();
});
|
||||||
|
|
||||||
|
function okResponse(body: unknown) {
|
||||||
|
return new Response(JSON.stringify(body), {
|
||||||
|
status: 200,
|
||||||
|
headers: { "Content-Type": "application/json" },
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
function errorResponse(status: number, text: string) {
|
||||||
|
return new Response(text, { status });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit tests for settingsApi. fetch is stubbed per test (mockFetch), so each
// test queues exactly the response the call under test should receive, then
// asserts on the URL/options fetch was invoked with and on the decoded result.
describe("settingsApi", () => {
  describe("getEditorCommand", () => {
    it("sends GET to /settings/editor and returns editor settings", async () => {
      const expected = { editor_command: "zed" };
      mockFetch.mockResolvedValueOnce(okResponse(expected));

      const result = await settingsApi.getEditorCommand();

      // Default base URL "/api" plus the endpoint path, JSON content type set.
      expect(mockFetch).toHaveBeenCalledWith(
        "/api/settings/editor",
        expect.objectContaining({
          headers: expect.objectContaining({
            "Content-Type": "application/json",
          }),
        }),
      );
      expect(result).toEqual(expected);
    });

    it("returns null editor_command when not configured", async () => {
      const expected = { editor_command: null };
      mockFetch.mockResolvedValueOnce(okResponse(expected));

      const result = await settingsApi.getEditorCommand();
      expect(result.editor_command).toBeNull();
    });

    it("uses custom baseUrl when provided", async () => {
      mockFetch.mockResolvedValueOnce(okResponse({ editor_command: "code" }));

      await settingsApi.getEditorCommand("http://localhost:4000/api");

      expect(mockFetch).toHaveBeenCalledWith(
        "http://localhost:4000/api/settings/editor",
        expect.anything(),
      );
    });
  });

  describe("setEditorCommand", () => {
    it("sends PUT to /settings/editor with command body", async () => {
      const expected = { editor_command: "zed" };
      mockFetch.mockResolvedValueOnce(okResponse(expected));

      const result = await settingsApi.setEditorCommand("zed");

      expect(mockFetch).toHaveBeenCalledWith(
        "/api/settings/editor",
        expect.objectContaining({
          method: "PUT",
          body: JSON.stringify({ editor_command: "zed" }),
        }),
      );
      expect(result).toEqual(expected);
    });

    it("sends PUT with null to clear the editor command", async () => {
      const expected = { editor_command: null };
      mockFetch.mockResolvedValueOnce(okResponse(expected));

      const result = await settingsApi.setEditorCommand(null);

      // Clearing is expressed as an explicit JSON null, not a missing key.
      expect(mockFetch).toHaveBeenCalledWith(
        "/api/settings/editor",
        expect.objectContaining({
          method: "PUT",
          body: JSON.stringify({ editor_command: null }),
        }),
      );
      expect(result.editor_command).toBeNull();
    });

    it("uses custom baseUrl when provided", async () => {
      mockFetch.mockResolvedValueOnce(okResponse({ editor_command: "vim" }));

      await settingsApi.setEditorCommand("vim", "http://localhost:4000/api");

      expect(mockFetch).toHaveBeenCalledWith(
        "http://localhost:4000/api/settings/editor",
        expect.objectContaining({ method: "PUT" }),
      );
    });
  });

  describe("error handling", () => {
    it("throws with response body text on non-ok response", async () => {
      mockFetch.mockResolvedValueOnce(errorResponse(400, "Bad Request"));

      await expect(settingsApi.getEditorCommand()).rejects.toThrow(
        "Bad Request",
      );
    });

    it("throws with status code message when response body is empty", async () => {
      mockFetch.mockResolvedValueOnce(errorResponse(500, ""));

      // Empty body falls back to the generic "Request failed (status)" message.
      await expect(settingsApi.getEditorCommand()).rejects.toThrow(
        "Request failed (500)",
      );
    });

    it("throws on setEditorCommand error", async () => {
      mockFetch.mockResolvedValueOnce(errorResponse(403, "Forbidden"));

      await expect(settingsApi.setEditorCommand("code")).rejects.toThrow(
        "Forbidden",
      );
    });
  });
});
|
||||||
70
frontend/src/api/settings.ts
Normal file
70
frontend/src/api/settings.ts
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
/** Editor configuration as stored on the server (GET/PUT /settings/editor). */
export interface EditorSettings {
  // Shell command used to open files (e.g. "zed"), or null when unset.
  editor_command: string | null;
}
|
||||||
|
|
||||||
|
/** Result of asking the server to open a file in the configured editor. */
export interface OpenFileResult {
  success: boolean;
}
|
||||||
|
|
||||||
|
const DEFAULT_API_BASE = "/api";
|
||||||
|
|
||||||
|
function buildApiUrl(path: string, baseUrl = DEFAULT_API_BASE): string {
|
||||||
|
return `${baseUrl}${path}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
async function requestJson<T>(
|
||||||
|
path: string,
|
||||||
|
options: RequestInit = {},
|
||||||
|
baseUrl = DEFAULT_API_BASE,
|
||||||
|
): Promise<T> {
|
||||||
|
const res = await fetch(buildApiUrl(path, baseUrl), {
|
||||||
|
headers: {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
...(options.headers ?? {}),
|
||||||
|
},
|
||||||
|
...options,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!res.ok) {
|
||||||
|
const text = await res.text();
|
||||||
|
throw new Error(text || `Request failed (${res.status})`);
|
||||||
|
}
|
||||||
|
|
||||||
|
return res.json() as Promise<T>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** REST client for the editor-settings endpoints. */
export const settingsApi = {
  /** GET /settings/editor — fetch the configured editor command (null when unset). */
  getEditorCommand(baseUrl?: string): Promise<EditorSettings> {
    return requestJson<EditorSettings>("/settings/editor", {}, baseUrl);
  },

  /** PUT /settings/editor — set the editor command, or clear it by passing null. */
  setEditorCommand(
    command: string | null,
    baseUrl?: string,
  ): Promise<EditorSettings> {
    return requestJson<EditorSettings>(
      "/settings/editor",
      {
        method: "PUT",
        body: JSON.stringify({ editor_command: command }),
      },
      baseUrl,
    );
  },

  /**
   * POST /settings/open-file — ask the server to open `path` (optionally at
   * `line`) in the configured editor. Arguments travel as query parameters,
   * URL-encoded via URLSearchParams.
   */
  openFile(
    path: string,
    line?: number,
    baseUrl?: string,
  ): Promise<OpenFileResult> {
    const params = new URLSearchParams({ path });
    if (line !== undefined) {
      params.set("line", String(line));
    }
    return requestJson<OpenFileResult>(
      `/settings/open-file?${params.toString()}`,
      { method: "POST" },
      baseUrl,
    );
  },
};
|
||||||
1
frontend/src/assets/react.svg
Normal file
1
frontend/src/assets/react.svg
Normal file
@@ -0,0 +1 @@
|
|||||||
|
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 
13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 
44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>
|
||||||
|
After Width: | Height: | Size: 4.0 KiB |
313
frontend/src/components/AgentPanel.test.tsx
Normal file
313
frontend/src/components/AgentPanel.test.tsx
Normal file
@@ -0,0 +1,313 @@
|
|||||||
|
import { act, render, screen } from "@testing-library/react";
|
||||||
|
import { beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import type { AgentConfigInfo, AgentEvent, AgentInfo } from "../api/agents";
|
||||||
|
import { agentsApi, subscribeAgentStream } from "../api/agents";
|
||||||
|
|
||||||
|
// Replace the whole agents API module with vi.fn() stubs; subscribeAgentStream
// returns a no-op unsubscribe function.
vi.mock("../api/agents", () => {
  const agentsApi = {
    listAgents: vi.fn(),
    getAgentConfig: vi.fn(),
    startAgent: vi.fn(),
    stopAgent: vi.fn(),
    reloadConfig: vi.fn(),
  };
  return { agentsApi, subscribeAgentStream: vi.fn(() => () => {}) };
});

// Dynamic import so the mock is in place before the module loads
const { AgentPanel } = await import("./AgentPanel");

// Typed handle on the stream-subscription mock.
const mockedSubscribeAgentStream = vi.mocked(subscribeAgentStream);

// Typed handles on the agentsApi stubs the tests configure.
const mockedAgents = {
  listAgents: vi.mocked(agentsApi.listAgents),
  getAgentConfig: vi.mocked(agentsApi.getAgentConfig),
  startAgent: vi.mocked(agentsApi.startAgent),
};

// Single-agent roster fixture returned by getAgentConfig in these suites.
const ROSTER: AgentConfigInfo[] = [
  {
    name: "coder-1",
    role: "Full-stack engineer",
    stage: "coder",
    model: "sonnet",
    allowed_tools: null,
    max_turns: 50,
    max_budget_usd: 5.0,
  },
];
|
||||||
|
|
||||||
|
// Verifies the per-agent "active work" entries were removed from AgentPanel:
// even with a running agent reported, only roster badges should render.
describe("AgentPanel active work list removed", () => {
  beforeAll(() => {
    // jsdom lacks scrollIntoView; stub it so the component can call it freely.
    Element.prototype.scrollIntoView = vi.fn();
  });

  beforeEach(() => {
    mockedAgents.getAgentConfig.mockResolvedValue(ROSTER);
    mockedAgents.listAgents.mockResolvedValue([]);
  });

  it("does not render active agent entries even when agents are running", async () => {
    const agentList: AgentInfo[] = [
      {
        story_id: "83_active",
        agent_name: "coder-1",
        status: "running",
        session_id: null,
        worktree_path: "/tmp/wt",
        base_branch: "master",
        log_session_id: null,
      },
    ];
    mockedAgents.listAgents.mockResolvedValue(agentList);

    const { container } = render(<AgentPanel />);

    // Roster badge should still be visible
    await screen.findByTestId("roster-badge-coder-1");

    // No agent entry divs should exist
    expect(
      container.querySelector('[data-testid^="agent-entry-"]'),
    ).not.toBeInTheDocument();
  });
});
|
||||||
|
|
||||||
|
describe("Running count visibility in header", () => {
|
||||||
|
beforeAll(() => {
|
||||||
|
Element.prototype.scrollIntoView = vi.fn();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockedAgents.getAgentConfig.mockResolvedValue(ROSTER);
|
||||||
|
mockedAgents.listAgents.mockResolvedValue([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC1: When no agents are running, "0 running" is NOT visible
|
||||||
|
it("does not show running count when no agents are running", async () => {
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
// Wait for roster to load
|
||||||
|
await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
|
||||||
|
expect(screen.queryByText(/0 running/)).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC2: When agents are running, "N running" IS visible
|
||||||
|
it("shows running count when agents are running", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "99_active",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
await screen.findByText(/1 running/);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("RosterBadge availability state", () => {
|
||||||
|
beforeAll(() => {
|
||||||
|
Element.prototype.scrollIntoView = vi.fn();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockedAgents.getAgentConfig.mockResolvedValue(ROSTER);
|
||||||
|
mockedAgents.listAgents.mockResolvedValue([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows a green dot for an idle agent", async () => {
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
const dot = await screen.findByTestId("roster-dot-coder-1");
|
||||||
|
// JSDOM normalizes #3fb950 to rgb(63, 185, 80)
|
||||||
|
expect(dot.style.background).toBe("rgb(63, 185, 80)");
|
||||||
|
expect(dot.style.animation).toBe("");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows grey badge styling for an idle agent", async () => {
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
const badge = await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
// JSDOM normalizes #aaa18 to rgba(170, 170, 170, 0.094) and #aaa to rgb(170, 170, 170)
|
||||||
|
expect(badge.style.background).toBe("rgba(170, 170, 170, 0.094)");
|
||||||
|
expect(badge.style.color).toBe("rgb(170, 170, 170)");
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC1: roster badge always shows idle (grey) even when agent is running
|
||||||
|
it("shows a static green dot for a running agent (roster always idle)", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "81_active",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: null,
|
||||||
|
base_branch: null,
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
const dot = await screen.findByTestId("roster-dot-coder-1");
|
||||||
|
expect(dot.style.background).toBe("rgb(63, 185, 80)");
|
||||||
|
// Roster is always idle — no pulsing animation
|
||||||
|
expect(dot.style.animation).toBe("");
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC1: roster badge always shows idle (grey) even when agent is running
|
||||||
|
it("shows grey (idle) badge styling for a running agent", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "81_active",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: null,
|
||||||
|
base_branch: null,
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
const badge = await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
// Always idle: grey background and grey text
|
||||||
|
expect(badge.style.background).toBe("rgba(170, 170, 170, 0.094)");
|
||||||
|
expect(badge.style.color).toBe("rgb(170, 170, 170)");
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC2: after agent completes and returns to roster, badge shows idle
|
||||||
|
it("shows idle state after agent status changes from running to completed", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "81_completed",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "completed",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: null,
|
||||||
|
base_branch: null,
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(<AgentPanel />);
|
||||||
|
|
||||||
|
const badge = await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
const dot = screen.getByTestId("roster-dot-coder-1");
|
||||||
|
// Completed agent: badge is idle
|
||||||
|
expect(badge.style.background).toBe("rgba(170, 170, 170, 0.094)");
|
||||||
|
expect(badge.style.color).toBe("rgb(170, 170, 170)");
|
||||||
|
expect(dot.style.animation).toBe("");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("Agent output not shown in sidebar (story 290)", () => {
|
||||||
|
beforeAll(() => {
|
||||||
|
Element.prototype.scrollIntoView = vi.fn();
|
||||||
|
});
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockedAgents.getAgentConfig.mockResolvedValue(ROSTER);
|
||||||
|
mockedAgents.listAgents.mockResolvedValue([]);
|
||||||
|
mockedSubscribeAgentStream.mockReturnValue(() => {});
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC1: output events do not appear in the agents sidebar
|
||||||
|
it("does not render agent output when output event arrives", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "290_output",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
const { container } = render(<AgentPanel />);
|
||||||
|
await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "output",
|
||||||
|
story_id: "290_output",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
text: "doing some work...",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// No output elements in the sidebar
|
||||||
|
expect(
|
||||||
|
container.querySelector('[data-testid^="agent-output-"]'),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
expect(
|
||||||
|
container.querySelector('[data-testid^="agent-stream-"]'),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// AC1: thinking events do not appear in the agents sidebar
|
||||||
|
it("does not render thinking block when thinking event arrives", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "290_thinking",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedAgents.listAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(<AgentPanel />);
|
||||||
|
await screen.findByTestId("roster-badge-coder-1");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "thinking",
|
||||||
|
story_id: "290_thinking",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
text: "Let me consider the problem carefully...",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// No thinking block or output in sidebar
|
||||||
|
expect(screen.queryByTestId("thinking-block")).not.toBeInTheDocument();
|
||||||
|
expect(
|
||||||
|
screen.queryByText("Let me consider the problem carefully..."),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
419
frontend/src/components/AgentPanel.tsx
Normal file
419
frontend/src/components/AgentPanel.tsx
Normal file
@@ -0,0 +1,419 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import type {
|
||||||
|
AgentConfigInfo,
|
||||||
|
AgentEvent,
|
||||||
|
AgentStatusValue,
|
||||||
|
} from "../api/agents";
|
||||||
|
import { agentsApi, subscribeAgentStream } from "../api/agents";
|
||||||
|
import { settingsApi } from "../api/settings";
|
||||||
|
import { useLozengeFly } from "./LozengeFlyContext";
|
||||||
|
|
||||||
|
// Pull the hooks used below off the React namespace import.
const { useCallback, useEffect, useRef, useState } = React;

// Client-side view of one agent run, keyed by story+agent (see agentKey).
interface AgentState {
  agentName: string;
  status: AgentStatusValue;
  // Session id reported by the "done" stream event (null until then).
  sessionId: string | null;
  worktreePath: string | null;
  baseBranch: string | null;
  // Epoch ms when the agent reached completed/failed; null while active.
  terminalAt: number | null;
}
|
||||||
|
|
||||||
|
const formatTimestamp = (value: Date | null): string => {
|
||||||
|
if (!value) return "";
|
||||||
|
return value.toLocaleTimeString([], {
|
||||||
|
hour: "2-digit",
|
||||||
|
minute: "2-digit",
|
||||||
|
second: "2-digit",
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
function RosterBadge({ agent }: { agent: AgentConfigInfo }) {
|
||||||
|
const { registerRosterEl } = useLozengeFly();
|
||||||
|
const badgeRef = useRef<HTMLSpanElement>(null);
|
||||||
|
|
||||||
|
// Register this element so fly animations know where to start/end
|
||||||
|
useEffect(() => {
|
||||||
|
const el = badgeRef.current;
|
||||||
|
if (el) registerRosterEl(agent.name, el);
|
||||||
|
return () => registerRosterEl(agent.name, null);
|
||||||
|
}, [agent.name, registerRosterEl]);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<span
|
||||||
|
ref={badgeRef}
|
||||||
|
data-testid={`roster-badge-${agent.name}`}
|
||||||
|
style={{
|
||||||
|
display: "inline-flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "4px",
|
||||||
|
padding: "2px 8px",
|
||||||
|
borderRadius: "6px",
|
||||||
|
fontSize: "0.7em",
|
||||||
|
background: "#aaaaaa18",
|
||||||
|
color: "#aaa",
|
||||||
|
border: "1px solid #aaaaaa44",
|
||||||
|
}}
|
||||||
|
title={`${agent.role || agent.name} — available`}
|
||||||
|
>
|
||||||
|
<span
|
||||||
|
data-testid={`roster-dot-${agent.name}`}
|
||||||
|
style={{
|
||||||
|
width: "5px",
|
||||||
|
height: "5px",
|
||||||
|
borderRadius: "50%",
|
||||||
|
background: "#3fb950",
|
||||||
|
flexShrink: 0,
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
<span style={{ fontWeight: 600, color: "#aaa" }}>{agent.name}</span>
|
||||||
|
{agent.model && <span style={{ color: "#888" }}>{agent.model}</span>}
|
||||||
|
<span style={{ color: "#888", fontStyle: "italic" }}>available</span>
|
||||||
|
</span>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Build a composite key for tracking agent state. */
|
||||||
|
function agentKey(storyId: string, agentName: string): string {
|
||||||
|
return `${storyId}:${agentName}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Props for AgentPanel. Both counters are bumped by WebSocket notifications
 *  from the parent; a change triggers the corresponding re-fetch effect. */
interface AgentPanelProps {
  /** Increment this to trigger a re-fetch of the agent roster. */
  configVersion?: number;
  /** Increment this to trigger a re-fetch of the agent list (agent state changed). */
  stateVersion?: number;
}
|
||||||
|
|
||||||
|
export function AgentPanel({
|
||||||
|
configVersion = 0,
|
||||||
|
stateVersion = 0,
|
||||||
|
}: AgentPanelProps) {
|
||||||
|
const { hiddenRosterAgents } = useLozengeFly();
|
||||||
|
const [agents, setAgents] = useState<Record<string, AgentState>>({});
|
||||||
|
const [roster, setRoster] = useState<AgentConfigInfo[]>([]);
|
||||||
|
const [actionError, setActionError] = useState<string | null>(null);
|
||||||
|
const [lastRefresh, setLastRefresh] = useState<Date | null>(null);
|
||||||
|
const [editorCommand, setEditorCommand] = useState<string | null>(null);
|
||||||
|
const [editorInput, setEditorInput] = useState<string>("");
|
||||||
|
const [editingEditor, setEditingEditor] = useState(false);
|
||||||
|
const cleanupRefs = useRef<Record<string, () => void>>({});
|
||||||
|
|
||||||
|
// Re-fetch roster whenever configVersion changes (triggered by agent_config_changed WS event).
|
||||||
|
useEffect(() => {
|
||||||
|
agentsApi
|
||||||
|
.getAgentConfig()
|
||||||
|
.then(setRoster)
|
||||||
|
.catch((err) => console.error("Failed to load agent config:", err));
|
||||||
|
}, [configVersion]);
|
||||||
|
|
||||||
|
const subscribeToAgent = useCallback((storyId: string, agentName: string) => {
|
||||||
|
const key = agentKey(storyId, agentName);
|
||||||
|
cleanupRefs.current[key]?.();
|
||||||
|
|
||||||
|
const cleanup = subscribeAgentStream(
|
||||||
|
storyId,
|
||||||
|
agentName,
|
||||||
|
(event: AgentEvent) => {
|
||||||
|
setAgents((prev) => {
|
||||||
|
const current = prev[key] ?? {
|
||||||
|
agentName,
|
||||||
|
status: "pending" as AgentStatusValue,
|
||||||
|
sessionId: null,
|
||||||
|
worktreePath: null,
|
||||||
|
baseBranch: null,
|
||||||
|
terminalAt: null,
|
||||||
|
};
|
||||||
|
|
||||||
|
switch (event.type) {
|
||||||
|
case "status": {
|
||||||
|
const newStatus =
|
||||||
|
(event.status as AgentStatusValue) ?? current.status;
|
||||||
|
const isTerminal =
|
||||||
|
newStatus === "completed" || newStatus === "failed";
|
||||||
|
return {
|
||||||
|
...prev,
|
||||||
|
[key]: {
|
||||||
|
...current,
|
||||||
|
status: newStatus,
|
||||||
|
terminalAt: isTerminal
|
||||||
|
? (current.terminalAt ?? Date.now())
|
||||||
|
: current.terminalAt,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
case "done":
|
||||||
|
return {
|
||||||
|
...prev,
|
||||||
|
[key]: {
|
||||||
|
...current,
|
||||||
|
status: "completed",
|
||||||
|
sessionId: event.session_id ?? current.sessionId,
|
||||||
|
terminalAt: current.terminalAt ?? Date.now(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
case "error":
|
||||||
|
return {
|
||||||
|
...prev,
|
||||||
|
[key]: {
|
||||||
|
...current,
|
||||||
|
status: "failed",
|
||||||
|
terminalAt: current.terminalAt ?? Date.now(),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
default:
|
||||||
|
// output, thinking, and other events are not displayed in the sidebar.
|
||||||
|
// Agent output streams appear in the work item detail panel instead.
|
||||||
|
return prev;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
},
|
||||||
|
() => {
|
||||||
|
// SSE error — agent may not be streaming yet
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
cleanupRefs.current[key] = cleanup;
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
/** Shared helper: fetch the agent list and update state + SSE subscriptions. */
|
||||||
|
const refreshAgents = useCallback(() => {
|
||||||
|
agentsApi
|
||||||
|
.listAgents()
|
||||||
|
.then((agentList) => {
|
||||||
|
const agentMap: Record<string, AgentState> = {};
|
||||||
|
const now = Date.now();
|
||||||
|
for (const a of agentList) {
|
||||||
|
const key = agentKey(a.story_id, a.agent_name);
|
||||||
|
const isTerminal = a.status === "completed" || a.status === "failed";
|
||||||
|
agentMap[key] = {
|
||||||
|
agentName: a.agent_name,
|
||||||
|
status: a.status,
|
||||||
|
sessionId: a.session_id,
|
||||||
|
worktreePath: a.worktree_path,
|
||||||
|
baseBranch: a.base_branch,
|
||||||
|
terminalAt: isTerminal ? now : null,
|
||||||
|
};
|
||||||
|
if (a.status === "running" || a.status === "pending") {
|
||||||
|
subscribeToAgent(a.story_id, a.agent_name);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
setAgents(agentMap);
|
||||||
|
setLastRefresh(new Date());
|
||||||
|
})
|
||||||
|
.catch((err) => console.error("Failed to load agents:", err));
|
||||||
|
}, [subscribeToAgent]);
|
||||||
|
|
||||||
|
// Load existing agents and editor preference on mount
|
||||||
|
useEffect(() => {
|
||||||
|
refreshAgents();
|
||||||
|
|
||||||
|
settingsApi
|
||||||
|
.getEditorCommand()
|
||||||
|
.then((s) => {
|
||||||
|
setEditorCommand(s.editor_command);
|
||||||
|
setEditorInput(s.editor_command ?? "");
|
||||||
|
})
|
||||||
|
.catch((err) => console.error("Failed to load editor command:", err));
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
for (const cleanup of Object.values(cleanupRefs.current)) {
|
||||||
|
cleanup();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Re-fetch agent list when agent state changes (via WebSocket notification).
|
||||||
|
// Skip the initial render (stateVersion=0) since the mount effect handles that.
|
||||||
|
useEffect(() => {
|
||||||
|
if (stateVersion > 0) {
|
||||||
|
refreshAgents();
|
||||||
|
}
|
||||||
|
}, [stateVersion, refreshAgents]);
|
||||||
|
|
||||||
|
const handleSaveEditor = async () => {
|
||||||
|
try {
|
||||||
|
const trimmed = editorInput.trim() || null;
|
||||||
|
const result = await settingsApi.setEditorCommand(trimmed);
|
||||||
|
setEditorCommand(result.editor_command);
|
||||||
|
setEditorInput(result.editor_command ?? "");
|
||||||
|
setEditingEditor(false);
|
||||||
|
} catch (err) {
|
||||||
|
const message = err instanceof Error ? err.message : String(err);
|
||||||
|
setActionError(`Failed to save editor: ${message}`);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
border: "1px solid #333",
|
||||||
|
borderRadius: "10px",
|
||||||
|
padding: "12px 16px",
|
||||||
|
background: "#1f1f1f",
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
gap: "12px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div style={{ fontWeight: 600 }}>Agents</div>
|
||||||
|
{Object.values(agents).filter((a) => a.status === "running").length >
|
||||||
|
0 && (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
color: "#777",
|
||||||
|
fontFamily: "monospace",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{
|
||||||
|
Object.values(agents).filter((a) => a.status === "running")
|
||||||
|
.length
|
||||||
|
}{" "}
|
||||||
|
running
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
{lastRefresh && (
|
||||||
|
<div style={{ fontSize: "0.7em", color: "#555" }}>
|
||||||
|
Loaded {formatTimestamp(lastRefresh)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Editor preference */}
|
||||||
|
<div style={{ display: "flex", alignItems: "center", gap: "6px" }}>
|
||||||
|
<span style={{ fontSize: "0.75em", color: "#666" }}>Editor:</span>
|
||||||
|
{editingEditor ? (
|
||||||
|
<>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={editorInput}
|
||||||
|
onChange={(e) => setEditorInput(e.target.value)}
|
||||||
|
onKeyDown={(e) => {
|
||||||
|
if (e.key === "Enter") handleSaveEditor();
|
||||||
|
if (e.key === "Escape") setEditingEditor(false);
|
||||||
|
}}
|
||||||
|
placeholder="zed, code, cursor..."
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
background: "#111",
|
||||||
|
border: "1px solid #444",
|
||||||
|
borderRadius: "4px",
|
||||||
|
color: "#ccc",
|
||||||
|
padding: "2px 6px",
|
||||||
|
width: "120px",
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={handleSaveEditor}
|
||||||
|
style={{
|
||||||
|
fontSize: "0.7em",
|
||||||
|
padding: "2px 8px",
|
||||||
|
borderRadius: "4px",
|
||||||
|
border: "1px solid #238636",
|
||||||
|
background: "#238636",
|
||||||
|
color: "#fff",
|
||||||
|
cursor: "pointer",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Save
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={() => setEditingEditor(false)}
|
||||||
|
style={{
|
||||||
|
fontSize: "0.7em",
|
||||||
|
padding: "2px 8px",
|
||||||
|
borderRadius: "4px",
|
||||||
|
border: "1px solid #444",
|
||||||
|
background: "none",
|
||||||
|
color: "#888",
|
||||||
|
cursor: "pointer",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Cancel
|
||||||
|
</button>
|
||||||
|
</>
|
||||||
|
) : (
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={() => setEditingEditor(true)}
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
background: "none",
|
||||||
|
border: "1px solid #333",
|
||||||
|
borderRadius: "4px",
|
||||||
|
color: editorCommand ? "#aaa" : "#555",
|
||||||
|
cursor: "pointer",
|
||||||
|
padding: "2px 8px",
|
||||||
|
fontFamily: editorCommand ? "monospace" : "inherit",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{editorCommand ?? "Set editor..."}
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Roster badges — agents always display in idle state here */}
|
||||||
|
{roster.length > 0 && (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexWrap: "wrap",
|
||||||
|
gap: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{roster.map((a) => {
|
||||||
|
const isHidden = hiddenRosterAgents.has(a.name);
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
key={`roster-wrapper-${a.name}`}
|
||||||
|
data-testid={`roster-badge-wrapper-${a.name}`}
|
||||||
|
style={{
|
||||||
|
overflow: "hidden",
|
||||||
|
maxWidth: isHidden ? "0" : "300px",
|
||||||
|
opacity: isHidden ? 0 : 1,
|
||||||
|
transition: "max-width 0.35s ease, opacity 0.2s ease",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<RosterBadge agent={a} />
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{actionError && (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "0.85em",
|
||||||
|
color: "#ff7b72",
|
||||||
|
padding: "4px 8px",
|
||||||
|
background: "#ff7b7211",
|
||||||
|
borderRadius: "6px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{actionError}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
1459
frontend/src/components/Chat.test.tsx
Normal file
1459
frontend/src/components/Chat.test.tsx
Normal file
File diff suppressed because it is too large
Load Diff
1398
frontend/src/components/Chat.tsx
Normal file
1398
frontend/src/components/Chat.tsx
Normal file
File diff suppressed because it is too large
Load Diff
314
frontend/src/components/ChatHeader.test.tsx
Normal file
314
frontend/src/components/ChatHeader.test.tsx
Normal file
@@ -0,0 +1,314 @@
|
|||||||
|
import { fireEvent, render, screen, waitFor } from "@testing-library/react";
|
||||||
|
import { describe, expect, it, vi } from "vitest";
|
||||||
|
import { ChatHeader } from "./ChatHeader";
|
||||||
|
|
||||||
|
vi.mock("../api/client", () => ({
|
||||||
|
api: {
|
||||||
|
rebuildAndRestart: vi.fn(),
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
// Local copy of the ChatHeader prop shape, used only to type makeProps below.
interface ChatHeaderProps {
  projectPath: string;
  onCloseProject: () => void;
  // percentage is precomputed (0–100), not derived from used/total here.
  contextUsage: { used: number; total: number; percentage: number };
  onClearSession: () => void;
  model: string;
  availableModels: string[];
  claudeModels: string[];
  hasAnthropicKey: boolean;
  onModelChange: (model: string) => void;
  enableTools: boolean;
  onToggleTools: (enabled: boolean) => void;
  wsConnected: boolean;
}
|
||||||
|
|
||||||
|
function makeProps(overrides: Partial<ChatHeaderProps> = {}): ChatHeaderProps {
|
||||||
|
return {
|
||||||
|
projectPath: "/test/project",
|
||||||
|
onCloseProject: vi.fn(),
|
||||||
|
contextUsage: { used: 1000, total: 10000, percentage: 10 },
|
||||||
|
onClearSession: vi.fn(),
|
||||||
|
model: "claude-sonnet",
|
||||||
|
availableModels: ["llama3"],
|
||||||
|
claudeModels: ["claude-sonnet"],
|
||||||
|
hasAnthropicKey: true,
|
||||||
|
onModelChange: vi.fn(),
|
||||||
|
enableTools: true,
|
||||||
|
onToggleTools: vi.fn(),
|
||||||
|
wsConnected: false,
|
||||||
|
...overrides,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
describe("ChatHeader", () => {
|
||||||
|
it("renders project path", () => {
|
||||||
|
render(<ChatHeader {...makeProps()} />);
|
||||||
|
expect(screen.getByText("/test/project")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onCloseProject when close button is clicked", () => {
|
||||||
|
const onCloseProject = vi.fn();
|
||||||
|
render(<ChatHeader {...makeProps({ onCloseProject })} />);
|
||||||
|
fireEvent.click(screen.getByText("\u2715"));
|
||||||
|
expect(onCloseProject).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("displays context percentage with green emoji when low", () => {
|
||||||
|
render(
|
||||||
|
<ChatHeader
|
||||||
|
{...makeProps({
|
||||||
|
contextUsage: { used: 1000, total: 10000, percentage: 10 },
|
||||||
|
})}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
expect(screen.getByText(/10%/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("displays yellow emoji when context is 75-89%", () => {
|
||||||
|
render(
|
||||||
|
<ChatHeader
|
||||||
|
{...makeProps({
|
||||||
|
contextUsage: { used: 8000, total: 10000, percentage: 80 },
|
||||||
|
})}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
expect(screen.getByText(/80%/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("displays red emoji when context is 90%+", () => {
|
||||||
|
render(
|
||||||
|
<ChatHeader
|
||||||
|
{...makeProps({
|
||||||
|
contextUsage: { used: 9500, total: 10000, percentage: 95 },
|
||||||
|
})}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
expect(screen.getByText(/95%/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onClearSession when New Session button is clicked", () => {
|
||||||
|
const onClearSession = vi.fn();
|
||||||
|
render(<ChatHeader {...makeProps({ onClearSession })} />);
|
||||||
|
fireEvent.click(screen.getByText(/New Session/));
|
||||||
|
expect(onClearSession).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders select dropdown when model options are available", () => {
|
||||||
|
render(<ChatHeader {...makeProps()} />);
|
||||||
|
const select = screen.getByRole("combobox");
|
||||||
|
expect(select).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders text input when no model options are available", () => {
|
||||||
|
render(
|
||||||
|
<ChatHeader {...makeProps({ availableModels: [], claudeModels: [] })} />,
|
||||||
|
);
|
||||||
|
expect(screen.getByPlaceholderText("Model")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onModelChange when model is selected from dropdown", () => {
|
||||||
|
const onModelChange = vi.fn();
|
||||||
|
render(<ChatHeader {...makeProps({ onModelChange })} />);
|
||||||
|
const select = screen.getByRole("combobox");
|
||||||
|
fireEvent.change(select, { target: { value: "llama3" } });
|
||||||
|
expect(onModelChange).toHaveBeenCalledWith("llama3");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onModelChange when text is typed in model input", () => {
|
||||||
|
const onModelChange = vi.fn();
|
||||||
|
render(
|
||||||
|
<ChatHeader
|
||||||
|
{...makeProps({
|
||||||
|
availableModels: [],
|
||||||
|
claudeModels: [],
|
||||||
|
onModelChange,
|
||||||
|
})}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
const input = screen.getByPlaceholderText("Model");
|
||||||
|
fireEvent.change(input, { target: { value: "custom-model" } });
|
||||||
|
expect(onModelChange).toHaveBeenCalledWith("custom-model");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls onToggleTools when checkbox is toggled", () => {
|
||||||
|
const onToggleTools = vi.fn();
|
||||||
|
render(<ChatHeader {...makeProps({ onToggleTools })} />);
|
||||||
|
const checkbox = screen.getByRole("checkbox");
|
||||||
|
fireEvent.click(checkbox);
|
||||||
|
expect(onToggleTools).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("displays the build timestamp in human-readable format", () => {
|
||||||
|
render(<ChatHeader {...makeProps()} />);
|
||||||
|
expect(screen.getByText("Built: 2026-01-01 00:00")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("displays Storkit branding in the header", () => {
|
||||||
|
render(<ChatHeader {...makeProps()} />);
|
||||||
|
expect(screen.getByText("Storkit")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
  it("labels the claude-pty optgroup as 'Claude Code'", () => {
    render(<ChatHeader {...makeProps()} />);
    const optgroup = document.querySelector('optgroup[label="Claude Code"]');
    expect(optgroup).toBeInTheDocument();
  });

  it("labels the Anthropic API optgroup as 'Anthropic API'", () => {
    render(<ChatHeader {...makeProps()} />);
    const optgroup = document.querySelector('optgroup[label="Anthropic API"]');
    expect(optgroup).toBeInTheDocument();
  });

  it("shows disabled placeholder when claudeModels is empty and no API key", () => {
    render(
      <ChatHeader
        {...makeProps({
          claudeModels: [],
          hasAnthropicKey: false,
          availableModels: ["llama3"],
        })}
      />,
    );
    expect(
      screen.getByText("Add Anthropic API key to load models"),
    ).toBeInTheDocument();
  });

  // ── Close button hover/focus handlers ─────────────────────────────────────

  it("close button changes background on mouseOver and resets on mouseOut", () => {
    render(<ChatHeader {...makeProps()} />);
    // "\u2715" is the ✕ glyph the close button renders.
    const closeBtn = screen.getByText("\u2715");
    fireEvent.mouseOver(closeBtn);
    expect(closeBtn.style.background).toBe("rgb(51, 51, 51)");
    fireEvent.mouseOut(closeBtn);
    expect(closeBtn.style.background).toBe("transparent");
  });

  it("close button changes background on focus and resets on blur", () => {
    render(<ChatHeader {...makeProps()} />);
    const closeBtn = screen.getByText("\u2715");
    fireEvent.focus(closeBtn);
    expect(closeBtn.style.background).toBe("rgb(51, 51, 51)");
    fireEvent.blur(closeBtn);
    expect(closeBtn.style.background).toBe("transparent");
  });

  // ── New Session button hover/focus handlers ───────────────────────────────

  it("New Session button changes style on mouseOver and resets on mouseOut", () => {
    render(<ChatHeader {...makeProps()} />);
    const sessionBtn = screen.getByText(/New Session/);
    fireEvent.mouseOver(sessionBtn);
    expect(sessionBtn.style.backgroundColor).toBe("rgb(63, 63, 63)");
    expect(sessionBtn.style.color).toBe("rgb(204, 204, 204)");
    fireEvent.mouseOut(sessionBtn);
    expect(sessionBtn.style.backgroundColor).toBe("rgb(47, 47, 47)");
    expect(sessionBtn.style.color).toBe("rgb(136, 136, 136)");
  });

  it("New Session button changes style on focus and resets on blur", () => {
    render(<ChatHeader {...makeProps()} />);
    const sessionBtn = screen.getByText(/New Session/);
    fireEvent.focus(sessionBtn);
    expect(sessionBtn.style.backgroundColor).toBe("rgb(63, 63, 63)");
    expect(sessionBtn.style.color).toBe("rgb(204, 204, 204)");
    fireEvent.blur(sessionBtn);
    expect(sessionBtn.style.backgroundColor).toBe("rgb(47, 47, 47)");
    expect(sessionBtn.style.color).toBe("rgb(136, 136, 136)");
  });

  // ── Rebuild button ────────────────────────────────────────────────────────

  it("renders rebuild button", () => {
    render(<ChatHeader {...makeProps()} />);
    expect(
      screen.getByTitle("Rebuild and restart the server"),
    ).toBeInTheDocument();
  });

  it("shows confirmation dialog when rebuild button is clicked", () => {
    render(<ChatHeader {...makeProps()} />);
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    expect(screen.getByText("Rebuild and restart?")).toBeInTheDocument();
  });

  it("hides confirmation dialog when cancel is clicked", () => {
    render(<ChatHeader {...makeProps()} />);
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    fireEvent.click(screen.getByText("Cancel"));
    expect(screen.queryByText("Rebuild and restart?")).not.toBeInTheDocument();
  });

  it("calls api.rebuildAndRestart and shows Building... when confirmed", async () => {
    const { api } = await import("../api/client");
    // A never-settling promise keeps the component pinned in "building".
    vi.mocked(api.rebuildAndRestart).mockReturnValue(new Promise(() => {}));

    render(<ChatHeader {...makeProps()} />);
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    fireEvent.click(screen.getByText("Rebuild"));

    await waitFor(() => {
      expect(screen.getByText("Building...")).toBeInTheDocument();
    });
    expect(api.rebuildAndRestart).toHaveBeenCalled();
  });

  it("shows Reconnecting... when rebuild triggers a network error", async () => {
    const { api } = await import("../api/client");
    // A fetch-style rejection signals the server went down to restart.
    vi.mocked(api.rebuildAndRestart).mockRejectedValue(
      new TypeError("Failed to fetch"),
    );

    render(<ChatHeader {...makeProps()} />);
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    fireEvent.click(screen.getByText("Rebuild"));

    await waitFor(() => {
      expect(screen.getByText("Reconnecting...")).toBeInTheDocument();
    });
  });

  it("shows error when rebuild returns a failure message", async () => {
    const { api } = await import("../api/client");
    // A resolved value is the compiler output of a FAILED build.
    vi.mocked(api.rebuildAndRestart).mockResolvedValue(
      "error[E0308]: mismatched types",
    );

    render(<ChatHeader {...makeProps()} />);
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    fireEvent.click(screen.getByText("Rebuild"));

    await waitFor(() => {
      expect(screen.getByText("⚠ Rebuild failed")).toBeInTheDocument();
      expect(
        screen.getByText("error[E0308]: mismatched types"),
      ).toBeInTheDocument();
    });
  });

  it("clears reconnecting state when wsConnected transitions to true", async () => {
    const { api } = await import("../api/client");
    vi.mocked(api.rebuildAndRestart).mockRejectedValue(
      new TypeError("Failed to fetch"),
    );

    const { rerender } = render(
      <ChatHeader {...makeProps({ wsConnected: false })} />,
    );
    fireEvent.click(screen.getByTitle("Rebuild and restart the server"));
    fireEvent.click(screen.getByText("Rebuild"));

    await waitFor(() => {
      expect(screen.getByText("Reconnecting...")).toBeInTheDocument();
    });

    // Simulate the WebSocket coming back up after the server restart.
    rerender(<ChatHeader {...makeProps({ wsConnected: true })} />);

    await waitFor(() => {
      expect(screen.getByText("↺ Rebuild")).toBeInTheDocument();
    });
  });
});
|
||||||
545
frontend/src/components/ChatHeader.tsx
Normal file
545
frontend/src/components/ChatHeader.tsx
Normal file
@@ -0,0 +1,545 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import { api } from "../api/client";
|
||||||
|
|
||||||
|
const { useState, useEffect } = React;
|
||||||
|
|
||||||
|
function formatBuildTime(isoString: string): string {
|
||||||
|
const d = new Date(isoString);
|
||||||
|
const year = d.getUTCFullYear();
|
||||||
|
const month = String(d.getUTCMonth() + 1).padStart(2, "0");
|
||||||
|
const day = String(d.getUTCDate()).padStart(2, "0");
|
||||||
|
const hours = String(d.getUTCHours()).padStart(2, "0");
|
||||||
|
const minutes = String(d.getUTCMinutes()).padStart(2, "0");
|
||||||
|
return `Built: ${year}-${month}-${day} ${hours}:${minutes}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Token accounting for the active session's context window. */
interface ContextUsage {
  used: number; // tokens consumed so far
  total: number; // context window size, in tokens
  percentage: number; // fill level, displayed directly as "<n>%"
}

/** Props for the project header bar. */
interface ChatHeaderProps {
  projectPath: string; // absolute path shown (and used as tooltip) in the bar
  onCloseProject: () => void;
  contextUsage: ContextUsage;
  onClearSession: () => void; // "New Session" button handler
  model: string; // currently selected model id
  availableModels: string[]; // Ollama model names
  claudeModels: string[]; // Anthropic API model names
  hasAnthropicKey: boolean; // gates the "add API key" placeholder option
  onModelChange: (model: string) => void;
  enableTools: boolean; // "Tools" checkbox state
  onToggleTools: (enabled: boolean) => void;
  wsConnected: boolean; // WebSocket liveness; clears post-rebuild "reconnecting"
}
|
||||||
|
|
||||||
|
const getContextEmoji = (percentage: number): string => {
|
||||||
|
if (percentage >= 90) return "🔴";
|
||||||
|
if (percentage >= 75) return "🟡";
|
||||||
|
return "🟢";
|
||||||
|
};
|
||||||
|
|
||||||
|
/** Rebuild flow: idle → building → (error | reconnecting) → idle. */
type RebuildStatus = "idle" | "building" | "reconnecting" | "error";
|
||||||
|
|
||||||
|
/**
 * Header bar for an open project.
 *
 * Shows the project path, build timestamp, context-window usage, a
 * rebuild-and-restart control (with confirmation dialog and error toast),
 * a "New Session" button, model selection (Claude Code / Anthropic API /
 * Ollama optgroups, or a free-text input when no model lists are known),
 * and a checkbox toggling tool use.
 */
export function ChatHeader({
  projectPath,
  onCloseProject,
  contextUsage,
  onClearSession,
  model,
  availableModels,
  claudeModels,
  hasAnthropicKey,
  onModelChange,
  enableTools,
  onToggleTools,
  wsConnected,
}: ChatHeaderProps) {
  // A <select> is rendered only when at least one model list is non-empty;
  // otherwise the user gets a free-text model input.
  const hasModelOptions = availableModels.length > 0 || claudeModels.length > 0;
  const [showConfirm, setShowConfirm] = useState(false);
  const [rebuildStatus, setRebuildStatus] = useState<RebuildStatus>("idle");
  const [rebuildError, setRebuildError] = useState<string | null>(null);

  // When WS reconnects after a rebuild, clear the reconnecting status.
  useEffect(() => {
    if (rebuildStatus === "reconnecting" && wsConnected) {
      setRebuildStatus("idle");
    }
  }, [wsConnected, rebuildStatus]);

  // Open the confirmation dialog, clearing any stale error first.
  function handleRebuildClick() {
    setRebuildError(null);
    setShowConfirm(true);
  }

  // Kick off the rebuild. Note the inverted result handling below:
  // a RESOLVED promise means the build failed (the server stayed up to reply),
  // while a rejection means the server went down to restart.
  function handleRebuildConfirm() {
    setShowConfirm(false);
    setRebuildStatus("building");
    api
      .rebuildAndRestart()
      .then((result) => {
        // Got a response = build failed (server still running).
        setRebuildStatus("error");
        setRebuildError(result || "Rebuild failed");
      })
      .catch(() => {
        // Network error = server is restarting (build succeeded).
        setRebuildStatus("reconnecting");
      });
  }

  function handleRebuildCancel() {
    setShowConfirm(false);
  }

  function handleDismissError() {
    setRebuildStatus("idle");
    setRebuildError(null);
  }

  const rebuildButtonLabel =
    rebuildStatus === "building"
      ? "Building..."
      : rebuildStatus === "reconnecting"
        ? "Reconnecting..."
        : rebuildStatus === "error"
          ? "⚠ Rebuild Failed"
          : "↺ Rebuild";

  const rebuildButtonDisabled =
    rebuildStatus === "building" || rebuildStatus === "reconnecting";

  return (
    <>
      {/* Confirmation dialog overlay */}
      {showConfirm && (
        <div
          style={{
            position: "fixed",
            inset: 0,
            background: "rgba(0,0,0,0.6)",
            display: "flex",
            alignItems: "center",
            justifyContent: "center",
            zIndex: 1000,
          }}
        >
          <div
            style={{
              background: "#1e1e1e",
              border: "1px solid #444",
              borderRadius: "8px",
              padding: "24px",
              maxWidth: "400px",
              width: "90%",
              color: "#ececec",
            }}
          >
            <div
              style={{
                fontWeight: "600",
                fontSize: "1em",
                marginBottom: "8px",
              }}
            >
              Rebuild and restart?
            </div>
            <div
              style={{
                fontSize: "0.85em",
                color: "#aaa",
                marginBottom: "20px",
                lineHeight: "1.5",
              }}
            >
              This will run <code>cargo build</code> and replace the running
              server. All agents will be stopped. The page will reconnect
              automatically when the new server is ready.
            </div>
            <div
              style={{
                display: "flex",
                gap: "10px",
                justifyContent: "flex-end",
              }}
            >
              <button
                type="button"
                onClick={handleRebuildCancel}
                style={{
                  padding: "6px 16px",
                  borderRadius: "6px",
                  border: "1px solid #444",
                  background: "transparent",
                  color: "#aaa",
                  cursor: "pointer",
                  fontSize: "0.9em",
                }}
              >
                Cancel
              </button>
              <button
                type="button"
                onClick={handleRebuildConfirm}
                style={{
                  padding: "6px 16px",
                  borderRadius: "6px",
                  border: "none",
                  background: "#c0392b",
                  color: "#fff",
                  cursor: "pointer",
                  fontSize: "0.9em",
                  fontWeight: "600",
                }}
              >
                Rebuild
              </button>
            </div>
          </div>
        </div>
      )}

      {/* Error toast (bottom-right) shown when the rebuild returned compiler output */}
      {rebuildStatus === "error" && rebuildError && (
        <div
          style={{
            position: "fixed",
            bottom: "20px",
            right: "20px",
            background: "#3a1010",
            border: "1px solid #c0392b",
            borderRadius: "8px",
            padding: "12px 16px",
            maxWidth: "480px",
            color: "#ececec",
            zIndex: 1000,
            fontSize: "0.85em",
          }}
        >
          <div
            style={{
              display: "flex",
              justifyContent: "space-between",
              alignItems: "flex-start",
              gap: "12px",
            }}
          >
            <div>
              <div style={{ fontWeight: "600", marginBottom: "4px" }}>
                ⚠ Rebuild failed
              </div>
              {/* Raw build output; scrolls when long. */}
              <pre
                style={{
                  margin: 0,
                  whiteSpace: "pre-wrap",
                  wordBreak: "break-word",
                  color: "#f08080",
                  maxHeight: "120px",
                  overflowY: "auto",
                }}
              >
                {rebuildError}
              </pre>
            </div>
            <button
              type="button"
              onClick={handleDismissError}
              style={{
                background: "transparent",
                border: "none",
                color: "#aaa",
                cursor: "pointer",
                fontSize: "1em",
                flexShrink: 0,
              }}
            >
              ✕
            </button>
          </div>
        </div>
      )}

      {/* Main header bar */}
      <div
        style={{
          padding: "12px 24px",
          borderBottom: "1px solid #333",
          display: "flex",
          alignItems: "center",
          justifyContent: "space-between",
          background: "#171717",
          flexShrink: 0,
          fontSize: "0.9rem",
          color: "#ececec",
        }}
      >
        {/* Left cluster: app name, project path (ellipsized from the left), close */}
        <div
          style={{
            display: "flex",
            alignItems: "center",
            gap: "12px",
            overflow: "hidden",
            flex: 1,
            marginRight: "20px",
          }}
        >
          <span
            style={{
              fontWeight: "700",
              fontSize: "1em",
              color: "#ececec",
              flexShrink: 0,
              letterSpacing: "0.02em",
            }}
          >
            Storkit
          </span>
          {/* direction: rtl + textAlign: left keeps the path's TAIL visible when truncated */}
          <div
            title={projectPath}
            style={{
              whiteSpace: "nowrap",
              overflow: "hidden",
              textOverflow: "ellipsis",
              fontWeight: "500",
              color: "#aaa",
              direction: "rtl",
              textAlign: "left",
              fontFamily: "monospace",
              fontSize: "0.85em",
            }}
          >
            {projectPath}
          </div>
          <button
            type="button"
            onClick={onCloseProject}
            style={{
              background: "transparent",
              border: "none",
              cursor: "pointer",
              color: "#999",
              fontSize: "0.8em",
              padding: "4px 8px",
              borderRadius: "4px",
            }}
            onMouseOver={(e) => {
              e.currentTarget.style.background = "#333";
            }}
            onMouseOut={(e) => {
              e.currentTarget.style.background = "transparent";
            }}
            onFocus={(e) => {
              e.currentTarget.style.background = "#333";
            }}
            onBlur={(e) => {
              e.currentTarget.style.background = "transparent";
            }}
          >
            ✕
          </button>
        </div>

        {/* Right cluster: build time, context usage, rebuild, new session, model, tools */}
        <div style={{ display: "flex", alignItems: "center", gap: "16px" }}>
          {/* __BUILD_TIME__ is a compile-time global — presumably injected by the
              bundler; confirm against the build config. */}
          <div
            style={{
              fontSize: "0.75em",
              color: "#555",
              whiteSpace: "nowrap",
              fontFamily: "monospace",
            }}
            title={__BUILD_TIME__}
          >
            {formatBuildTime(__BUILD_TIME__)}
          </div>

          <div
            style={{
              fontSize: "0.9em",
              color: "#ccc",
              whiteSpace: "nowrap",
            }}
            title={`Context: ${contextUsage.used.toLocaleString()} / ${contextUsage.total.toLocaleString()} tokens (${contextUsage.percentage}%)`}
          >
            {getContextEmoji(contextUsage.percentage)} {contextUsage.percentage}
            %
          </div>

          <button
            type="button"
            onClick={handleRebuildClick}
            disabled={rebuildButtonDisabled}
            title="Rebuild and restart the server"
            style={{
              padding: "6px 12px",
              borderRadius: "99px",
              border: "none",
              fontSize: "0.85em",
              backgroundColor:
                rebuildStatus === "error" ? "#5a1010" : "#2f2f2f",
              color:
                rebuildStatus === "error"
                  ? "#f08080"
                  : rebuildButtonDisabled
                    ? "#555"
                    : "#888",
              cursor: rebuildButtonDisabled ? "not-allowed" : "pointer",
              outline: "none",
              transition: "all 0.2s",
              opacity: rebuildButtonDisabled ? 0.7 : 1,
            }}
            onMouseOver={(e) => {
              if (!rebuildButtonDisabled) {
                e.currentTarget.style.backgroundColor = "#3f3f3f";
                e.currentTarget.style.color = "#ccc";
              }
            }}
            onMouseOut={(e) => {
              if (!rebuildButtonDisabled) {
                e.currentTarget.style.backgroundColor =
                  rebuildStatus === "error" ? "#5a1010" : "#2f2f2f";
                e.currentTarget.style.color =
                  rebuildStatus === "error" ? "#f08080" : "#888";
              }
            }}
            onFocus={(e) => {
              if (!rebuildButtonDisabled) {
                e.currentTarget.style.backgroundColor = "#3f3f3f";
                e.currentTarget.style.color = "#ccc";
              }
            }}
            onBlur={(e) => {
              if (!rebuildButtonDisabled) {
                e.currentTarget.style.backgroundColor =
                  rebuildStatus === "error" ? "#5a1010" : "#2f2f2f";
                e.currentTarget.style.color =
                  rebuildStatus === "error" ? "#f08080" : "#888";
              }
            }}
          >
            {rebuildButtonLabel}
          </button>

          <button
            type="button"
            onClick={onClearSession}
            style={{
              padding: "6px 12px",
              borderRadius: "99px",
              border: "none",
              fontSize: "0.85em",
              backgroundColor: "#2f2f2f",
              color: "#888",
              cursor: "pointer",
              outline: "none",
              transition: "all 0.2s",
            }}
            onMouseOver={(e) => {
              e.currentTarget.style.backgroundColor = "#3f3f3f";
              e.currentTarget.style.color = "#ccc";
            }}
            onMouseOut={(e) => {
              e.currentTarget.style.backgroundColor = "#2f2f2f";
              e.currentTarget.style.color = "#888";
            }}
            onFocus={(e) => {
              e.currentTarget.style.backgroundColor = "#3f3f3f";
              e.currentTarget.style.color = "#ccc";
            }}
            onBlur={(e) => {
              e.currentTarget.style.backgroundColor = "#2f2f2f";
              e.currentTarget.style.color = "#888";
            }}
          >
            🔄 New Session
          </button>

          {hasModelOptions ? (
            <select
              value={model}
              onChange={(e) => onModelChange(e.target.value)}
              style={{
                padding: "6px 32px 6px 16px",
                borderRadius: "99px",
                border: "none",
                fontSize: "0.9em",
                backgroundColor: "#2f2f2f",
                color: "#ececec",
                cursor: "pointer",
                outline: "none",
                appearance: "none",
                WebkitAppearance: "none",
                backgroundImage: `url("data:image/svg+xml;charset=US-ASCII,%3Csvg%20xmlns%3D%22http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg%22%20width%3D%22292.4%22%20height%3D%22292.4%22%3E%3Cpath%20fill%3D%22%23ececec%22%20d%3D%22M287%2069.4a17.6%2017.6%200%200%200-13-5.4H18.4c-5%200-9.3%201.8-12.9%205.4A17.6%2017.6%200%200%200%200%2082.2c0%205%201.8%209.3%205.4%2012.9l128%20127.9c3.6%203.6%207.8%205.4%2012.8%205.4s9.2-1.8%2012.8-5.4L287%2095c3.5-3.5%205.4-7.8%205.4-12.8%200-5-1.9-9.2-5.5-12.8z%22%2F%3E%3C%2Fsvg%3E")`,
                backgroundRepeat: "no-repeat",
                backgroundPosition: "right 12px center",
                backgroundSize: "10px",
              }}
            >
              <optgroup label="Claude Code">
                <option value="claude-code-pty">claude-code-pty</option>
              </optgroup>
              {(claudeModels.length > 0 || !hasAnthropicKey) && (
                <optgroup label="Anthropic API">
                  {claudeModels.length > 0 ? (
                    claudeModels.map((m: string) => (
                      <option key={m} value={m}>
                        {m}
                      </option>
                    ))
                  ) : (
                    <option value="" disabled>
                      Add Anthropic API key to load models
                    </option>
                  )}
                </optgroup>
              )}
              {availableModels.length > 0 && (
                <optgroup label="Ollama">
                  {availableModels.map((m: string) => (
                    <option key={m} value={m}>
                      {m}
                    </option>
                  ))}
                </optgroup>
              )}
            </select>
          ) : (
            <input
              value={model}
              onChange={(e) => onModelChange(e.target.value)}
              placeholder="Model"
              style={{
                padding: "6px 12px",
                borderRadius: "99px",
                border: "none",
                fontSize: "0.9em",
                background: "#2f2f2f",
                color: "#ececec",
                outline: "none",
              }}
            />
          )}

          <label
            style={{
              display: "flex",
              alignItems: "center",
              gap: "6px",
              cursor: "pointer",
              fontSize: "0.9em",
              color: "#aaa",
            }}
            title="Allow the Agent to read/write files"
          >
            <input
              type="checkbox"
              checked={enableTools}
              onChange={(e) => onToggleTools(e.target.checked)}
              style={{ accentColor: "#000" }}
            />
            <span>Tools</span>
          </label>
        </div>
      </div>
    </>
  );
}
|
||||||
279
frontend/src/components/ChatInput.test.tsx
Normal file
279
frontend/src/components/ChatInput.test.tsx
Normal file
@@ -0,0 +1,279 @@
|
|||||||
|
import { act, fireEvent, render, screen } from "@testing-library/react";
|
||||||
|
import * as React from "react";
|
||||||
|
import { describe, expect, it, vi } from "vitest";
|
||||||
|
import type { ChatInputHandle } from "./ChatInput";
|
||||||
|
import { ChatInput } from "./ChatInput";
|
||||||
|
|
||||||
|
// Core ChatInput behavior: controlled textarea, Enter-to-submit,
// stop/cancel while loading, and queued-message indicators.
describe("ChatInput component (Story 178 AC1)", () => {
  it("renders a textarea with Send a message... placeholder", () => {
    render(
      <ChatInput
        loading={false}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");
    expect(textarea.tagName.toLowerCase()).toBe("textarea");
  });

  it("manages input state internally — typing updates value without calling onSubmit", async () => {
    const onSubmit = vi.fn();

    render(
      <ChatInput
        loading={false}
        queuedMessages={[]}
        onSubmit={onSubmit}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");

    await act(async () => {
      fireEvent.change(textarea, { target: { value: "hello world" } });
    });

    expect((textarea as HTMLTextAreaElement).value).toBe("hello world");
    expect(onSubmit).not.toHaveBeenCalled();
  });

  it("calls onSubmit with the input text on Enter key press", async () => {
    const onSubmit = vi.fn();

    render(
      <ChatInput
        loading={false}
        queuedMessages={[]}
        onSubmit={onSubmit}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");

    await act(async () => {
      fireEvent.change(textarea, { target: { value: "test message" } });
    });
    await act(async () => {
      fireEvent.keyDown(textarea, { key: "Enter", shiftKey: false });
    });

    expect(onSubmit).toHaveBeenCalledWith("test message");
  });

  it("clears input after submitting", async () => {
    render(
      <ChatInput
        loading={false}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");

    await act(async () => {
      fireEvent.change(textarea, { target: { value: "hello" } });
    });
    await act(async () => {
      fireEvent.keyDown(textarea, { key: "Enter", shiftKey: false });
    });

    expect((textarea as HTMLTextAreaElement).value).toBe("");
  });

  it("does not submit on Shift+Enter", async () => {
    const onSubmit = vi.fn();

    render(
      <ChatInput
        loading={false}
        queuedMessages={[]}
        onSubmit={onSubmit}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");

    await act(async () => {
      fireEvent.change(textarea, { target: { value: "multiline" } });
    });
    await act(async () => {
      fireEvent.keyDown(textarea, { key: "Enter", shiftKey: true });
    });

    expect(onSubmit).not.toHaveBeenCalled();
    expect((textarea as HTMLTextAreaElement).value).toBe("multiline");
  });

  it("calls onCancel when stop button is clicked while loading with empty input", async () => {
    const onCancel = vi.fn();

    render(
      <ChatInput
        loading={true}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={onCancel}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    // "■" is the stop glyph shown while a response is streaming.
    const stopButton = screen.getByRole("button", { name: "■" });
    await act(async () => {
      fireEvent.click(stopButton);
    });

    expect(onCancel).toHaveBeenCalled();
  });

  it("renders queued message indicators", () => {
    render(
      <ChatInput
        loading={true}
        queuedMessages={[
          { id: "1", text: "first message" },
          { id: "2", text: "second message" },
        ]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const indicators = screen.getAllByTestId("queued-message-indicator");
    expect(indicators).toHaveLength(2);
    expect(indicators[0]).toHaveTextContent("first message");
    expect(indicators[1]).toHaveTextContent("second message");
  });

  it("calls onRemoveQueuedMessage when cancel button is clicked", async () => {
    const onRemove = vi.fn();

    render(
      <ChatInput
        loading={true}
        queuedMessages={[{ id: "q1", text: "to remove" }]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={onRemove}
      />,
    );

    const cancelBtn = screen.getByTitle("Cancel queued message");
    await act(async () => {
      fireEvent.click(cancelBtn);
    });

    expect(onRemove).toHaveBeenCalledWith("q1");
  });

  it("edit button restores queued message text to input and removes from queue", async () => {
    const onRemove = vi.fn();

    render(
      <ChatInput
        loading={true}
        queuedMessages={[{ id: "q1", text: "edit me back" }]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={onRemove}
      />,
    );

    const editBtn = screen.getByTitle("Edit queued message");
    await act(async () => {
      fireEvent.click(editBtn);
    });

    const textarea = screen.getByPlaceholderText("Send a message...");
    expect((textarea as HTMLTextAreaElement).value).toBe("edit me back");
    expect(onRemove).toHaveBeenCalledWith("q1");
  });
});
|
||||||
|
|
||||||
|
// Regression coverage for the imperative appendToInput handle (Bug 215).
describe("ChatInput appendToInput (Bug 215 regression)", () => {
  it("appendToInput sets text into an empty input", async () => {
    const ref = React.createRef<ChatInputHandle>();

    render(
      <ChatInput
        ref={ref}
        loading={false}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    await act(async () => {
      ref.current?.appendToInput("queued message");
    });

    const textarea = screen.getByPlaceholderText("Send a message...");
    expect((textarea as HTMLTextAreaElement).value).toBe("queued message");
  });

  it("appendToInput appends to existing input content with a newline separator", async () => {
    const ref = React.createRef<ChatInputHandle>();

    render(
      <ChatInput
        ref={ref}
        loading={false}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    const textarea = screen.getByPlaceholderText("Send a message...");

    await act(async () => {
      fireEvent.change(textarea, { target: { value: "existing text" } });
    });

    await act(async () => {
      ref.current?.appendToInput("appended text");
    });

    expect((textarea as HTMLTextAreaElement).value).toBe(
      "existing text\nappended text",
    );
  });

  it("multiple queued messages joined with newlines are appended on cancel", async () => {
    const ref = React.createRef<ChatInputHandle>();

    render(
      <ChatInput
        ref={ref}
        loading={false}
        queuedMessages={[]}
        onSubmit={vi.fn()}
        onCancel={vi.fn()}
        onRemoveQueuedMessage={vi.fn()}
      />,
    );

    await act(async () => {
      ref.current?.appendToInput("msg one\nmsg two");
    });

    const textarea = screen.getByPlaceholderText("Send a message...");
    expect((textarea as HTMLTextAreaElement).value).toBe("msg one\nmsg two");
  });
});
|
||||||
419
frontend/src/components/ChatInput.tsx
Normal file
419
frontend/src/components/ChatInput.tsx
Normal file
@@ -0,0 +1,419 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import { api } from "../api/client";
|
||||||
|
|
||||||
|
const {
|
||||||
|
forwardRef,
|
||||||
|
useCallback,
|
||||||
|
useEffect,
|
||||||
|
useImperativeHandle,
|
||||||
|
useRef,
|
||||||
|
useState,
|
||||||
|
} = React;
|
||||||
|
|
||||||
|
/** Imperative handle exposed by ChatInput via forwardRef. */
export interface ChatInputHandle {
  /** Append `text` to the current input, separated by a newline when non-empty. */
  appendToInput(text: string): void;
}

/** Props for the ChatInput composer component. */
interface ChatInputProps {
  // True while a request is in flight; flips the send button into a stop button.
  loading: boolean;
  // Messages waiting to be sent, rendered as dismissible indicators above the input.
  queuedMessages: { id: string; text: string }[];
  // Called with the raw input text when the user submits.
  onSubmit: (message: string) => void;
  // Called when the user presses the stop button while loading.
  onCancel: () => void;
  // Called with a queued message id when the user edits or cancels it.
  onRemoveQueuedMessage: (id: string) => void;
}
|
||||||
|
|
||||||
|
/** Fuzzy-match: returns true if all chars of `query` appear in order in `str`. */
|
||||||
|
function fuzzyMatch(str: string, query: string): boolean {
|
||||||
|
if (!query) return true;
|
||||||
|
const lower = str.toLowerCase();
|
||||||
|
const q = query.toLowerCase();
|
||||||
|
let qi = 0;
|
||||||
|
for (let i = 0; i < lower.length && qi < q.length; i++) {
|
||||||
|
if (lower[i] === q[qi]) qi++;
|
||||||
|
}
|
||||||
|
return qi === q.length;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Score a fuzzy match: lower is better. Exact prefix match wins, then shorter paths. */
|
||||||
|
function fuzzyScore(str: string, query: string): number {
|
||||||
|
const lower = str.toLowerCase();
|
||||||
|
const q = query.toLowerCase();
|
||||||
|
// Prefer matches where query appears as a contiguous substring
|
||||||
|
if (lower.includes(q)) return lower.indexOf(q);
|
||||||
|
return str.length;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Props for the @-mention file picker dropdown rendered above the textarea. */
interface FilePickerOverlayProps {
  // Text typed after the triggering "@", used to filter/rank `files`.
  query: string;
  // Full list of candidate project file paths.
  files: string[];
  // Index of the keyboard-highlighted row within the filtered list.
  selectedIndex: number;
  // Invoked with the chosen file path on click.
  onSelect: (file: string) => void;
  // NOTE(review): onDismiss and anchorRef are declared but not consumed by
  // the current FilePickerOverlay implementation — confirm whether they are
  // reserved for future use.
  onDismiss: () => void;
  anchorRef: React.RefObject<HTMLTextAreaElement | null>;
}
|
||||||
|
|
||||||
|
/**
 * Dropdown listing up to 10 fuzzy-matched project files for the current
 * "@query". Renders nothing when no file matches. Each row is a button that
 * calls `onSelect(file)`; the `selectedIndex` row gets highlight styling.
 */
function FilePickerOverlay({
  query,
  files,
  selectedIndex,
  onSelect,
}: FilePickerOverlayProps) {
  // Filter to subsequence matches, rank by fuzzyScore, cap at 10 entries.
  const filtered = files
    .filter((f) => fuzzyMatch(f, query))
    .sort((a, b) => fuzzyScore(a, query) - fuzzyScore(b, query))
    .slice(0, 10);

  if (filtered.length === 0) return null;

  return (
    <div
      data-testid="file-picker-overlay"
      style={{
        // Anchored just above the input row (parent is position: relative).
        position: "absolute",
        bottom: "100%",
        left: 0,
        right: 0,
        background: "#1e1e1e",
        border: "1px solid #444",
        borderRadius: "8px",
        marginBottom: "6px",
        overflow: "hidden",
        zIndex: 100,
        boxShadow: "0 4px 16px rgba(0,0,0,0.4)",
        maxHeight: "240px",
        overflowY: "auto",
      }}
    >
      {filtered.map((file, idx) => (
        <button
          key={file}
          type="button"
          data-testid={`file-picker-item-${idx}`}
          onClick={() => onSelect(file)}
          style={{
            display: "block",
            width: "100%",
            textAlign: "left",
            padding: "8px 14px",
            // Highlight the keyboard-selected row.
            background: idx === selectedIndex ? "#2d4a6e" : "transparent",
            border: "none",
            color: idx === selectedIndex ? "#ececec" : "#aaa",
            cursor: "pointer",
            fontFamily: "monospace",
            fontSize: "0.85rem",
            whiteSpace: "nowrap",
            overflow: "hidden",
            textOverflow: "ellipsis",
          }}
        >
          {file}
        </button>
      ))}
    </div>
  );
}
|
||||||
|
|
||||||
|
/**
 * Chat composer: a textarea with Enter-to-send (Shift+Enter for newline),
 * queued-message indicators, and an "@"-triggered fuzzy file picker.
 * Exposes `appendToInput` via ref (see ChatInputHandle).
 */
export const ChatInput = forwardRef<ChatInputHandle, ChatInputProps>(
  function ChatInput(
    { loading, queuedMessages, onSubmit, onCancel, onRemoveQueuedMessage },
    ref,
  ) {
    const [input, setInput] = useState("");
    const inputRef = useRef<HTMLTextAreaElement>(null);

    // File picker state
    const [projectFiles, setProjectFiles] = useState<string[]>([]);
    // null = picker closed; "" or text = picker open with that query.
    const [pickerQuery, setPickerQuery] = useState<string | null>(null);
    const [pickerSelectedIndex, setPickerSelectedIndex] = useState(0);
    // Index of the triggering "@" in the input, used when splicing the selection in.
    const [pickerAtStart, setPickerAtStart] = useState(0);

    useImperativeHandle(ref, () => ({
      // Append with a newline separator unless the input is empty.
      appendToInput(text: string) {
        setInput((prev) => (prev ? `${prev}\n${text}` : text));
      },
    }));

    // Focus the textarea on mount.
    useEffect(() => {
      inputRef.current?.focus();
    }, []);

    // Compute filtered files for current picker query (recomputed each render;
    // the list is a fresh array every time, even when empty).
    const filteredFiles =
      pickerQuery !== null
        ? projectFiles
            .filter((f) => fuzzyMatch(f, pickerQuery))
            .sort(
              (a, b) => fuzzyScore(a, pickerQuery) - fuzzyScore(b, pickerQuery),
            )
            .slice(0, 10)
        : [];

    const dismissPicker = useCallback(() => {
      setPickerQuery(null);
      setPickerSelectedIndex(0);
    }, []);

    const selectFile = useCallback(
      (file: string) => {
        // Replace the @query portion with @file
        const before = input.slice(0, pickerAtStart);
        const cursorPos = inputRef.current?.selectionStart ?? input.length;
        const after = input.slice(cursorPos);
        setInput(`${before}@${file}${after}`);
        dismissPicker();
        // Restore focus after state update
        setTimeout(() => inputRef.current?.focus(), 0);
      },
      [input, pickerAtStart, dismissPicker],
    );

    const handleInputChange = useCallback(
      (e: React.ChangeEvent<HTMLTextAreaElement>) => {
        const val = e.target.value;
        setInput(val);

        const cursor = e.target.selectionStart ?? val.length;
        // Find the last @ before the cursor that starts a reference token
        const textUpToCursor = val.slice(0, cursor);
        // Match @ not preceded by non-whitespace (i.e. @ at start or after space/newline)
        const atMatch = textUpToCursor.match(/(^|[\s\n])@([^\s@]*)$/);

        if (atMatch) {
          const query = atMatch[2];
          const atPos = textUpToCursor.lastIndexOf("@");
          setPickerAtStart(atPos);
          setPickerQuery(query);
          setPickerSelectedIndex(0);

          // Lazily load files on first trigger
          if (projectFiles.length === 0) {
            api
              .listProjectFiles()
              .then(setProjectFiles)
              .catch(() => {});
          }
        } else {
          if (pickerQuery !== null) dismissPicker();
        }
      },
      [projectFiles.length, pickerQuery, dismissPicker],
    );

    // Keyboard handling: picker navigation/selection takes precedence; plain
    // Enter (no Shift) submits the message.
    // NOTE(review): handleSubmit is referenced here but absent from the deps
    // array; this only works because `filteredFiles` is a new array every
    // render, forcing the callback to be recreated. Fragile — confirm intent.
    const handleKeyDown = useCallback(
      (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
        if (pickerQuery !== null && filteredFiles.length > 0) {
          if (e.key === "ArrowDown") {
            e.preventDefault();
            setPickerSelectedIndex((i) =>
              Math.min(i + 1, filteredFiles.length - 1),
            );
            return;
          }
          if (e.key === "ArrowUp") {
            e.preventDefault();
            setPickerSelectedIndex((i) => Math.max(i - 1, 0));
            return;
          }
          if (e.key === "Enter" || e.key === "Tab") {
            e.preventDefault();
            selectFile(filteredFiles[pickerSelectedIndex] ?? filteredFiles[0]);
            return;
          }
          if (e.key === "Escape") {
            e.preventDefault();
            dismissPicker();
            return;
          }
        } else if (e.key === "Escape" && pickerQuery !== null) {
          // Picker open but no matches: Escape still dismisses it.
          e.preventDefault();
          dismissPicker();
          return;
        }

        if (e.key === "Enter" && !e.shiftKey) {
          e.preventDefault();
          handleSubmit();
        }
      },
      [
        pickerQuery,
        filteredFiles,
        pickerSelectedIndex,
        selectFile,
        dismissPicker,
      ],
    );

    // Submit non-blank input and reset the composer.
    const handleSubmit = () => {
      if (!input.trim()) return;
      onSubmit(input);
      setInput("");
      dismissPicker();
    };

    return (
      <div
        style={{
          padding: "24px",
          background: "#171717",
          display: "flex",
          justifyContent: "center",
        }}
      >
        <div
          style={{
            maxWidth: "768px",
            width: "100%",
            display: "flex",
            flexDirection: "column",
            gap: "8px",
          }}
        >
          {/* Queued message indicators */}
          {queuedMessages.map(({ id, text }) => (
            <div
              key={id}
              data-testid="queued-message-indicator"
              style={{
                display: "flex",
                alignItems: "center",
                gap: "8px",
                padding: "8px 12px",
                background: "#1e1e1e",
                border: "1px solid #3a3a3a",
                borderRadius: "12px",
                fontSize: "0.875rem",
              }}
            >
              <span
                style={{
                  color: "#666",
                  flexShrink: 0,
                  fontSize: "0.7rem",
                  fontWeight: 700,
                  letterSpacing: "0.05em",
                  textTransform: "uppercase",
                }}
              >
                Queued
              </span>
              <span
                style={{
                  color: "#888",
                  flex: 1,
                  overflow: "hidden",
                  textOverflow: "ellipsis",
                  whiteSpace: "nowrap",
                }}
              >
                {text}
              </span>
              {/* Pull the queued text back into the composer for editing. */}
              <button
                type="button"
                title="Edit queued message"
                onClick={() => {
                  setInput(text);
                  onRemoveQueuedMessage(id);
                  inputRef.current?.focus();
                }}
                style={{
                  background: "none",
                  border: "none",
                  color: "#666",
                  cursor: "pointer",
                  padding: "2px 6px",
                  fontSize: "0.8rem",
                  flexShrink: 0,
                  borderRadius: "4px",
                }}
              >
                Edit
              </button>
              {/* Drop the queued message entirely. */}
              <button
                type="button"
                title="Cancel queued message"
                onClick={() => onRemoveQueuedMessage(id)}
                style={{
                  background: "none",
                  border: "none",
                  color: "#666",
                  cursor: "pointer",
                  padding: "2px 4px",
                  fontSize: "0.875rem",
                  flexShrink: 0,
                  borderRadius: "4px",
                }}
              >
                ✕
              </button>
            </div>
          ))}
          {/* Input row with file picker overlay */}
          <div
            style={{
              display: "flex",
              gap: "8px",
              alignItems: "center",
              position: "relative",
            }}
          >
            {pickerQuery !== null && (
              <FilePickerOverlay
                query={pickerQuery}
                files={projectFiles}
                selectedIndex={pickerSelectedIndex}
                onSelect={selectFile}
                onDismiss={dismissPicker}
                anchorRef={inputRef}
              />
            )}
            <textarea
              ref={inputRef}
              value={input}
              onChange={handleInputChange}
              onKeyDown={handleKeyDown}
              placeholder="Send a message..."
              rows={1}
              style={{
                flex: 1,
                padding: "14px 20px",
                borderRadius: "24px",
                border: "1px solid #333",
                outline: "none",
                fontSize: "1rem",
                fontWeight: "500",
                background: "#2f2f2f",
                color: "#ececec",
                boxShadow: "0 2px 6px rgba(0,0,0,0.02)",
                resize: "none",
                overflowY: "auto",
                fontFamily: "inherit",
              }}
            />
            {/* Doubles as stop button while loading with an empty input. */}
            <button
              type="button"
              onClick={loading && !input.trim() ? onCancel : handleSubmit}
              disabled={!loading && !input.trim()}
              style={{
                background: "#ececec",
                color: "black",
                border: "none",
                borderRadius: "50%",
                width: "32px",
                height: "32px",
                display: "flex",
                alignItems: "center",
                justifyContent: "center",
                cursor: "pointer",
                opacity: !loading && !input.trim() ? 0.5 : 1,
                flexShrink: 0,
              }}
            >
              {loading && !input.trim() ? "■" : "↑"}
            </button>
          </div>
        </div>
      </div>
    );
  },
);
|
||||||
194
frontend/src/components/ChatInputFilePicker.test.tsx
Normal file
194
frontend/src/components/ChatInputFilePicker.test.tsx
Normal file
@@ -0,0 +1,194 @@
|
|||||||
|
import {
  act,
  fireEvent,
  render,
  screen,
  waitFor,
} from "@testing-library/react";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { api } from "../api/client";
import { ChatInput } from "./ChatInput";

// Replace the real API client so no HTTP requests are made from tests.
vi.mock("../api/client", () => ({
  api: {
    listProjectFiles: vi.fn(),
  },
}));

const mockedListProjectFiles = vi.mocked(api.listProjectFiles);

// Minimal prop set shared by all file-picker tests.
const defaultProps = {
  loading: false,
  queuedMessages: [],
  onSubmit: vi.fn(),
  onCancel: vi.fn(),
  onRemoveQueuedMessage: vi.fn(),
};

beforeEach(() => {
  vi.clearAllMocks();
  // Fixed project-file fixture used by every test below.
  mockedListProjectFiles.mockResolvedValue([
    "src/main.rs",
    "src/lib.rs",
    "frontend/index.html",
    "README.md",
  ]);
});
|
||||||
|
|
||||||
|
describe("File picker overlay (Story 269 AC1)", () => {
|
||||||
|
it("shows file picker overlay when @ is typed", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not show file picker overlay for text without @", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "hello world" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(screen.queryByTestId("file-picker-overlay")).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("File picker fuzzy matching (Story 269 AC2)", () => {
|
||||||
|
it("filters files by query typed after @", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@main" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// main.rs should be visible, README.md should not
|
||||||
|
expect(screen.getByText("src/main.rs")).toBeInTheDocument();
|
||||||
|
expect(screen.queryByText("README.md")).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows all files when @ is typed with no query", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// All 4 files should be visible
|
||||||
|
expect(screen.getByText("src/main.rs")).toBeInTheDocument();
|
||||||
|
expect(screen.getByText("src/lib.rs")).toBeInTheDocument();
|
||||||
|
expect(screen.getByText("README.md")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("File picker selection (Story 269 AC3)", () => {
|
||||||
|
it("clicking a file inserts @path into the message", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-item-0")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.click(screen.getByTestId("file-picker-item-0"));
|
||||||
|
});
|
||||||
|
|
||||||
|
// Picker should be dismissed and the file reference inserted
|
||||||
|
expect(screen.queryByTestId("file-picker-overlay")).not.toBeInTheDocument();
|
||||||
|
expect((textarea as HTMLTextAreaElement).value).toMatch(/^@\S+/);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("Enter key selects highlighted file and inserts it into message", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@main" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(textarea, { key: "Enter" });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(screen.queryByTestId("file-picker-overlay")).not.toBeInTheDocument();
|
||||||
|
expect((textarea as HTMLTextAreaElement).value).toContain("@src/main.rs");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("File picker dismiss (Story 269 AC5)", () => {
|
||||||
|
it("Escape key dismisses the file picker", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(textarea, { key: "Escape" });
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(screen.queryByTestId("file-picker-overlay")).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("Multiple @ references (Story 269 AC6)", () => {
|
||||||
|
it("typing @ after a completed reference triggers picker again", async () => {
|
||||||
|
render(<ChatInput {...defaultProps} />);
|
||||||
|
const textarea = screen.getByPlaceholderText("Send a message...");
|
||||||
|
|
||||||
|
// First reference
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.change(textarea, { target: { value: "@main" } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Select file
|
||||||
|
await act(async () => {
|
||||||
|
fireEvent.keyDown(textarea, { key: "Enter" });
|
||||||
|
});
|
||||||
|
|
||||||
|
// Type a second @
|
||||||
|
await act(async () => {
|
||||||
|
const current = (textarea as HTMLTextAreaElement).value;
|
||||||
|
fireEvent.change(textarea, { target: { value: `${current} @` } });
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("file-picker-overlay")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
98
frontend/src/components/CodeRef.test.tsx
Normal file
98
frontend/src/components/CodeRef.test.tsx
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
import { fireEvent, render, screen } from "@testing-library/react";
import { describe, expect, it, vi } from "vitest";
import { InlineCodeWithRefs, parseCodeRefs } from "./CodeRef";

// Mock the settingsApi so we don't make real HTTP calls in tests
vi.mock("../api/settings", () => ({
  settingsApi: {
    openFile: vi.fn(() => Promise.resolve({ success: true })),
  },
}));
||||||
|
|
||||||
|
describe("parseCodeRefs (Story 193)", () => {
|
||||||
|
it("returns a single text part for plain text with no code refs", () => {
|
||||||
|
const parts = parseCodeRefs("Hello world, no code here");
|
||||||
|
expect(parts).toHaveLength(1);
|
||||||
|
expect(parts[0]).toEqual({
|
||||||
|
type: "text",
|
||||||
|
value: "Hello world, no code here",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("detects a simple code reference", () => {
|
||||||
|
const parts = parseCodeRefs("src/main.rs:42");
|
||||||
|
expect(parts).toHaveLength(1);
|
||||||
|
expect(parts[0]).toMatchObject({
|
||||||
|
type: "ref",
|
||||||
|
path: "src/main.rs",
|
||||||
|
line: 42,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("detects a code reference embedded in surrounding text", () => {
|
||||||
|
const parts = parseCodeRefs("See src/lib.rs:100 for details");
|
||||||
|
expect(parts).toHaveLength(3);
|
||||||
|
expect(parts[0]).toEqual({ type: "text", value: "See " });
|
||||||
|
expect(parts[1]).toMatchObject({
|
||||||
|
type: "ref",
|
||||||
|
path: "src/lib.rs",
|
||||||
|
line: 100,
|
||||||
|
});
|
||||||
|
expect(parts[2]).toEqual({ type: "text", value: " for details" });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("detects multiple code references", () => {
|
||||||
|
const parts = parseCodeRefs("Check src/a.rs:1 and src/b.ts:200");
|
||||||
|
const refs = parts.filter((p) => p.type === "ref");
|
||||||
|
expect(refs).toHaveLength(2);
|
||||||
|
expect(refs[0]).toMatchObject({ path: "src/a.rs", line: 1 });
|
||||||
|
expect(refs[1]).toMatchObject({ path: "src/b.ts", line: 200 });
|
||||||
|
});
|
||||||
|
|
||||||
|
it("does not match text without a file extension", () => {
|
||||||
|
const parts = parseCodeRefs("something:42");
|
||||||
|
// "something" has no dot so it should not match
|
||||||
|
expect(parts.every((p) => p.type === "text")).toBe(true);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("matches nested paths with multiple slashes", () => {
|
||||||
|
const parts = parseCodeRefs("frontend/src/components/Chat.tsx:55");
|
||||||
|
expect(parts).toHaveLength(1);
|
||||||
|
expect(parts[0]).toMatchObject({
|
||||||
|
type: "ref",
|
||||||
|
path: "frontend/src/components/Chat.tsx",
|
||||||
|
line: 55,
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("InlineCodeWithRefs component (Story 193)", () => {
|
||||||
|
it("renders plain text without buttons", () => {
|
||||||
|
render(<InlineCodeWithRefs text="just some text" />);
|
||||||
|
expect(screen.getByText("just some text")).toBeInTheDocument();
|
||||||
|
expect(screen.queryByRole("button")).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders a code reference as a clickable button", () => {
|
||||||
|
render(<InlineCodeWithRefs text="src/main.rs:42" />);
|
||||||
|
const button = screen.getByRole("button", { name: /src\/main\.rs:42/ });
|
||||||
|
expect(button).toBeInTheDocument();
|
||||||
|
expect(button).toHaveAttribute("title", "Open src/main.rs:42 in editor");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls settingsApi.openFile when a code reference is clicked", async () => {
|
||||||
|
const { settingsApi } = await import("../api/settings");
|
||||||
|
render(<InlineCodeWithRefs text="src/main.rs:42" />);
|
||||||
|
const button = screen.getByRole("button");
|
||||||
|
fireEvent.click(button);
|
||||||
|
expect(settingsApi.openFile).toHaveBeenCalledWith("src/main.rs", 42);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders mixed text and code references correctly", () => {
|
||||||
|
render(<InlineCodeWithRefs text="See src/lib.rs:10 for the impl" />);
|
||||||
|
// getByText normalizes text (trims whitespace), so "See " → "See"
|
||||||
|
expect(screen.getByText("See")).toBeInTheDocument();
|
||||||
|
expect(screen.getByRole("button")).toBeInTheDocument();
|
||||||
|
expect(screen.getByText("for the impl")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
118
frontend/src/components/CodeRef.tsx
Normal file
118
frontend/src/components/CodeRef.tsx
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import { settingsApi } from "../api/settings";
|
||||||
|
|
||||||
|
// Matches patterns like `src/main.rs:42` or `path/to/file.tsx:123`
|
||||||
|
// Path must contain at least one dot (file extension) and a colon followed by digits.
|
||||||
|
const CODE_REF_PATTERN = /\b([\w.\-/]+\.\w+):(\d+)\b/g;
|
||||||
|
|
||||||
|
export interface CodeRefPart {
|
||||||
|
type: "text" | "ref";
|
||||||
|
value: string;
|
||||||
|
path?: string;
|
||||||
|
line?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parse a string into text and code-reference parts.
|
||||||
|
* Code references have the format `path/to/file.ext:line`.
|
||||||
|
*/
|
||||||
|
export function parseCodeRefs(text: string): CodeRefPart[] {
|
||||||
|
const parts: CodeRefPart[] = [];
|
||||||
|
let lastIndex = 0;
|
||||||
|
const re = new RegExp(CODE_REF_PATTERN.source, "g");
|
||||||
|
let match: RegExpExecArray | null;
|
||||||
|
|
||||||
|
match = re.exec(text);
|
||||||
|
while (match !== null) {
|
||||||
|
if (match.index > lastIndex) {
|
||||||
|
parts.push({ type: "text", value: text.slice(lastIndex, match.index) });
|
||||||
|
}
|
||||||
|
parts.push({
|
||||||
|
type: "ref",
|
||||||
|
value: match[0],
|
||||||
|
path: match[1],
|
||||||
|
line: Number(match[2]),
|
||||||
|
});
|
||||||
|
lastIndex = re.lastIndex;
|
||||||
|
match = re.exec(text);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (lastIndex < text.length) {
|
||||||
|
parts.push({ type: "text", value: text.slice(lastIndex) });
|
||||||
|
}
|
||||||
|
|
||||||
|
return parts;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Props for a single clickable file:line reference. */
interface CodeRefLinkProps {
  // File path portion of the reference (e.g. "src/main.rs").
  path: string;
  // 1-based(?) line number parsed from the reference — semantics defined by
  // the editor integration; this component just forwards it.
  line: number;
  // Rendered label, normally the raw "path:line" text.
  children: React.ReactNode;
}

/**
 * Renders a code reference as a dotted-underline button that asks the backend
 * to open the file at the given line in the user's editor.
 */
function CodeRefLink({ path, line, children }: CodeRefLinkProps) {
  const handleClick = React.useCallback(() => {
    settingsApi.openFile(path, line).catch(() => {
      // Silently ignore errors (e.g. no editor configured)
    });
  }, [path, line]);

  return (
    <button
      type="button"
      onClick={handleClick}
      title={`Open ${path}:${line} in editor`}
      style={{
        // Styled as inline code-like link rather than a default button.
        background: "none",
        border: "none",
        padding: 0,
        cursor: "pointer",
        color: "#7ec8e3",
        fontFamily: "monospace",
        fontSize: "inherit",
        textDecoration: "underline",
        textDecorationStyle: "dotted",
      }}
    >
      {children}
    </button>
  );
}
|
||||||
|
|
||||||
|
/** Props for InlineCodeWithRefs. */
interface InlineCodeWithRefsProps {
  // Raw text that may contain `path:line` code references.
  text: string;
}

/**
 * Renders inline text with code references converted to clickable links.
 * Text with no references is returned unchanged (no extra wrappers).
 */
export function InlineCodeWithRefs({ text }: InlineCodeWithRefsProps) {
  const parts = parseCodeRefs(text);

  // Fast path: nothing to link, render the raw string.
  if (parts.length === 1 && parts[0].type === "text") {
    return <>{text}</>;
  }

  return (
    <>
      {parts.map((part) => {
        if (
          part.type === "ref" &&
          part.path !== undefined &&
          part.line !== undefined
        ) {
          return (
            <CodeRefLink
              key={`ref-${part.path}:${part.line}`}
              path={part.path}
              line={part.line}
            >
              {part.value}
            </CodeRefLink>
          );
        }
        // NOTE(review): keys are derived from part content, so a string
        // containing the same ref or text twice produces duplicate keys —
        // confirm whether inputs can repeat.
        return <span key={`text-${part.value}`}>{part.value}</span>;
      })}
    </>
  );
}
|
||||||
158
frontend/src/components/HelpOverlay.tsx
Normal file
158
frontend/src/components/HelpOverlay.tsx
Normal file
@@ -0,0 +1,158 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
|
||||||
|
const { useEffect, useRef } = React;
|
||||||
|
|
||||||
|
/** A single slash command entry shown in the help overlay. */
interface SlashCommand {
  // Display form, including any argument placeholder (e.g. "/btw <question>").
  name: string;
  // One-or-two sentence user-facing description.
  description: string;
}

// Static list rendered by HelpOverlay; add new commands here.
const SLASH_COMMANDS: SlashCommand[] = [
  {
    name: "/help",
    description: "Show this list of available slash commands.",
  },
  {
    name: "/btw <question>",
    description:
      "Ask a side question using the current conversation as context. The question and answer are not added to the conversation history.",
  },
];

/** Props for the HelpOverlay modal. */
interface HelpOverlayProps {
  // Invoked when the user dismisses the overlay (click, Escape/Enter/Space).
  onDismiss: () => void;
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Dismissible overlay that lists all available slash commands.
|
||||||
|
* Dismiss with Escape, Enter, or Space.
|
||||||
|
*/
|
||||||
|
export function HelpOverlay({ onDismiss }: HelpOverlayProps) {
|
||||||
|
const dismissRef = useRef(onDismiss);
|
||||||
|
dismissRef.current = onDismiss;
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
const handler = (e: KeyboardEvent) => {
|
||||||
|
if (e.key === "Escape" || e.key === "Enter" || e.key === " ") {
|
||||||
|
e.preventDefault();
|
||||||
|
dismissRef.current();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
window.addEventListener("keydown", handler);
|
||||||
|
return () => window.removeEventListener("keydown", handler);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
return (
|
||||||
|
// biome-ignore lint/a11y/noStaticElementInteractions: backdrop dismiss is supplementary; keyboard handled via window keydown
|
||||||
|
// biome-ignore lint/a11y/useKeyWithClickEvents: keyboard dismiss handled via window keydown listener
|
||||||
|
<div
|
||||||
|
data-testid="help-overlay"
|
||||||
|
onClick={onDismiss}
|
||||||
|
style={{
|
||||||
|
position: "fixed",
|
||||||
|
inset: 0,
|
||||||
|
background: "rgba(0,0,0,0.55)",
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "center",
|
||||||
|
zIndex: 1000,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{/* biome-ignore lint/a11y/useKeyWithClickEvents: stop-propagation only; no real interaction */}
|
||||||
|
{/* biome-ignore lint/a11y/noStaticElementInteractions: stop-propagation only; no real interaction */}
|
||||||
|
<div
|
||||||
|
data-testid="help-panel"
|
||||||
|
onClick={(e) => e.stopPropagation()}
|
||||||
|
style={{
|
||||||
|
background: "#2f2f2f",
|
||||||
|
border: "1px solid #444",
|
||||||
|
borderRadius: "12px",
|
||||||
|
padding: "24px",
|
||||||
|
maxWidth: "560px",
|
||||||
|
width: "90vw",
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "16px",
|
||||||
|
boxShadow: "0 8px 32px rgba(0,0,0,0.5)",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{/* Header */}
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
fontSize: "0.7rem",
|
||||||
|
fontWeight: 700,
|
||||||
|
letterSpacing: "0.08em",
|
||||||
|
textTransform: "uppercase",
|
||||||
|
color: "#a0d4a0",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Slash Commands
|
||||||
|
</span>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={onDismiss}
|
||||||
|
title="Dismiss (Escape, Enter, or Space)"
|
||||||
|
style={{
|
||||||
|
background: "none",
|
||||||
|
border: "none",
|
||||||
|
color: "#666",
|
||||||
|
cursor: "pointer",
|
||||||
|
fontSize: "1.1rem",
|
||||||
|
padding: "2px 6px",
|
||||||
|
borderRadius: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
✕
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Command list */}
|
||||||
|
<div style={{ display: "flex", flexDirection: "column", gap: "12px" }}>
|
||||||
|
{SLASH_COMMANDS.map((cmd) => (
|
||||||
|
<div
|
||||||
|
key={cmd.name}
|
||||||
|
style={{ display: "flex", flexDirection: "column", gap: "2px" }}
|
||||||
|
>
|
||||||
|
<code
|
||||||
|
style={{
|
||||||
|
fontSize: "0.88rem",
|
||||||
|
color: "#e0e0e0",
|
||||||
|
fontFamily: "monospace",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{cmd.name}
|
||||||
|
</code>
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
fontSize: "0.85rem",
|
||||||
|
color: "#999",
|
||||||
|
lineHeight: "1.5",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{cmd.description}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Footer hint */}
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75rem",
|
||||||
|
color: "#555",
|
||||||
|
textAlign: "center",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Press Escape, Enter, or Space to dismiss
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
1329
frontend/src/components/LozengeFlyContext.test.tsx
Normal file
1329
frontend/src/components/LozengeFlyContext.test.tsx
Normal file
File diff suppressed because it is too large
Load Diff
445
frontend/src/components/LozengeFlyContext.tsx
Normal file
445
frontend/src/components/LozengeFlyContext.tsx
Normal file
@@ -0,0 +1,445 @@
|
|||||||
|
/**
|
||||||
|
* LozengeFlyContext – FLIP-style animation system for agent lozenges.
|
||||||
|
*
|
||||||
|
* When an agent is assigned to a story, a fixed-positioned clone of the
|
||||||
|
* agent lozenge "flies" from the roster badge in AgentPanel to the slot
|
||||||
|
* in StagePanel (or vice-versa when the agent is removed). The overlay
|
||||||
|
* travels above all other UI elements (z-index 9999) so it is never
|
||||||
|
* clipped by the layout.
|
||||||
|
*/
|
||||||
|
import * as React from "react";
|
||||||
|
import * as ReactDOM from "react-dom";
|
||||||
|
import type { PipelineState } from "../api/client";
|
||||||
|
|
||||||
|
const {
|
||||||
|
createContext,
|
||||||
|
useCallback,
|
||||||
|
useContext,
|
||||||
|
useEffect,
|
||||||
|
useLayoutEffect,
|
||||||
|
useMemo,
|
||||||
|
useRef,
|
||||||
|
useState,
|
||||||
|
} = React;
|
||||||
|
|
||||||
|
// ─── Public context shape ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
export interface LozengeFlyContextValue {
|
||||||
|
/** Register/unregister a roster badge DOM element by agent name. */
|
||||||
|
registerRosterEl: (agentName: string, el: HTMLElement | null) => void;
|
||||||
|
/**
|
||||||
|
* Save the latest DOMRect for a story's lozenge slot.
|
||||||
|
* Called on every render of AgentLozenge via useLayoutEffect.
|
||||||
|
*/
|
||||||
|
saveSlotRect: (storyId: string, rect: DOMRect) => void;
|
||||||
|
/**
|
||||||
|
* Set of storyIds whose slot lozenges should be hidden because a
|
||||||
|
* fly-in animation is in progress.
|
||||||
|
*/
|
||||||
|
pendingFlyIns: ReadonlySet<string>;
|
||||||
|
/**
|
||||||
|
* Set of agent names whose roster badge should be hidden.
|
||||||
|
* An agent is hidden while it is assigned to a work item OR while its
|
||||||
|
* fly-out animation (work item → roster) is still in flight.
|
||||||
|
*/
|
||||||
|
hiddenRosterAgents: ReadonlySet<string>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const noop = () => {};
|
||||||
|
const emptySet: ReadonlySet<string> = new Set();
|
||||||
|
|
||||||
|
export const LozengeFlyContext = createContext<LozengeFlyContextValue>({
|
||||||
|
registerRosterEl: noop,
|
||||||
|
saveSlotRect: noop,
|
||||||
|
pendingFlyIns: emptySet,
|
||||||
|
hiddenRosterAgents: emptySet,
|
||||||
|
});
|
||||||
|
|
||||||
|
// ─── Internal flying-lozenge state ───────────────────────────────────────────
|
||||||
|
|
||||||
|
interface FlyingLozenge {
|
||||||
|
id: string;
|
||||||
|
label: string;
|
||||||
|
isActive: boolean;
|
||||||
|
startX: number;
|
||||||
|
startY: number;
|
||||||
|
endX: number;
|
||||||
|
endY: number;
|
||||||
|
/** false = positioned at start, true = CSS transition to end */
|
||||||
|
flying: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface PendingFlyIn {
|
||||||
|
storyId: string;
|
||||||
|
agentName: string;
|
||||||
|
label: string;
|
||||||
|
isActive: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface PendingFlyOut {
|
||||||
|
storyId: string;
|
||||||
|
agentName: string;
|
||||||
|
label: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Provider ─────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
interface LozengeFlyProviderProps {
|
||||||
|
children: React.ReactNode;
|
||||||
|
pipeline: PipelineState;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function LozengeFlyProvider({
|
||||||
|
children,
|
||||||
|
pipeline,
|
||||||
|
}: LozengeFlyProviderProps) {
|
||||||
|
const rosterElsRef = useRef<Map<string, HTMLElement>>(new Map());
|
||||||
|
const savedSlotRectsRef = useRef<Map<string, DOMRect>>(new Map());
|
||||||
|
const prevPipelineRef = useRef<PipelineState | null>(null);
|
||||||
|
|
||||||
|
// Actions detected in useLayoutEffect, consumed in useEffect
|
||||||
|
const pendingFlyInActionsRef = useRef<PendingFlyIn[]>([]);
|
||||||
|
const pendingFlyOutActionsRef = useRef<PendingFlyOut[]>([]);
|
||||||
|
|
||||||
|
// Track the active animation ID per story/agent so stale timeouts
|
||||||
|
// from superseded animations don't prematurely clear state.
|
||||||
|
const activeFlyInPerStory = useRef<Map<string, string>>(new Map());
|
||||||
|
const activeFlyOutPerAgent = useRef<Map<string, string>>(new Map());
|
||||||
|
|
||||||
|
const [pendingFlyIns, setPendingFlyIns] = useState<ReadonlySet<string>>(
|
||||||
|
new Set(),
|
||||||
|
);
|
||||||
|
const [flyingLozenges, setFlyingLozenges] = useState<FlyingLozenge[]>([]);
|
||||||
|
|
||||||
|
// Agents currently assigned to a work item (derived from pipeline state).
|
||||||
|
const assignedAgentNames = useMemo(() => {
|
||||||
|
const names = new Set<string>();
|
||||||
|
for (const item of [
|
||||||
|
...pipeline.backlog,
|
||||||
|
...pipeline.current,
|
||||||
|
...pipeline.qa,
|
||||||
|
...pipeline.merge,
|
||||||
|
]) {
|
||||||
|
if (item.agent) names.add(item.agent.agent_name);
|
||||||
|
}
|
||||||
|
return names;
|
||||||
|
}, [pipeline]);
|
||||||
|
|
||||||
|
// Agents whose fly-out (work item → roster) animation is still in flight.
|
||||||
|
// Kept hidden until the clone lands so no duplicate badge flashes.
|
||||||
|
const [flyingOutAgents, setFlyingOutAgents] = useState<ReadonlySet<string>>(
|
||||||
|
new Set(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Union: hide badge whenever the agent is assigned OR still flying back.
|
||||||
|
const hiddenRosterAgents = useMemo(() => {
|
||||||
|
if (flyingOutAgents.size === 0) return assignedAgentNames;
|
||||||
|
const combined = new Set(assignedAgentNames);
|
||||||
|
for (const name of flyingOutAgents) combined.add(name);
|
||||||
|
return combined;
|
||||||
|
}, [assignedAgentNames, flyingOutAgents]);
|
||||||
|
|
||||||
|
const registerRosterEl = useCallback(
|
||||||
|
(agentName: string, el: HTMLElement | null) => {
|
||||||
|
if (el) {
|
||||||
|
rosterElsRef.current.set(agentName, el);
|
||||||
|
} else {
|
||||||
|
rosterElsRef.current.delete(agentName);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[],
|
||||||
|
);
|
||||||
|
|
||||||
|
const saveSlotRect = useCallback((storyId: string, rect: DOMRect) => {
|
||||||
|
savedSlotRectsRef.current.set(storyId, rect);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// ── Detect pipeline changes (runs before paint) ───────────────────────────
|
||||||
|
// Sets pendingFlyIns so slot lozenges hide before the browser paints,
|
||||||
|
// preventing a one-frame "flash" of the visible lozenge before fly-in.
|
||||||
|
useLayoutEffect(() => {
|
||||||
|
if (prevPipelineRef.current === null) {
|
||||||
|
prevPipelineRef.current = pipeline;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const prev = prevPipelineRef.current;
|
||||||
|
const allPrev = [
|
||||||
|
...prev.backlog,
|
||||||
|
...prev.current,
|
||||||
|
...prev.qa,
|
||||||
|
...prev.merge,
|
||||||
|
];
|
||||||
|
const allCurr = [
|
||||||
|
...pipeline.backlog,
|
||||||
|
...pipeline.current,
|
||||||
|
...pipeline.qa,
|
||||||
|
...pipeline.merge,
|
||||||
|
];
|
||||||
|
|
||||||
|
const newFlyInStoryIds = new Set<string>();
|
||||||
|
|
||||||
|
for (const curr of allCurr) {
|
||||||
|
const prevItem = allPrev.find((p) => p.story_id === curr.story_id);
|
||||||
|
const agentChanged =
|
||||||
|
curr.agent &&
|
||||||
|
(!prevItem?.agent ||
|
||||||
|
prevItem.agent.agent_name !== curr.agent.agent_name);
|
||||||
|
if (agentChanged && curr.agent) {
|
||||||
|
const label = curr.agent.model
|
||||||
|
? `${curr.agent.agent_name} ${curr.agent.model}`
|
||||||
|
: curr.agent.agent_name;
|
||||||
|
pendingFlyInActionsRef.current.push({
|
||||||
|
storyId: curr.story_id,
|
||||||
|
agentName: curr.agent.agent_name,
|
||||||
|
label,
|
||||||
|
isActive: curr.agent.status === "running",
|
||||||
|
});
|
||||||
|
newFlyInStoryIds.add(curr.story_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const prevItem of allPrev) {
|
||||||
|
if (!prevItem.agent) continue;
|
||||||
|
const currItem = allCurr.find((c) => c.story_id === prevItem.story_id);
|
||||||
|
const agentRemoved =
|
||||||
|
!currItem?.agent ||
|
||||||
|
currItem.agent.agent_name !== prevItem.agent.agent_name;
|
||||||
|
if (agentRemoved) {
|
||||||
|
const label = prevItem.agent.model
|
||||||
|
? `${prevItem.agent.agent_name} ${prevItem.agent.model}`
|
||||||
|
: prevItem.agent.agent_name;
|
||||||
|
pendingFlyOutActionsRef.current.push({
|
||||||
|
storyId: prevItem.story_id,
|
||||||
|
agentName: prevItem.agent.agent_name,
|
||||||
|
label,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
prevPipelineRef.current = pipeline;
|
||||||
|
|
||||||
|
// Only hide slots for stories that have a matching roster element
|
||||||
|
if (newFlyInStoryIds.size > 0) {
|
||||||
|
const hideable = new Set<string>();
|
||||||
|
for (const storyId of newFlyInStoryIds) {
|
||||||
|
const action = pendingFlyInActionsRef.current.find(
|
||||||
|
(a) => a.storyId === storyId,
|
||||||
|
);
|
||||||
|
if (action && rosterElsRef.current.has(action.agentName)) {
|
||||||
|
hideable.add(storyId);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (hideable.size > 0) {
|
||||||
|
setPendingFlyIns((prev) => {
|
||||||
|
const next = new Set(prev);
|
||||||
|
for (const id of hideable) next.add(id);
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, [pipeline]);
|
||||||
|
|
||||||
|
// ── Execute animations (runs after paint, DOM positions are stable) ───────
|
||||||
|
useEffect(() => {
|
||||||
|
const flyIns = [...pendingFlyInActionsRef.current];
|
||||||
|
pendingFlyInActionsRef.current = [];
|
||||||
|
const flyOuts = [...pendingFlyOutActionsRef.current];
|
||||||
|
pendingFlyOutActionsRef.current = [];
|
||||||
|
|
||||||
|
for (const action of flyIns) {
|
||||||
|
const rosterEl = rosterElsRef.current.get(action.agentName);
|
||||||
|
const slotRect = savedSlotRectsRef.current.get(action.storyId);
|
||||||
|
|
||||||
|
if (!rosterEl || !slotRect) {
|
||||||
|
// No roster element: immediately reveal the slot lozenge
|
||||||
|
setPendingFlyIns((prev) => {
|
||||||
|
const next = new Set(prev);
|
||||||
|
next.delete(action.storyId);
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
const rosterRect = rosterEl.getBoundingClientRect();
|
||||||
|
const id = `fly-in-${action.agentName}-${action.storyId}-${Date.now()}`;
|
||||||
|
activeFlyInPerStory.current.set(action.storyId, id);
|
||||||
|
|
||||||
|
setFlyingLozenges((prev) => [
|
||||||
|
...prev,
|
||||||
|
{
|
||||||
|
id,
|
||||||
|
label: action.label,
|
||||||
|
isActive: action.isActive,
|
||||||
|
startX: rosterRect.left,
|
||||||
|
startY: rosterRect.top,
|
||||||
|
endX: slotRect.left,
|
||||||
|
endY: slotRect.top,
|
||||||
|
flying: false,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
|
||||||
|
// FLIP "Play" step: after two frames the transition begins
|
||||||
|
requestAnimationFrame(() => {
|
||||||
|
requestAnimationFrame(() => {
|
||||||
|
setFlyingLozenges((prev) =>
|
||||||
|
prev.map((l) => (l.id === id ? { ...l, flying: true } : l)),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// After the transition completes, remove clone and reveal slot lozenge.
|
||||||
|
// Only clear pendingFlyIns if this is still the active animation for
|
||||||
|
// this story — a newer animation may have superseded this one.
|
||||||
|
setTimeout(() => {
|
||||||
|
setFlyingLozenges((prev) => prev.filter((l) => l.id !== id));
|
||||||
|
if (activeFlyInPerStory.current.get(action.storyId) === id) {
|
||||||
|
activeFlyInPerStory.current.delete(action.storyId);
|
||||||
|
setPendingFlyIns((prev) => {
|
||||||
|
const next = new Set(prev);
|
||||||
|
next.delete(action.storyId);
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}, 500);
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const action of flyOuts) {
|
||||||
|
const rosterEl = rosterElsRef.current.get(action.agentName);
|
||||||
|
const slotRect = savedSlotRectsRef.current.get(action.storyId);
|
||||||
|
if (!slotRect) continue;
|
||||||
|
|
||||||
|
// Keep the roster badge hidden while the clone is flying back.
|
||||||
|
setFlyingOutAgents((prev) => {
|
||||||
|
const next = new Set(prev);
|
||||||
|
next.add(action.agentName);
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
|
||||||
|
const rosterRect = rosterEl?.getBoundingClientRect();
|
||||||
|
const id = `fly-out-${action.agentName}-${action.storyId}-${Date.now()}`;
|
||||||
|
activeFlyOutPerAgent.current.set(action.agentName, id);
|
||||||
|
|
||||||
|
setFlyingLozenges((prev) => [
|
||||||
|
...prev,
|
||||||
|
{
|
||||||
|
id,
|
||||||
|
label: action.label,
|
||||||
|
isActive: false,
|
||||||
|
startX: slotRect.left,
|
||||||
|
startY: slotRect.top,
|
||||||
|
endX: rosterRect?.left ?? slotRect.left,
|
||||||
|
endY: rosterRect?.top ?? Math.max(0, slotRect.top - 80),
|
||||||
|
flying: false,
|
||||||
|
},
|
||||||
|
]);
|
||||||
|
|
||||||
|
requestAnimationFrame(() => {
|
||||||
|
requestAnimationFrame(() => {
|
||||||
|
setFlyingLozenges((prev) =>
|
||||||
|
prev.map((l) => (l.id === id ? { ...l, flying: true } : l)),
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Only reveal the roster badge if this is still the active fly-out
|
||||||
|
// for this agent — a newer fly-out may have superseded this one.
|
||||||
|
setTimeout(() => {
|
||||||
|
setFlyingLozenges((prev) => prev.filter((l) => l.id !== id));
|
||||||
|
if (activeFlyOutPerAgent.current.get(action.agentName) === id) {
|
||||||
|
activeFlyOutPerAgent.current.delete(action.agentName);
|
||||||
|
setFlyingOutAgents((prev) => {
|
||||||
|
const next = new Set(prev);
|
||||||
|
next.delete(action.agentName);
|
||||||
|
return next;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}, 500);
|
||||||
|
}
|
||||||
|
}, [pipeline]);
|
||||||
|
|
||||||
|
const contextValue = useMemo(
|
||||||
|
() => ({
|
||||||
|
registerRosterEl,
|
||||||
|
saveSlotRect,
|
||||||
|
pendingFlyIns,
|
||||||
|
hiddenRosterAgents,
|
||||||
|
}),
|
||||||
|
[registerRosterEl, saveSlotRect, pendingFlyIns, hiddenRosterAgents],
|
||||||
|
);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<LozengeFlyContext.Provider value={contextValue}>
|
||||||
|
{children}
|
||||||
|
{ReactDOM.createPortal(
|
||||||
|
<FloatingLozengeSurface lozenges={flyingLozenges} />,
|
||||||
|
document.body,
|
||||||
|
)}
|
||||||
|
</LozengeFlyContext.Provider>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Portal surface ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
function FloatingLozengeSurface({ lozenges }: { lozenges: FlyingLozenge[] }) {
|
||||||
|
return (
|
||||||
|
<>
|
||||||
|
{lozenges.map((l) => (
|
||||||
|
<FlyingLozengeClone key={l.id} lozenge={l} />
|
||||||
|
))}
|
||||||
|
</>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
function FlyingLozengeClone({ lozenge }: { lozenge: FlyingLozenge }) {
|
||||||
|
const color = lozenge.isActive ? "#3fb950" : "#e3b341";
|
||||||
|
const x = lozenge.flying ? lozenge.endX : lozenge.startX;
|
||||||
|
const y = lozenge.flying ? lozenge.endY : lozenge.startY;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
data-testid={`flying-lozenge-${lozenge.id}`}
|
||||||
|
style={{
|
||||||
|
position: "fixed",
|
||||||
|
left: `${x}px`,
|
||||||
|
top: `${y}px`,
|
||||||
|
zIndex: 9999,
|
||||||
|
pointerEvents: "none",
|
||||||
|
transition: lozenge.flying
|
||||||
|
? "left 0.4s cubic-bezier(0.4, 0, 0.2, 1), top 0.4s cubic-bezier(0.4, 0, 0.2, 1)"
|
||||||
|
: "none",
|
||||||
|
display: "inline-flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "5px",
|
||||||
|
padding: "2px 8px",
|
||||||
|
borderRadius: "999px",
|
||||||
|
fontSize: "0.72em",
|
||||||
|
fontWeight: 600,
|
||||||
|
background: `${color}18`,
|
||||||
|
color,
|
||||||
|
border: `1px solid ${color}44`,
|
||||||
|
whiteSpace: "nowrap",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{lozenge.isActive && (
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
width: "5px",
|
||||||
|
height: "5px",
|
||||||
|
borderRadius: "50%",
|
||||||
|
background: color,
|
||||||
|
animation: "pulse 1.5s infinite",
|
||||||
|
flexShrink: 0,
|
||||||
|
}}
|
||||||
|
/>
|
||||||
|
)}
|
||||||
|
{lozenge.label}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Hook ─────────────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
export function useLozengeFly(): LozengeFlyContextValue {
|
||||||
|
return useContext(LozengeFlyContext);
|
||||||
|
}
|
||||||
137
frontend/src/components/MessageItem.test.tsx
Normal file
137
frontend/src/components/MessageItem.test.tsx
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
import { render, screen } from "@testing-library/react";
|
||||||
|
import { describe, expect, it, vi } from "vitest";
|
||||||
|
import { MessageItem } from "./MessageItem";
|
||||||
|
|
||||||
|
vi.mock("../api/settings", () => ({
|
||||||
|
settingsApi: {
|
||||||
|
openFile: vi.fn(() => Promise.resolve({ success: true })),
|
||||||
|
},
|
||||||
|
}));
|
||||||
|
|
||||||
|
describe("MessageItem component (Story 178 AC3)", () => {
|
||||||
|
it("renders user message as a bubble", () => {
|
||||||
|
render(<MessageItem msg={{ role: "user", content: "Hello there!" }} />);
|
||||||
|
|
||||||
|
expect(screen.getByText("Hello there!")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders assistant message with markdown-body class", () => {
|
||||||
|
render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{ role: "assistant", content: "Here is my response." }}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(screen.getByText("Here is my response.")).toBeInTheDocument();
|
||||||
|
const text = screen.getByText("Here is my response.");
|
||||||
|
expect(text.closest(".markdown-body")).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders tool message as collapsible details", () => {
|
||||||
|
render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{
|
||||||
|
role: "tool",
|
||||||
|
content: "tool output content",
|
||||||
|
tool_call_id: "toolu_1",
|
||||||
|
}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(screen.getByText(/Tool Output/)).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders tool call badges for assistant messages with tool_calls", () => {
|
||||||
|
render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{
|
||||||
|
role: "assistant",
|
||||||
|
content: "I will read the file.",
|
||||||
|
tool_calls: [
|
||||||
|
{
|
||||||
|
id: "toolu_1",
|
||||||
|
type: "function",
|
||||||
|
function: {
|
||||||
|
name: "Read",
|
||||||
|
arguments: '{"file_path":"src/main.rs"}',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(screen.getByText("I will read the file.")).toBeInTheDocument();
|
||||||
|
expect(screen.getByText("Read(src/main.rs)")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("is wrapped in React.memo (has displayName or $$typeof memo)", () => {
|
||||||
|
// React.memo wraps the component — verify the export is memoized
|
||||||
|
// by checking that the component has a memo wrapper
|
||||||
|
const { type } = { type: MessageItem };
|
||||||
|
// React.memo returns an object with $$typeof === Symbol(react.memo)
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: checking React internals for test
|
||||||
|
expect((type as any).$$typeof).toBeDefined();
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: checking React internals for test
|
||||||
|
const typeofStr = String((type as any).$$typeof);
|
||||||
|
expect(typeofStr).toContain("memo");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("MessageItem code reference rendering (Story 193)", () => {
|
||||||
|
it("renders inline code with a code reference as a clickable button in assistant messages", () => {
|
||||||
|
render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{
|
||||||
|
role: "assistant",
|
||||||
|
content: "Check `src/main.rs:42` for the implementation.",
|
||||||
|
}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const button = screen.getByRole("button", { name: /src\/main\.rs:42/ });
|
||||||
|
expect(button).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("MessageItem user message code fence rendering (Story 196)", () => {
|
||||||
|
it("renders code fences in user messages as code blocks", () => {
|
||||||
|
const { container } = render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{
|
||||||
|
role: "user",
|
||||||
|
content: "Here is some code:\n```js\nconsole.log('hi');\n```",
|
||||||
|
}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Syntax highlighter renders a pre > div > code structure
|
||||||
|
const codeEl = container.querySelector("pre code");
|
||||||
|
expect(codeEl).toBeInTheDocument();
|
||||||
|
expect(codeEl?.textContent).toContain("console.log");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders inline code with single backticks in user messages", () => {
|
||||||
|
render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{ role: "user", content: "Use `npm install` to install." }}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const codeEl = screen.getByText("npm install");
|
||||||
|
expect(codeEl.tagName.toLowerCase()).toBe("code");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("renders user messages with code blocks inside user-markdown-body class", () => {
|
||||||
|
const { container } = render(
|
||||||
|
<MessageItem
|
||||||
|
msg={{
|
||||||
|
role: "user",
|
||||||
|
content: "```js\nconsole.log('hi');\n```",
|
||||||
|
}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
expect(container.querySelector(".user-markdown-body")).toBeTruthy();
|
||||||
|
});
|
||||||
|
});
|
||||||
168
frontend/src/components/MessageItem.tsx
Normal file
168
frontend/src/components/MessageItem.tsx
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import Markdown from "react-markdown";
|
||||||
|
import { Prism as SyntaxHighlighter } from "react-syntax-highlighter";
|
||||||
|
import { oneDark } from "react-syntax-highlighter/dist/esm/styles/prism";
|
||||||
|
import type { Message, ToolCall } from "../types";
|
||||||
|
import { InlineCodeWithRefs } from "./CodeRef";
|
||||||
|
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: react-markdown requires any for component props
|
||||||
|
function CodeBlock({ className, children, ...props }: any) {
|
||||||
|
const match = /language-(\w+)/.exec(className || "");
|
||||||
|
const isInline = !className;
|
||||||
|
const text = String(children);
|
||||||
|
if (!isInline && match) {
|
||||||
|
return (
|
||||||
|
<SyntaxHighlighter
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: oneDark style types are incompatible
|
||||||
|
style={oneDark as any}
|
||||||
|
language={match[1]}
|
||||||
|
PreTag="div"
|
||||||
|
>
|
||||||
|
{text.replace(/\n$/, "")}
|
||||||
|
</SyntaxHighlighter>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
// For inline code, detect and render code references as clickable links
|
||||||
|
return (
|
||||||
|
<code className={className} {...props}>
|
||||||
|
<InlineCodeWithRefs text={text} />
|
||||||
|
</code>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
interface MessageItemProps {
|
||||||
|
msg: Message;
|
||||||
|
}
|
||||||
|
|
||||||
|
function MessageItemInner({ msg }: MessageItemProps) {
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
alignItems: msg.role === "user" ? "flex-end" : "flex-start",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
maxWidth: "100%",
|
||||||
|
padding: msg.role === "user" ? "10px 16px" : "0",
|
||||||
|
borderRadius: msg.role === "user" ? "20px" : "0",
|
||||||
|
background:
|
||||||
|
msg.role === "user"
|
||||||
|
? "#2f2f2f"
|
||||||
|
: msg.role === "tool"
|
||||||
|
? "#222"
|
||||||
|
: "transparent",
|
||||||
|
color: "#ececec",
|
||||||
|
border: msg.role === "tool" ? "1px solid #333" : "none",
|
||||||
|
fontFamily: msg.role === "tool" ? "monospace" : "inherit",
|
||||||
|
fontSize: msg.role === "tool" ? "0.85em" : "1em",
|
||||||
|
fontWeight: "500",
|
||||||
|
whiteSpace: msg.role === "tool" ? "pre-wrap" : "normal",
|
||||||
|
lineHeight: "1.6",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{msg.role === "user" ? (
|
||||||
|
<div className="user-markdown-body">
|
||||||
|
<Markdown components={{ code: CodeBlock }}>{msg.content}</Markdown>
|
||||||
|
</div>
|
||||||
|
) : msg.role === "tool" ? (
|
||||||
|
<details style={{ cursor: "pointer" }}>
|
||||||
|
<summary
|
||||||
|
style={{
|
||||||
|
color: "#aaa",
|
||||||
|
fontSize: "0.9em",
|
||||||
|
marginBottom: "8px",
|
||||||
|
listStyle: "none",
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "6px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span style={{ fontSize: "0.8em" }}>▶</span>
|
||||||
|
<span>
|
||||||
|
Tool Output
|
||||||
|
{msg.tool_call_id && ` (${msg.tool_call_id})`}
|
||||||
|
</span>
|
||||||
|
</summary>
|
||||||
|
<pre
|
||||||
|
style={{
|
||||||
|
maxHeight: "300px",
|
||||||
|
overflow: "auto",
|
||||||
|
margin: 0,
|
||||||
|
padding: "8px",
|
||||||
|
background: "#1a1a1a",
|
||||||
|
borderRadius: "4px",
|
||||||
|
fontSize: "0.85em",
|
||||||
|
whiteSpace: "pre-wrap",
|
||||||
|
wordBreak: "break-word",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{msg.content}
|
||||||
|
</pre>
|
||||||
|
</details>
|
||||||
|
) : (
|
||||||
|
<div className="markdown-body">
|
||||||
|
<Markdown components={{ code: CodeBlock }}>{msg.content}</Markdown>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{msg.tool_calls && (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
marginTop: "12px",
|
||||||
|
fontSize: "0.85em",
|
||||||
|
color: "#aaa",
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{msg.tool_calls.map((tc: ToolCall, i: number) => {
|
||||||
|
let argsSummary = "";
|
||||||
|
try {
|
||||||
|
const args = JSON.parse(tc.function.arguments);
|
||||||
|
const firstKey = Object.keys(args)[0];
|
||||||
|
if (firstKey && args[firstKey]) {
|
||||||
|
argsSummary = String(args[firstKey]);
|
||||||
|
if (argsSummary.length > 50) {
|
||||||
|
argsSummary = `${argsSummary.substring(0, 47)}...`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (_e) {
|
||||||
|
// ignore
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
key={`tool-${i}-${tc.function.name}`}
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "8px",
|
||||||
|
fontFamily: "monospace",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span style={{ color: "#888" }}>▶</span>
|
||||||
|
<span
|
||||||
|
style={{
|
||||||
|
background: "#333",
|
||||||
|
padding: "2px 6px",
|
||||||
|
borderRadius: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{tc.function.name}
|
||||||
|
{argsSummary && `(${argsSummary})`}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
})}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
export const MessageItem = React.memo(MessageItemInner);
|
||||||
246
frontend/src/components/ServerLogsPanel.tsx
Normal file
246
frontend/src/components/ServerLogsPanel.tsx
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
|
||||||
|
const { useCallback, useEffect, useRef, useState } = React;
|
||||||
|
|
||||||
|
export interface LogEntry {
|
||||||
|
timestamp: string;
|
||||||
|
level: string;
|
||||||
|
message: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ServerLogsPanelProps {
|
||||||
|
logs: LogEntry[];
|
||||||
|
}
|
||||||
|
|
||||||
|
function levelColor(level: string): string {
|
||||||
|
switch (level.toUpperCase()) {
|
||||||
|
case "ERROR":
|
||||||
|
return "#e06c75";
|
||||||
|
case "WARN":
|
||||||
|
return "#e5c07b";
|
||||||
|
default:
|
||||||
|
return "#98c379";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Collapsible server-log viewer with free-text and severity filtering.
 *
 * Behaviour:
 * - Collapsed by default; the header button toggles the body open/closed.
 * - While open, the list auto-scrolls to the newest entry unless the user
 *   has scrolled up; scrolling back to the bottom re-enables auto-scroll.
 * - Severity buttons filter by exact (case-insensitive) level; the text
 *   filter matches the message (case-insensitive) or the raw timestamp.
 */
export function ServerLogsPanel({ logs }: ServerLogsPanelProps) {
  const [isOpen, setIsOpen] = useState(false);
  const [filter, setFilter] = useState("");
  const [severityFilter, setSeverityFilter] = useState<string>("ALL");
  const scrollRef = useRef<HTMLDivElement>(null);
  // True once the user scrolls upward; suppresses auto-scroll until they
  // return to (within 5px of) the bottom.
  const userScrolledUpRef = useRef(false);
  // Previous scrollTop, used to detect upward scroll direction.
  const lastScrollTopRef = useRef(0);

  // Recomputed each render; both severity and text conditions must hold.
  const filteredLogs = logs.filter((entry) => {
    const matchesSeverity =
      severityFilter === "ALL" || entry.level.toUpperCase() === severityFilter;
    const matchesFilter =
      filter === "" ||
      entry.message.toLowerCase().includes(filter.toLowerCase()) ||
      entry.timestamp.includes(filter);
    return matchesSeverity && matchesFilter;
  });

  const scrollToBottom = useCallback(() => {
    const el = scrollRef.current;
    if (el) {
      el.scrollTop = el.scrollHeight;
      // Record the new position so the next onScroll isn't misread as a
      // user-initiated upward scroll.
      lastScrollTopRef.current = el.scrollTop;
    }
  }, []);

  // Auto-scroll when new entries arrive (unless user scrolled up).
  // NOTE(review): keyed on filteredLogs.length — if entries are replaced
  // without changing the filtered count, this effect will not re-fire.
  useEffect(() => {
    if (!isOpen) return;
    if (!userScrolledUpRef.current) {
      scrollToBottom();
    }
  }, [filteredLogs.length, isOpen, scrollToBottom]);

  const handleScroll = () => {
    const el = scrollRef.current;
    if (!el) return;
    // 5px tolerance absorbs sub-pixel rounding of scrollTop.
    const isAtBottom = el.scrollHeight - el.scrollTop - el.clientHeight < 5;
    if (el.scrollTop < lastScrollTopRef.current) {
      userScrolledUpRef.current = true;
    }
    if (isAtBottom) {
      userScrolledUpRef.current = false;
    }
    lastScrollTopRef.current = el.scrollTop;
  };

  const severityButtons = ["ALL", "INFO", "WARN", "ERROR"] as const;

  return (
    <div
      data-testid="server-logs-panel"
      style={{
        borderRadius: "8px",
        border: "1px solid #333",
        overflow: "hidden",
      }}
    >
      {/* Header / toggle */}
      <button
        type="button"
        data-testid="server-logs-panel-toggle"
        onClick={() => setIsOpen((v) => !v)}
        style={{
          width: "100%",
          display: "flex",
          alignItems: "center",
          justifyContent: "space-between",
          padding: "8px 12px",
          background: "#1e1e1e",
          border: "none",
          cursor: "pointer",
          color: "#ccc",
          fontSize: "0.85em",
          fontWeight: 600,
          textAlign: "left",
        }}
      >
        <span>Server Logs</span>
        <span style={{ color: "#666", fontSize: "0.85em" }}>
          {/* Total (unfiltered) entry count, shown only when non-empty */}
          {logs.length > 0 && (
            <span style={{ marginRight: "8px", color: "#555" }}>
              {logs.length}
            </span>
          )}
          {isOpen ? "▲" : "▼"}
        </span>
      </button>

      {isOpen && (
        <div style={{ background: "#0d1117" }}>
          {/* Filter controls */}
          <div
            style={{
              display: "flex",
              gap: "6px",
              padding: "8px",
              borderBottom: "1px solid #1e1e1e",
              flexWrap: "wrap",
              alignItems: "center",
            }}
          >
            <input
              type="text"
              data-testid="server-logs-filter-input"
              value={filter}
              onChange={(e) => setFilter(e.target.value)}
              placeholder="Filter logs..."
              style={{
                flex: 1,
                minWidth: "80px",
                padding: "4px 8px",
                borderRadius: "4px",
                border: "1px solid #333",
                background: "#161b22",
                color: "#ccc",
                fontSize: "0.8em",
                outline: "none",
              }}
            />
            {severityButtons.map((sev) => (
              <button
                key={sev}
                type="button"
                data-testid={`server-logs-severity-${sev.toLowerCase()}`}
                onClick={() => setSeverityFilter(sev)}
                style={{
                  padding: "3px 8px",
                  borderRadius: "4px",
                  border: "1px solid",
                  borderColor:
                    severityFilter === sev ? levelColor(sev) : "#333",
                  background:
                    severityFilter === sev
                      ? "rgba(255,255,255,0.06)"
                      : "transparent",
                  // "ALL" has no severity colour; dim it unless selected.
                  color:
                    sev === "ALL"
                      ? severityFilter === "ALL"
                        ? "#ccc"
                        : "#555"
                      : levelColor(sev),
                  fontSize: "0.75em",
                  cursor: "pointer",
                  fontWeight: severityFilter === sev ? 700 : 400,
                }}
              >
                {sev}
              </button>
            ))}
          </div>

          {/* Log entries */}
          <div
            ref={scrollRef}
            onScroll={handleScroll}
            data-testid="server-logs-entries"
            style={{
              maxHeight: "240px",
              overflowY: "auto",
              padding: "4px 0",
              fontFamily: "monospace",
              fontSize: "0.75em",
            }}
          >
            {filteredLogs.length === 0 ? (
              <div
                style={{
                  padding: "16px",
                  color: "#444",
                  textAlign: "center",
                  fontSize: "0.9em",
                }}
              >
                No log entries
              </div>
            ) : (
              filteredLogs.map((entry, idx) => (
                <div
                  key={`${entry.timestamp}-${idx}`}
                  style={{
                    display: "flex",
                    gap: "6px",
                    padding: "1px 8px",
                    lineHeight: "1.5",
                    borderBottom: "1px solid #111",
                  }}
                >
                  {/* Timestamp, with ISO separators stripped for compactness */}
                  <span
                    style={{ color: "#444", flexShrink: 0, minWidth: "70px" }}
                  >
                    {entry.timestamp.replace("T", " ").replace("Z", "")}
                  </span>
                  <span
                    style={{
                      color: levelColor(entry.level),
                      flexShrink: 0,
                      minWidth: "38px",
                      fontWeight: 700,
                    }}
                  >
                    {entry.level}
                  </span>
                  <span
                    style={{
                      color: "#c9d1d9",
                      wordBreak: "break-word",
                      whiteSpace: "pre-wrap",
                    }}
                  >
                    {entry.message}
                  </span>
                </div>
              ))
            )}
          </div>
        </div>
      )}
    </div>
  );
}
|
||||||
159
frontend/src/components/SideQuestionOverlay.tsx
Normal file
159
frontend/src/components/SideQuestionOverlay.tsx
Normal file
@@ -0,0 +1,159 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import Markdown from "react-markdown";
|
||||||
|
|
||||||
|
const { useEffect, useRef } = React;
|
||||||
|
|
||||||
|
interface SideQuestionOverlayProps {
  /** The user's side question, rendered verbatim in the overlay header. */
  question: string;
  /** Streaming response text. Empty while loading. */
  response: string;
  /** True while the answer is still streaming; shows "Thinking…" until text arrives. */
  loading: boolean;
  /** Invoked on any dismissal path: backdrop click, close button, or key press. */
  onDismiss: () => void;
}
|
||||||
|
|
||||||
|
/**
 * Dismissible overlay that shows a /btw side question and its streamed response.
 * The question and response are NOT part of the main conversation history.
 * Dismiss with Escape, Enter, or Space (or by clicking the backdrop / ✕ button).
 */
export function SideQuestionOverlay({
  question,
  response,
  loading,
  onDismiss,
}: SideQuestionOverlayProps) {
  // Keep the latest onDismiss in a ref so the window keydown listener
  // (registered once, with an empty dep array) never goes stale.
  const dismissRef = useRef(onDismiss);
  dismissRef.current = onDismiss;

  // Global keyboard dismissal; attached for the overlay's whole lifetime.
  useEffect(() => {
    const handler = (e: KeyboardEvent) => {
      if (e.key === "Escape" || e.key === "Enter" || e.key === " ") {
        e.preventDefault();
        dismissRef.current();
      }
    };
    window.addEventListener("keydown", handler);
    return () => window.removeEventListener("keydown", handler);
  }, []);

  return (
    // biome-ignore lint/a11y/noStaticElementInteractions: backdrop dismiss is supplementary; keyboard handled via window keydown
    // biome-ignore lint/a11y/useKeyWithClickEvents: keyboard dismiss handled via window keydown listener
    <div
      data-testid="side-question-overlay"
      onClick={onDismiss}
      style={{
        position: "fixed",
        inset: 0,
        background: "rgba(0,0,0,0.55)",
        display: "flex",
        alignItems: "center",
        justifyContent: "center",
        zIndex: 1000,
      }}
    >
      {/* biome-ignore lint/a11y/useKeyWithClickEvents: stop-propagation only; no real interaction */}
      {/* biome-ignore lint/a11y/noStaticElementInteractions: stop-propagation only; no real interaction */}
      <div
        data-testid="side-question-panel"
        onClick={(e) => e.stopPropagation()}
        style={{
          background: "#2f2f2f",
          border: "1px solid #444",
          borderRadius: "12px",
          padding: "24px",
          maxWidth: "640px",
          width: "90vw",
          maxHeight: "60vh",
          display: "flex",
          flexDirection: "column",
          gap: "16px",
          boxShadow: "0 8px 32px rgba(0,0,0,0.5)",
        }}
      >
        {/* Header */}
        <div
          style={{
            display: "flex",
            alignItems: "flex-start",
            justifyContent: "space-between",
            gap: "12px",
          }}
        >
          <div>
            <span
              style={{
                display: "block",
                fontSize: "0.7rem",
                fontWeight: 700,
                letterSpacing: "0.08em",
                textTransform: "uppercase",
                color: "#a0d4a0",
                marginBottom: "4px",
              }}
            >
              /btw
            </span>
            <span
              style={{
                fontSize: "1rem",
                color: "#ececec",
                fontWeight: 500,
              }}
            >
              {question}
            </span>
          </div>
          <button
            type="button"
            onClick={onDismiss}
            title="Dismiss (Escape, Enter, or Space)"
            style={{
              background: "none",
              border: "none",
              color: "#666",
              cursor: "pointer",
              fontSize: "1.1rem",
              padding: "2px 6px",
              borderRadius: "4px",
              flexShrink: 0,
            }}
          >
            ✕
          </button>
        </div>

        {/* Response area */}
        <div
          style={{
            overflowY: "auto",
            flex: 1,
            color: "#ccc",
            fontSize: "0.95rem",
            lineHeight: "1.6",
          }}
        >
          {/* Placeholder only before the first streamed chunk arrives */}
          {loading && !response && (
            <span style={{ color: "#666", fontStyle: "italic" }}>
              Thinking…
            </span>
          )}
          {response && <Markdown>{response}</Markdown>}
        </div>

        {/* Footer hint */}
        {!loading && (
          <div
            style={{
              fontSize: "0.75rem",
              color: "#555",
              textAlign: "center",
            }}
          >
            Press Escape, Enter, or Space to dismiss
          </div>
        )}
      </div>
    </div>
  );
}
|
||||||
311
frontend/src/components/StagePanel.test.tsx
Normal file
311
frontend/src/components/StagePanel.test.tsx
Normal file
@@ -0,0 +1,311 @@
|
|||||||
|
import { render, screen } from "@testing-library/react";
|
||||||
|
import { describe, expect, it } from "vitest";
|
||||||
|
import type { PipelineStageItem } from "../api/client";
|
||||||
|
import { StagePanel } from "./StagePanel";
|
||||||
|
|
||||||
|
// Unit tests for <StagePanel>. Items are built inline with every optional
// field explicitly null so the required PipelineStageItem shape stays obvious.
describe("StagePanel", () => {
  it("renders empty message when no items", () => {
    render(<StagePanel title="Current" items={[]} />);
    expect(screen.getByText("Empty.")).toBeInTheDocument();
  });

  // --- Agent lozenge rendering -------------------------------------------

  it("renders story item without agent lozenge when agent is null", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "42_story_no_agent",
        name: "No Agent Story",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    expect(screen.getByText("No Agent Story")).toBeInTheDocument();
    // No agent lozenge
    expect(screen.queryByText(/coder-/)).not.toBeInTheDocument();
  });

  it("shows agent lozenge with agent name and model when agent is running", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "43_story_with_agent",
        name: "Active Story",
        error: null,
        merge_failure: null,
        agent: {
          agent_name: "coder-1",
          model: "sonnet",
          status: "running",
        },
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    expect(screen.getByText("Active Story")).toBeInTheDocument();
    // Lozenge label is "<agent_name> <model>" when a model is present.
    expect(screen.getByText("coder-1 sonnet")).toBeInTheDocument();
  });

  it("shows agent lozenge with only agent name when model is null", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "44_story_no_model",
        name: "No Model Story",
        error: null,
        merge_failure: null,
        agent: {
          agent_name: "coder-2",
          model: null,
          status: "running",
        },
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    expect(screen.getByText("coder-2")).toBeInTheDocument();
  });

  it("shows agent lozenge for pending agent", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "45_story_pending",
        name: "Pending Story",
        error: null,
        merge_failure: null,
        agent: {
          agent_name: "coder-1",
          model: "haiku",
          status: "pending",
        },
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="QA" items={items} />);
    expect(screen.getByText("coder-1 haiku")).toBeInTheDocument();
  });

  // --- Story number / error display --------------------------------------

  it("shows story number extracted from story_id", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "59_story_current_work_panel",
        name: "Current Work Panel",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    expect(screen.getByText("#59")).toBeInTheDocument();
  });

  it("shows error message when item has an error", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "1_story_bad",
        name: null,
        error: "Missing front matter",
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Upcoming" items={items} />);
    expect(screen.getByText("Missing front matter")).toBeInTheDocument();
  });

  // --- Work-item type badges (derived from the story_id's type segment) ---

  it("shows STORY badge for story items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "10_story_some_feature",
        name: "Some Feature",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Upcoming" items={items} />);
    expect(
      screen.getByTestId("type-badge-10_story_some_feature"),
    ).toHaveTextContent("STORY");
  });

  it("shows BUG badge for bug items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "11_bug_broken_thing",
        name: "Broken Thing",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    expect(
      screen.getByTestId("type-badge-11_bug_broken_thing"),
    ).toHaveTextContent("BUG");
  });

  it("shows SPIKE badge for spike items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "12_spike_investigate_perf",
        name: "Investigate Perf",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="QA" items={items} />);
    expect(
      screen.getByTestId("type-badge-12_spike_investigate_perf"),
    ).toHaveTextContent("SPIKE");
  });

  it("shows no badge for unrecognised type prefix", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "13_task_do_something",
        name: "Do Something",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Done" items={items} />);
    expect(
      screen.queryByTestId("type-badge-13_task_do_something"),
    ).not.toBeInTheDocument();
  });

  // --- Card border styling (no per-type coloured left edge) ---------------

  it("card has uniform border on all sides for story items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "20_story_uniform_border",
        name: "Uniform Border",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Upcoming" items={items} />);
    const card = screen.getByTestId("card-20_story_uniform_border");
    // No 3px colored left border - all sides match the uniform shorthand
    expect(card.style.borderLeft).not.toContain("3px");
    expect(card.style.borderLeft).toBe(card.style.borderTop);
    expect(card.style.borderLeft).toBe(card.style.borderRight);
    expect(card.style.borderLeft).toBe(card.style.borderBottom);
  });

  it("card has uniform border on all sides for bug items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "21_bug_uniform_border",
        name: "Uniform Border Bug",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Current" items={items} />);
    const card = screen.getByTestId("card-21_bug_uniform_border");
    expect(card.style.borderLeft).not.toContain("3px");
    expect(card.style.borderLeft).toBe(card.style.borderTop);
  });

  it("card has uniform border on all sides for spike items", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "22_spike_uniform_border",
        name: "Uniform Border Spike",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="QA" items={items} />);
    const card = screen.getByTestId("card-22_spike_uniform_border");
    expect(card.style.borderLeft).not.toContain("3px");
    expect(card.style.borderLeft).toBe(card.style.borderTop);
  });

  it("card has uniform border on all sides for unrecognised type", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "23_task_uniform_border",
        name: "Uniform Border Task",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Done" items={items} />);
    const card = screen.getByTestId("card-23_task_uniform_border");
    expect(card.style.borderLeft).not.toContain("3px");
    expect(card.style.borderLeft).toBe(card.style.borderTop);
  });

  // --- Merge failure indicators -------------------------------------------

  it("shows merge failure icon and reason when merge_failure is set", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "30_story_merge_failed",
        name: "Failed Merge Story",
        error: null,
        merge_failure: "Squash merge failed: conflicts in Cargo.lock",
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Merge" items={items} />);
    expect(
      screen.getByTestId("merge-failure-icon-30_story_merge_failed"),
    ).toBeInTheDocument();
    expect(
      screen.getByTestId("merge-failure-reason-30_story_merge_failed"),
    ).toHaveTextContent("Squash merge failed: conflicts in Cargo.lock");
  });

  it("does not show merge failure elements when merge_failure is null", () => {
    const items: PipelineStageItem[] = [
      {
        story_id: "31_story_no_failure",
        name: "Clean Story",
        error: null,
        merge_failure: null,
        agent: null,
        review_hold: null,
        qa: null,
      },
    ];
    render(<StagePanel title="Merge" items={items} />);
    expect(
      screen.queryByTestId("merge-failure-icon-31_story_no_failure"),
    ).not.toBeInTheDocument();
    expect(
      screen.queryByTestId("merge-failure-reason-31_story_no_failure"),
    ).not.toBeInTheDocument();
  });
});
|
||||||
517
frontend/src/components/StagePanel.tsx
Normal file
517
frontend/src/components/StagePanel.tsx
Normal file
@@ -0,0 +1,517 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import type { AgentConfigInfo } from "../api/agents";
|
||||||
|
import type { AgentAssignment, PipelineStageItem } from "../api/client";
|
||||||
|
import { useLozengeFly } from "./LozengeFlyContext";
|
||||||
|
|
||||||
|
const { useLayoutEffect, useRef, useState } = React;
|
||||||
|
|
||||||
|
// Work-item category parsed from a story_id's "<number>_<type>_" prefix.
type WorkItemType = "story" | "bug" | "spike" | "refactor" | "unknown";

// Accent colour per work-item type (used for badges; "unknown" is neutral grey).
const TYPE_COLORS: Record<WorkItemType, string> = {
  story: "#3fb950",
  bug: "#f85149",
  spike: "#58a6ff",
  refactor: "#a371f7",
  unknown: "#444",
};

// Badge text per type; null means "render no badge" for unrecognised items.
const TYPE_LABELS: Record<WorkItemType, string | null> = {
  story: "STORY",
  bug: "BUG",
  spike: "SPIKE",
  refactor: "REFACTOR",
  unknown: null,
};
|
||||||
|
|
||||||
|
function getWorkItemType(storyId: string): WorkItemType {
|
||||||
|
const match = storyId.match(/^\d+_([a-z]+)_/);
|
||||||
|
if (!match) return "unknown";
|
||||||
|
const segment = match[1];
|
||||||
|
if (
|
||||||
|
segment === "story" ||
|
||||||
|
segment === "bug" ||
|
||||||
|
segment === "spike" ||
|
||||||
|
segment === "refactor"
|
||||||
|
) {
|
||||||
|
return segment;
|
||||||
|
}
|
||||||
|
return "unknown";
|
||||||
|
}
|
||||||
|
|
||||||
|
interface StagePanelProps {
  /** Panel heading (e.g. "Current", "QA", "Merge"). */
  title: string;
  /** Work items to render as cards, in the order given. */
  items: PipelineStageItem[];
  /** Text shown when `items` is empty; defaults to "Empty.". */
  emptyMessage?: string;
  /** Optional click handler; when set, cards render with a pointer cursor. */
  onItemClick?: (item: PipelineStageItem) => void;
  /** Called to stop the running agent on a story. */
  onStopAgent?: (storyId: string, agentName: string) => void;
  /** Called to delete a work item. */
  onDeleteItem?: (item: PipelineStageItem) => void;
  /** Map of story_id → total_cost_usd for displaying cost badges. */
  costs?: Map<string, number>;
  /** Agent roster to populate the start agent dropdown. */
  agentRoster?: AgentConfigInfo[];
  /** Names of agents currently running/pending (busy). */
  busyAgentNames?: Set<string>;
  /** Called when the user requests to start an agent on a story. */
  onStartAgent?: (storyId: string, agentName?: string) => void;
}
|
||||||
|
|
||||||
|
/**
 * Pill-shaped badge showing which agent is assigned to a story and its state.
 * Colour encodes status: green = running (pulsing dot, with a stop button when
 * `onStop` is provided), yellow = pending (static dot), grey = anything else.
 * Registers its bounding rect with the lozenge-fly context so the fly-out
 * animation can start from this position after the lozenge unmounts.
 */
function AgentLozenge({
  agent,
  storyId,
  onStop,
}: {
  agent: AgentAssignment;
  storyId: string;
  onStop?: () => void;
}) {
  const { saveSlotRect, pendingFlyIns } = useLozengeFly();
  const lozengeRef = useRef<HTMLDivElement>(null);
  const isRunning = agent.status === "running";
  const isPending = agent.status === "pending";
  const color = isRunning ? "#3fb950" : isPending ? "#e3b341" : "#aaa";
  // Label is "<agent_name> <model>" when a model is known, else just the name.
  const label = agent.model
    ? `${agent.agent_name} ${agent.model}`
    : agent.agent_name;

  const isFlyingIn = pendingFlyIns.has(storyId);

  // Save our rect on every render so flyOut can reference it after unmount
  useLayoutEffect(() => {
    if (lozengeRef.current) {
      saveSlotRect(storyId, lozengeRef.current.getBoundingClientRect());
    }
  });

  return (
    <div
      ref={lozengeRef}
      className="agent-lozenge"
      data-testid={`slot-lozenge-${storyId}`}
      style={{
        display: "inline-flex",
        alignItems: "center",
        gap: "5px",
        padding: "2px 8px",
        borderRadius: "999px",
        fontSize: "0.72em",
        fontWeight: 600,
        // Status colour with hex-alpha suffixes for tinted fill/border.
        background: `${color}18`,
        color,
        border: `1px solid ${color}44`,
        marginTop: "4px",
        // Fixed intrinsic width – never stretches to fill parent panel
        alignSelf: "flex-start",
        // Hidden during fly-in; revealed with a fade once the clone arrives
        opacity: isFlyingIn ? 0 : 1,
        transition: isFlyingIn ? "none" : "opacity 0.15s",
        animation: isFlyingIn ? "none" : "agentAppear 0.3s ease-out",
      }}
    >
      {/* Status dot: pulsing while running */}
      {isRunning && (
        <span
          style={{
            width: "5px",
            height: "5px",
            borderRadius: "50%",
            background: color,
            animation: "pulse 1.5s infinite",
            flexShrink: 0,
          }}
        />
      )}
      {/* Status dot: dimmed and static while pending */}
      {isPending && (
        <span
          style={{
            width: "5px",
            height: "5px",
            borderRadius: "50%",
            background: color,
            opacity: 0.7,
            flexShrink: 0,
          }}
        />
      )}
      {label}
      {isRunning && onStop && (
        <button
          type="button"
          data-testid={`stop-agent-${storyId}`}
          onClick={(e) => {
            // Don't trigger the parent card's onItemClick.
            e.stopPropagation();
            onStop();
          }}
          title="Stop agent"
          style={{
            marginLeft: "4px",
            padding: "0 3px",
            background: "transparent",
            border: "none",
            color,
            cursor: "pointer",
            fontSize: "0.9em",
            lineHeight: 1,
            opacity: 0.8,
            flexShrink: 0,
          }}
        >
          ■
        </button>
      )}
    </div>
  );
}
|
||||||
|
|
||||||
|
/**
 * "▶ Start" control for assigning an agent to a story.
 * Shows a dropdown to pick a specific agent only when the roster has more
 * than one entry; with an empty selection the story is started on the
 * default agent (agentName passed as undefined). Everything is disabled
 * while every rostered agent is busy.
 */
function StartAgentControl({
  storyId,
  agentRoster,
  busyAgentNames,
  onStartAgent,
}: {
  storyId: string;
  agentRoster: AgentConfigInfo[];
  busyAgentNames: Set<string>;
  onStartAgent: (storyId: string, agentName?: string) => void;
}) {
  // Empty string = "Default agent" option.
  const [selectedAgent, setSelectedAgent] = useState<string>("");

  // Disabled state: a non-empty roster where every agent is busy.
  const allBusy =
    agentRoster.length > 0 &&
    agentRoster.every((a) => busyAgentNames.has(a.name));

  const handleStart = (e: React.MouseEvent) => {
    // Don't trigger the parent card's onItemClick.
    e.stopPropagation();
    onStartAgent(storyId, selectedAgent || undefined);
  };

  const handleSelectChange = (e: React.ChangeEvent<HTMLSelectElement>) => {
    e.stopPropagation();
    setSelectedAgent(e.target.value);
  };

  return (
    <div
      style={{
        display: "flex",
        gap: "4px",
        marginTop: "6px",
        alignItems: "center",
      }}
    >
      {/* Agent picker — only worth showing when there's a real choice */}
      {agentRoster.length > 1 && (
        <select
          value={selectedAgent}
          onChange={handleSelectChange}
          disabled={allBusy}
          data-testid={`start-agent-select-${storyId}`}
          style={{
            background: "#2a2a2a",
            color: allBusy ? "#555" : "#ccc",
            border: "1px solid #444",
            borderRadius: "5px",
            padding: "2px 4px",
            fontSize: "0.75em",
            cursor: allBusy ? "not-allowed" : "pointer",
            flex: 1,
            minWidth: 0,
          }}
        >
          <option value="">Default agent</option>
          {agentRoster.map((a) => (
            <option key={a.name} value={a.name}>
              {a.name}
            </option>
          ))}
        </select>
      )}
      <button
        type="button"
        onClick={handleStart}
        disabled={allBusy}
        data-testid={`start-agent-btn-${storyId}`}
        title={allBusy ? "All agents are busy" : "Start a coder on this story"}
        style={{
          background: allBusy ? "#1a1a1a" : "#1a3a1a",
          color: allBusy ? "#555" : "#3fb950",
          border: `1px solid ${allBusy ? "#333" : "#2a5a2a"}`,
          borderRadius: "5px",
          padding: "2px 8px",
          fontSize: "0.75em",
          fontWeight: 600,
          cursor: allBusy ? "not-allowed" : "pointer",
          whiteSpace: "nowrap",
          flexShrink: 0,
        }}
      >
        ▶ Start
      </button>
    </div>
  );
}
|
||||||
|
|
||||||
|
/**
 * Renders one pipeline stage column: a header row with the stage title and
 * item count, then either an empty-state message or one card per work item.
 *
 * Cards show: an optional merge-failure icon, the numeric prefix of the
 * story id, a type badge, a cost badge (when `costs` has an entry), the item
 * name, and optional error / merge-failure detail blocks. A card becomes a
 * <button> when `onItemClick` is provided, otherwise a plain <div>. Each card
 * may also carry an AgentLozenge (when an agent is assigned), a
 * StartAgentControl (when startable), and an absolutely-positioned delete
 * button (when `onDeleteItem` is provided).
 */
export function StagePanel({
  title,
  items,
  emptyMessage = "Empty.",
  onItemClick,
  onStopAgent,
  onDeleteItem,
  costs,
  agentRoster,
  busyAgentNames,
  onStartAgent,
}: StagePanelProps) {
  // "Start" controls are only offered when a handler exists and the roster
  // is present and non-empty.
  const showStartButton =
    Boolean(onStartAgent) &&
    agentRoster !== undefined &&
    agentRoster.length > 0;

  return (
    <div
      style={{
        border: "1px solid #333",
        borderRadius: "10px",
        padding: "12px 16px",
        background: "#1f1f1f",
        display: "flex",
        flexDirection: "column",
        gap: "8px",
      }}
    >
      {/* Header: stage title on the left, item count on the right. */}
      <div
        style={{
          display: "flex",
          alignItems: "center",
          justifyContent: "space-between",
          gap: "12px",
        }}
      >
        <div style={{ fontWeight: 600 }}>{title}</div>
        <div
          style={{
            fontSize: "0.85em",
            color: "#aaa",
          }}
        >
          {items.length}
        </div>
      </div>

      {items.length === 0 ? (
        <div style={{ fontSize: "0.85em", color: "#555" }}>{emptyMessage}</div>
      ) : (
        <div
          style={{
            display: "flex",
            flexDirection: "column",
            gap: "6px",
          }}
        >
          {items.map((item) => {
            // Leading digits of the story id (e.g. "237" from "237_bug_test").
            const itemNumber = item.story_id.match(/^(\d+)/)?.[1];
            const itemType = getWorkItemType(item.story_id);
            const borderColor = TYPE_COLORS[itemType];
            const typeLabel = TYPE_LABELS[itemType];
            const hasMergeFailure = Boolean(item.merge_failure);
            // Merge failure styling takes precedence over assigned-agent styling.
            const cardStyle = {
              border: hasMergeFailure
                ? "1px solid #6e1b1b"
                : item.agent
                  ? "1px solid #2a3a4a"
                  : "1px solid #2a2a2a",
              borderRadius: "8px",
              padding: "8px 12px",
              background: hasMergeFailure
                ? "#1f1010"
                : item.agent
                  ? "#161e2a"
                  : "#191919",
              display: "flex",
              flexDirection: "column" as const,
              gap: "2px",
              width: "100%",
              textAlign: "left" as const,
              color: "inherit",
              font: "inherit",
              cursor: onItemClick ? "pointer" : "default",
            };

            // Only offer "Start" when the item has no assigned agent
            const canStart = showStartButton && !item.agent;

            const cardInner = (
              <>
                <div style={{ flex: 1 }}>
                  <div style={{ fontWeight: 600, fontSize: "0.9em" }}>
                    {hasMergeFailure && (
                      <span
                        data-testid={`merge-failure-icon-${item.story_id}`}
                        title="Merge failed"
                        style={{
                          color: "#f85149",
                          marginRight: "6px",
                          fontStyle: "normal",
                        }}
                      >
                        ✕
                      </span>
                    )}
                    {itemNumber && (
                      <span
                        style={{
                          color: "#777",
                          fontFamily: "monospace",
                          marginRight: "8px",
                        }}
                      >
                        #{itemNumber}
                      </span>
                    )}
                    {typeLabel && (
                      <span
                        data-testid={`type-badge-${item.story_id}`}
                        style={{
                          fontSize: "0.7em",
                          fontWeight: 700,
                          color: borderColor,
                          marginRight: "8px",
                          letterSpacing: "0.05em",
                        }}
                      >
                        {typeLabel}
                      </span>
                    )}
                    {costs?.has(item.story_id) && (
                      <span
                        data-testid={`cost-badge-${item.story_id}`}
                        style={{
                          fontSize: "0.65em",
                          fontWeight: 600,
                          color: "#e3b341",
                          marginRight: "8px",
                        }}
                      >
                        ${costs.get(item.story_id)?.toFixed(2)}
                      </span>
                    )}
                    {item.name ?? item.story_id}
                  </div>
                  {item.error && (
                    <div
                      style={{
                        fontSize: "0.8em",
                        color: "#ff7b72",
                        marginTop: "4px",
                      }}
                    >
                      {item.error}
                    </div>
                  )}
                  {item.merge_failure && (
                    <div
                      data-testid={`merge-failure-reason-${item.story_id}`}
                      style={{
                        fontSize: "0.8em",
                        color: "#f85149",
                        marginTop: "4px",
                        whiteSpace: "pre-wrap",
                        wordBreak: "break-word",
                      }}
                    >
                      {item.merge_failure}
                    </div>
                  )}
                </div>
                {item.agent && (
                  <AgentLozenge
                    agent={item.agent}
                    storyId={item.story_id}
                    onStop={
                      // Stop is only wired while the agent is actively running.
                      onStopAgent && item.agent.status === "running"
                        ? () =>
                            onStopAgent(
                              item.story_id,
                              item.agent?.agent_name ?? "",
                            )
                        : undefined
                    }
                  />
                )}
                {canStart && onStartAgent && (
                  <StartAgentControl
                    storyId={item.story_id}
                    agentRoster={agentRoster ?? []}
                    busyAgentNames={busyAgentNames ?? new Set()}
                    onStartAgent={onStartAgent}
                  />
                )}
              </>
            );

            // Clickable stages render the card as a real <button> for a11y;
            // otherwise a plain <div> with the same content and styling.
            const card = onItemClick ? (
              <button
                type="button"
                data-testid={`card-${item.story_id}`}
                onClick={() => onItemClick(item)}
                style={cardStyle}
              >
                {cardInner}
              </button>
            ) : (
              <div data-testid={`card-${item.story_id}`} style={cardStyle}>
                {cardInner}
              </div>
            );

            return (
              // position: relative anchors the absolutely-positioned delete button.
              <div
                key={`${title}-${item.story_id}`}
                style={{ position: "relative" }}
              >
                {card}
                {onDeleteItem && (
                  <button
                    type="button"
                    data-testid={`delete-btn-${item.story_id}`}
                    title={`Delete ${item.name ?? item.story_id}`}
                    onClick={(e) => {
                      // Don't let the click bubble into the card's onItemClick.
                      e.stopPropagation();
                      const label = item.name ?? item.story_id;
                      if (
                        window.confirm(
                          `Delete "${label}"? This cannot be undone.`,
                        )
                      ) {
                        onDeleteItem(item);
                      }
                    }}
                    style={{
                      position: "absolute",
                      top: "4px",
                      right: "4px",
                      background: "transparent",
                      border: "none",
                      color: "#555",
                      cursor: "pointer",
                      fontSize: "0.85em",
                      lineHeight: 1,
                      padding: "2px 4px",
                      borderRadius: "4px",
                    }}
                    onMouseEnter={(e) => {
                      // Hover highlight done imperatively (no CSS classes here).
                      (e.currentTarget as HTMLButtonElement).style.color =
                        "#f85149";
                    }}
                    onMouseLeave={(e) => {
                      (e.currentTarget as HTMLButtonElement).style.color =
                        "#555";
                    }}
                  >
                    ✕
                  </button>
                )}
              </div>
            );
          })}
        </div>
      )}
    </div>
  );
}
|
||||||
440
frontend/src/components/TokenUsagePage.tsx
Normal file
440
frontend/src/components/TokenUsagePage.tsx
Normal file
@@ -0,0 +1,440 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import type { TokenUsageRecord } from "../api/client";
|
||||||
|
import { api } from "../api/client";
|
||||||
|
|
||||||
|
/** Columns of the usage table that can drive the sort order. */
type SortKey =
  | "timestamp"
  | "story_id"
  | "agent_name"
  | "model"
  | "total_cost_usd";
/** Direction of the active sort. */
type SortDir = "asc" | "desc";
|
||||||
|
|
||||||
|
function formatCost(usd: number): string {
|
||||||
|
if (usd === 0) return "$0.00";
|
||||||
|
if (usd < 0.001) return `$${usd.toFixed(6)}`;
|
||||||
|
if (usd < 0.01) return `$${usd.toFixed(4)}`;
|
||||||
|
return `$${usd.toFixed(3)}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatTokens(n: number): string {
|
||||||
|
if (n >= 1_000_000) return `${(n / 1_000_000).toFixed(1)}M`;
|
||||||
|
if (n >= 1_000) return `${(n / 1_000).toFixed(1)}K`;
|
||||||
|
return String(n);
|
||||||
|
}
|
||||||
|
|
||||||
|
function formatTimestamp(iso: string): string {
|
||||||
|
const d = new Date(iso);
|
||||||
|
const year = d.getFullYear();
|
||||||
|
const month = String(d.getMonth() + 1).padStart(2, "0");
|
||||||
|
const day = String(d.getDate()).padStart(2, "0");
|
||||||
|
const h = String(d.getHours()).padStart(2, "0");
|
||||||
|
const m = String(d.getMinutes()).padStart(2, "0");
|
||||||
|
return `${year}-${month}-${day} ${h}:${m}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Infer an agent type from the agent name. */
|
||||||
|
function agentType(agentName: string): string {
|
||||||
|
const lower = agentName.toLowerCase();
|
||||||
|
if (lower.startsWith("coder")) return "coder";
|
||||||
|
if (lower.startsWith("qa")) return "qa";
|
||||||
|
if (lower.startsWith("mergemaster") || lower.startsWith("merge"))
|
||||||
|
return "mergemaster";
|
||||||
|
return "other";
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Props for the SortHeader table-header cell. */
interface SortHeaderProps {
  /** Visible column title. */
  label: string;
  /** The sort key this header represents. */
  sortKey: SortKey;
  /** The table's currently active sort key. */
  current: SortKey;
  /** Current sort direction (used to pick the arrow glyph). */
  dir: SortDir;
  /** Invoked with this header's key when the cell is clicked. */
  onSort: (key: SortKey) => void;
  /** Text alignment of the cell; defaults to "left". */
  align?: "left" | "right";
}
|
||||||
|
|
||||||
|
function SortHeader({
|
||||||
|
label,
|
||||||
|
sortKey,
|
||||||
|
current,
|
||||||
|
dir,
|
||||||
|
onSort,
|
||||||
|
align = "left",
|
||||||
|
}: SortHeaderProps) {
|
||||||
|
const active = current === sortKey;
|
||||||
|
return (
|
||||||
|
<th
|
||||||
|
style={{
|
||||||
|
padding: "8px 12px",
|
||||||
|
textAlign: align,
|
||||||
|
cursor: "pointer",
|
||||||
|
userSelect: "none",
|
||||||
|
borderBottom: "1px solid #333",
|
||||||
|
color: active ? "#ececec" : "#aaa",
|
||||||
|
fontWeight: active ? "700" : "500",
|
||||||
|
whiteSpace: "nowrap",
|
||||||
|
fontSize: "0.8em",
|
||||||
|
letterSpacing: "0.05em",
|
||||||
|
textTransform: "uppercase",
|
||||||
|
}}
|
||||||
|
onClick={() => onSort(sortKey)}
|
||||||
|
>
|
||||||
|
{label}
|
||||||
|
{active ? (dir === "asc" ? " ↑" : " ↓") : ""}
|
||||||
|
</th>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Props for the TokenUsagePage view. */
interface TokenUsagePageProps {
  // Currently unused by the component (destructured as _projectPath);
  // kept in the props shape for API stability with callers.
  projectPath: string;
}
|
||||||
|
|
||||||
|
/**
 * Full-page token usage report. Fetches all usage records once on mount,
 * shows summary cost cards (grand total, per agent type, per model), and a
 * sortable table of individual records. Sorting is client-side; clicking a
 * sortable header toggles direction, and switching columns resets direction
 * (newest-first for timestamps, ascending otherwise).
 */
export function TokenUsagePage({
  projectPath: _projectPath,
}: TokenUsagePageProps) {
  const [records, setRecords] = React.useState<TokenUsageRecord[]>([]);
  const [loading, setLoading] = React.useState(true);
  const [error, setError] = React.useState<string | null>(null);
  const [sortKey, setSortKey] = React.useState<SortKey>("timestamp");
  const [sortDir, setSortDir] = React.useState<SortDir>("desc");

  // One-shot fetch on mount; errors surface via the `error` banner below.
  React.useEffect(() => {
    setLoading(true);
    setError(null);
    api
      .getAllTokenUsage()
      .then((resp) => setRecords(resp.records))
      .catch((e) =>
        setError(e instanceof Error ? e.message : "Failed to load token usage"),
      )
      .finally(() => setLoading(false));
  }, []);

  // Clicking the active column flips direction; clicking a new column
  // selects it with a sensible default direction.
  function handleSort(key: SortKey) {
    if (key === sortKey) {
      setSortDir((d) => (d === "asc" ? "desc" : "asc"));
    } else {
      setSortKey(key);
      setSortDir(key === "timestamp" ? "desc" : "asc");
    }
  }

  // Sorted copy of the records; never mutates `records` itself.
  const sorted = React.useMemo(() => {
    return [...records].sort((a, b) => {
      let cmp = 0;
      switch (sortKey) {
        case "timestamp":
          // ISO-8601 strings compare correctly lexicographically.
          cmp = a.timestamp.localeCompare(b.timestamp);
          break;
        case "story_id":
          cmp = a.story_id.localeCompare(b.story_id);
          break;
        case "agent_name":
          cmp = a.agent_name.localeCompare(b.agent_name);
          break;
        case "model":
          cmp = (a.model ?? "").localeCompare(b.model ?? "");
          break;
        case "total_cost_usd":
          cmp = a.total_cost_usd - b.total_cost_usd;
          break;
      }
      return sortDir === "asc" ? cmp : -cmp;
    });
  }, [records, sortKey, sortDir]);

  // Compute summary totals
  const totalCost = records.reduce((s, r) => s + r.total_cost_usd, 0);

  // Cost totals bucketed by inferred agent type (coder/qa/mergemaster/other).
  const byAgentType = React.useMemo(() => {
    const map: Record<string, number> = {};
    for (const r of records) {
      const t = agentType(r.agent_name);
      map[t] = (map[t] ?? 0) + r.total_cost_usd;
    }
    return map;
  }, [records]);

  // Cost totals bucketed by model name ("unknown" when absent).
  const byModel = React.useMemo(() => {
    const map: Record<string, number> = {};
    for (const r of records) {
      const m = r.model ?? "unknown";
      map[m] = (map[m] ?? 0) + r.total_cost_usd;
    }
    return map;
  }, [records]);

  // Base style shared by all body cells (and spread into plain <th>s).
  const cellStyle: React.CSSProperties = {
    padding: "7px 12px",
    borderBottom: "1px solid #222",
    fontSize: "0.85em",
    color: "#ccc",
    whiteSpace: "nowrap",
  };

  return (
    <div
      style={{
        height: "100%",
        overflowY: "auto",
        background: "#111",
        padding: "24px",
        fontFamily: "monospace",
      }}
    >
      <h2
        style={{
          color: "#ececec",
          margin: "0 0 20px",
          fontSize: "1.1em",
          fontWeight: "700",
          letterSpacing: "0.04em",
        }}
      >
        Token Usage
      </h2>

      {/* Summary totals */}
      <div
        style={{
          display: "flex",
          gap: "16px",
          flexWrap: "wrap",
          marginBottom: "24px",
        }}
      >
        <SummaryCard
          label="Total Cost"
          value={formatCost(totalCost)}
          highlight
        />
        {/* Per-agent-type cards, alphabetical; labels capitalized. */}
        {Object.entries(byAgentType)
          .sort(([a], [b]) => a.localeCompare(b))
          .map(([type, cost]) => (
            <SummaryCard
              key={type}
              label={`${type.charAt(0).toUpperCase()}${type.slice(1)}`}
              value={formatCost(cost)}
            />
          ))}
        {/* Per-model cards, most expensive first. */}
        {Object.entries(byModel)
          .sort(([, a], [, b]) => b - a)
          .map(([model, cost]) => (
            <SummaryCard key={model} label={model} value={formatCost(cost)} />
          ))}
      </div>

      {loading && (
        <p style={{ color: "#555", fontSize: "0.9em" }}>Loading...</p>
      )}
      {error && <p style={{ color: "#e05c5c", fontSize: "0.9em" }}>{error}</p>}

      {!loading && !error && records.length === 0 && (
        <p style={{ color: "#555", fontSize: "0.9em" }}>
          No token usage records found.
        </p>
      )}

      {!loading && !error && records.length > 0 && (
        <div style={{ overflowX: "auto" }}>
          <table
            style={{
              width: "100%",
              borderCollapse: "collapse",
              fontSize: "0.9em",
            }}
          >
            <thead>
              <tr style={{ background: "#1a1a1a" }}>
                <SortHeader
                  label="Date"
                  sortKey="timestamp"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Story"
                  sortKey="story_id"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Agent"
                  sortKey="agent_name"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                <SortHeader
                  label="Model"
                  sortKey="model"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                />
                {/* Token-count columns are display-only (not sortable). */}
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Input
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Cache+
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Cache↩
                </th>
                <th
                  style={{
                    ...cellStyle,
                    borderBottom: "1px solid #333",
                    textAlign: "right",
                    color: "#aaa",
                    fontSize: "0.8em",
                    letterSpacing: "0.05em",
                    textTransform: "uppercase",
                    fontWeight: "500",
                  }}
                >
                  Output
                </th>
                <SortHeader
                  label="Cost"
                  sortKey="total_cost_usd"
                  current={sortKey}
                  dir={sortDir}
                  onSort={handleSort}
                  align="right"
                />
              </tr>
            </thead>
            <tbody>
              {sorted.map((r, i) => (
                <tr
                  // NOTE(review): assumes (story_id, agent_name, timestamp) is
                  // unique per record — duplicate keys possible otherwise; confirm.
                  key={`${r.story_id}-${r.agent_name}-${r.timestamp}`}
                  style={{ background: i % 2 === 0 ? "#111" : "#161616" }}
                >
                  <td style={cellStyle}>{formatTimestamp(r.timestamp)}</td>
                  <td
                    style={{
                      ...cellStyle,
                      color: "#8b9cf7",
                      maxWidth: "220px",
                      overflow: "hidden",
                      textOverflow: "ellipsis",
                    }}
                  >
                    {r.story_id}
                  </td>
                  <td style={{ ...cellStyle, color: "#7ec8a4" }}>
                    {r.agent_name}
                  </td>
                  <td style={{ ...cellStyle, color: "#c9a96e" }}>
                    {r.model ?? "—"}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.cache_creation_input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.cache_read_input_tokens)}
                  </td>
                  <td style={{ ...cellStyle, textAlign: "right" }}>
                    {formatTokens(r.output_tokens)}
                  </td>
                  <td
                    style={{
                      ...cellStyle,
                      textAlign: "right",
                      color: "#e08c5c",
                      fontWeight: "600",
                    }}
                  >
                    {formatCost(r.total_cost_usd)}
                  </td>
                </tr>
              ))}
            </tbody>
          </table>
        </div>
      )}
    </div>
  );
}
|
||||||
|
|
||||||
|
function SummaryCard({
|
||||||
|
label,
|
||||||
|
value,
|
||||||
|
highlight = false,
|
||||||
|
}: {
|
||||||
|
label: string;
|
||||||
|
value: string;
|
||||||
|
highlight?: boolean;
|
||||||
|
}) {
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
background: highlight ? "#1e1e2e" : "#1a1a1a",
|
||||||
|
border: `1px solid ${highlight ? "#3a3a5a" : "#2a2a2a"}`,
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "12px 16px",
|
||||||
|
minWidth: "120px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "0.7em",
|
||||||
|
color: "#666",
|
||||||
|
textTransform: "uppercase",
|
||||||
|
letterSpacing: "0.07em",
|
||||||
|
marginBottom: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{label}
|
||||||
|
</div>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "1.1em",
|
||||||
|
fontWeight: "700",
|
||||||
|
color: highlight ? "#c9a96e" : "#ececec",
|
||||||
|
fontFamily: "monospace",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{value}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
761
frontend/src/components/WorkItemDetailPanel.test.tsx
Normal file
761
frontend/src/components/WorkItemDetailPanel.test.tsx
Normal file
@@ -0,0 +1,761 @@
|
|||||||
|
import { act, render, screen, waitFor } from "@testing-library/react";
|
||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import type { AgentEvent, AgentInfo } from "../api/agents";
|
||||||
|
import type { TestResultsResponse, TokenCostResponse } from "../api/client";
|
||||||
|
|
||||||
|
// Mock the REST client so tests control work-item content, test results,
// and token costs; everything else re-exports the real module.
vi.mock("../api/client", async () => {
  const actual =
    await vi.importActual<typeof import("../api/client")>("../api/client");
  return {
    ...actual,
    api: {
      ...actual.api,
      getWorkItemContent: vi.fn(),
      getTestResults: vi.fn(),
      getTokenCost: vi.fn(),
    },
  };
});

// Mock the agents API entirely; the stream subscription returns a no-op
// unsubscribe function by default.
vi.mock("../api/agents", () => ({
  agentsApi: {
    listAgents: vi.fn(),
    getAgentConfig: vi.fn(),
    stopAgent: vi.fn(),
    startAgent: vi.fn(),
  },
  subscribeAgentStream: vi.fn(() => () => {}),
}));

import { agentsApi, subscribeAgentStream } from "../api/agents";
import { api } from "../api/client";

// Import the component under test AFTER the mocks above are registered,
// so its module-level imports resolve to the mocked modules.
const { WorkItemDetailPanel } = await import("./WorkItemDetailPanel");

// Typed handles to the mocked functions used throughout the tests.
const mockedGetWorkItemContent = vi.mocked(api.getWorkItemContent);
const mockedGetTestResults = vi.mocked(api.getTestResults);
const mockedGetTokenCost = vi.mocked(api.getTokenCost);
const mockedListAgents = vi.mocked(agentsApi.listAgents);
const mockedGetAgentConfig = vi.mocked(agentsApi.getAgentConfig);
const mockedSubscribeAgentStream = vi.mocked(subscribeAgentStream);
|
||||||
|
|
||||||
|
// Default work-item payload served by the mocked getWorkItemContent in
// beforeEach; individual tests override it when needed.
const DEFAULT_CONTENT = {
  content: "# Big Title\n\nSome content here.",
  stage: "current",
  name: "Big Title Story",
  agent: null,
};

// Fixture with mixed unit results (one pass, one fail with details) and a
// single passing integration test.
const sampleTestResults: TestResultsResponse = {
  unit: [
    { name: "test_add", status: "pass", details: null },
    { name: "test_subtract", status: "fail", details: "expected 3, got 4" },
  ],
  integration: [{ name: "test_api_endpoint", status: "pass", details: null }],
};
|
||||||
|
|
||||||
|
// Reset all mocks and install benign defaults so every test starts from a
// clean, deterministic baseline.
beforeEach(() => {
  vi.clearAllMocks();
  mockedGetWorkItemContent.mockResolvedValue(DEFAULT_CONTENT);
  mockedGetTestResults.mockResolvedValue(null);
  mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
  mockedListAgents.mockResolvedValue([]);
  mockedGetAgentConfig.mockResolvedValue([]);
  // Stream subscription yields a no-op unsubscribe by default.
  mockedSubscribeAgentStream.mockReturnValue(() => {});
});

afterEach(() => {
  // Undo any per-test spy implementations installed during a test.
  vi.restoreAllMocks();
});
|
||||||
|
|
||||||
|
// Basic rendering/interaction behavior of the detail panel.
describe("WorkItemDetailPanel", () => {
  it("renders the story name in the header", async () => {
    render(
      <WorkItemDetailPanel
        storyId="237_bug_test"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    // Title comes from the mocked getWorkItemContent (DEFAULT_CONTENT.name).
    await waitFor(() => {
      expect(screen.getByTestId("detail-panel-title")).toHaveTextContent(
        "Big Title Story",
      );
    });
  });

  it("shows loading state initially", () => {
    render(
      <WorkItemDetailPanel
        storyId="237_bug_test"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    // Synchronous check: the content fetch has not resolved yet.
    expect(screen.getByTestId("detail-panel-loading")).toBeInTheDocument();
  });

  it("calls onClose when close button is clicked", async () => {
    const onClose = vi.fn();
    render(
      <WorkItemDetailPanel
        storyId="237_bug_test"
        pipelineVersion={0}
        onClose={onClose}
      />,
    );
    const closeButton = screen.getByTestId("detail-panel-close");
    closeButton.click();
    expect(onClose).toHaveBeenCalledTimes(1);
  });

  it("renders markdown headings with constrained inline font size", async () => {
    render(
      <WorkItemDetailPanel
        storyId="237_bug_test"
        pipelineVersion={0}
        onClose={() => {}}
      />,
    );
    await waitFor(() => {
      const content = screen.getByTestId("detail-panel-content");
      // The "# Big Title" markdown should render as an <h1> whose size is
      // overridden inline (exact value is not asserted, only presence).
      const h1 = content.querySelector("h1");
      expect(h1).not.toBeNull();
      expect(h1?.style.fontSize).toBeTruthy();
    });
  });
});
|
||||||
|
|
||||||
|
describe("WorkItemDetailPanel - Agent Logs", () => {
|
||||||
|
it("shows placeholder when no agent is assigned to the story", async () => {
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
await screen.findByTestId("detail-panel-content");
|
||||||
|
const placeholder = screen.getByTestId("placeholder-agent-logs");
|
||||||
|
expect(placeholder).toBeInTheDocument();
|
||||||
|
expect(placeholder).toHaveTextContent("Coming soon");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows agent name and running status when agent is running", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const statusBadge = await screen.findByTestId("agent-status-badge");
|
||||||
|
expect(statusBadge).toHaveTextContent("coder-1");
|
||||||
|
expect(statusBadge).toHaveTextContent("running");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows log output when agent emits output events", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("agent-status-badge");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "output",
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
text: "Writing tests...",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const logOutput = screen.getByTestId("agent-log-output");
|
||||||
|
expect(logOutput).toHaveTextContent("Writing tests...");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("appends multiple output events to the log", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("agent-status-badge");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "output",
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
text: "Line one\n",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "output",
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
text: "Line two\n",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const logOutput = screen.getByTestId("agent-log-output");
|
||||||
|
expect(logOutput.textContent).toContain("Line one");
|
||||||
|
expect(logOutput.textContent).toContain("Line two");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("updates status to completed after done event", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("agent-status-badge");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "done",
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
session_id: "session-123",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const statusBadge = screen.getByTestId("agent-status-badge");
|
||||||
|
expect(statusBadge).toHaveTextContent("completed");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows failed status after error event", async () => {
|
||||||
|
let emitEvent: ((e: AgentEvent) => void) | null = null;
|
||||||
|
mockedSubscribeAgentStream.mockImplementation(
|
||||||
|
(_storyId, _agentName, onEvent) => {
|
||||||
|
emitEvent = onEvent;
|
||||||
|
return () => {};
|
||||||
|
},
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("agent-status-badge");
|
||||||
|
|
||||||
|
await act(async () => {
|
||||||
|
emitEvent?.({
|
||||||
|
type: "error",
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
message: "Process failed",
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const statusBadge = screen.getByTestId("agent-status-badge");
|
||||||
|
expect(statusBadge).toHaveTextContent("failed");
|
||||||
|
|
||||||
|
const logOutput = screen.getByTestId("agent-log-output");
|
||||||
|
expect(logOutput.textContent).toContain("[ERROR] Process failed");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows completed agent status without subscribing to stream", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "completed",
|
||||||
|
session_id: "session-123",
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const statusBadge = await screen.findByTestId("agent-status-badge");
|
||||||
|
expect(statusBadge).toHaveTextContent("completed");
|
||||||
|
expect(mockedSubscribeAgentStream).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows failed agent status for a failed agent without subscribing to stream", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "failed",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: null,
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const statusBadge = await screen.findByTestId("agent-status-badge");
|
||||||
|
expect(statusBadge).toHaveTextContent("failed");
|
||||||
|
expect(mockedSubscribeAgentStream).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows agent logs section (not placeholder) when agent is assigned", async () => {
|
||||||
|
const agentList: AgentInfo[] = [
|
||||||
|
{
|
||||||
|
story_id: "42_story_test",
|
||||||
|
agent_name: "coder-1",
|
||||||
|
status: "running",
|
||||||
|
session_id: null,
|
||||||
|
worktree_path: "/tmp/wt",
|
||||||
|
base_branch: "master",
|
||||||
|
log_session_id: null,
|
||||||
|
},
|
||||||
|
];
|
||||||
|
mockedListAgents.mockResolvedValue(agentList);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("agent-logs-section");
|
||||||
|
|
||||||
|
expect(
|
||||||
|
screen.queryByTestId("placeholder-agent-logs"),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("WorkItemDetailPanel - Assigned Agent", () => {
|
||||||
|
it("shows assigned agent name when agent front matter field is set", async () => {
|
||||||
|
mockedGetWorkItemContent.mockResolvedValue({
|
||||||
|
...DEFAULT_CONTENT,
|
||||||
|
agent: "coder-opus",
|
||||||
|
});
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="271_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentEl = await screen.findByTestId("detail-panel-assigned-agent");
|
||||||
|
expect(agentEl).toHaveTextContent("coder-opus");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("omits assigned agent field when no agent is set in front matter", async () => {
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="271_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await screen.findByTestId("detail-panel-content");
|
||||||
|
expect(
|
||||||
|
screen.queryByTestId("detail-panel-assigned-agent"),
|
||||||
|
).not.toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows the specific agent name not just 'assigned'", async () => {
|
||||||
|
mockedGetWorkItemContent.mockResolvedValue({
|
||||||
|
...DEFAULT_CONTENT,
|
||||||
|
agent: "coder-haiku",
|
||||||
|
});
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="271_story_test"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
const agentEl = await screen.findByTestId("detail-panel-assigned-agent");
|
||||||
|
expect(agentEl).toHaveTextContent("coder-haiku");
|
||||||
|
expect(agentEl).not.toHaveTextContent("assigned");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("WorkItemDetailPanel - Test Results", () => {
|
||||||
|
it("shows empty test results message when no results exist", async () => {
|
||||||
|
mockedGetTestResults.mockResolvedValue(null);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("test-results-empty")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
expect(screen.getByText("No test results recorded")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows unit and integration test results when available", async () => {
|
||||||
|
mockedGetTestResults.mockResolvedValue(sampleTestResults);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("test-results-content")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Unit test section
|
||||||
|
expect(screen.getByTestId("test-section-unit")).toBeInTheDocument();
|
||||||
|
expect(
|
||||||
|
screen.getByText("Unit Tests (1 passed, 1 failed)"),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
|
||||||
|
// Integration test section
|
||||||
|
expect(screen.getByTestId("test-section-integration")).toBeInTheDocument();
|
||||||
|
expect(
|
||||||
|
screen.getByText("Integration Tests (1 passed, 0 failed)"),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows pass/fail status and details for each test", async () => {
|
||||||
|
mockedGetTestResults.mockResolvedValue(sampleTestResults);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("test-case-test_add")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
// Passing test
|
||||||
|
expect(screen.getByTestId("test-status-test_add")).toHaveTextContent(
|
||||||
|
"PASS",
|
||||||
|
);
|
||||||
|
expect(screen.getByText("test_add")).toBeInTheDocument();
|
||||||
|
|
||||||
|
// Failing test with details
|
||||||
|
expect(screen.getByTestId("test-status-test_subtract")).toHaveTextContent(
|
||||||
|
"FAIL",
|
||||||
|
);
|
||||||
|
expect(screen.getByText("test_subtract")).toBeInTheDocument();
|
||||||
|
expect(screen.getByTestId("test-details-test_subtract")).toHaveTextContent(
|
||||||
|
"expected 3, got 4",
|
||||||
|
);
|
||||||
|
|
||||||
|
// Integration test
|
||||||
|
expect(
|
||||||
|
screen.getByTestId("test-status-test_api_endpoint"),
|
||||||
|
).toHaveTextContent("PASS");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("re-fetches test results when pipelineVersion changes", async () => {
|
||||||
|
mockedGetTestResults.mockResolvedValue(null);
|
||||||
|
|
||||||
|
const { rerender } = render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedGetTestResults).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
// Update with new results and bump pipelineVersion.
|
||||||
|
mockedGetTestResults.mockResolvedValue(sampleTestResults);
|
||||||
|
|
||||||
|
rerender(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={1}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedGetTestResults).toHaveBeenCalledTimes(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("test-results-content")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
describe("WorkItemDetailPanel - Token Cost", () => {
|
||||||
|
const sampleTokenCost: TokenCostResponse = {
|
||||||
|
total_cost_usd: 0.012345,
|
||||||
|
agents: [
|
||||||
|
{
|
||||||
|
agent_name: "coder-1",
|
||||||
|
model: "claude-sonnet-4-6",
|
||||||
|
input_tokens: 1000,
|
||||||
|
output_tokens: 500,
|
||||||
|
cache_creation_input_tokens: 200,
|
||||||
|
cache_read_input_tokens: 100,
|
||||||
|
total_cost_usd: 0.009,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
agent_name: "coder-2",
|
||||||
|
model: null,
|
||||||
|
input_tokens: 800,
|
||||||
|
output_tokens: 300,
|
||||||
|
cache_creation_input_tokens: 0,
|
||||||
|
cache_read_input_tokens: 0,
|
||||||
|
total_cost_usd: 0.003345,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
};
|
||||||
|
|
||||||
|
it("shows empty state when no token data exists", async () => {
|
||||||
|
mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("token-cost-empty")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
expect(screen.getByText("No token data recorded")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows per-agent breakdown and total cost when data exists", async () => {
|
||||||
|
mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("token-cost-content")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(screen.getByTestId("token-cost-total")).toHaveTextContent(
|
||||||
|
"$0.012345",
|
||||||
|
);
|
||||||
|
expect(screen.getByTestId("token-cost-agent-coder-1")).toBeInTheDocument();
|
||||||
|
expect(screen.getByTestId("token-cost-agent-coder-2")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows agent name and model when model is present", async () => {
|
||||||
|
mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByTestId("token-cost-agent-coder-1"),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const agentRow = screen.getByTestId("token-cost-agent-coder-1");
|
||||||
|
expect(agentRow).toHaveTextContent("coder-1");
|
||||||
|
expect(agentRow).toHaveTextContent("claude-sonnet-4-6");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("shows agent name without model when model is null", async () => {
|
||||||
|
mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
|
||||||
|
|
||||||
|
render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(
|
||||||
|
screen.getByTestId("token-cost-agent-coder-2"),
|
||||||
|
).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
|
||||||
|
const agentRow = screen.getByTestId("token-cost-agent-coder-2");
|
||||||
|
expect(agentRow).toHaveTextContent("coder-2");
|
||||||
|
expect(agentRow).not.toHaveTextContent("null");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("re-fetches token cost when pipelineVersion changes", async () => {
|
||||||
|
mockedGetTokenCost.mockResolvedValue({ total_cost_usd: 0, agents: [] });
|
||||||
|
|
||||||
|
const { rerender } = render(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={0}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedGetTokenCost).toHaveBeenCalledTimes(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
mockedGetTokenCost.mockResolvedValue(sampleTokenCost);
|
||||||
|
|
||||||
|
rerender(
|
||||||
|
<WorkItemDetailPanel
|
||||||
|
storyId="42_story_foo"
|
||||||
|
pipelineVersion={1}
|
||||||
|
onClose={() => {}}
|
||||||
|
/>,
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockedGetTokenCost).toHaveBeenCalledTimes(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(screen.getByTestId("token-cost-content")).toBeInTheDocument();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
787
frontend/src/components/WorkItemDetailPanel.tsx
Normal file
787
frontend/src/components/WorkItemDetailPanel.tsx
Normal file
@@ -0,0 +1,787 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import Markdown from "react-markdown";
|
||||||
|
import type {
|
||||||
|
AgentConfigInfo,
|
||||||
|
AgentEvent,
|
||||||
|
AgentInfo,
|
||||||
|
AgentStatusValue,
|
||||||
|
} from "../api/agents";
|
||||||
|
import { agentsApi, subscribeAgentStream } from "../api/agents";
|
||||||
|
import type {
|
||||||
|
AgentCostEntry,
|
||||||
|
TestCaseResult,
|
||||||
|
TestResultsResponse,
|
||||||
|
TokenCostResponse,
|
||||||
|
} from "../api/client";
|
||||||
|
import { api } from "../api/client";
|
||||||
|
|
||||||
|
// Hooks pulled off the React namespace import above.
const { useCallback, useEffect, useRef, useState } = React;

// Human-readable labels for each pipeline stage key; unknown stages fall back
// to the raw key at the usage site.
const STAGE_LABELS: Record<string, string> = {
  backlog: "Backlog",
  current: "Current",
  qa: "QA",
  merge: "To Merge",
  done: "Done",
  archived: "Archived",
};

// Badge colors per agent status (green/amber/grey/red).
// NOTE(review): not referenced in the visible portion of this file — confirm
// it is used further down (e.g. by the status badge) before removing.
const STATUS_COLORS: Record<AgentStatusValue, string> = {
  running: "#3fb950",
  pending: "#e3b341",
  completed: "#aaa",
  failed: "#f85149",
};

interface WorkItemDetailPanelProps {
  // Identifier of the work item whose details are shown.
  storyId: string;
  // Monotonic counter bumped by the parent on pipeline updates; changing it
  // re-triggers the test-results and token-cost fetches.
  pipelineVersion: number;
  // Invoked when the user closes the panel (button or Escape key).
  onClose: () => void;
  /** True when the item is in QA and awaiting human review. */
  reviewHold?: boolean;
}
|
||||||
|
|
||||||
|
/**
 * Renders one test-case row: a PASS/FAIL tag plus the test name, and an
 * optional details block (e.g. a failure message) beneath them.
 *
 * Test ids embed the case name (`test-case-*`, `test-status-*`,
 * `test-details-*`) and are asserted by the unit tests for this panel.
 */
function TestCaseRow({ tc }: { tc: TestCaseResult }) {
  // Any status other than "pass" renders as FAIL.
  const isPassing = tc.status === "pass";
  return (
    <div
      data-testid={`test-case-${tc.name}`}
      style={{
        display: "flex",
        flexDirection: "column",
        gap: "2px",
        padding: "4px 0",
      }}
    >
      <div style={{ display: "flex", alignItems: "center", gap: "6px" }}>
        <span
          data-testid={`test-status-${tc.name}`}
          style={{
            fontSize: "0.85em",
            color: isPassing ? "#3fb950" : "#f85149",
          }}
        >
          {isPassing ? "PASS" : "FAIL"}
        </span>
        <span style={{ fontSize: "0.82em", color: "#ccc" }}>{tc.name}</span>
      </div>
      {/* Details (failure output) only when present; preserves newlines. */}
      {tc.details && (
        <div
          data-testid={`test-details-${tc.name}`}
          style={{
            fontSize: "0.75em",
            color: "#888",
            paddingLeft: "22px",
            whiteSpace: "pre-wrap",
            wordBreak: "break-word",
          }}
        >
          {tc.details}
        </div>
      )}
    </div>
  );
}
|
||||||
|
|
||||||
|
/**
 * Renders a titled group of test cases (e.g. "Unit Tests") with a
 * "(N passed, M failed)" summary in the header, one TestCaseRow per case,
 * and an italic empty-state message when the list is empty.
 *
 * @param title  Section heading text.
 * @param tests  Test cases to render.
 * @param testId data-testid for the section wrapper (asserted by tests).
 */
function TestSection({
  title,
  tests,
  testId,
}: {
  title: string;
  tests: TestCaseResult[];
  testId: string;
}) {
  // Anything not "pass" counts as failed, mirroring TestCaseRow's logic.
  const passCount = tests.filter((t) => t.status === "pass").length;
  const failCount = tests.length - passCount;
  return (
    <div data-testid={testId}>
      <div
        style={{
          fontSize: "0.78em",
          fontWeight: 600,
          color: "#aaa",
          marginBottom: "6px",
        }}
      >
        {title} ({passCount} passed, {failCount} failed)
      </div>
      {tests.length === 0 ? (
        <div style={{ fontSize: "0.75em", color: "#555", fontStyle: "italic" }}>
          No tests recorded
        </div>
      ) : (
        tests.map((tc) => <TestCaseRow key={tc.name} tc={tc} />)
      )}
    </div>
  );
}
|
||||||
|
|
||||||
|
export function WorkItemDetailPanel({
|
||||||
|
storyId,
|
||||||
|
pipelineVersion,
|
||||||
|
onClose,
|
||||||
|
reviewHold: _reviewHold,
|
||||||
|
}: WorkItemDetailPanelProps) {
|
||||||
|
const [content, setContent] = useState<string | null>(null);
|
||||||
|
const [stage, setStage] = useState<string>("");
|
||||||
|
const [name, setName] = useState<string | null>(null);
|
||||||
|
const [assignedAgent, setAssignedAgent] = useState<string | null>(null);
|
||||||
|
const [loading, setLoading] = useState(true);
|
||||||
|
const [error, setError] = useState<string | null>(null);
|
||||||
|
const [agentInfo, setAgentInfo] = useState<AgentInfo | null>(null);
|
||||||
|
const [agentLog, setAgentLog] = useState<string[]>([]);
|
||||||
|
const [agentStatus, setAgentStatus] = useState<AgentStatusValue | null>(null);
|
||||||
|
const [testResults, setTestResults] = useState<TestResultsResponse | null>(
|
||||||
|
null,
|
||||||
|
);
|
||||||
|
const [tokenCost, setTokenCost] = useState<TokenCostResponse | null>(null);
|
||||||
|
const [agentConfig, setAgentConfig] = useState<AgentConfigInfo[]>([]);
|
||||||
|
const [assigning, setAssigning] = useState(false);
|
||||||
|
const [assignError, setAssignError] = useState<string | null>(null);
|
||||||
|
const panelRef = useRef<HTMLDivElement>(null);
|
||||||
|
const cleanupRef = useRef<(() => void) | null>(null);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
setLoading(true);
|
||||||
|
setError(null);
|
||||||
|
api
|
||||||
|
.getWorkItemContent(storyId)
|
||||||
|
.then((data) => {
|
||||||
|
setContent(data.content);
|
||||||
|
setStage(data.stage);
|
||||||
|
setName(data.name);
|
||||||
|
setAssignedAgent(data.agent);
|
||||||
|
})
|
||||||
|
.catch((err: unknown) => {
|
||||||
|
setError(err instanceof Error ? err.message : "Failed to load content");
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
setLoading(false);
|
||||||
|
});
|
||||||
|
}, [storyId]);
|
||||||
|
|
||||||
|
// Fetch test results on mount and when pipeline updates arrive.
|
||||||
|
useEffect(() => {
|
||||||
|
api
|
||||||
|
.getTestResults(storyId)
|
||||||
|
.then((data) => {
|
||||||
|
setTestResults(data);
|
||||||
|
})
|
||||||
|
.catch(() => {
|
||||||
|
// Silently ignore — test results may not exist yet.
|
||||||
|
});
|
||||||
|
}, [storyId, pipelineVersion]);
|
||||||
|
|
||||||
|
// Fetch token cost on mount and when pipeline updates arrive.
|
||||||
|
useEffect(() => {
|
||||||
|
api
|
||||||
|
.getTokenCost(storyId)
|
||||||
|
.then((data) => {
|
||||||
|
setTokenCost(data);
|
||||||
|
})
|
||||||
|
.catch(() => {
|
||||||
|
// Silently ignore — token cost may not exist yet.
|
||||||
|
});
|
||||||
|
}, [storyId, pipelineVersion]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
cleanupRef.current?.();
|
||||||
|
cleanupRef.current = null;
|
||||||
|
setAgentInfo(null);
|
||||||
|
setAgentLog([]);
|
||||||
|
setAgentStatus(null);
|
||||||
|
|
||||||
|
agentsApi
|
||||||
|
.listAgents()
|
||||||
|
.then((agents) => {
|
||||||
|
const agent = agents.find((a) => a.story_id === storyId);
|
||||||
|
if (!agent) return;
|
||||||
|
setAgentInfo(agent);
|
||||||
|
setAgentStatus(agent.status);
|
||||||
|
|
||||||
|
if (agent.status === "running" || agent.status === "pending") {
|
||||||
|
const cleanup = subscribeAgentStream(
|
||||||
|
storyId,
|
||||||
|
agent.agent_name,
|
||||||
|
(event: AgentEvent) => {
|
||||||
|
switch (event.type) {
|
||||||
|
case "status":
|
||||||
|
setAgentStatus((event.status as AgentStatusValue) ?? null);
|
||||||
|
break;
|
||||||
|
case "output":
|
||||||
|
setAgentLog((prev) => [...prev, event.text ?? ""]);
|
||||||
|
break;
|
||||||
|
case "done":
|
||||||
|
setAgentStatus("completed");
|
||||||
|
break;
|
||||||
|
case "error":
|
||||||
|
setAgentStatus("failed");
|
||||||
|
setAgentLog((prev) => [
|
||||||
|
...prev,
|
||||||
|
`[ERROR] ${event.message ?? "Unknown error"}`,
|
||||||
|
]);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
);
|
||||||
|
cleanupRef.current = cleanup;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch((err: unknown) => {
|
||||||
|
console.error("Failed to load agents:", err);
|
||||||
|
});
|
||||||
|
|
||||||
|
return () => {
|
||||||
|
cleanupRef.current?.();
|
||||||
|
cleanupRef.current = null;
|
||||||
|
};
|
||||||
|
}, [storyId]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
const handleKeyDown = (e: KeyboardEvent) => {
|
||||||
|
if (e.key === "Escape") {
|
||||||
|
onClose();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
window.addEventListener("keydown", handleKeyDown);
|
||||||
|
return () => window.removeEventListener("keydown", handleKeyDown);
|
||||||
|
}, [onClose]);
|
||||||
|
|
||||||
|
// Load agent config roster for the dropdown.
|
||||||
|
useEffect(() => {
|
||||||
|
agentsApi
|
||||||
|
.getAgentConfig()
|
||||||
|
.then((config) => {
|
||||||
|
setAgentConfig(config);
|
||||||
|
})
|
||||||
|
.catch((err: unknown) => {
|
||||||
|
console.error("Failed to load agent config:", err);
|
||||||
|
});
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
// Map pipeline stage → agent stage filter.
|
||||||
|
const STAGE_TO_AGENT_STAGE: Record<string, string> = {
|
||||||
|
current: "coder",
|
||||||
|
qa: "qa",
|
||||||
|
merge: "mergemaster",
|
||||||
|
};
|
||||||
|
|
||||||
|
const filteredAgents = agentConfig.filter(
|
||||||
|
(a) => a.stage === STAGE_TO_AGENT_STAGE[stage],
|
||||||
|
);
|
||||||
|
|
||||||
|
// The currently active agent name for this story (running or pending).
|
||||||
|
const activeAgentName =
|
||||||
|
agentInfo && (agentStatus === "running" || agentStatus === "pending")
|
||||||
|
? agentInfo.agent_name
|
||||||
|
: null;
|
||||||
|
|
||||||
|
const handleAgentAssign = useCallback(
|
||||||
|
async (selectedAgentName: string) => {
|
||||||
|
setAssigning(true);
|
||||||
|
setAssignError(null);
|
||||||
|
try {
|
||||||
|
// Stop current running agent if there is one.
|
||||||
|
if (activeAgentName) {
|
||||||
|
await agentsApi.stopAgent(storyId, activeAgentName);
|
||||||
|
}
|
||||||
|
// Start the new agent (or skip if "none" selected).
|
||||||
|
if (selectedAgentName) {
|
||||||
|
await agentsApi.startAgent(storyId, selectedAgentName);
|
||||||
|
}
|
||||||
|
} catch (err: unknown) {
|
||||||
|
setAssignError(
|
||||||
|
err instanceof Error ? err.message : "Failed to assign agent",
|
||||||
|
);
|
||||||
|
} finally {
|
||||||
|
setAssigning(false);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
[storyId, activeAgentName],
|
||||||
|
);
|
||||||
|
|
||||||
|
const stageLabel = STAGE_LABELS[stage] ?? stage;
|
||||||
|
const hasTestResults =
|
||||||
|
testResults &&
|
||||||
|
(testResults.unit.length > 0 || testResults.integration.length > 0);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div
|
||||||
|
data-testid="work-item-detail-panel"
|
||||||
|
ref={panelRef}
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
height: "100%",
|
||||||
|
overflow: "hidden",
|
||||||
|
background: "#1a1a1a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
border: "1px solid #333",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{/* Header */}
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
padding: "12px 16px",
|
||||||
|
borderBottom: "1px solid #333",
|
||||||
|
flexShrink: 0,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "2px",
|
||||||
|
minWidth: 0,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-title"
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.95em",
|
||||||
|
color: "#ececec",
|
||||||
|
overflow: "hidden",
|
||||||
|
textOverflow: "ellipsis",
|
||||||
|
whiteSpace: "nowrap",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{name ?? storyId}
|
||||||
|
</div>
|
||||||
|
{stage && (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-stage"
|
||||||
|
style={{ fontSize: "0.75em", color: "#888" }}
|
||||||
|
>
|
||||||
|
{stageLabel}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
{filteredAgents.length > 0 && (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-agent-assignment"
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
gap: "6px",
|
||||||
|
marginTop: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span style={{ fontSize: "0.75em", color: "#666" }}>Agent:</span>
|
||||||
|
<select
|
||||||
|
data-testid="agent-assignment-dropdown"
|
||||||
|
disabled={assigning}
|
||||||
|
value={activeAgentName ?? assignedAgent ?? ""}
|
||||||
|
onChange={(e) => handleAgentAssign(e.target.value)}
|
||||||
|
style={{
|
||||||
|
background: "#1a1a1a",
|
||||||
|
border: "1px solid #444",
|
||||||
|
borderRadius: "4px",
|
||||||
|
color: "#ccc",
|
||||||
|
cursor: assigning ? "not-allowed" : "pointer",
|
||||||
|
fontSize: "0.75em",
|
||||||
|
padding: "2px 6px",
|
||||||
|
opacity: assigning ? 0.6 : 1,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<option value="">— none —</option>
|
||||||
|
{filteredAgents.map((a) => {
|
||||||
|
const isRunning =
|
||||||
|
agentInfo?.agent_name === a.name &&
|
||||||
|
agentStatus === "running";
|
||||||
|
const isPending =
|
||||||
|
agentInfo?.agent_name === a.name &&
|
||||||
|
agentStatus === "pending";
|
||||||
|
const statusLabel = isRunning
|
||||||
|
? " — running"
|
||||||
|
: isPending
|
||||||
|
? " — pending"
|
||||||
|
: " — idle";
|
||||||
|
const modelPart = a.model ? ` (${a.model})` : "";
|
||||||
|
return (
|
||||||
|
<option key={a.name} value={a.name}>
|
||||||
|
{a.name}
|
||||||
|
{modelPart}
|
||||||
|
{statusLabel}
|
||||||
|
</option>
|
||||||
|
);
|
||||||
|
})}
|
||||||
|
</select>
|
||||||
|
{assigning && (
|
||||||
|
<span style={{ fontSize: "0.7em", color: "#888" }}>
|
||||||
|
Assigning…
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
{assignError && (
|
||||||
|
<span
|
||||||
|
data-testid="agent-assignment-error"
|
||||||
|
style={{ fontSize: "0.7em", color: "#f85149" }}
|
||||||
|
>
|
||||||
|
{assignError}
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
{filteredAgents.length === 0 && assignedAgent ? (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-assigned-agent"
|
||||||
|
style={{ fontSize: "0.75em", color: "#888" }}
|
||||||
|
>
|
||||||
|
Agent: {assignedAgent}
|
||||||
|
</div>
|
||||||
|
) : null}
|
||||||
|
</div>
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
data-testid="detail-panel-close"
|
||||||
|
onClick={onClose}
|
||||||
|
style={{
|
||||||
|
background: "none",
|
||||||
|
border: "1px solid #444",
|
||||||
|
borderRadius: "6px",
|
||||||
|
color: "#aaa",
|
||||||
|
cursor: "pointer",
|
||||||
|
padding: "4px 10px",
|
||||||
|
fontSize: "0.8em",
|
||||||
|
flexShrink: 0,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Close
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Scrollable content area */}
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
flex: 1,
|
||||||
|
overflowY: "auto",
|
||||||
|
padding: "16px",
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "16px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{loading && (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-loading"
|
||||||
|
style={{ color: "#666", fontSize: "0.85em" }}
|
||||||
|
>
|
||||||
|
Loading...
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
{error && (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-error"
|
||||||
|
style={{ color: "#ff7b72", fontSize: "0.85em" }}
|
||||||
|
>
|
||||||
|
{error}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
{!loading && !error && content !== null && (
|
||||||
|
<div
|
||||||
|
data-testid="detail-panel-content"
|
||||||
|
className="markdown-body"
|
||||||
|
style={{ fontSize: "0.9em", lineHeight: 1.6 }}
|
||||||
|
>
|
||||||
|
<Markdown
|
||||||
|
components={{
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: react-markdown requires any for component props
|
||||||
|
h1: ({ children }: any) => (
|
||||||
|
<h1 style={{ fontSize: "1.2em" }}>{children}</h1>
|
||||||
|
),
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: react-markdown requires any for component props
|
||||||
|
h2: ({ children }: any) => (
|
||||||
|
<h2 style={{ fontSize: "1.1em" }}>{children}</h2>
|
||||||
|
),
|
||||||
|
// biome-ignore lint/suspicious/noExplicitAny: react-markdown requires any for component props
|
||||||
|
h3: ({ children }: any) => (
|
||||||
|
<h3 style={{ fontSize: "1em" }}>{children}</h3>
|
||||||
|
),
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{content}
|
||||||
|
</Markdown>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Token Cost section */}
|
||||||
|
<div
|
||||||
|
data-testid="token-cost-section"
|
||||||
|
style={{
|
||||||
|
border: "1px solid #2a2a2a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "10px 12px",
|
||||||
|
background: "#161616",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.8em",
|
||||||
|
color: "#555",
|
||||||
|
marginBottom: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Token Cost
|
||||||
|
</div>
|
||||||
|
{tokenCost && tokenCost.agents.length > 0 ? (
|
||||||
|
<div data-testid="token-cost-content">
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
color: "#888",
|
||||||
|
marginBottom: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Total:{" "}
|
||||||
|
<span data-testid="token-cost-total" style={{ color: "#ccc" }}>
|
||||||
|
${tokenCost.total_cost_usd.toFixed(6)}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
{tokenCost.agents.map((agent: AgentCostEntry) => (
|
||||||
|
<div
|
||||||
|
key={agent.agent_name}
|
||||||
|
data-testid={`token-cost-agent-${agent.agent_name}`}
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
color: "#888",
|
||||||
|
padding: "4px 0",
|
||||||
|
borderTop: "1px solid #222",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
marginBottom: "2px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<span style={{ color: "#ccc", fontWeight: 600 }}>
|
||||||
|
{agent.agent_name}
|
||||||
|
{agent.model ? (
|
||||||
|
<span
|
||||||
|
style={{ color: "#666", fontWeight: 400 }}
|
||||||
|
>{` (${agent.model})`}</span>
|
||||||
|
) : null}
|
||||||
|
</span>
|
||||||
|
<span style={{ color: "#aaa" }}>
|
||||||
|
${agent.total_cost_usd.toFixed(6)}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<div style={{ color: "#555" }}>
|
||||||
|
in {agent.input_tokens.toLocaleString()} / out{" "}
|
||||||
|
{agent.output_tokens.toLocaleString()}
|
||||||
|
{(agent.cache_creation_input_tokens > 0 ||
|
||||||
|
agent.cache_read_input_tokens > 0) && (
|
||||||
|
<>
|
||||||
|
{" "}
|
||||||
|
/ cache +
|
||||||
|
{agent.cache_creation_input_tokens.toLocaleString()}{" "}
|
||||||
|
read {agent.cache_read_input_tokens.toLocaleString()}
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div
|
||||||
|
data-testid="token-cost-empty"
|
||||||
|
style={{ fontSize: "0.75em", color: "#444" }}
|
||||||
|
>
|
||||||
|
No token data recorded
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Test Results section */}
|
||||||
|
<div
|
||||||
|
data-testid="test-results-section"
|
||||||
|
style={{
|
||||||
|
border: "1px solid #2a2a2a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "10px 12px",
|
||||||
|
background: "#161616",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.8em",
|
||||||
|
color: "#555",
|
||||||
|
marginBottom: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Test Results
|
||||||
|
</div>
|
||||||
|
{hasTestResults ? (
|
||||||
|
<div
|
||||||
|
data-testid="test-results-content"
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "12px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<TestSection
|
||||||
|
title="Unit Tests"
|
||||||
|
tests={testResults.unit}
|
||||||
|
testId="test-section-unit"
|
||||||
|
/>
|
||||||
|
<TestSection
|
||||||
|
title="Integration Tests"
|
||||||
|
tests={testResults.integration}
|
||||||
|
testId="test-section-integration"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div
|
||||||
|
data-testid="test-results-empty"
|
||||||
|
style={{ fontSize: "0.75em", color: "#444" }}
|
||||||
|
>
|
||||||
|
No test results recorded
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
flexDirection: "column",
|
||||||
|
gap: "8px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{/* Agent Logs section */}
|
||||||
|
{!agentInfo && (
|
||||||
|
<div
|
||||||
|
data-testid="placeholder-agent-logs"
|
||||||
|
style={{
|
||||||
|
border: "1px solid #2a2a2a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "10px 12px",
|
||||||
|
background: "#161616",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.8em",
|
||||||
|
color: "#555",
|
||||||
|
marginBottom: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Agent Logs
|
||||||
|
</div>
|
||||||
|
<div style={{ fontSize: "0.75em", color: "#444" }}>
|
||||||
|
Coming soon
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
{agentInfo && (
|
||||||
|
<div
|
||||||
|
data-testid="agent-logs-section"
|
||||||
|
style={{
|
||||||
|
border: "1px solid #2a2a2a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "10px 12px",
|
||||||
|
background: "#161616",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
display: "flex",
|
||||||
|
alignItems: "center",
|
||||||
|
justifyContent: "space-between",
|
||||||
|
marginBottom: "6px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.8em",
|
||||||
|
color: "#888",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Agent Logs
|
||||||
|
</div>
|
||||||
|
{agentStatus && (
|
||||||
|
<div
|
||||||
|
data-testid="agent-status-badge"
|
||||||
|
style={{
|
||||||
|
fontSize: "0.7em",
|
||||||
|
color: STATUS_COLORS[agentStatus],
|
||||||
|
fontWeight: 600,
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{agentInfo.agent_name} — {agentStatus}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
{agentLog.length > 0 ? (
|
||||||
|
<div
|
||||||
|
data-testid="agent-log-output"
|
||||||
|
style={{
|
||||||
|
fontSize: "0.75em",
|
||||||
|
fontFamily: "monospace",
|
||||||
|
color: "#ccc",
|
||||||
|
whiteSpace: "pre-wrap",
|
||||||
|
wordBreak: "break-word",
|
||||||
|
lineHeight: "1.5",
|
||||||
|
maxHeight: "200px",
|
||||||
|
overflowY: "auto",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{agentLog.join("")}
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div style={{ fontSize: "0.75em", color: "#444" }}>
|
||||||
|
{agentStatus === "running" || agentStatus === "pending"
|
||||||
|
? "Waiting for output..."
|
||||||
|
: "No output."}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Placeholder sections for future content */}
|
||||||
|
{(
|
||||||
|
[{ id: "coverage", label: "Coverage" }] as {
|
||||||
|
id: string;
|
||||||
|
label: string;
|
||||||
|
}[]
|
||||||
|
).map(({ id, label }) => (
|
||||||
|
<div
|
||||||
|
key={id}
|
||||||
|
data-testid={`placeholder-${id}`}
|
||||||
|
style={{
|
||||||
|
border: "1px solid #2a2a2a",
|
||||||
|
borderRadius: "8px",
|
||||||
|
padding: "10px 12px",
|
||||||
|
background: "#161616",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
<div
|
||||||
|
style={{
|
||||||
|
fontWeight: 600,
|
||||||
|
fontSize: "0.8em",
|
||||||
|
color: "#555",
|
||||||
|
marginBottom: "4px",
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
{label}
|
||||||
|
</div>
|
||||||
|
<div style={{ fontSize: "0.75em", color: "#444" }}>
|
||||||
|
Coming soon
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
170
frontend/src/components/selection/ProjectPathInput.tsx
Normal file
170
frontend/src/components/selection/ProjectPathInput.tsx
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
/** One directory suggestion shown in the path-completion dropdown. */
export interface ProjectPathMatch {
  /** Directory name rendered (with fuzzy-match highlighting) in the list. */
  name: string;
  /** Path passed to `onAcceptMatch` when this suggestion is chosen. */
  path: string;
}
|
||||||
|
|
||||||
|
/** Props for {@link ProjectPathInput}. */
export interface ProjectPathInputProps {
  /** Current text of the path input. */
  value: string;
  /** Called with the new text whenever the input changes. */
  onChange: (value: string) => void;
  /** Keyboard handler forwarded to the underlying <input>. */
  onKeyDown: (event: React.KeyboardEvent<HTMLInputElement>) => void;
  /** Ghost text rendered after the typed value as an inline suggestion. */
  suggestionTail: string;
  /** Directory suggestions for the dropdown; an empty list hides it. */
  matchList: ProjectPathMatch[];
  /** Index of the currently highlighted entry in `matchList`. */
  selectedMatch: number;
  /** Called when a suggestion becomes highlighted (mouse enter). */
  onSelectMatch: (index: number) => void;
  /** Called with the suggestion's path when it is chosen. */
  onAcceptMatch: (path: string) => void;
  /** Dismisses the suggestion dropdown. */
  onCloseSuggestions: () => void;
  /** Partial path segment being typed; used to highlight matches. */
  currentPartial: string;
}
|
||||||
|
|
||||||
|
function renderHighlightedMatch(text: string, query: string) {
|
||||||
|
if (!query) return text;
|
||||||
|
let qIndex = 0;
|
||||||
|
const lowerQuery = query.toLowerCase();
|
||||||
|
const counts = new Map<string, number>();
|
||||||
|
return text.split("").map((char) => {
|
||||||
|
const isMatch =
|
||||||
|
qIndex < lowerQuery.length && char.toLowerCase() === lowerQuery[qIndex];
|
||||||
|
if (isMatch) {
|
||||||
|
qIndex += 1;
|
||||||
|
}
|
||||||
|
const count = counts.get(char) ?? 0;
|
||||||
|
counts.set(char, count + 1);
|
||||||
|
return (
|
||||||
|
<span
|
||||||
|
key={`${char}-${count}`}
|
||||||
|
style={isMatch ? { fontWeight: 600, color: "#222" } : undefined}
|
||||||
|
>
|
||||||
|
{char}
|
||||||
|
</span>
|
||||||
|
);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Monospace path input with an inline ghost-text suggestion overlay and a
 * dropdown of directory matches. Purely presentational — all completion
 * state and behavior are injected via props.
 */
export function ProjectPathInput({
  value,
  onChange,
  onKeyDown,
  suggestionTail,
  matchList,
  selectedMatch,
  onSelectMatch,
  onAcceptMatch,
  onCloseSuggestions,
  currentPartial,
}: ProjectPathInputProps) {
  return (
    <div
      style={{
        position: "relative",
        marginTop: "12px",
        // Reserves room below the input so the absolutely-positioned
        // dropdown does not overlap following content.
        marginBottom: "170px",
      }}
    >
      {/* Ghost-text layer: typed value + suggestion tail drawn behind the
          transparent input; pointerEvents none so clicks reach the input. */}
      <div
        style={{
          position: "absolute",
          inset: 0,
          padding: "10px",
          color: "#aaa",
          fontFamily: "monospace",
          whiteSpace: "pre",
          overflow: "hidden",
          textOverflow: "ellipsis",
          pointerEvents: "none",
        }}
      >
        {value}
        {suggestionTail}
      </div>
      {/* Transparent input layered above the ghost text. */}
      <input
        type="text"
        value={value}
        placeholder="/path/to/project"
        onChange={(event) => onChange(event.target.value)}
        onKeyDown={onKeyDown}
        style={{
          width: "100%",
          padding: "10px",
          fontFamily: "monospace",
          background: "transparent",
          position: "relative",
          zIndex: 1,
        }}
      />
      {matchList.length > 0 && (
        <div
          style={{
            position: "absolute",
            top: "100%",
            left: 0,
            right: 0,
            marginTop: "6px",
            border: "1px solid #ddd",
            borderRadius: "6px",
            overflow: "hidden",
            background: "#fff",
            fontFamily: "monospace",
            height: "160px",
            overflowY: "auto",
            boxSizing: "border-box",
            zIndex: 2,
          }}
        >
          {/* Dropdown header row with a close button. */}
          <div
            style={{
              display: "flex",
              justifyContent: "flex-end",
              alignItems: "center",
              padding: "4px 6px",
              borderBottom: "1px solid #eee",
              background: "#fafafa",
            }}
          >
            <button
              type="button"
              aria-label="Close suggestions"
              onClick={onCloseSuggestions}
              style={{
                width: "24px",
                height: "24px",
                borderRadius: "4px",
                border: "1px solid #ddd",
                background: "#fff",
                cursor: "pointer",
                lineHeight: 1,
              }}
            >
              ×
            </button>
          </div>
          {matchList.map((match, index) => {
            const isSelected = index === selectedMatch;
            return (
              <button
                key={match.path}
                type="button"
                onMouseEnter={() => onSelectMatch(index)}
                // mousedown (not click) so the choice lands before any blur;
                // preventDefault stops the button stealing focus from the input.
                onMouseDown={(event) => {
                  event.preventDefault();
                  onSelectMatch(index);
                  onAcceptMatch(match.path);
                }}
                style={{
                  width: "100%",
                  textAlign: "left",
                  padding: "6px 8px",
                  border: "none",
                  background: isSelected ? "#f0f0f0" : "transparent",
                  cursor: "pointer",
                  fontFamily: "inherit",
                }}
              >
                {renderHighlightedMatch(match.name, currentPartial)}/
              </button>
            );
          })}
        </div>
      )}
    </div>
  );
}
|
||||||
66
frontend/src/components/selection/RecentProjectsList.tsx
Normal file
66
frontend/src/components/selection/RecentProjectsList.tsx
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
/** Props for {@link RecentProjectsList}. */
export interface RecentProjectsListProps {
  /** Project paths to list, one row each. */
  projects: string[];
  /** Opens the project at the given path. */
  onOpenProject: (path: string) => void;
  /** Removes the given path from the recent list. */
  onForgetProject: (path: string) => void;
}
|
||||||
|
|
||||||
|
/**
 * Flat list of recently opened projects. Each row has an open button
 * labelled with the project's last path segment (full path in the tooltip)
 * and an "×" button that forgets the entry.
 */
export function RecentProjectsList({
  projects,
  onOpenProject,
  onForgetProject,
}: RecentProjectsListProps) {
  return (
    <div style={{ marginTop: "12px" }}>
      <div style={{ fontSize: "0.9em", color: "#666" }}>Recent projects</div>
      <ul style={{ listStyle: "none", padding: 0, margin: "8px 0 0" }}>
        {projects.map((project) => {
          // Last non-empty segment; falls back to the raw path (e.g. "/").
          const displayName =
            project.split("/").filter(Boolean).pop() ?? project;
          return (
            <li key={project} style={{ marginBottom: "6px" }}>
              <div
                style={{ display: "flex", gap: "6px", alignItems: "center" }}
              >
                <button
                  type="button"
                  onClick={() => onOpenProject(project)}
                  style={{
                    flex: 1,
                    textAlign: "left",
                    padding: "8px 10px",
                    borderRadius: "6px",
                    border: "1px solid #ddd",
                    background: "#f7f7f7",
                    cursor: "pointer",
                    fontFamily: "monospace",
                    fontSize: "0.9em",
                  }}
                  title={project}
                >
                  {displayName}
                </button>
                <button
                  type="button"
                  aria-label={`Forget ${displayName}`}
                  onClick={() => onForgetProject(project)}
                  style={{
                    width: "32px",
                    height: "32px",
                    borderRadius: "6px",
                    border: "1px solid #ddd",
                    background: "#fff",
                    cursor: "pointer",
                    fontSize: "1.1em",
                    lineHeight: 1,
                  }}
                >
                  ×
                </button>
              </div>
            </li>
          );
        })}
      </ul>
    </div>
  );
}
|
||||||
136
frontend/src/components/selection/SelectionScreen.test.tsx
Normal file
136
frontend/src/components/selection/SelectionScreen.test.tsx
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
import { fireEvent, render, screen } from "@testing-library/react";
|
||||||
|
import type { KeyboardEvent } from "react";
|
||||||
|
import { describe, expect, it, vi } from "vitest";
|
||||||
|
import type { SelectionScreenProps } from "./SelectionScreen";
|
||||||
|
import { SelectionScreen } from "./SelectionScreen";
|
||||||
|
|
||||||
|
/**
 * Builds a complete SelectionScreenProps value with inert defaults
 * (vi.fn() stubs, empty strings/arrays, nulls), so each test overrides
 * only the fields it exercises.
 */
function makeProps(
  overrides: Partial<SelectionScreenProps> = {},
): SelectionScreenProps {
  return {
    knownProjects: [],
    onOpenProject: vi.fn(),
    onForgetProject: vi.fn(),
    pathInput: "",
    homeDir: null,
    onPathInputChange: vi.fn(),
    // Cast so the mock satisfies the typed keyboard-handler signature.
    onPathInputKeyDown: vi.fn() as (
      event: KeyboardEvent<HTMLInputElement>,
    ) => void,
    isOpening: false,
    suggestionTail: "",
    matchList: [],
    selectedMatch: -1,
    onSelectMatch: vi.fn(),
    onAcceptMatch: vi.fn(),
    onCloseSuggestions: vi.fn(),
    completionError: null,
    currentPartial: "",
    ...overrides,
  };
}
|
||||||
|
|
||||||
|
// Rendering and interaction tests for the SelectionScreen component.
describe("SelectionScreen", () => {
  it("renders the title and description", () => {
    render(<SelectionScreen {...makeProps()} />);
    expect(screen.getByText("Storkit")).toBeInTheDocument();
    expect(
      screen.getByText("Paste or complete a project path to start."),
    ).toBeInTheDocument();
  });

  it("renders recent projects list when knownProjects is non-empty", () => {
    render(
      <SelectionScreen
        {...makeProps({ knownProjects: ["/Users/test/project"] })}
      />,
    );
    expect(screen.getByText("Recent projects")).toBeInTheDocument();
  });

  it("does not render recent projects list when knownProjects is empty", () => {
    render(<SelectionScreen {...makeProps({ knownProjects: [] })} />);
    expect(screen.queryByText("Recent projects")).not.toBeInTheDocument();
  });

  it("calls onOpenProject when Open Project button is clicked", () => {
    const onOpenProject = vi.fn();
    render(
      <SelectionScreen
        {...makeProps({ pathInput: "/my/path", onOpenProject })}
      />,
    );
    fireEvent.click(screen.getByText("Open Project"));
    expect(onOpenProject).toHaveBeenCalledWith("/my/path");
  });

  it("shows Opening... text and disables buttons when isOpening is true", () => {
    render(<SelectionScreen {...makeProps({ isOpening: true })} />);
    expect(screen.getByText("Opening...")).toBeInTheDocument();
    const buttons = screen.getAllByRole("button");
    // Only the two action buttons are asserted disabled; other buttons
    // (e.g. in child components) are ignored.
    for (const button of buttons) {
      if (
        button.textContent === "Opening..." ||
        button.textContent === "New Project"
      ) {
        expect(button).toBeDisabled();
      }
    }
  });

  it("displays completion error when completionError is provided", () => {
    render(
      <SelectionScreen {...makeProps({ completionError: "Path not found" })} />,
    );
    expect(screen.getByText("Path not found")).toBeInTheDocument();
  });

  it("does not display error div when completionError is null", () => {
    const { container } = render(<SelectionScreen {...makeProps()} />);
    // The error element is identified by its inline red color style.
    const errorDiv = container.querySelector('[style*="color: red"]');
    expect(errorDiv).toBeNull();
  });

  it("New Project button calls onPathInputChange with homeDir (trailing slash appended)", () => {
    const onPathInputChange = vi.fn();
    const onCloseSuggestions = vi.fn();
    render(
      <SelectionScreen
        {...makeProps({
          homeDir: "/Users/test",
          onPathInputChange,
          onCloseSuggestions,
        })}
      />,
    );
    fireEvent.click(screen.getByText("New Project"));
    expect(onPathInputChange).toHaveBeenCalledWith("/Users/test/");
    expect(onCloseSuggestions).toHaveBeenCalled();
  });

  it("New Project button uses homeDir as-is when it already ends with /", () => {
    const onPathInputChange = vi.fn();
    const onCloseSuggestions = vi.fn();
    render(
      <SelectionScreen
        {...makeProps({
          homeDir: "/Users/test/",
          onPathInputChange,
          onCloseSuggestions,
        })}
      />,
    );
    fireEvent.click(screen.getByText("New Project"));
    expect(onPathInputChange).toHaveBeenCalledWith("/Users/test/");
    expect(onCloseSuggestions).toHaveBeenCalled();
  });

  it("New Project button uses empty string when homeDir is null", () => {
    const onPathInputChange = vi.fn();
    render(
      <SelectionScreen {...makeProps({ homeDir: null, onPathInputChange })} />,
    );
    fireEvent.click(screen.getByText("New Project"));
    expect(onPathInputChange).toHaveBeenCalledWith("");
  });
});
|
||||||
116
frontend/src/components/selection/SelectionScreen.tsx
Normal file
116
frontend/src/components/selection/SelectionScreen.tsx
Normal file
@@ -0,0 +1,116 @@
|
|||||||
|
import type { KeyboardEvent } from "react";
|
||||||
|
import { ProjectPathInput } from "./ProjectPathInput.tsx";
|
||||||
|
import { RecentProjectsList } from "./RecentProjectsList.tsx";
|
||||||
|
|
||||||
|
/**
 * A directory suggestion entry forwarded to the path input.
 * NOTE(review): structurally identical to ProjectPathMatch in
 * ProjectPathInput.tsx — consider reusing that type instead.
 */
export interface RecentProjectMatch {
  /** Name shown in the suggestion list. */
  name: string;
  /** Path handed to `onAcceptMatch` when chosen. */
  path: string;
}
|
||||||
|
|
||||||
|
/** Props for {@link SelectionScreen}. */
export interface SelectionScreenProps {
  /** Previously opened project paths shown in the recent list. */
  knownProjects: string[];
  /** Opens the project at the given path. */
  onOpenProject: (path: string) => void;
  /** Removes the given path from the recent-projects list. */
  onForgetProject: (path: string) => void;
  /** Current text of the path input. */
  pathInput: string;
  /** User's home directory; seeds the input when "New Project" is clicked. */
  homeDir?: string | null;
  /** Replaces the path input text. */
  onPathInputChange: (value: string) => void;
  /** Keyboard handler forwarded to the path input. */
  onPathInputKeyDown: (event: KeyboardEvent<HTMLInputElement>) => void;
  /** When true, shows "Opening..." and disables both action buttons. */
  isOpening: boolean;
  /** Inline ghost-text suggestion appended after the typed path. */
  suggestionTail: string;
  /** Directory suggestions for the completion dropdown. */
  matchList: RecentProjectMatch[];
  /** Index of the highlighted suggestion. */
  selectedMatch: number;
  /** Highlights the suggestion at the given index. */
  onSelectMatch: (index: number) => void;
  /** Accepts the suggestion with the given path. */
  onAcceptMatch: (path: string) => void;
  /** Dismisses the suggestion dropdown. */
  onCloseSuggestions: () => void;
  /** Error message from path completion, or null when there is none. */
  completionError: string | null;
  /** Partial path segment currently being typed. */
  currentPartial: string;
}
|
||||||
|
|
||||||
|
/**
 * Landing screen: open a recent project or type/complete a project path.
 * Purely presentational — all state and handlers are injected via props.
 */
export function SelectionScreen({
  knownProjects,
  onOpenProject,
  onForgetProject,
  pathInput,
  homeDir,
  onPathInputChange,
  onPathInputKeyDown,
  isOpening,
  suggestionTail,
  matchList,
  selectedMatch,
  onSelectMatch,
  onAcceptMatch,
  onCloseSuggestions,
  completionError,
  currentPartial,
}: SelectionScreenProps) {
  // Normalize homeDir to always end with "/" ("" when unknown), so the
  // "New Project" button seeds the input with a directory prefix.
  const resolvedHomeDir = homeDir
    ? homeDir.endsWith("/")
      ? homeDir
      : `${homeDir}/`
    : "";
  return (
    <div
      className="selection-screen"
      style={{ padding: "2rem", maxWidth: "800px", margin: "0 auto" }}
    >
      <h1>Storkit</h1>
      <p>Paste or complete a project path to start.</p>

      {knownProjects.length > 0 && (
        <RecentProjectsList
          projects={knownProjects}
          onOpenProject={onOpenProject}
          onForgetProject={onForgetProject}
        />
      )}

      <ProjectPathInput
        value={pathInput}
        onChange={onPathInputChange}
        onKeyDown={onPathInputKeyDown}
        suggestionTail={suggestionTail}
        matchList={matchList}
        selectedMatch={selectedMatch}
        onSelectMatch={onSelectMatch}
        onAcceptMatch={onAcceptMatch}
        onCloseSuggestions={onCloseSuggestions}
        currentPartial={currentPartial}
      />

      <div
        style={{
          display: "flex",
          gap: "8px",
          marginTop: "8px",
          alignItems: "center",
        }}
      >
        <button
          type="button"
          onClick={() => onOpenProject(pathInput)}
          disabled={isOpening}
        >
          {isOpening ? "Opening..." : "Open Project"}
        </button>
        {/* Seeds the input with the home directory and closes any open
            suggestion dropdown. */}
        <button
          type="button"
          onClick={() => {
            onPathInputChange(resolvedHomeDir);
            onCloseSuggestions();
          }}
          disabled={isOpening}
        >
          New Project
        </button>
        <div style={{ fontSize: "0.85em", color: "#666" }}>
          Press Tab to complete the next path segment
        </div>
      </div>

      {completionError && (
        <div style={{ color: "red", marginTop: "8px" }}>{completionError}</div>
      )}
    </div>
  );
}
|
||||||
461
frontend/src/components/selection/usePathCompletion.test.ts
Normal file
461
frontend/src/components/selection/usePathCompletion.test.ts
Normal file
@@ -0,0 +1,461 @@
|
|||||||
|
import { act, renderHook, waitFor } from "@testing-library/react";
|
||||||
|
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import type { FileEntry } from "./usePathCompletion";
|
||||||
|
import {
|
||||||
|
getCurrentPartial,
|
||||||
|
isFuzzyMatch,
|
||||||
|
usePathCompletion,
|
||||||
|
} from "./usePathCompletion";
|
||||||
|
|
||||||
|
// Unit tests for the case-insensitive subsequence matcher.
describe("isFuzzyMatch", () => {
  it("matches when query is empty", () => {
    expect(isFuzzyMatch("anything", "")).toBe(true);
  });

  it("matches exact prefix", () => {
    expect(isFuzzyMatch("Documents", "Doc")).toBe(true);
  });

  it("matches fuzzy subsequence", () => {
    expect(isFuzzyMatch("Documents", "dms")).toBe(true);
  });

  it("is case insensitive", () => {
    expect(isFuzzyMatch("Documents", "DOCU")).toBe(true);
  });

  it("rejects when chars not found in order", () => {
    expect(isFuzzyMatch("abc", "acb")).toBe(false);
  });

  it("rejects completely unrelated", () => {
    expect(isFuzzyMatch("hello", "xyz")).toBe(false);
  });
});
|
||||||
|
|
||||||
|
// Unit tests for extracting the trailing (incomplete) path segment.
describe("getCurrentPartial", () => {
  it("returns empty for empty input", () => {
    expect(getCurrentPartial("")).toBe("");
  });

  it("returns empty when input ends with slash", () => {
    expect(getCurrentPartial("/home/user/")).toBe("");
  });

  it("returns last segment", () => {
    expect(getCurrentPartial("/home/user/Doc")).toBe("Doc");
  });

  it("returns full input when no slash", () => {
    expect(getCurrentPartial("Doc")).toBe("Doc");
  });

  it("trims then evaluates: trailing-slash input returns empty", () => {
    // " /home/user/ " trims to "/home/user/" which ends with slash
    expect(getCurrentPartial(" /home/user/ ")).toBe("");
  });

  it("trims then returns last segment", () => {
    expect(getCurrentPartial(" /home/user/Doc ")).toBe("Doc");
  });
});
|
||||||
|
|
||||||
|
describe("usePathCompletion hook", () => {
|
||||||
|
const mockListDir = vi.fn<(path: string) => Promise<FileEntry[]>>();
|
||||||
|
|
||||||
|
beforeEach(() => {
|
||||||
|
mockListDir.mockReset();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns empty matchList for empty input", async () => {
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Allow effect + setTimeout(0) to fire
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockListDir).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.current.matchList).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("fetches directory listing and returns matches", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
{ name: ".bashrc", kind: "file" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.current.matchList[0].name).toBe("Documents");
|
||||||
|
expect(result.current.matchList[1].name).toBe("Downloads");
|
||||||
|
expect(result.current.matchList.every((m) => m.path.endsWith("/"))).toBe(
|
||||||
|
true,
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("filters by fuzzy match on partial input", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
{ name: "Desktop", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/Doc",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.current.matchList[0].name).toBe("Documents");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("calls setPathInput when acceptMatch is invoked", () => {
|
||||||
|
const setPathInput = vi.fn();
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/",
|
||||||
|
setPathInput,
|
||||||
|
homeDir: "/home",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.acceptMatch("/home/user/Documents/");
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(setPathInput).toHaveBeenCalledWith("/home/user/Documents/");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses homeDir when input has no slash (bare partial)", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "Doc",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockListDir).toHaveBeenCalledWith("/home/user");
|
||||||
|
expect(result.current.matchList[0].name).toBe("Documents");
|
||||||
|
expect(result.current.matchList[0].path).toBe("/home/user/Documents/");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns early when input has no slash and homeDir is null", async () => {
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "Doc",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: null,
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Wait for debounce + effect to fire
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockListDir).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("returns empty matchList when no dirs match the fuzzy filter", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/zzz",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(mockListDir).toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
// No dirs match "zzz" fuzzy filter, so matchList stays empty
|
||||||
|
expect(result.current.matchList).toEqual([]);
|
||||||
|
});
|
||||||
|
|
||||||
|
it("sets completionError when listDirectoryAbsolute throws an Error", async () => {
|
||||||
|
mockListDir.mockRejectedValue(new Error("Permission denied"));
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/root/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: null,
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.completionError).toBe("Permission denied");
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("sets generic completionError when listDirectoryAbsolute throws a non-Error", async () => {
|
||||||
|
mockListDir.mockRejectedValue("some string error");
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/root/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: null,
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.completionError).toBe(
|
||||||
|
"Failed to compute suggestion.",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
it("clears suggestionTail when selected match path does not start with input", async () => {
|
||||||
|
mockListDir.mockResolvedValue([{ name: "Documents", kind: "dir" }]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "Doc",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Wait for matches to load (path will be /home/user/Documents/)
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
// The match path is "/home/user/Documents/" which does NOT start with "Doc"
|
||||||
|
// so suggestionTail should be ""
|
||||||
|
expect(result.current.suggestionTail).toBe("");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("acceptSelectedMatch calls setPathInput with the selected match path", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
]);
|
||||||
|
const setPathInput = vi.fn();
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/",
|
||||||
|
setPathInput,
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.acceptSelectedMatch();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(setPathInput).toHaveBeenCalledWith("/home/user/Documents/");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("acceptSelectedMatch does nothing when matchList is empty", () => {
|
||||||
|
const setPathInput = vi.fn();
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "",
|
||||||
|
setPathInput,
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.acceptSelectedMatch();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(setPathInput).not.toHaveBeenCalled();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("closeSuggestions clears matchList, selectedMatch, suggestionTail, and completionError", async () => {
|
||||||
|
mockListDir.mockResolvedValue([{ name: "Documents", kind: "dir" }]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.closeSuggestions();
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.current.matchList).toEqual([]);
|
||||||
|
expect(result.current.selectedMatch).toBe(0);
|
||||||
|
expect(result.current.suggestionTail).toBe("");
|
||||||
|
expect(result.current.completionError).toBeNull();
|
||||||
|
});
|
||||||
|
|
||||||
|
it("uses homeDir with trailing slash as-is", async () => {
|
||||||
|
mockListDir.mockResolvedValue([{ name: "Projects", kind: "dir" }]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "Pro",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user/",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockListDir).toHaveBeenCalledWith("/home/user");
|
||||||
|
expect(result.current.matchList[0].path).toBe("/home/user/Projects/");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("handles root directory listing (dir = '/')", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "home", kind: "dir" },
|
||||||
|
{ name: "etc", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: null,
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(mockListDir).toHaveBeenCalledWith("/");
|
||||||
|
expect(result.current.matchList[0].name).toBe("etc");
|
||||||
|
expect(result.current.matchList[1].name).toBe("home");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("computes suggestionTail when match path starts with trimmed input", async () => {
|
||||||
|
mockListDir.mockResolvedValue([{ name: "Documents", kind: "dir" }]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
// path is "/home/user/Documents/" and input is "/home/user/"
|
||||||
|
// so tail should be "Documents/"
|
||||||
|
expect(result.current.suggestionTail).toBe("Documents/");
|
||||||
|
});
|
||||||
|
|
||||||
|
it("setSelectedMatch updates the selected index", async () => {
|
||||||
|
mockListDir.mockResolvedValue([
|
||||||
|
{ name: "Documents", kind: "dir" },
|
||||||
|
{ name: "Downloads", kind: "dir" },
|
||||||
|
]);
|
||||||
|
|
||||||
|
const { result } = renderHook(() =>
|
||||||
|
usePathCompletion({
|
||||||
|
pathInput: "/home/user/",
|
||||||
|
setPathInput: vi.fn(),
|
||||||
|
homeDir: "/home/user",
|
||||||
|
listDirectoryAbsolute: mockListDir,
|
||||||
|
debounceMs: 0,
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
await waitFor(() => {
|
||||||
|
expect(result.current.matchList.length).toBe(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
act(() => {
|
||||||
|
result.current.setSelectedMatch(1);
|
||||||
|
});
|
||||||
|
|
||||||
|
expect(result.current.selectedMatch).toBe(1);
|
||||||
|
// After selecting index 1, suggestionTail should reflect "Downloads/"
|
||||||
|
expect(result.current.suggestionTail).toBe("Downloads/");
|
||||||
|
});
|
||||||
|
});
|
||||||
192
frontend/src/components/selection/usePathCompletion.ts
Normal file
192
frontend/src/components/selection/usePathCompletion.ts
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
|
||||||
|
/** One directory entry as returned by the filesystem listing API. */
export interface FileEntry {
  name: string;
  kind: "file" | "dir";
}

/** A single completion candidate: the entry name plus its absolute path. */
export interface ProjectPathMatch {
  name: string;
  path: string;
}

/** Inputs to {@link usePathCompletion}. */
export interface UsePathCompletionArgs {
  // Current text of the controlled path input field.
  pathInput: string;
  // Setter invoked when a suggestion is accepted.
  setPathInput: (value: string) => void;
  // User's home directory, used as the listing root when the input
  // contains no slash yet; null when unknown.
  homeDir: string | null;
  // Lists the entries of an absolute directory path.
  listDirectoryAbsolute: (path: string) => Promise<FileEntry[]>;
  // Delay before recomputing suggestions after input changes.
  // NOTE(review): default appears to be 60ms (see usePathCompletion).
  debounceMs?: number;
}

/** State and actions exposed by {@link usePathCompletion}. */
export interface UsePathCompletionResult {
  // Candidate directories for the current input (sorted, capped).
  matchList: ProjectPathMatch[];
  // Index into matchList of the highlighted candidate.
  selectedMatch: number;
  // Inline ghost-text completion: the part of the selected match's path
  // that extends beyond the current input ("" when not applicable).
  suggestionTail: string;
  // Human-readable error from the last listing attempt, or null.
  completionError: string | null;
  // The path segment currently being typed (text after the last "/").
  currentPartial: string;
  setSelectedMatch: (index: number) => void;
  // Accepts the currently highlighted candidate.
  acceptSelectedMatch: () => void;
  // Accepts an explicit candidate path.
  acceptMatch: (path: string) => void;
  // Dismisses the suggestion popup and clears all completion state.
  closeSuggestions: () => void;
}
|
||||||
|
|
||||||
|
export function isFuzzyMatch(candidate: string, query: string) {
|
||||||
|
if (!query) return true;
|
||||||
|
const lowerCandidate = candidate.toLowerCase();
|
||||||
|
const lowerQuery = query.toLowerCase();
|
||||||
|
let idx = 0;
|
||||||
|
for (const char of lowerQuery) {
|
||||||
|
idx = lowerCandidate.indexOf(char, idx);
|
||||||
|
if (idx === -1) return false;
|
||||||
|
idx += 1;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function getCurrentPartial(input: string) {
|
||||||
|
const trimmed = input.trim();
|
||||||
|
if (!trimmed) return "";
|
||||||
|
if (trimmed.endsWith("/")) return "";
|
||||||
|
const idx = trimmed.lastIndexOf("/");
|
||||||
|
return idx >= 0 ? trimmed.slice(idx + 1) : trimmed;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Debounced directory-path autocompletion for a controlled text input.
 *
 * On each input change (after `debounceMs`), splits the input into the
 * directory being browsed and the partial segment being typed, lists the
 * directory via `listDirectoryAbsolute`, fuzzy-filters subdirectories,
 * and exposes up to 8 sorted candidates plus an inline "ghost text" tail.
 * When the input has no slash, `homeDir` is used as the listing root; if
 * `homeDir` is also null, no suggestions are produced.
 */
export function usePathCompletion({
  pathInput,
  setPathInput,
  homeDir,
  listDirectoryAbsolute,
  debounceMs = 60,
}: UsePathCompletionArgs): UsePathCompletionResult {
  const [matchList, setMatchList] = React.useState<ProjectPathMatch[]>([]);
  const [selectedMatch, setSelectedMatch] = React.useState(0);
  const [suggestionTail, setSuggestionTail] = React.useState("");
  const [completionError, setCompletionError] = React.useState<string | null>(
    null,
  );

  React.useEffect(() => {
    // Stale-response guard: flipped to false by the cleanup so results of
    // an in-flight listing for an old input are discarded.
    let active = true;

    async function computeSuggestion() {
      // Reset all completion state before recomputing for the new input.
      setCompletionError(null);
      setSuggestionTail("");
      setMatchList([]);
      setSelectedMatch(0);

      const trimmed = pathInput.trim();
      if (!trimmed) {
        return;
      }

      // Split into (dir, partial): a trailing slash means the whole input
      // is the directory and nothing is being partially typed.
      const endsWithSlash = trimmed.endsWith("/");
      let dir = trimmed;
      let partial = "";

      if (!endsWithSlash) {
        const idx = trimmed.lastIndexOf("/");
        if (idx >= 0) {
          dir = trimmed.slice(0, idx + 1);
          partial = trimmed.slice(idx + 1);
        } else {
          // Bare partial like "Doc" — no directory component yet.
          dir = "";
          partial = trimmed;
        }
      }

      // A bare partial is completed against the home directory; without a
      // homeDir there is nowhere to list, so bail out silently.
      if (!dir) {
        if (homeDir) {
          dir = homeDir.endsWith("/") ? homeDir : `${homeDir}/`;
        } else {
          return;
        }
      }

      // Strip trailing slashes for the listing call, except for root.
      const dirForListing = dir === "/" ? "/" : dir.replace(/\/+$/, "");
      const entries = await listDirectoryAbsolute(dirForListing);
      if (!active) return;

      // Directories only, fuzzy-filtered by the typed partial, sorted by
      // name, capped at 8 candidates.
      const matches = entries
        .filter((entry) => entry.kind === "dir")
        .filter((entry) => isFuzzyMatch(entry.name, partial))
        .sort((a, b) => a.name.localeCompare(b.name))
        .slice(0, 8);

      if (matches.length === 0) {
        return;
      }

      // Candidate paths always end in "/" so accepting one immediately
      // positions the user to type the next segment.
      const basePrefix = dir.endsWith("/") ? dir : `${dir}/`;
      const list = matches.map((entry) => ({
        name: entry.name,
        path: `${basePrefix}${entry.name}/`,
      }));
      setMatchList(list);
    }

    const debounceId = window.setTimeout(() => {
      computeSuggestion().catch((error) => {
        console.error(error);
        if (!active) return;
        setCompletionError(
          error instanceof Error
            ? error.message
            : "Failed to compute suggestion.",
        );
      });
    }, debounceMs);

    return () => {
      active = false;
      window.clearTimeout(debounceId);
    };
  }, [pathInput, homeDir, listDirectoryAbsolute, debounceMs]);

  // Derive the ghost-text tail from the selected candidate. The tail is
  // only shown when the candidate literally extends the trimmed input;
  // otherwise (e.g. a bare partial completed against homeDir) it's "".
  React.useEffect(() => {
    if (matchList.length === 0) {
      setSuggestionTail("");
      return;
    }
    // Clamp in case the list shrank while a stale index is selected.
    const index = Math.min(selectedMatch, matchList.length - 1);
    const next = matchList[index];
    const trimmed = pathInput.trim();
    if (next.path.startsWith(trimmed)) {
      setSuggestionTail(next.path.slice(trimmed.length));
    } else {
      setSuggestionTail("");
    }
  }, [matchList, selectedMatch, pathInput]);

  // Accept an explicit candidate path.
  const acceptMatch = React.useCallback(
    (path: string) => {
      setPathInput(path);
    },
    [setPathInput],
  );

  // Accept the highlighted candidate; no-op when the list is empty.
  const acceptSelectedMatch = React.useCallback(() => {
    const next = matchList[selectedMatch]?.path;
    if (next) {
      setPathInput(next);
    }
  }, [matchList, selectedMatch, setPathInput]);

  // Dismiss the popup and reset every piece of completion state.
  const closeSuggestions = React.useCallback(() => {
    setMatchList([]);
    setSelectedMatch(0);
    setSuggestionTail("");
    setCompletionError(null);
  }, []);

  return {
    matchList,
    selectedMatch,
    suggestionTail,
    completionError,
    currentPartial: getCurrentPartial(pathInput),
    setSelectedMatch,
    acceptSelectedMatch,
    acceptMatch,
    closeSuggestions,
  };
}
|
||||||
275
frontend/src/hooks/useChatHistory.test.ts
Normal file
275
frontend/src/hooks/useChatHistory.test.ts
Normal file
@@ -0,0 +1,275 @@
|
|||||||
|
import { act, renderHook } from "@testing-library/react";
|
||||||
|
import { afterEach, beforeEach, describe, expect, it, vi } from "vitest";
|
||||||
|
import type { Message } from "../types";
|
||||||
|
import { useChatHistory } from "./useChatHistory";
|
||||||
|
|
||||||
|
const PROJECT = "/tmp/test-project";
|
||||||
|
const STORAGE_KEY = `storykit-chat-history:${PROJECT}`;
|
||||||
|
const LIMIT_KEY = `storykit-chat-history-limit:${PROJECT}`;
|
||||||
|
|
||||||
|
const sampleMessages: Message[] = [
|
||||||
|
{ role: "user", content: "Hello" },
|
||||||
|
{ role: "assistant", content: "Hi there!" },
|
||||||
|
];
|
||||||
|
|
||||||
|
function makeMessages(count: number): Message[] {
|
||||||
|
return Array.from({ length: count }, (_, i) => ({
|
||||||
|
role: "user" as const,
|
||||||
|
content: `Message ${i + 1}`,
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Behavioral suite for useChatHistory, organized by acceptance criteria:
//   AC1 — restore from localStorage on mount (incl. corrupt payloads)
//   AC2 — persist on setMessages (array and updater forms; empty removes key)
//   AC3 — clearMessages wipes state and storage
//   AC4 — storage quota failures are non-fatal
//   AC5 — storage keys are scoped per project path
//   S179 — history pruning with a configurable limit
describe("useChatHistory", () => {
  beforeEach(() => {
    localStorage.clear();
  });

  afterEach(() => {
    localStorage.clear();
  });

  it("AC1: restores messages from localStorage on mount", () => {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.messages).toEqual(sampleMessages);
  });

  it("AC1: returns empty array when localStorage has no data", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.messages).toEqual([]);
  });

  it("AC1: returns empty array when localStorage contains invalid JSON", () => {
    localStorage.setItem(STORAGE_KEY, "not-json{{{");

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.messages).toEqual([]);
  });

  it("AC1: returns empty array when localStorage contains a non-array", () => {
    localStorage.setItem(STORAGE_KEY, JSON.stringify({ not: "array" }));

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.messages).toEqual([]);
  });

  it("AC2: saves messages to localStorage when setMessages is called with an array", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));

    act(() => {
      result.current.setMessages(sampleMessages);
    });

    const stored = JSON.parse(localStorage.getItem(STORAGE_KEY) ?? "[]");
    expect(stored).toEqual(sampleMessages);
  });

  it("AC2: saves messages to localStorage when setMessages is called with updater function", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));

    act(() => {
      result.current.setMessages(() => sampleMessages);
    });

    const stored = JSON.parse(localStorage.getItem(STORAGE_KEY) ?? "[]");
    expect(stored).toEqual(sampleMessages);
  });

  it("AC3: clearMessages removes messages from state and localStorage", () => {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.messages).toEqual(sampleMessages);

    act(() => {
      result.current.clearMessages();
    });

    expect(result.current.messages).toEqual([]);
    expect(localStorage.getItem(STORAGE_KEY)).toBeNull();
  });

  it("AC4: handles localStorage quota errors gracefully", () => {
    // Force every setItem to throw, simulating a full quota.
    const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
    const setItemSpy = vi
      .spyOn(Storage.prototype, "setItem")
      .mockImplementation(() => {
        throw new DOMException("QuotaExceededError");
      });

    const { result } = renderHook(() => useChatHistory(PROJECT));

    // Should not throw
    act(() => {
      result.current.setMessages(sampleMessages);
    });

    // State should still update even though storage failed
    expect(result.current.messages).toEqual(sampleMessages);
    expect(warnSpy).toHaveBeenCalledWith(
      "Failed to persist chat history to localStorage:",
      expect.any(DOMException),
    );

    warnSpy.mockRestore();
    setItemSpy.mockRestore();
  });

  it("AC5: scopes storage key to project path", () => {
    const projectA = "/projects/a";
    const projectB = "/projects/b";
    const keyA = `storykit-chat-history:${projectA}`;
    const keyB = `storykit-chat-history:${projectB}`;

    const messagesA: Message[] = [{ role: "user", content: "From project A" }];
    const messagesB: Message[] = [{ role: "user", content: "From project B" }];

    localStorage.setItem(keyA, JSON.stringify(messagesA));
    localStorage.setItem(keyB, JSON.stringify(messagesB));

    const { result: resultA } = renderHook(() => useChatHistory(projectA));
    const { result: resultB } = renderHook(() => useChatHistory(projectB));

    expect(resultA.current.messages).toEqual(messagesA);
    expect(resultB.current.messages).toEqual(messagesB);
  });

  it("AC2: removes localStorage key when messages are set to empty array", () => {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));

    const { result } = renderHook(() => useChatHistory(PROJECT));

    act(() => {
      result.current.setMessages([]);
    });

    expect(localStorage.getItem(STORAGE_KEY)).toBeNull();
  });

  // --- Story 179: Chat history pruning tests ---

  it("S179: default limit of 200 is applied when saving to localStorage", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.maxMessages).toBe(200);
  });

  it("S179: messages are pruned from the front when exceeding the limit", () => {
    // Set a small limit to make testing practical
    localStorage.setItem(LIMIT_KEY, "3");

    const { result } = renderHook(() => useChatHistory(PROJECT));
    const fiveMessages = makeMessages(5);

    act(() => {
      result.current.setMessages(fiveMessages);
    });

    // localStorage should contain only the last 3 messages
    const stored: Message[] = JSON.parse(
      localStorage.getItem(STORAGE_KEY) ?? "[]",
    );
    expect(stored).toEqual(fiveMessages.slice(-3));
    expect(stored).toHaveLength(3);
    expect(stored[0].content).toBe("Message 3");
  });

  it("S179: messages under the limit are not pruned", () => {
    localStorage.setItem(LIMIT_KEY, "10");

    const { result } = renderHook(() => useChatHistory(PROJECT));
    const threeMessages = makeMessages(3);

    act(() => {
      result.current.setMessages(threeMessages);
    });

    const stored: Message[] = JSON.parse(
      localStorage.getItem(STORAGE_KEY) ?? "[]",
    );
    expect(stored).toEqual(threeMessages);
    expect(stored).toHaveLength(3);
  });

  it("S179: limit is configurable via localStorage key", () => {
    localStorage.setItem(LIMIT_KEY, "5");

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.maxMessages).toBe(5);
  });

  it("S179: setMaxMessages updates the limit and persists it", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));

    act(() => {
      result.current.setMaxMessages(50);
    });

    expect(result.current.maxMessages).toBe(50);
    expect(localStorage.getItem(LIMIT_KEY)).toBe("50");
  });

  it("S179: a limit of 0 means unlimited (no pruning)", () => {
    localStorage.setItem(LIMIT_KEY, "0");

    const { result } = renderHook(() => useChatHistory(PROJECT));
    const manyMessages = makeMessages(500);

    act(() => {
      result.current.setMessages(manyMessages);
    });

    const stored: Message[] = JSON.parse(
      localStorage.getItem(STORAGE_KEY) ?? "[]",
    );
    expect(stored).toHaveLength(500);
    expect(stored).toEqual(manyMessages);
  });

  it("S179: changing the limit re-prunes messages on next save", () => {
    const { result } = renderHook(() => useChatHistory(PROJECT));
    const tenMessages = makeMessages(10);

    act(() => {
      result.current.setMessages(tenMessages);
    });

    // All 10 saved (default limit 200 > 10)
    let stored: Message[] = JSON.parse(
      localStorage.getItem(STORAGE_KEY) ?? "[]",
    );
    expect(stored).toHaveLength(10);

    // Now lower the limit — the effect re-runs and prunes
    act(() => {
      result.current.setMaxMessages(3);
    });

    stored = JSON.parse(localStorage.getItem(STORAGE_KEY) ?? "[]");
    expect(stored).toHaveLength(3);
    expect(stored[0].content).toBe("Message 8");
  });

  it("S179: invalid limit in localStorage falls back to default", () => {
    localStorage.setItem(LIMIT_KEY, "not-a-number");

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.maxMessages).toBe(200);
  });

  it("S179: negative limit in localStorage falls back to default", () => {
    localStorage.setItem(LIMIT_KEY, "-5");

    const { result } = renderHook(() => useChatHistory(PROJECT));

    expect(result.current.maxMessages).toBe(200);
  });
});
|
||||||
117
frontend/src/hooks/useChatHistory.ts
Normal file
117
frontend/src/hooks/useChatHistory.ts
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
import { useCallback, useEffect, useRef, useState } from "react";
|
||||||
|
import type { Message } from "../types";
|
||||||
|
|
||||||
|
const STORAGE_KEY_PREFIX = "storykit-chat-history:";
|
||||||
|
const LIMIT_KEY_PREFIX = "storykit-chat-history-limit:";
|
||||||
|
const DEFAULT_LIMIT = 200;
|
||||||
|
|
||||||
|
function storageKey(projectPath: string): string {
|
||||||
|
return `${STORAGE_KEY_PREFIX}${projectPath}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function limitKey(projectPath: string): string {
|
||||||
|
return `${LIMIT_KEY_PREFIX}${projectPath}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function loadLimit(projectPath: string): number {
|
||||||
|
try {
|
||||||
|
const raw = localStorage.getItem(limitKey(projectPath));
|
||||||
|
if (raw === null) return DEFAULT_LIMIT;
|
||||||
|
const parsed = Number(raw);
|
||||||
|
if (!Number.isFinite(parsed) || parsed < 0) return DEFAULT_LIMIT;
|
||||||
|
return Math.floor(parsed);
|
||||||
|
} catch {
|
||||||
|
return DEFAULT_LIMIT;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function saveLimit(projectPath: string, limit: number): void {
|
||||||
|
try {
|
||||||
|
localStorage.setItem(limitKey(projectPath), String(limit));
|
||||||
|
} catch {
|
||||||
|
// Ignore — quota or security errors.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function loadMessages(projectPath: string): Message[] {
|
||||||
|
try {
|
||||||
|
const raw = localStorage.getItem(storageKey(projectPath));
|
||||||
|
if (!raw) return [];
|
||||||
|
const parsed: unknown = JSON.parse(raw);
|
||||||
|
if (!Array.isArray(parsed)) return [];
|
||||||
|
return parsed as Message[];
|
||||||
|
} catch {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function pruneMessages(messages: Message[], limit: number): Message[] {
|
||||||
|
if (limit === 0 || messages.length <= limit) return messages;
|
||||||
|
return messages.slice(-limit);
|
||||||
|
}
|
||||||
|
|
||||||
|
function saveMessages(
|
||||||
|
projectPath: string,
|
||||||
|
messages: Message[],
|
||||||
|
limit: number,
|
||||||
|
): void {
|
||||||
|
try {
|
||||||
|
const pruned = pruneMessages(messages, limit);
|
||||||
|
if (pruned.length === 0) {
|
||||||
|
localStorage.removeItem(storageKey(projectPath));
|
||||||
|
} else {
|
||||||
|
localStorage.setItem(storageKey(projectPath), JSON.stringify(pruned));
|
||||||
|
}
|
||||||
|
} catch (e) {
|
||||||
|
console.warn("Failed to persist chat history to localStorage:", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Per-project chat history backed by localStorage.
 *
 * State is hydrated synchronously from storage on mount (lazy useState
 * initializers) and re-persisted — pruned to `maxMessages` — whenever the
 * messages or the limit change. A limit of 0 disables pruning; see
 * pruneMessages. Returns { messages, setMessages, clearMessages,
 * maxMessages, setMaxMessages }.
 *
 * NOTE(review): the initializers only run on first mount, so changing
 * `projectPath` on an already-mounted component does not re-hydrate state
 * — it only redirects where subsequent saves go (via the ref below).
 */
export function useChatHistory(projectPath: string) {
  const [messages, setMessagesState] = useState<Message[]>(() =>
    loadMessages(projectPath),
  );
  const [maxMessages, setMaxMessagesState] = useState<number>(() =>
    loadLimit(projectPath),
  );
  const projectPathRef = useRef(projectPath);

  // Keep the ref in sync so the effect closure always has the latest path.
  projectPathRef.current = projectPath;

  // Persist whenever messages or limit change.
  useEffect(() => {
    saveMessages(projectPathRef.current, messages, maxMessages);
  }, [messages, maxMessages]);

  // Stable setter supporting both the array and updater-function forms of
  // React's setState; persistence happens in the effect above.
  const setMessages = useCallback(
    (update: Message[] | ((prev: Message[]) => Message[])) => {
      setMessagesState(update);
    },
    [],
  );

  // Updates the pruning limit and persists it immediately (the message
  // array itself is re-pruned by the persistence effect).
  const setMaxMessages = useCallback((limit: number) => {
    setMaxMessagesState(limit);
    saveLimit(projectPathRef.current, limit);
  }, []);

  const clearMessages = useCallback(() => {
    setMessagesState([]);
    // Eagerly remove from storage so clearSession doesn't depend on the
    // effect firing before the component unmounts or re-renders.
    try {
      localStorage.removeItem(storageKey(projectPathRef.current));
    } catch {
      // Ignore — quota or security errors.
    }
  }, []);

  return {
    messages,
    setMessages,
    clearMessages,
    maxMessages,
    setMaxMessages,
  } as const;
}
|
||||||
9
frontend/src/main.tsx
Normal file
9
frontend/src/main.tsx
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
import * as React from "react";
|
||||||
|
import ReactDOM from "react-dom/client";
|
||||||
|
import App from "./App";
|
||||||
|
|
||||||
|
// Application entry point: mounts <App /> into the page's #root element.
// The `as HTMLElement` cast assumes #root exists in index.html; if it is
// missing, createRoot would receive null and throw at startup.
// StrictMode enables extra development-only checks (double-invoked
// renders/effects) and has no effect in production builds.
ReactDOM.createRoot(document.getElementById("root") as HTMLElement).render(
  <React.StrictMode>
    <App />
  </React.StrictMode>,
);
|
||||||
20
frontend/src/setupTests.ts
Normal file
20
frontend/src/setupTests.ts
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
import "@testing-library/jest-dom";
|
||||||
|
import { beforeEach, vi } from "vitest";
|
||||||
|
|
||||||
|
// Provide a default fetch mock so components that call API endpoints on mount
|
||||||
|
// don't throw URL-parse errors in the jsdom test environment. Tests that need
|
||||||
|
// specific responses should mock the relevant `api.*` method as usual.
|
||||||
|
// Re-stubbed before every test so per-test fetch mocks never leak into
// the next test.
beforeEach(() => {
  vi.stubGlobal(
    "fetch",
    vi.fn((input: string | URL | Request) => {
      // Normalize all three fetch input forms to a plain URL string.
      const url = typeof input === "string" ? input : input.toString();
      // Endpoints that return arrays need [] not {} to avoid "not iterable" errors.
      const arrayEndpoints = ["/agents", "/agents/config"];
      const body = arrayEndpoints.some((ep) => url.endsWith(ep))
        ? JSON.stringify([])
        : JSON.stringify({});
      // Always succeed with 200; tests needing other statuses or payloads
      // override the relevant api.* method themselves.
      return Promise.resolve(new Response(body, { status: 200 }));
    }),
  );
});
|
||||||
80
frontend/src/types.ts
Normal file
80
frontend/src/types.ts
Normal file
@@ -0,0 +1,80 @@
|
|||||||
|
/** Chat participant role (OpenAI-style message schema). */
export type Role = "system" | "user" | "assistant" | "tool";

/** A tool invocation requested by the assistant within a chat turn. */
export interface ToolCall {
  id?: string;
  type: string;
  function: {
    name: string;
    arguments: string; // JSON-encoded arguments, exactly as emitted by the model
  };
}

/** One chat message exchanged with the backend. */
export interface Message {
  role: Role;
  content: string;
  tool_calls?: ToolCall[];
  tool_call_id?: string; // links a role:"tool" reply back to its originating call
}

/** LLM provider/session settings sent with each chat request. */
export interface ProviderConfig {
  provider: string;
  model: string;
  base_url?: string;
  enable_tools?: boolean;
  session_id?: string;
}

/** Directory-listing entry returned by the file-browsing API. */
export interface FileEntry {
  name: string;
  kind: "file" | "dir";
}

/** One file hit from a project-wide search. */
export interface SearchResult {
  path: string;
  matches: number;
}

/** Captured output of a shell command run by the backend. */
export interface CommandOutput {
  stdout: string;
  stderr: string;
  exit_code: number;
}

/** Client → server WebSocket messages. */
export type WsRequest =
  | {
      type: "chat";
      messages: Message[];
      config: ProviderConfig;
    }
  | {
      type: "cancel";
    }
  | {
      type: "permission_response";
      request_id: string;
      approved: boolean;
      always_allow: boolean;
    };

/** Server → client WebSocket messages. */
export type WsResponse =
  | { type: "token"; content: string }
  | { type: "update"; messages: Message[] }
  | { type: "session_id"; session_id: string }
  | { type: "error"; message: string }
  | {
      type: "permission_request";
      request_id: string;
      tool_name: string;
      tool_input: Record<string, unknown>;
    };

// Re-export API client types for convenience
// NOTE(review): these alias the local types declared above under Api* names, so
// callers can use either spelling interchangeably.
export type {
  Message as ApiMessage,
  ProviderConfig as ApiProviderConfig,
  FileEntry as ApiFileEntry,
  SearchResult as ApiSearchResult,
  CommandOutput as ApiCommandOutput,
  WsRequest as ApiWsRequest,
  WsResponse as ApiWsResponse,
};
|
||||||
14
frontend/src/vite-env.d.ts
vendored
Normal file
14
frontend/src/vite-env.d.ts
vendored
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
/// <reference types="vite/client" />

// Compile-time constants injected through Vite's `define` option.
declare global {
  // Build timestamp string (see the `define` blocks in vite.config.ts and
  // vitest.config.ts, which supply the value at build/test time).
  const __BUILD_TIME__: string;
}

// Augment React's input props so directory-picker attributes
// (<input webkitdirectory directory>) type-check.
declare module "react" {
  interface InputHTMLAttributes<T> {
    webkitdirectory?: string;
    directory?: string;
  }
}

export {};
|
||||||
16
frontend/tests/e2e/smoke.spec.ts
Normal file
16
frontend/tests/e2e/smoke.spec.ts
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
import { expect, test } from "@playwright/test";

// Minimal boot check: the app serves and renders its top-level container.
test.describe("App boot smoke test", () => {
  test("renders the app without errors", async ({ page }) => {
    await page.goto("/");

    // The app should render either the project selection screen or the
    // workspace chat view, depending on whether a project is already open.
    // We intentionally do NOT call DELETE /api/project here because that
    // would nuke the live server's project_root state and break any
    // background agents that depend on it.
    //
    // Just verify the page loads and has a visible <main> container.
    await expect(page.locator("main.container")).toBeVisible();
  });
});
|
||||||
24
frontend/tsconfig.json
Normal file
24
frontend/tsconfig.json
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
{
|
||||||
|
"compilerOptions": {
|
||||||
|
"target": "ES2020",
|
||||||
|
"useDefineForClassFields": true,
|
||||||
|
"lib": ["ES2020", "DOM", "DOM.Iterable"],
|
||||||
|
"module": "ESNext",
|
||||||
|
"skipLibCheck": true,
|
||||||
|
|
||||||
|
/* Bundler mode */
|
||||||
|
"moduleResolution": "bundler",
|
||||||
|
"allowImportingTsExtensions": true,
|
||||||
|
"resolveJsonModule": true,
|
||||||
|
"isolatedModules": true,
|
||||||
|
"noEmit": true,
|
||||||
|
"jsx": "react-jsx",
|
||||||
|
|
||||||
|
/* Linting */
|
||||||
|
"strict": true,
|
||||||
|
"noUnusedLocals": true,
|
||||||
|
"noUnusedParameters": true,
|
||||||
|
"noFallthroughCasesInSwitch": true
|
||||||
|
},
|
||||||
|
"include": ["src"]
|
||||||
|
}
|
||||||
51
frontend/vite.config.ts
Normal file
51
frontend/vite.config.ts
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
import react from "@vitejs/plugin-react";
import { defineConfig } from "vite";

// https://vite.dev/config/
export default defineConfig(() => {
  // Backend port is configurable; the dev-server port is derived from it so
  // multiple instances can run side by side without colliding.
  const backendPort = Number(process.env.STORKIT_PORT || "3001");
  return {
    plugins: [react()],
    define: {
      // Compile-time constants (declared for TypeScript in vite-env.d.ts).
      __STORKIT_PORT__: JSON.stringify(String(backendPort)),
      __BUILD_TIME__: JSON.stringify(new Date().toISOString()),
    },
    server: {
      // Fixed offset pairs each frontend dev server 1:1 with its backend.
      port: backendPort + 2172,
      proxy: {
        "/api": {
          target: `http://127.0.0.1:${String(backendPort)}`,
          timeout: 120000,
          configure: (proxy) => {
            proxy.on("error", (_err) => {
              // Swallow proxy errors (e.g. ECONNREFUSED during backend restart)
              // so the vite dev server doesn't crash.
            });
          },
        },
        "/agents": {
          target: `http://127.0.0.1:${String(backendPort)}`,
          timeout: 120000,
          configure: (proxy) => {
            // Same rationale as /api: ignore transient proxy errors.
            proxy.on("error", (_err) => {});
          },
        },
      },
      watch: {
        // Don't churn the dev server on backend / tooling file changes.
        ignored: [
          "**/.story_kit/**",
          "**/target/**",
          "**/.git/**",
          "**/server/**",
          "**/Cargo.*",
          "**/vendor/**",
          "**/node_modules/**",
        ],
      },
    },
    build: {
      outDir: "dist",
      emptyOutDir: true,
    },
  };
});
|
||||||
27
frontend/vitest.config.ts
Normal file
27
frontend/vitest.config.ts
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
import react from "@vitejs/plugin-react";
import { defineConfig } from "vitest/config";

export default defineConfig({
  plugins: [react()],
  define: {
    // Fixed build-time constant so test output is deterministic.
    __BUILD_TIME__: JSON.stringify("2026-01-01T00:00:00.000Z"),
  },
  test: {
    environment: "jsdom",
    environmentOptions: {
      jsdom: {
        // Gives window.location a real origin so relative fetch URLs parse.
        url: "http://localhost:3000",
      },
    },
    globals: true,
    testTimeout: 10_000,
    setupFiles: ["./src/setupTests.ts"],
    css: true,
    // Playwright e2e specs run separately; keep them out of vitest.
    exclude: ["tests/e2e/**", "node_modules/**"],
    coverage: {
      provider: "v8",
      reporter: ["text", "json-summary"],
      reportsDirectory: "./coverage",
    },
  },
});
|
||||||
6
package-lock.json
generated
Normal file
6
package-lock.json
generated
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
{
|
||||||
|
"name": "workspace",
|
||||||
|
"lockfileVersion": 3,
|
||||||
|
"requires": true,
|
||||||
|
"packages": {}
|
||||||
|
}
|
||||||
238
script/release
Executable file
238
script/release
Executable file
@@ -0,0 +1,238 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -euo pipefail
|
||||||
|
|
||||||
|
# ── Configuration ──────────────────────────────────────────────
|
||||||
|
GITEA_URL="https://code.crashlabs.io"
|
||||||
|
REPO="dave/storkit"
|
||||||
|
BINARY_NAME="storkit"
|
||||||
|
|
||||||
|
# ── Load .env if present ───────────────────────────────────────
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")/.." && pwd)"
|
||||||
|
if [ -f "${SCRIPT_DIR}/.env" ]; then
|
||||||
|
set -a
|
||||||
|
source "${SCRIPT_DIR}/.env"
|
||||||
|
set +a
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Preflight ──────────────────────────────────────────────────
|
||||||
|
if [ -z "${GITEA_TOKEN:-}" ]; then
|
||||||
|
echo "Error: GITEA_TOKEN is not set."
|
||||||
|
echo "Create a token at ${GITEA_URL}/user/settings/applications"
|
||||||
|
echo "Then add to .env: GITEA_TOKEN=your_token"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
VERSION="${1:-}"
|
||||||
|
if [ -z "$VERSION" ]; then
|
||||||
|
echo "Usage: script/release <version>"
|
||||||
|
echo "Example: script/release 0.2.0"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
TAG="v${VERSION}"
|
||||||
|
|
||||||
|
if git rev-parse "$TAG" >/dev/null 2>&1; then
|
||||||
|
echo "Error: Tag ${TAG} already exists."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Bump version in Cargo.toml ────────────────────────────────
|
||||||
|
CARGO_TOML="${SCRIPT_DIR}/server/Cargo.toml"
|
||||||
|
if ! grep -q "^version = " "$CARGO_TOML"; then
|
||||||
|
echo "Error: Could not find version field in ${CARGO_TOML}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
sed -i '' "s/^version = \".*\"/version = \"${VERSION}\"/" "$CARGO_TOML"
|
||||||
|
echo "==> Bumped ${CARGO_TOML} to ${VERSION}"
|
||||||
|
|
||||||
|
PACKAGE_JSON="${SCRIPT_DIR}/frontend/package.json"
|
||||||
|
sed -i '' "s/\"version\": \".*\"/\"version\": \"${VERSION}\"/" "$PACKAGE_JSON"
|
||||||
|
echo "==> Bumped ${PACKAGE_JSON} to ${VERSION}"
|
||||||
|
|
||||||
|
# Regenerate lock files so they stay in sync with the version bump.
|
||||||
|
CARGO_LOCK="${SCRIPT_DIR}/Cargo.lock"
|
||||||
|
(cd "${SCRIPT_DIR}/server" && cargo generate-lockfile)
|
||||||
|
echo "==> Regenerated Cargo.lock"
|
||||||
|
|
||||||
|
PACKAGE_LOCK="${SCRIPT_DIR}/frontend/package-lock.json"
|
||||||
|
(cd "${SCRIPT_DIR}/frontend" && npm install --package-lock-only --ignore-scripts --silent 2>/dev/null)
|
||||||
|
echo "==> Regenerated package-lock.json"
|
||||||
|
|
||||||
|
git add "$CARGO_TOML" "$CARGO_LOCK" "$PACKAGE_JSON" "$PACKAGE_LOCK"
|
||||||
|
git commit -m "Bump version to ${VERSION}"
|
||||||
|
|
||||||
|
if ! command -v cross >/dev/null 2>&1; then
|
||||||
|
echo "Error: 'cross' is not installed. Run: cargo install cross"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! docker info >/dev/null 2>&1; then
|
||||||
|
echo "Error: Docker is not running. Start Docker Desktop first."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "==> Releasing ${TAG}"
|
||||||
|
|
||||||
|
# ── Build ──────────────────────────────────────────────────────
|
||||||
|
echo "==> Building macOS (native)..."
|
||||||
|
cargo build --release
|
||||||
|
|
||||||
|
echo "==> Building Linux (static musl via cross)..."
|
||||||
|
cross build --release --target x86_64-unknown-linux-musl
|
||||||
|
|
||||||
|
# ── Package ────────────────────────────────────────────────────
|
||||||
|
DIST="target/dist"
|
||||||
|
rm -rf "$DIST"
|
||||||
|
mkdir -p "$DIST"
|
||||||
|
|
||||||
|
cp "target/release/${BINARY_NAME}" "${DIST}/${BINARY_NAME}-macos-arm64"
|
||||||
|
cp "target/x86_64-unknown-linux-musl/release/${BINARY_NAME}" "${DIST}/${BINARY_NAME}-linux-amd64"
|
||||||
|
chmod +x "${DIST}"/*
|
||||||
|
|
||||||
|
echo "==> Binaries:"
|
||||||
|
ls -lh "${DIST}"/
|
||||||
|
|
||||||
|
# ── Changelog ──────────────────────────────────────────────────
|
||||||
|
echo "==> Generating changelog..."
|
||||||
|
PREV_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
|
||||||
|
if [ -n "$PREV_TAG" ]; then
|
||||||
|
LOG_RANGE="${PREV_TAG}..HEAD"
|
||||||
|
RANGE="${PREV_TAG}...${TAG}"
|
||||||
|
else
|
||||||
|
LOG_RANGE=""
|
||||||
|
RANGE="initial...${TAG}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Extract completed stories/bugs/refactors from merge commits.
|
||||||
|
# Matches both the current "storkit:" prefix and the legacy "story-kit:" prefix.
|
||||||
|
# Deduplicate (a story may have been merged more than once after reverts).
|
||||||
|
MERGE_RE="^(storkit|story-kit): merge "
|
||||||
|
if [ -n "$LOG_RANGE" ]; then
|
||||||
|
MERGED_RAW=$(git log "$LOG_RANGE" --pretty=format:"%s" --no-merges \
|
||||||
|
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u)
|
||||||
|
else
|
||||||
|
MERGED_RAW=$(git log --pretty=format:"%s" --no-merges \
|
||||||
|
| grep -E "$MERGE_RE" | sed -E "s/$MERGE_RE//" | sort -u)
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Categorise merged work items and format names.
|
||||||
|
FEATURES=""
|
||||||
|
FIXES=""
|
||||||
|
REFACTORS=""
|
||||||
|
while IFS= read -r item; do
|
||||||
|
[ -z "$item" ] && continue
|
||||||
|
# Strip the numeric prefix and type to get the human name.
|
||||||
|
name=$(echo "$item" | sed -E 's/^[0-9]+_(story|bug|refactor|spike)_//' | tr '_' ' ')
|
||||||
|
# Capitalise first letter.
|
||||||
|
name="$(echo "${name:0:1}" | tr '[:lower:]' '[:upper:]')${name:1}"
|
||||||
|
case "$item" in
|
||||||
|
*_bug_*) FIXES="${FIXES}- ${name}\n" ;;
|
||||||
|
*_refactor_*) REFACTORS="${REFACTORS}- ${name}\n" ;;
|
||||||
|
*) FEATURES="${FEATURES}- ${name}\n" ;;
|
||||||
|
esac
|
||||||
|
done <<< "$MERGED_RAW"
|
||||||
|
|
||||||
|
# Collect non-automation manual commits (direct fixes, version bumps, etc).
|
||||||
|
if [ -n "$LOG_RANGE" ]; then
|
||||||
|
MANUAL=$(git log "$LOG_RANGE" --pretty=format:"%s" --no-merges \
|
||||||
|
| grep -Ev "^(storkit|story-kit): " \
|
||||||
|
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
||||||
|
| grep -v "^Bump version" \
|
||||||
|
| sed 's/^/- /')
|
||||||
|
else
|
||||||
|
MANUAL=$(git log --pretty=format:"%s" --no-merges \
|
||||||
|
| grep -Ev "^(storkit|story-kit): " \
|
||||||
|
| grep -Ev "^Revert \"(storkit|story-kit): " \
|
||||||
|
| grep -v "^Bump version" \
|
||||||
|
| sed 's/^/- /')
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Assemble the release body.
|
||||||
|
RELEASE_BODY="## What's Changed"
|
||||||
|
|
||||||
|
if [ -n "$FEATURES" ]; then
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
### Features
|
||||||
|
$(echo -e "$FEATURES")"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$FIXES" ]; then
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
### Bug Fixes
|
||||||
|
$(echo -e "$FIXES")"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$REFACTORS" ]; then
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
### Refactors
|
||||||
|
$(echo -e "$REFACTORS")"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -n "$MANUAL" ]; then
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
### Other Changes
|
||||||
|
${MANUAL}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ -z "$FEATURES" ] && [ -z "$FIXES" ] && [ -z "$REFACTORS" ] && [ -z "$MANUAL" ]; then
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
- No changes since last release"
|
||||||
|
fi
|
||||||
|
|
||||||
|
RELEASE_BODY="${RELEASE_BODY}
|
||||||
|
|
||||||
|
**Full diff:** ${GITEA_URL}/${REPO}/compare/${RANGE}"
|
||||||
|
|
||||||
|
echo "$RELEASE_BODY"
|
||||||
|
|
||||||
|
# ── Tag & Push ─────────────────────────────────────────────────
|
||||||
|
echo "==> Tagging ${TAG}..."
|
||||||
|
git tag -a "$TAG" -m "Release ${TAG}"
|
||||||
|
git push origin "$TAG"
|
||||||
|
|
||||||
|
# ── Create Gitea Release ──────────────────────────────────────
|
||||||
|
echo "==> Creating release on Gitea..."
|
||||||
|
RELEASE_JSON_FILE=$(mktemp)
|
||||||
|
trap "rm -f '$RELEASE_JSON_FILE'" EXIT
|
||||||
|
python3 -c "
|
||||||
|
import json, sys
|
||||||
|
with open(sys.argv[3], 'w') as f:
|
||||||
|
json.dump({
|
||||||
|
'tag_name': sys.argv[1],
|
||||||
|
'name': sys.argv[1],
|
||||||
|
'body': sys.argv[2]
|
||||||
|
}, f)
|
||||||
|
" "$TAG" "$RELEASE_BODY" "$RELEASE_JSON_FILE"
|
||||||
|
|
||||||
|
# Create the release via the Gitea API.
#
# The curl exit status is tested directly in the `if` condition: under
# `set -euo pipefail`, a failing `VAR=$(cmd)` assignment on its own line would
# abort the script immediately, so a follow-up `[ $? -ne 0 ]` check could
# never run and the error report below was dead code.
if ! RELEASE_RESPONSE=$(curl -s --fail-with-body -X POST \
  -H "Authorization: token ${GITEA_TOKEN}" \
  -H "Content-Type: application/json" \
  "${GITEA_URL}/api/v1/repos/${REPO}/releases" \
  -d "@${RELEASE_JSON_FILE}"); then
  echo "Error: Failed to create Gitea release."
  echo "Response: ${RELEASE_RESPONSE}"
  exit 1
fi

RELEASE_ID=$(echo "$RELEASE_RESPONSE" | python3 -c "import sys,json; print(json.load(sys.stdin)['id'])")
|
||||||
|
|
||||||
|
# ── Upload Binaries ───────────────────────────────────────────
# Attach each built binary to the release. The basename is used both in the
# progress message and as the multipart asset filename.
# (Previously these used "$(unknown)" — a command substitution of a
# nonexistent command that would fail at runtime — where ${filename} was
# clearly intended.)
for file in "${DIST}"/*; do
  filename=$(basename "$file")
  echo "==> Uploading ${filename}..."
  curl -sf -X POST \
    -H "Authorization: token ${GITEA_TOKEN}" \
    -F "attachment=@${file};filename=${filename}" \
    "${GITEA_URL}/api/v1/repos/${REPO}/releases/${RELEASE_ID}/assets" > /dev/null
done
|
||||||
|
|
||||||
|
echo ""
|
||||||
|
echo "==> Done! Release ${TAG} published:"
|
||||||
|
echo " ${GITEA_URL}/${REPO}/releases/tag/${TAG}"
|
||||||
23
script/test
Executable file
23
script/test
Executable file
@@ -0,0 +1,23 @@
|
|||||||
|
#!/usr/bin/env bash
# Run the full non-e2e test suite: Rust tests, then frontend unit tests.
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

echo "=== Running Rust tests ==="
cargo test --manifest-path "$PROJECT_ROOT/Cargo.toml"

echo "=== Running frontend unit tests ==="
if [ -d "$PROJECT_ROOT/frontend" ]; then
  cd "$PROJECT_ROOT/frontend"
  npm test
else
  echo "Skipping frontend tests (no frontend directory)"
fi

# Disabled: e2e tests cause merge pipeline hangs (no running server in the
# merge workspace → Playwright blocks indefinitely). Re-enable once a server
# can be provisioned there.
# echo "=== Running e2e tests ==="
# npm run test:e2e
||||||
120
script/test_coverage
Executable file
120
script/test_coverage
Executable file
@@ -0,0 +1,120 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
# Test coverage collection and threshold enforcement.
|
||||||
|
#
|
||||||
|
# Runs Rust tests with llvm-cov and frontend tests with vitest --coverage.
|
||||||
|
# Reports line coverage percentages for each.
|
||||||
|
#
|
||||||
|
# Threshold: reads from COVERAGE_THRESHOLD env var, or .coverage_baseline file.
|
||||||
|
# Default: 0% (any coverage passes; baseline is written on first run).
|
||||||
|
#
|
||||||
|
# Coverage can only go up: if current coverage is above the stored baseline,
|
||||||
|
# the baseline is updated automatically.
|
||||||
|
#
|
||||||
|
# Exit codes:
|
||||||
|
# 0 — all coverage at or above threshold
|
||||||
|
# 1 — coverage below threshold
|
||||||
|
|
||||||
|
set -uo pipefail
|
||||||
|
|
||||||
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
|
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||||
|
BASELINE_FILE="$PROJECT_ROOT/.coverage_baseline"
|
||||||
|
|
||||||
|
# ── Load threshold ────────────────────────────────────────────────────────────
|
||||||
|
if [ -n "${COVERAGE_THRESHOLD:-}" ]; then
|
||||||
|
THRESHOLD="$COVERAGE_THRESHOLD"
|
||||||
|
elif [ -f "$BASELINE_FILE" ]; then
|
||||||
|
THRESHOLD=$(cat "$BASELINE_FILE")
|
||||||
|
else
|
||||||
|
THRESHOLD=0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "=== Coverage threshold: ${THRESHOLD}% ==="
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
PASS=true
|
||||||
|
RUST_LINE_COV=0
|
||||||
|
FRONTEND_LINE_COV=0
|
||||||
|
|
||||||
|
# ── Rust coverage ─────────────────────────────────────────────────────────────
|
||||||
|
echo "=== Running Rust tests with coverage ==="
|
||||||
|
RUST_REPORT=""
|
||||||
|
if cargo llvm-cov --version >/dev/null 2>&1; then
|
||||||
|
RUST_REPORT=$(cargo llvm-cov \
|
||||||
|
--manifest-path "$PROJECT_ROOT/Cargo.toml" \
|
||||||
|
--summary-only \
|
||||||
|
2>&1) || true
|
||||||
|
echo "$RUST_REPORT"
|
||||||
|
|
||||||
|
# Parse the TOTAL line: columns are space-separated with % on coverage cols.
|
||||||
|
# Format: TOTAL <regions> <missed> <cover%> <funcs> <missed> <exec%> <lines> <missed> <cover%> ...
|
||||||
|
# We want field 10 (lines cover %).
|
||||||
|
RUST_RAW=$(echo "$RUST_REPORT" | awk '/^TOTAL/ { print $10 }' | tr -d '%')
|
||||||
|
if [ -n "$RUST_RAW" ]; then
|
||||||
|
RUST_LINE_COV="$RUST_RAW"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "cargo-llvm-cov not available; skipping Rust coverage"
|
||||||
|
fi
|
||||||
|
echo "Rust line coverage: ${RUST_LINE_COV}%"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# ── Frontend coverage ─────────────────────────────────────────────────────────
|
||||||
|
echo "=== Running frontend tests with coverage ==="
|
||||||
|
FRONTEND_DIR="$PROJECT_ROOT/frontend"
|
||||||
|
FRONTEND_LINE_COV=0
|
||||||
|
if [ -d "$FRONTEND_DIR" ]; then
|
||||||
|
FRONTEND_REPORT=$(cd "$FRONTEND_DIR" && npm run test:coverage 2>&1) || true
|
||||||
|
echo "$FRONTEND_REPORT"
|
||||||
|
|
||||||
|
# Parse "All files" line from vitest coverage text table.
|
||||||
|
# Format: All files | % Stmts | % Branch | % Funcs | % Lines | ...
|
||||||
|
FRONTEND_RAW=$(echo "$FRONTEND_REPORT" | awk -F'|' '/All files/ { gsub(/ /, "", $5); print $5 }' | head -1)
|
||||||
|
if [ -n "$FRONTEND_RAW" ]; then
|
||||||
|
FRONTEND_LINE_COV="$FRONTEND_RAW"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "No frontend/ directory found; skipping frontend coverage"
|
||||||
|
fi
|
||||||
|
echo "Frontend line coverage: ${FRONTEND_LINE_COV}%"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# ── Overall (average of available measurements) ───────────────────────────────
# A value of "0" is treated as "not measured" and excluded from the average.
# NOTE(review): this also excludes a genuinely measured 0% — presumably
# acceptable since "0% coverage" and "no coverage tooling" are handled the
# same way here; confirm if a true-zero measurement should ever count.
if [ "$RUST_LINE_COV" != "0" ] && [ "$FRONTEND_LINE_COV" != "0" ]; then
  OVERALL=$(awk "BEGIN { printf \"%.1f\", ($RUST_LINE_COV + $FRONTEND_LINE_COV) / 2 }")
elif [ "$RUST_LINE_COV" != "0" ]; then
  OVERALL="$RUST_LINE_COV"
elif [ "$FRONTEND_LINE_COV" != "0" ]; then
  OVERALL="$FRONTEND_LINE_COV"
else
  OVERALL=0
fi
|
||||||
|
|
||||||
|
# ── Summary ───────────────────────────────────────────────────────────────────
|
||||||
|
echo "=== Coverage Summary ==="
|
||||||
|
echo " Rust: ${RUST_LINE_COV}%"
|
||||||
|
echo " Frontend: ${FRONTEND_LINE_COV}%"
|
||||||
|
echo " Overall: ${OVERALL}%"
|
||||||
|
echo " Threshold: ${THRESHOLD}%"
|
||||||
|
echo ""
|
||||||
|
|
||||||
|
# ── Threshold check ───────────────────────────────────────────────────────────
|
||||||
|
if awk "BEGIN { exit (($OVERALL + 0) < ($THRESHOLD + 0)) ? 0 : 1 }"; then
|
||||||
|
echo "FAIL: Coverage ${OVERALL}% is below threshold ${THRESHOLD}%"
|
||||||
|
PASS=false
|
||||||
|
else
|
||||||
|
echo "PASS: Coverage ${OVERALL}% meets threshold ${THRESHOLD}%"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Update baseline when coverage improves ────────────────────────────────────
|
||||||
|
if [ "$PASS" = "true" ]; then
|
||||||
|
STORED_BASELINE="${THRESHOLD}"
|
||||||
|
if awk "BEGIN { exit (($OVERALL + 0) > ($STORED_BASELINE + 0)) ? 0 : 1 }"; then
|
||||||
|
echo "${OVERALL}" > "$BASELINE_FILE"
|
||||||
|
echo "Baseline updated: ${STORED_BASELINE}% → ${OVERALL}%"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$PASS" = "false" ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
3022
server/Cargo.lock
generated
Normal file
3022
server/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
43
server/Cargo.toml
Normal file
43
server/Cargo.toml
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
[package]
name = "storkit"
version = "0.4.1"
edition = "2024"
# Bundles the frontend into release binaries (see build.rs).
build = "build.rs"

[dependencies]
async-stream = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true, features = ["serde"] }
eventsource-stream = { workspace = true }
futures = { workspace = true }
homedir = { workspace = true }
ignore = { workspace = true }
mime_guess = { workspace = true }
notify = { workspace = true }
poem = { workspace = true, features = ["websocket"] }
poem-openapi = { workspace = true, features = ["swagger-ui"] }
portable-pty = { workspace = true }
reqwest = { workspace = true, features = ["json", "stream"] }
rust-embed = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
serde_urlencoded = { workspace = true }
serde_yaml = { workspace = true }
strip-ansi-escapes = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "sync", "process"] }
toml = { workspace = true }
uuid = { workspace = true, features = ["v4", "serde"] }
walkdir = { workspace = true }
matrix-sdk = { workspace = true }
pulldown-cmark = { workspace = true }

# Force bundled SQLite so static musl builds don't need a system libsqlite3
libsqlite3-sys = { version = "0.35.0", features = ["bundled"] }
wait-timeout = "0.2.1"

[dev-dependencies]
tempfile = { workspace = true }
tokio-tungstenite = { workspace = true }
mockito = "1"
filetime = { workspace = true }
||||||
50
server/build.rs
Normal file
50
server/build.rs
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
use std::env;
|
||||||
|
use std::path::Path;
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
/// Run an external command in `dir`, panicking if it cannot be spawned or if
/// it exits with a non-zero status. Used only at build time, where aborting
/// the build is the correct failure mode.
fn run(cmd: &str, args: &[&str], dir: &Path) {
    let outcome = Command::new(cmd).args(args).current_dir(dir).status();

    let status = match outcome {
        Ok(s) => s,
        Err(e) => panic!("Failed to run {} {:?}: {}", cmd, args, e),
    };

    if !status.success() {
        panic!("Command failed: {} {:?}", cmd, args);
    }
}
|
||||||
|
|
||||||
|
fn main() {
    // Re-run the build script whenever it or any frontend input changes.
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-env-changed=PROFILE");
    println!("cargo:rerun-if-changed=../frontend/package.json");
    println!("cargo:rerun-if-changed=../frontend/package-lock.json");
    println!("cargo:rerun-if-changed=../frontend/vite.config.ts");
    println!("cargo:rerun-if-changed=../frontend/index.html");
    println!("cargo:rerun-if-changed=../frontend/src");
    println!("cargo:rerun-if-changed=../frontend/public");

    // Only bundle the frontend for release builds; debug builds skip it
    // entirely (the frontend is served separately during development).
    let profile = env::var("PROFILE").unwrap_or_default();
    if profile != "release" {
        return;
    }

    // When cross-compiling (e.g. musl via `cross`), the Docker container
    // has no Node/npm. The release script builds macOS first, so
    // frontend/dist/ already exists. Skip the frontend build in that case.
    let target = env::var("TARGET").unwrap_or_default();
    let host = env::var("HOST").unwrap_or_default();
    if target != host {
        let dist = Path::new("../frontend/dist");
        if !dist.exists() {
            panic!("Cross-compiling but frontend/dist/ is missing. Build macOS first.");
        }
        return;
    }

    let frontend_dir = Path::new("../frontend");

    // Ensure dependencies are installed and build the frontend bundle.
    run("npm", &["install"], frontend_dir);
    run("npm", &["run", "build"], frontend_dir);
}
|
||||||
377
server/src/agent_log.rs
Normal file
377
server/src/agent_log.rs
Normal file
@@ -0,0 +1,377 @@
|
|||||||
|
use crate::agents::AgentEvent;
|
||||||
|
use chrono::Utc;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use std::fs::{self, File, OpenOptions};
|
||||||
|
use std::io::{BufRead, BufReader, Write};
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
|
||||||
|
|
||||||
|
/// A single line in the agent log file (JSONL format).
///
/// `event` is flattened into the top-level JSON object, so a serialized entry
/// looks like `{"timestamp": "...", ...event fields...}` on one line.
#[derive(Debug, Serialize, Deserialize)]
pub struct LogEntry {
    /// RFC 3339 timestamp (UTC) recorded when the entry was written.
    pub timestamp: String,
    /// The original agent event, kept as arbitrary JSON and flattened into
    /// the entry so the log format doesn't depend on the event enum's shape.
    #[serde(flatten)]
    pub event: serde_json::Value,
}
|
||||||
|
|
||||||
|
/// Writes agent events to a persistent log file (JSONL format).
///
/// Each agent session gets its own log file at:
/// `.storkit/logs/{story_id}/{agent_name}-{session_id}.log`
pub struct AgentLogWriter {
    // Append-mode handle, held open for the lifetime of the session.
    file: File,
}
|
||||||
|
|
||||||
|
impl AgentLogWriter {
|
||||||
|
/// Create a new log writer, creating the directory structure as needed.
|
||||||
|
///
|
||||||
|
/// The log file is opened in append mode so that a restart mid-session
|
||||||
|
/// won't overwrite earlier output.
|
||||||
|
pub fn new(
|
||||||
|
project_root: &Path,
|
||||||
|
story_id: &str,
|
||||||
|
agent_name: &str,
|
||||||
|
session_id: &str,
|
||||||
|
) -> Result<Self, String> {
|
||||||
|
let dir = log_dir(project_root, story_id);
|
||||||
|
fs::create_dir_all(&dir)
|
||||||
|
.map_err(|e| format!("Failed to create log directory {}: {e}", dir.display()))?;
|
||||||
|
|
||||||
|
let path = dir.join(format!("{agent_name}-{session_id}.log"));
|
||||||
|
let file = OpenOptions::new()
|
||||||
|
.create(true)
|
||||||
|
.append(true)
|
||||||
|
.open(&path)
|
||||||
|
.map_err(|e| format!("Failed to open log file {}: {e}", path.display()))?;
|
||||||
|
|
||||||
|
Ok(Self { file })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write an agent event as a JSONL line with an ISO 8601 timestamp.
|
||||||
|
pub fn write_event(&mut self, event: &AgentEvent) -> Result<(), String> {
|
||||||
|
let event_value =
|
||||||
|
serde_json::to_value(event).map_err(|e| format!("Failed to serialize event: {e}"))?;
|
||||||
|
|
||||||
|
let entry = LogEntry {
|
||||||
|
timestamp: Utc::now().to_rfc3339(),
|
||||||
|
event: event_value,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut line =
|
||||||
|
serde_json::to_string(&entry).map_err(|e| format!("Failed to serialize entry: {e}"))?;
|
||||||
|
line.push('\n');
|
||||||
|
|
||||||
|
self.file
|
||||||
|
.write_all(line.as_bytes())
|
||||||
|
.map_err(|e| format!("Failed to write log entry: {e}"))?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the per-story log directory:
/// `{project_root}/.storkit/logs/{story_id}`.
fn log_dir(project_root: &Path, story_id: &str) -> PathBuf {
    let mut dir = project_root.to_path_buf();
    dir.push(".storkit");
    dir.push("logs");
    dir.push(story_id);
    dir
}
|
||||||
|
|
||||||
|
/// Return the path to a specific log file.
|
||||||
|
pub fn log_file_path(
|
||||||
|
project_root: &Path,
|
||||||
|
story_id: &str,
|
||||||
|
agent_name: &str,
|
||||||
|
session_id: &str,
|
||||||
|
) -> PathBuf {
|
||||||
|
log_dir(project_root, story_id).join(format!("{agent_name}-{session_id}.log"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read all log entries from a log file.
|
||||||
|
pub fn read_log(path: &Path) -> Result<Vec<LogEntry>, String> {
|
||||||
|
let file =
|
||||||
|
File::open(path).map_err(|e| format!("Failed to open log file {}: {e}", path.display()))?;
|
||||||
|
let reader = BufReader::new(file);
|
||||||
|
let mut entries = Vec::new();
|
||||||
|
|
||||||
|
for line in reader.lines() {
|
||||||
|
let line = line.map_err(|e| format!("Failed to read log line: {e}"))?;
|
||||||
|
let trimmed = line.trim();
|
||||||
|
if trimmed.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let entry: LogEntry = serde_json::from_str(trimmed)
|
||||||
|
.map_err(|e| format!("Failed to parse log entry: {e}"))?;
|
||||||
|
entries.push(entry);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(entries)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find the most recent log file for a given story+agent combination.
///
/// Scans `.storkit/logs/{story_id}/` for files matching `{agent_name}-*.log`
/// and returns the one with the most recent modification time, or `None`
/// when the directory is missing or no file matches.
pub fn find_latest_log(
    project_root: &Path,
    story_id: &str,
    agent_name: &str,
) -> Option<PathBuf> {
    let dir = project_root.join(".storkit").join("logs").join(story_id);
    if !dir.is_dir() {
        return None;
    }

    let wanted_prefix = format!("{agent_name}-");
    let mut newest: Option<(PathBuf, std::time::SystemTime)> = None;

    for dirent in fs::read_dir(&dir).ok()?.flatten() {
        let candidate = dirent.path();

        // Skip anything whose name is not valid UTF-8.
        let Some(file_name) = candidate.file_name().and_then(|n| n.to_str()).map(String::from)
        else {
            continue;
        };
        if !(file_name.starts_with(&wanted_prefix) && file_name.ends_with(".log")) {
            continue;
        }

        // Files whose mtime cannot be read are ignored rather than fatal.
        let Ok(mtime) = dirent.metadata().and_then(|m| m.modified()) else {
            continue;
        };

        if newest.as_ref().is_none_or(|(_, best)| mtime > *best) {
            newest = Some((candidate, mtime));
        }
    }

    newest.map(|(path, _)| path)
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::agents::AgentEvent;
    use tempfile::tempdir;

    // Creating a writer alone must create the per-story log directory tree
    // and an empty `{agent}-{session}.log` file.
    #[test]
    fn test_log_writer_creates_directory_and_file() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        let _writer =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-abc123").unwrap();

        let expected_path = root
            .join(".storkit")
            .join("logs")
            .join("42_story_foo")
            .join("coder-1-sess-abc123.log");
        assert!(expected_path.exists(), "Log file should exist");
    }

    // Each written event becomes one JSONL line carrying an RFC3339 timestamp
    // plus the serialized event payload.
    #[test]
    fn test_log_writer_writes_jsonl_with_timestamps() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        let mut writer =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-001").unwrap();

        let event = AgentEvent::Status {
            story_id: "42_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            status: "running".to_string(),
        };
        writer.write_event(&event).unwrap();

        let event2 = AgentEvent::Output {
            story_id: "42_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            text: "Hello world".to_string(),
        };
        writer.write_event(&event2).unwrap();

        // Read the file and verify
        let path = log_file_path(root, "42_story_foo", "coder-1", "sess-001");
        let content = fs::read_to_string(&path).unwrap();
        let lines: Vec<&str> = content.lines().collect();
        assert_eq!(lines.len(), 2, "Should have 2 log lines");

        // Parse each line as valid JSON with a timestamp
        for line in &lines {
            let entry: LogEntry = serde_json::from_str(line).unwrap();
            assert!(!entry.timestamp.is_empty(), "Timestamp should be present");
            // Verify it's a valid ISO 8601 timestamp
            chrono::DateTime::parse_from_rfc3339(&entry.timestamp)
                .expect("Timestamp should be valid RFC3339");
        }

        // Verify the first entry is a status event
        let entry1: LogEntry = serde_json::from_str(lines[0]).unwrap();
        assert_eq!(entry1.event["type"], "status");
        assert_eq!(entry1.event["status"], "running");

        // Verify the second entry is an output event
        let entry2: LogEntry = serde_json::from_str(lines[1]).unwrap();
        assert_eq!(entry2.event["type"], "output");
        assert_eq!(entry2.event["text"], "Hello world");
    }

    // Write a mixed batch of event kinds and confirm read_log() returns the
    // same count, in order, with the event type discriminants intact.
    #[test]
    fn test_read_log_parses_written_events() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        let mut writer =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-002").unwrap();

        let events = vec![
            AgentEvent::Status {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                status: "running".to_string(),
            },
            AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "Processing...".to_string(),
            },
            AgentEvent::AgentJson {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                data: serde_json::json!({"type": "tool_use", "name": "read_file"}),
            },
            AgentEvent::Done {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                session_id: Some("sess-002".to_string()),
            },
        ];

        for event in &events {
            writer.write_event(event).unwrap();
        }

        let path = log_file_path(root, "42_story_foo", "coder-1", "sess-002");
        let entries = read_log(&path).unwrap();
        assert_eq!(entries.len(), 4, "Should read back all 4 events");

        // Verify event types round-trip correctly
        assert_eq!(entries[0].event["type"], "status");
        assert_eq!(entries[1].event["type"], "output");
        assert_eq!(entries[2].event["type"], "agent_json");
        assert_eq!(entries[3].event["type"], "done");
    }

    // Two writers for the same story+agent but different session ids must not
    // share a file; each file holds only its own session's events.
    #[test]
    fn test_separate_sessions_produce_separate_files() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        let mut writer1 =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-aaa").unwrap();
        let mut writer2 =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-bbb").unwrap();

        writer1
            .write_event(&AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "from session aaa".to_string(),
            })
            .unwrap();

        writer2
            .write_event(&AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "from session bbb".to_string(),
            })
            .unwrap();

        let path1 = log_file_path(root, "42_story_foo", "coder-1", "sess-aaa");
        let path2 = log_file_path(root, "42_story_foo", "coder-1", "sess-bbb");

        assert_ne!(path1, path2, "Different sessions should use different files");

        let entries1 = read_log(&path1).unwrap();
        let entries2 = read_log(&path2).unwrap();

        assert_eq!(entries1.len(), 1);
        assert_eq!(entries2.len(), 1);
        assert_eq!(entries1[0].event["text"], "from session aaa");
        assert_eq!(entries2[0].event["text"], "from session bbb");
    }

    // find_latest_log selects by mtime: the file written after the sleep must
    // win. The 50 ms sleep assumes sub-second mtime resolution on the test
    // filesystem — true on modern Linux/macOS.
    #[test]
    fn test_find_latest_log_returns_most_recent() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        // Create two log files with a small delay
        let mut writer1 =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-old").unwrap();
        writer1
            .write_event(&AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "old".to_string(),
            })
            .unwrap();
        drop(writer1);

        // Touch the second file to ensure it's newer
        std::thread::sleep(std::time::Duration::from_millis(50));

        let mut writer2 =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-new").unwrap();
        writer2
            .write_event(&AgentEvent::Output {
                story_id: "42_story_foo".to_string(),
                agent_name: "coder-1".to_string(),
                text: "new".to_string(),
            })
            .unwrap();
        drop(writer2);

        let latest = find_latest_log(root, "42_story_foo", "coder-1").unwrap();
        assert!(
            latest.to_string_lossy().contains("sess-new"),
            "Should find the newest log file, got: {}",
            latest.display()
        );
    }

    // A story directory that was never created yields None, not an error.
    #[test]
    fn test_find_latest_log_returns_none_for_missing_dir() {
        let tmp = tempdir().unwrap();
        let result = find_latest_log(tmp.path(), "nonexistent", "coder-1");
        assert!(result.is_none());
    }

    // Dropping the writer must leave a complete, parseable file behind.
    #[test]
    fn test_log_files_persist_on_disk() {
        let tmp = tempdir().unwrap();
        let root = tmp.path();

        let path = {
            let mut writer =
                AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-persist").unwrap();
            writer
                .write_event(&AgentEvent::Status {
                    story_id: "42_story_foo".to_string(),
                    agent_name: "coder-1".to_string(),
                    status: "running".to_string(),
                })
                .unwrap();
            log_file_path(root, "42_story_foo", "coder-1", "sess-persist")
            // writer is dropped here
        };

        // File should still exist and be readable
        assert!(path.exists(), "Log file should persist after writer is dropped");
        let entries = read_log(&path).unwrap();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].event["type"], "status");
    }
}
|
||||||
426
server/src/agents/gates.rs
Normal file
426
server/src/agents/gates.rs
Normal file
@@ -0,0 +1,426 @@
|
|||||||
|
use std::path::Path;
|
||||||
|
use std::process::Command;
|
||||||
|
use std::time::Duration;
|
||||||
|
use wait_timeout::ChildExt;
|
||||||
|
|
||||||
|
/// Maximum time any single test command is allowed to run before being killed.
///
/// Enforced by `run_command_with_timeout` via `wait_timeout`: on expiry the
/// child process is killed and an error is returned instead of a result.
const TEST_TIMEOUT: Duration = Duration::from_secs(600); // 10 minutes
|
||||||
|
|
||||||
|
/// Detect whether the base branch in a worktree is `master` or `main`.
/// Falls back to `"master"` if neither is found (or git is unavailable).
pub(crate) fn detect_worktree_base_branch(wt_path: &Path) -> String {
    // A branch "exists" when `git rev-parse --verify` succeeds; any failure
    // (non-zero exit, spawn error) counts as absent.
    let branch_exists = |branch: &str| {
        Command::new("git")
            .args(["rev-parse", "--verify", branch])
            .current_dir(wt_path)
            .output()
            .map(|o| o.status.success())
            .unwrap_or(false)
    };

    match ["master", "main"].into_iter().find(|b| branch_exists(b)) {
        Some(branch) => branch.to_string(),
        None => "master".to_string(),
    }
}
|
||||||
|
|
||||||
|
/// Return `true` if the git worktree at `wt_path` has commits on its current
|
||||||
|
/// branch that are not present on the base branch (`master` or `main`).
|
||||||
|
///
|
||||||
|
/// Used during server startup reconciliation to detect stories whose agent work
|
||||||
|
/// was committed while the server was offline.
|
||||||
|
pub(crate) fn worktree_has_committed_work(wt_path: &Path) -> bool {
|
||||||
|
let base_branch = detect_worktree_base_branch(wt_path);
|
||||||
|
let output = Command::new("git")
|
||||||
|
.args(["log", &format!("{base_branch}..HEAD"), "--oneline"])
|
||||||
|
.current_dir(wt_path)
|
||||||
|
.output();
|
||||||
|
match output {
|
||||||
|
Ok(out) if out.status.success() => {
|
||||||
|
!String::from_utf8_lossy(&out.stdout).trim().is_empty()
|
||||||
|
}
|
||||||
|
_ => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check whether the given directory has any uncommitted git changes.
/// Returns `Err` with a descriptive message if there are any.
pub(crate) fn check_uncommitted_changes(path: &Path) -> Result<(), String> {
    let status = Command::new("git")
        .args(["status", "--porcelain"])
        .current_dir(path)
        .output()
        .map_err(|e| format!("Failed to run git status: {e}"))?;

    // `--porcelain` prints one line per dirty path; an empty (trimmed)
    // listing means the tree is clean.
    let stdout = String::from_utf8_lossy(&status.stdout);
    match stdout.trim() {
        "" => Ok(()),
        _ => Err(format!(
            "Worktree has uncommitted changes. Please commit all work before the agent exits:\n{stdout}"
        )),
    }
}
|
||||||
|
|
||||||
|
/// Run the project's test suite.
|
||||||
|
///
|
||||||
|
/// Uses `script/test` if present, treating it as the canonical single test entry point.
|
||||||
|
/// Falls back to `cargo nextest run` / `cargo test` when `script/test` is absent.
|
||||||
|
/// Returns `(tests_passed, output)`.
|
||||||
|
pub(crate) fn run_project_tests(path: &Path) -> Result<(bool, String), String> {
|
||||||
|
let script_test = path.join("script").join("test");
|
||||||
|
if script_test.exists() {
|
||||||
|
let mut output = String::from("=== script/test ===\n");
|
||||||
|
let (success, out) = run_command_with_timeout(&script_test, &[], path)?;
|
||||||
|
output.push_str(&out);
|
||||||
|
output.push('\n');
|
||||||
|
return Ok((success, output));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: cargo nextest run / cargo test
|
||||||
|
let mut output = String::from("=== tests ===\n");
|
||||||
|
let (success, test_out) = match run_command_with_timeout("cargo", &["nextest", "run"], path) {
|
||||||
|
Ok(result) => result,
|
||||||
|
Err(_) => {
|
||||||
|
// nextest not available — fall back to cargo test
|
||||||
|
run_command_with_timeout("cargo", &["test"], path)
|
||||||
|
.map_err(|e| format!("Failed to run cargo test: {e}"))?
|
||||||
|
}
|
||||||
|
};
|
||||||
|
output.push_str(&test_out);
|
||||||
|
output.push('\n');
|
||||||
|
Ok((success, output))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run a command with a timeout. Returns `(success, combined_output)`.
|
||||||
|
/// Kills the child process if it exceeds `TEST_TIMEOUT`.
|
||||||
|
///
|
||||||
|
/// Stdout and stderr are drained in background threads to avoid a pipe-buffer
|
||||||
|
/// deadlock: if the child fills the 64 KB OS pipe buffer while the parent
|
||||||
|
/// blocks on `waitpid`, neither side can make progress.
|
||||||
|
fn run_command_with_timeout(
|
||||||
|
program: impl AsRef<std::ffi::OsStr>,
|
||||||
|
args: &[&str],
|
||||||
|
dir: &Path,
|
||||||
|
) -> Result<(bool, String), String> {
|
||||||
|
let mut child = Command::new(program)
|
||||||
|
.args(args)
|
||||||
|
.current_dir(dir)
|
||||||
|
.stdout(std::process::Stdio::piped())
|
||||||
|
.stderr(std::process::Stdio::piped())
|
||||||
|
.spawn()
|
||||||
|
.map_err(|e| format!("Failed to spawn command: {e}"))?;
|
||||||
|
|
||||||
|
// Drain stdout/stderr in background threads so the pipe buffers never fill.
|
||||||
|
let stdout_handle = child.stdout.take().map(|r| {
|
||||||
|
std::thread::spawn(move || {
|
||||||
|
let mut s = String::new();
|
||||||
|
let mut r = r;
|
||||||
|
std::io::Read::read_to_string(&mut r, &mut s).ok();
|
||||||
|
s
|
||||||
|
})
|
||||||
|
});
|
||||||
|
let stderr_handle = child.stderr.take().map(|r| {
|
||||||
|
std::thread::spawn(move || {
|
||||||
|
let mut s = String::new();
|
||||||
|
let mut r = r;
|
||||||
|
std::io::Read::read_to_string(&mut r, &mut s).ok();
|
||||||
|
s
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
|
match child.wait_timeout(TEST_TIMEOUT) {
|
||||||
|
Ok(Some(status)) => {
|
||||||
|
let stdout = stdout_handle
|
||||||
|
.and_then(|h| h.join().ok())
|
||||||
|
.unwrap_or_default();
|
||||||
|
let stderr = stderr_handle
|
||||||
|
.and_then(|h| h.join().ok())
|
||||||
|
.unwrap_or_default();
|
||||||
|
Ok((status.success(), format!("{stdout}{stderr}")))
|
||||||
|
}
|
||||||
|
Ok(None) => {
|
||||||
|
// Timed out — kill the child.
|
||||||
|
let _ = child.kill();
|
||||||
|
let _ = child.wait();
|
||||||
|
Err(format!(
|
||||||
|
"Command timed out after {} seconds",
|
||||||
|
TEST_TIMEOUT.as_secs()
|
||||||
|
))
|
||||||
|
}
|
||||||
|
Err(e) => Err(format!("Failed to wait for command: {e}")),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run `cargo clippy` and the project test suite (via `script/test` if present,
|
||||||
|
/// otherwise `cargo nextest run` / `cargo test`) in the given directory.
|
||||||
|
/// Returns `(gates_passed, combined_output)`.
|
||||||
|
pub(crate) fn run_acceptance_gates(path: &Path) -> Result<(bool, String), String> {
|
||||||
|
let mut all_output = String::new();
|
||||||
|
let mut all_passed = true;
|
||||||
|
|
||||||
|
// ── cargo clippy ──────────────────────────────────────────────
|
||||||
|
let clippy = Command::new("cargo")
|
||||||
|
.args(["clippy", "--all-targets", "--all-features"])
|
||||||
|
.current_dir(path)
|
||||||
|
.output()
|
||||||
|
.map_err(|e| format!("Failed to run cargo clippy: {e}"))?;
|
||||||
|
|
||||||
|
all_output.push_str("=== cargo clippy ===\n");
|
||||||
|
let clippy_stdout = String::from_utf8_lossy(&clippy.stdout);
|
||||||
|
let clippy_stderr = String::from_utf8_lossy(&clippy.stderr);
|
||||||
|
if !clippy_stdout.is_empty() {
|
||||||
|
all_output.push_str(&clippy_stdout);
|
||||||
|
}
|
||||||
|
if !clippy_stderr.is_empty() {
|
||||||
|
all_output.push_str(&clippy_stderr);
|
||||||
|
}
|
||||||
|
all_output.push('\n');
|
||||||
|
|
||||||
|
if !clippy.status.success() {
|
||||||
|
all_passed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── tests (script/test if available, else cargo nextest/test) ─
|
||||||
|
let (test_success, test_out) = run_project_tests(path)?;
|
||||||
|
all_output.push_str(&test_out);
|
||||||
|
if !test_success {
|
||||||
|
all_passed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok((all_passed, all_output))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run `script/test_coverage` in the given directory if the script exists.
///
/// Used as a QA gate before advancing a story from `3_qa/` to `4_merge/`.
/// Returns `(passed, output)`. If the script does not exist, returns `(true, …)`.
pub(crate) fn run_coverage_gate(path: &Path) -> Result<(bool, String), String> {
    let script = path.join("script").join("test_coverage");
    if !script.exists() {
        // Missing script is not a failure — the gate is simply skipped.
        let note = "script/test_coverage not found; coverage gate skipped.\n".to_string();
        return Ok((true, note));
    }

    let result = Command::new(&script)
        .current_dir(path)
        .output()
        .map_err(|e| format!("Failed to run script/test_coverage: {e}"))?;

    let mut output = String::from("=== script/test_coverage ===\n");
    output.push_str(&String::from_utf8_lossy(&result.stdout));
    output.push_str(&String::from_utf8_lossy(&result.stderr));
    output.push('\n');

    Ok((result.status.success(), output))
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Initialise a throwaway git repo with identity config and one empty
    // commit, so the default branch exists for the worktree tests below.
    fn init_git_repo(repo: &std::path::Path) {
        Command::new("git")
            .args(["init"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["config", "user.email", "test@test.com"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["config", "user.name", "Test"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["commit", "--allow-empty", "-m", "init"])
            .current_dir(repo)
            .output()
            .unwrap();
    }

    // ── run_project_tests tests ───────────────────────────────────

    // An executable script/test that exits 0 should be used and reported as passing.
    #[cfg(unix)]
    #[test]
    fn run_project_tests_uses_script_test_when_present_and_passes() {
        use std::fs;
        use std::os::unix::fs::PermissionsExt;

        let tmp = tempfile::tempdir().unwrap();
        let path = tmp.path();
        let script_dir = path.join("script");
        fs::create_dir_all(&script_dir).unwrap();
        let script_test = script_dir.join("test");
        fs::write(&script_test, "#!/usr/bin/env bash\necho 'all tests passed'\nexit 0\n").unwrap();
        let mut perms = fs::metadata(&script_test).unwrap().permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&script_test, perms).unwrap();

        let (passed, output) = run_project_tests(path).unwrap();
        assert!(passed, "script/test exiting 0 should pass");
        assert!(output.contains("script/test"), "output should mention script/test");
    }

    // A script/test that exits nonzero must be reported as a failure, not an error.
    #[cfg(unix)]
    #[test]
    fn run_project_tests_reports_failure_when_script_test_exits_nonzero() {
        use std::fs;
        use std::os::unix::fs::PermissionsExt;

        let tmp = tempfile::tempdir().unwrap();
        let path = tmp.path();
        let script_dir = path.join("script");
        fs::create_dir_all(&script_dir).unwrap();
        let script_test = script_dir.join("test");
        fs::write(&script_test, "#!/usr/bin/env bash\nexit 1\n").unwrap();
        let mut perms = fs::metadata(&script_test).unwrap().permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&script_test, perms).unwrap();

        let (passed, output) = run_project_tests(path).unwrap();
        assert!(!passed, "script/test exiting 1 should fail");
        assert!(output.contains("script/test"), "output should mention script/test");
    }

    // ── run_coverage_gate tests ───────────────────────────────────────────────

    // No script/test_coverage → the gate is skipped and counted as passing.
    #[cfg(unix)]
    #[test]
    fn coverage_gate_passes_when_script_absent() {
        use tempfile::tempdir;
        let tmp = tempdir().unwrap();
        let (passed, output) = run_coverage_gate(tmp.path()).unwrap();
        assert!(passed, "coverage gate should pass when script is absent");
        assert!(
            output.contains("not found"),
            "output should mention script not found"
        );
    }

    // Exit code 0 from the coverage script means the gate passes.
    #[cfg(unix)]
    #[test]
    fn coverage_gate_passes_when_script_exits_zero() {
        use std::fs;
        use std::os::unix::fs::PermissionsExt;

        let tmp = tempfile::tempdir().unwrap();
        let path = tmp.path();
        let script_dir = path.join("script");
        fs::create_dir_all(&script_dir).unwrap();
        let script = script_dir.join("test_coverage");
        fs::write(
            &script,
            "#!/usr/bin/env bash\necho 'Rust line coverage: 85%'\necho 'PASS: Coverage 85% meets threshold 0%'\nexit 0\n",
        )
        .unwrap();
        let mut perms = fs::metadata(&script).unwrap().permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&script, perms).unwrap();

        let (passed, output) = run_coverage_gate(path).unwrap();
        assert!(passed, "coverage gate should pass when script exits 0");
        assert!(
            output.contains("script/test_coverage"),
            "output should mention script/test_coverage"
        );
    }

    // Nonzero exit from the coverage script fails the gate but still returns Ok.
    #[cfg(unix)]
    #[test]
    fn coverage_gate_fails_when_script_exits_nonzero() {
        use std::fs;
        use std::os::unix::fs::PermissionsExt;

        let tmp = tempfile::tempdir().unwrap();
        let path = tmp.path();
        let script_dir = path.join("script");
        fs::create_dir_all(&script_dir).unwrap();
        let script = script_dir.join("test_coverage");
        fs::write(
            &script,
            "#!/usr/bin/env bash\necho 'FAIL: Coverage 40% is below threshold 80%'\nexit 1\n",
        )
        .unwrap();
        let mut perms = fs::metadata(&script).unwrap().permissions();
        perms.set_mode(0o755);
        fs::set_permissions(&script, perms).unwrap();

        let (passed, output) = run_coverage_gate(path).unwrap();
        assert!(!passed, "coverage gate should fail when script exits 1");
        assert!(
            output.contains("script/test_coverage"),
            "output should mention script/test_coverage"
        );
    }

    // ── worktree_has_committed_work tests ─────────────────────────────────────

    #[test]
    fn worktree_has_committed_work_false_on_fresh_repo() {
        let tmp = tempfile::tempdir().unwrap();
        let repo = tmp.path();
        // init_git_repo creates the initial commit on the default branch.
        // HEAD IS the base branch — no commits ahead.
        init_git_repo(repo);
        assert!(!worktree_has_committed_work(repo));
    }

    // A feature-branch worktree starts even with the base branch (false), then
    // becomes "ahead" (true) once a commit lands on the feature branch.
    #[test]
    fn worktree_has_committed_work_true_after_commit_on_feature_branch() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let project_root = tmp.path().join("project");
        fs::create_dir_all(&project_root).unwrap();
        init_git_repo(&project_root);

        // Create a git worktree on a feature branch.
        let wt_path = tmp.path().join("wt");
        Command::new("git")
            .args([
                "worktree",
                "add",
                &wt_path.to_string_lossy(),
                "-b",
                "feature/story-99_test",
            ])
            .current_dir(&project_root)
            .output()
            .unwrap();

        // No commits on the feature branch yet — same as base branch.
        assert!(!worktree_has_committed_work(&wt_path));

        // Add a commit to the feature branch in the worktree.
        fs::write(wt_path.join("work.txt"), "done").unwrap();
        Command::new("git")
            .args(["add", "."])
            .current_dir(&wt_path)
            .output()
            .unwrap();
        Command::new("git")
            .args([
                "-c",
                "user.email=test@test.com",
                "-c",
                "user.name=Test",
                "commit",
                "-m",
                "coder: implement story",
            ])
            .current_dir(&wt_path)
            .output()
            .unwrap();

        // Now the feature branch is ahead of the base branch.
        assert!(worktree_has_committed_work(&wt_path));
    }
}
|
||||||
829
server/src/agents/lifecycle.rs
Normal file
829
server/src/agents/lifecycle.rs
Normal file
@@ -0,0 +1,829 @@
|
|||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::process::Command;
|
||||||
|
|
||||||
|
use crate::io::story_metadata::{clear_front_matter_field, write_rejection_notes};
|
||||||
|
use crate::slog;
|
||||||
|
|
||||||
|
pub(super) fn item_type_from_id(item_id: &str) -> &'static str {
|
||||||
|
// New format: {digits}_{type}_{slug}
|
||||||
|
let after_num = item_id.trim_start_matches(|c: char| c.is_ascii_digit());
|
||||||
|
if after_num.starts_with("_bug_") {
|
||||||
|
"bug"
|
||||||
|
} else if after_num.starts_with("_spike_") {
|
||||||
|
"spike"
|
||||||
|
} else {
|
||||||
|
"story"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Return the source directory path for a work item (always work/1_backlog/).
/// The item id is accepted for signature symmetry but unused.
fn item_source_dir(project_root: &Path, _item_id: &str) -> PathBuf {
    let mut dir = project_root.to_path_buf();
    dir.push(".storkit");
    dir.push("work");
    dir.push("1_backlog");
    dir
}
|
||||||
|
|
||||||
|
/// Return the done directory path for a work item (always work/5_done/).
/// The item id is accepted for signature symmetry but unused.
fn item_archive_dir(project_root: &Path, _item_id: &str) -> PathBuf {
    let mut dir = project_root.to_path_buf();
    dir.push(".storkit");
    dir.push("work");
    dir.push("5_done");
    dir
}
|
||||||
|
|
||||||
|
/// Move a work item (story, bug, or spike) from `work/1_backlog/` to `work/2_current/`.
|
||||||
|
///
|
||||||
|
/// Idempotent: if the item is already in `2_current/`, returns Ok without committing.
|
||||||
|
/// If the item is not found in `1_backlog/`, logs a warning and returns Ok.
|
||||||
|
pub fn move_story_to_current(project_root: &Path, story_id: &str) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let current_dir = sk.join("2_current");
|
||||||
|
let current_path = current_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if current_path.exists() {
|
||||||
|
// Already in 2_current/ — idempotent, nothing to do.
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let source_dir = item_source_dir(project_root, story_id);
|
||||||
|
let source_path = source_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if !source_path.exists() {
|
||||||
|
slog!(
|
||||||
|
"[lifecycle] Work item '{story_id}' not found in {}; skipping move to 2_current/",
|
||||||
|
source_dir.display()
|
||||||
|
);
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::fs::create_dir_all(¤t_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?;
|
||||||
|
|
||||||
|
std::fs::rename(&source_path, ¤t_path)
|
||||||
|
.map_err(|e| format!("Failed to move '{story_id}' to 2_current/: {e}"))?;
|
||||||
|
|
||||||
|
slog!(
|
||||||
|
"[lifecycle] Moved '{story_id}' from {} to work/2_current/",
|
||||||
|
source_dir.display()
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check whether a feature branch `feature/story-{story_id}` exists and has
/// commits that are not yet on master. Returns `true` when there is unmerged
/// work, `false` when there is no branch or all its commits are already
/// reachable from master (or when git itself fails).
pub fn feature_branch_has_unmerged_changes(project_root: &Path, story_id: &str) -> bool {
    let branch = format!("feature/story-{story_id}");

    // Bail out early when no feature branch exists.
    let branch_exists = Command::new("git")
        .args(["rev-parse", "--verify", &branch])
        .current_dir(project_root)
        .output()
        .map(|out| out.status.success())
        .unwrap_or(false);
    if !branch_exists {
        return false;
    }

    // Any commit listed by `git log master..{branch}` is unmerged work.
    Command::new("git")
        .args(["log", &format!("master..{branch}"), "--oneline"])
        .current_dir(project_root)
        .output()
        .map(|out| !String::from_utf8_lossy(&out.stdout).trim().is_empty())
        .unwrap_or(false)
}
|
||||||
|
|
||||||
|
/// Move a story from `work/2_current/` to `work/5_done/` and auto-commit.
|
||||||
|
///
|
||||||
|
/// * If the story is in `2_current/`, it is moved to `5_done/` and committed.
|
||||||
|
/// * If the story is in `4_merge/`, it is moved to `5_done/` and committed.
|
||||||
|
/// * If the story is already in `5_done/` or `6_archived/`, this is a no-op (idempotent).
|
||||||
|
/// * If the story is not found in `2_current/`, `4_merge/`, `5_done/`, or `6_archived/`, an error is returned.
|
||||||
|
pub fn move_story_to_archived(project_root: &Path, story_id: &str) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
||||||
|
let merge_path = sk.join("4_merge").join(format!("{story_id}.md"));
|
||||||
|
let done_dir = sk.join("5_done");
|
||||||
|
let done_path = done_dir.join(format!("{story_id}.md"));
|
||||||
|
let archived_path = sk.join("6_archived").join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if done_path.exists() || archived_path.exists() {
|
||||||
|
// Already in done or archived — idempotent, nothing to do.
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check 2_current/ first, then 4_merge/
|
||||||
|
let source_path = if current_path.exists() {
|
||||||
|
current_path.clone()
|
||||||
|
} else if merge_path.exists() {
|
||||||
|
merge_path.clone()
|
||||||
|
} else {
|
||||||
|
return Err(format!(
|
||||||
|
"Story '{story_id}' not found in work/2_current/ or work/4_merge/. Cannot accept story."
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
std::fs::create_dir_all(&done_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?;
|
||||||
|
std::fs::rename(&source_path, &done_path)
|
||||||
|
.map_err(|e| format!("Failed to move story '{story_id}' to 5_done/: {e}"))?;
|
||||||
|
|
||||||
|
// Strip stale pipeline fields from front matter now that the story is done.
|
||||||
|
for field in &["merge_failure", "retry_count", "blocked"] {
|
||||||
|
if let Err(e) = clear_front_matter_field(&done_path, field) {
|
||||||
|
slog!("[lifecycle] Warning: could not clear {field} from '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let from_dir = if source_path == current_path {
|
||||||
|
"work/2_current/"
|
||||||
|
} else {
|
||||||
|
"work/4_merge/"
|
||||||
|
};
|
||||||
|
slog!("[lifecycle] Moved story '{story_id}' from {from_dir} to work/5_done/");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Move a story/bug from `work/2_current/` or `work/3_qa/` to `work/4_merge/`.
|
||||||
|
///
|
||||||
|
/// This stages a work item as ready for the mergemaster to pick up and merge into master.
|
||||||
|
/// Idempotent: if already in `4_merge/`, returns Ok without committing.
|
||||||
|
pub fn move_story_to_merge(project_root: &Path, story_id: &str) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
||||||
|
let qa_path = sk.join("3_qa").join(format!("{story_id}.md"));
|
||||||
|
let merge_dir = sk.join("4_merge");
|
||||||
|
let merge_path = merge_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if merge_path.exists() {
|
||||||
|
// Already in 4_merge/ — idempotent, nothing to do.
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accept from 2_current/ (manual trigger) or 3_qa/ (pipeline advancement from QA stage).
|
||||||
|
let source_path = if current_path.exists() {
|
||||||
|
current_path.clone()
|
||||||
|
} else if qa_path.exists() {
|
||||||
|
qa_path.clone()
|
||||||
|
} else {
|
||||||
|
return Err(format!(
|
||||||
|
"Work item '{story_id}' not found in work/2_current/ or work/3_qa/. Cannot move to 4_merge/."
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
std::fs::create_dir_all(&merge_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/4_merge/ directory: {e}"))?;
|
||||||
|
std::fs::rename(&source_path, &merge_path)
|
||||||
|
.map_err(|e| format!("Failed to move '{story_id}' to 4_merge/: {e}"))?;
|
||||||
|
|
||||||
|
let from_dir = if source_path == current_path {
|
||||||
|
"work/2_current/"
|
||||||
|
} else {
|
||||||
|
"work/3_qa/"
|
||||||
|
};
|
||||||
|
// Reset retry count and blocked for the new stage.
|
||||||
|
if let Err(e) = clear_front_matter_field(&merge_path, "retry_count") {
|
||||||
|
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
if let Err(e) = clear_front_matter_field(&merge_path, "blocked") {
|
||||||
|
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
slog!("[lifecycle] Moved '{story_id}' from {from_dir} to work/4_merge/");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Move a story/bug from `work/2_current/` to `work/3_qa/` and auto-commit.
|
||||||
|
///
|
||||||
|
/// This stages a work item for QA review before merging to master.
|
||||||
|
/// Idempotent: if already in `3_qa/`, returns Ok without committing.
|
||||||
|
pub fn move_story_to_qa(project_root: &Path, story_id: &str) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let current_path = sk.join("2_current").join(format!("{story_id}.md"));
|
||||||
|
let qa_dir = sk.join("3_qa");
|
||||||
|
let qa_path = qa_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if qa_path.exists() {
|
||||||
|
// Already in 3_qa/ — idempotent, nothing to do.
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
if !current_path.exists() {
|
||||||
|
return Err(format!(
|
||||||
|
"Work item '{story_id}' not found in work/2_current/. Cannot move to 3_qa/."
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
std::fs::create_dir_all(&qa_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/3_qa/ directory: {e}"))?;
|
||||||
|
std::fs::rename(¤t_path, &qa_path)
|
||||||
|
.map_err(|e| format!("Failed to move '{story_id}' to 3_qa/: {e}"))?;
|
||||||
|
|
||||||
|
// Reset retry count for the new stage.
|
||||||
|
if let Err(e) = clear_front_matter_field(&qa_path, "retry_count") {
|
||||||
|
slog!("[lifecycle] Warning: could not clear retry_count for '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
if let Err(e) = clear_front_matter_field(&qa_path, "blocked") {
|
||||||
|
slog!("[lifecycle] Warning: could not clear blocked for '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
slog!("[lifecycle] Moved '{story_id}' from work/2_current/ to work/3_qa/");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Move a story from `work/3_qa/` back to `work/2_current/` and write rejection notes.
|
||||||
|
///
|
||||||
|
/// Used when a human reviewer rejects a story during manual QA.
|
||||||
|
/// Clears the `review_hold` front matter field and appends rejection notes to the story file.
|
||||||
|
pub fn reject_story_from_qa(
|
||||||
|
project_root: &Path,
|
||||||
|
story_id: &str,
|
||||||
|
notes: &str,
|
||||||
|
) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let qa_path = sk.join("3_qa").join(format!("{story_id}.md"));
|
||||||
|
let current_dir = sk.join("2_current");
|
||||||
|
let current_path = current_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if current_path.exists() {
|
||||||
|
return Ok(()); // Already in 2_current — idempotent.
|
||||||
|
}
|
||||||
|
|
||||||
|
if !qa_path.exists() {
|
||||||
|
return Err(format!(
|
||||||
|
"Work item '{story_id}' not found in work/3_qa/. Cannot reject."
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
std::fs::create_dir_all(¤t_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/2_current/ directory: {e}"))?;
|
||||||
|
std::fs::rename(&qa_path, ¤t_path)
|
||||||
|
.map_err(|e| format!("Failed to move '{story_id}' from 3_qa/ to 2_current/: {e}"))?;
|
||||||
|
|
||||||
|
// Clear review_hold since the story is going back for rework.
|
||||||
|
if let Err(e) = clear_front_matter_field(¤t_path, "review_hold") {
|
||||||
|
slog!("[lifecycle] Warning: could not clear review_hold from '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write rejection notes into the story file so the coder can see what needs fixing.
|
||||||
|
if !notes.is_empty()
|
||||||
|
&& let Err(e) = write_rejection_notes(¤t_path, notes)
|
||||||
|
{
|
||||||
|
slog!("[lifecycle] Warning: could not write rejection notes to '{story_id}': {e}");
|
||||||
|
}
|
||||||
|
|
||||||
|
slog!("[lifecycle] Rejected '{story_id}' from work/3_qa/ back to work/2_current/");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Move any work item to an arbitrary pipeline stage by searching all stages.
|
||||||
|
///
|
||||||
|
/// Accepts `target_stage` as one of: `backlog`, `current`, `qa`, `merge`, `done`.
|
||||||
|
/// Idempotent: if the item is already in the target stage, returns Ok.
|
||||||
|
/// Returns `(from_stage, to_stage)` on success.
|
||||||
|
pub fn move_story_to_stage(
|
||||||
|
project_root: &Path,
|
||||||
|
story_id: &str,
|
||||||
|
target_stage: &str,
|
||||||
|
) -> Result<(String, String), String> {
|
||||||
|
let stage_dirs: &[(&str, &str)] = &[
|
||||||
|
("backlog", "1_backlog"),
|
||||||
|
("current", "2_current"),
|
||||||
|
("qa", "3_qa"),
|
||||||
|
("merge", "4_merge"),
|
||||||
|
("done", "5_done"),
|
||||||
|
];
|
||||||
|
|
||||||
|
let target_dir_name = stage_dirs
|
||||||
|
.iter()
|
||||||
|
.find(|(name, _)| *name == target_stage)
|
||||||
|
.map(|(_, dir)| *dir)
|
||||||
|
.ok_or_else(|| {
|
||||||
|
format!(
|
||||||
|
"Invalid target_stage '{target_stage}'. Must be one of: backlog, current, qa, merge, done"
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let target_dir = sk.join(target_dir_name);
|
||||||
|
let target_path = target_dir.join(format!("{story_id}.md"));
|
||||||
|
|
||||||
|
if target_path.exists() {
|
||||||
|
return Ok((target_stage.to_string(), target_stage.to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search all named stages plus the archive stage.
|
||||||
|
let search_dirs: &[(&str, &str)] = &[
|
||||||
|
("backlog", "1_backlog"),
|
||||||
|
("current", "2_current"),
|
||||||
|
("qa", "3_qa"),
|
||||||
|
("merge", "4_merge"),
|
||||||
|
("done", "5_done"),
|
||||||
|
("archived", "6_archived"),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut found_path: Option<std::path::PathBuf> = None;
|
||||||
|
let mut from_stage = "";
|
||||||
|
for (stage_name, dir_name) in search_dirs {
|
||||||
|
let candidate = sk.join(dir_name).join(format!("{story_id}.md"));
|
||||||
|
if candidate.exists() {
|
||||||
|
found_path = Some(candidate);
|
||||||
|
from_stage = stage_name;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let source_path =
|
||||||
|
found_path.ok_or_else(|| format!("Work item '{story_id}' not found in any pipeline stage."))?;
|
||||||
|
|
||||||
|
std::fs::create_dir_all(&target_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/{target_dir_name}/ directory: {e}"))?;
|
||||||
|
std::fs::rename(&source_path, &target_path)
|
||||||
|
.map_err(|e| format!("Failed to move '{story_id}' to work/{target_dir_name}/: {e}"))?;
|
||||||
|
|
||||||
|
slog!(
|
||||||
|
"[lifecycle] Moved '{story_id}' from work/{from_stage}/ to work/{target_dir_name}/"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok((from_stage.to_string(), target_stage.to_string()))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Move a bug from `work/2_current/` or `work/1_backlog/` to `work/5_done/` and auto-commit.
|
||||||
|
///
|
||||||
|
/// * If the bug is in `2_current/`, it is moved to `5_done/` and committed.
|
||||||
|
/// * If the bug is still in `1_backlog/` (never started), it is moved directly to `5_done/`.
|
||||||
|
/// * If the bug is already in `5_done/`, this is a no-op (idempotent).
|
||||||
|
/// * If the bug is not found anywhere, an error is returned.
|
||||||
|
pub fn close_bug_to_archive(project_root: &Path, bug_id: &str) -> Result<(), String> {
|
||||||
|
let sk = project_root.join(".storkit").join("work");
|
||||||
|
let current_path = sk.join("2_current").join(format!("{bug_id}.md"));
|
||||||
|
let backlog_path = sk.join("1_backlog").join(format!("{bug_id}.md"));
|
||||||
|
let archive_dir = item_archive_dir(project_root, bug_id);
|
||||||
|
let archive_path = archive_dir.join(format!("{bug_id}.md"));
|
||||||
|
|
||||||
|
if archive_path.exists() {
|
||||||
|
return Ok(());
|
||||||
|
}
|
||||||
|
|
||||||
|
let source_path = if current_path.exists() {
|
||||||
|
current_path.clone()
|
||||||
|
} else if backlog_path.exists() {
|
||||||
|
backlog_path.clone()
|
||||||
|
} else {
|
||||||
|
return Err(format!(
|
||||||
|
"Bug '{bug_id}' not found in work/2_current/ or work/1_backlog/. Cannot close bug."
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
std::fs::create_dir_all(&archive_dir)
|
||||||
|
.map_err(|e| format!("Failed to create work/5_done/ directory: {e}"))?;
|
||||||
|
std::fs::rename(&source_path, &archive_path)
|
||||||
|
.map_err(|e| format!("Failed to move bug '{bug_id}' to 5_done/: {e}"))?;
|
||||||
|
|
||||||
|
slog!(
|
||||||
|
"[lifecycle] Closed bug '{bug_id}' → work/5_done/"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit tests for the lifecycle stage-move helpers. All filesystem tests build
// a throwaway `.storkit/work/` tree inside a tempdir; the git tests spawn real
// `git` subprocesses inside a tempdir repo.
#[cfg(test)]
mod tests {
    use super::*;

    // ── move_story_to_current tests ────────────────────────────────────────────

    // Happy path: a backlog item is physically moved into 2_current/.
    #[test]
    fn move_story_to_current_moves_file() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let backlog = root.join(".storkit/work/1_backlog");
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&backlog).unwrap();
        fs::create_dir_all(&current).unwrap();
        fs::write(backlog.join("10_story_foo.md"), "test").unwrap();

        move_story_to_current(root, "10_story_foo").unwrap();

        assert!(!backlog.join("10_story_foo.md").exists());
        assert!(current.join("10_story_foo.md").exists());
    }

    // Calling again when the story is already current must not fail or move it.
    #[test]
    fn move_story_to_current_is_idempotent_when_already_current() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current).unwrap();
        fs::write(current.join("11_story_foo.md"), "test").unwrap();

        move_story_to_current(root, "11_story_foo").unwrap();
        assert!(current.join("11_story_foo.md").exists());
    }

    // A story missing from the backlog is tolerated (Ok), not an error.
    #[test]
    fn move_story_to_current_noop_when_not_in_backlog() {
        let tmp = tempfile::tempdir().unwrap();
        assert!(move_story_to_current(tmp.path(), "99_missing").is_ok());
    }

    // Bug items use the same mover as stories.
    #[test]
    fn move_bug_to_current_moves_from_backlog() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let backlog = root.join(".storkit/work/1_backlog");
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&backlog).unwrap();
        fs::create_dir_all(&current).unwrap();
        fs::write(backlog.join("1_bug_test.md"), "# Bug 1\n").unwrap();

        move_story_to_current(root, "1_bug_test").unwrap();

        assert!(!backlog.join("1_bug_test.md").exists());
        assert!(current.join("1_bug_test.md").exists());
    }

    // ── close_bug_to_archive tests ─────────────────────────────────────────────

    // An in-progress bug is closed straight into 5_done/.
    #[test]
    fn close_bug_moves_from_current_to_archive() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current).unwrap();
        fs::write(current.join("2_bug_test.md"), "# Bug 2\n").unwrap();

        close_bug_to_archive(root, "2_bug_test").unwrap();

        assert!(!current.join("2_bug_test.md").exists());
        assert!(root.join(".storkit/work/5_done/2_bug_test.md").exists());
    }

    // A never-started bug can be closed directly from the backlog.
    #[test]
    fn close_bug_moves_from_backlog_when_not_started() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let backlog = root.join(".storkit/work/1_backlog");
        fs::create_dir_all(&backlog).unwrap();
        fs::write(backlog.join("3_bug_test.md"), "# Bug 3\n").unwrap();

        close_bug_to_archive(root, "3_bug_test").unwrap();

        assert!(!backlog.join("3_bug_test.md").exists());
        assert!(root.join(".storkit/work/5_done/3_bug_test.md").exists());
    }

    // ── item_type_from_id tests ────────────────────────────────────────────────

    // The type token is the second underscore-separated segment of the id.
    #[test]
    fn item_type_from_id_detects_types() {
        assert_eq!(item_type_from_id("1_bug_test"), "bug");
        assert_eq!(item_type_from_id("1_spike_research"), "spike");
        assert_eq!(item_type_from_id("50_story_my_story"), "story");
        assert_eq!(item_type_from_id("1_story_simple"), "story");
    }

    // ── move_story_to_merge tests ──────────────────────────────────────────────

    // Manual trigger path: 2_current/ → 4_merge/.
    #[test]
    fn move_story_to_merge_moves_file() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current).unwrap();
        fs::write(current.join("20_story_foo.md"), "test").unwrap();

        move_story_to_merge(root, "20_story_foo").unwrap();

        assert!(!current.join("20_story_foo.md").exists());
        assert!(root.join(".storkit/work/4_merge/20_story_foo.md").exists());
    }

    // Pipeline path: 3_qa/ → 4_merge/.
    #[test]
    fn move_story_to_merge_from_qa_dir() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let qa_dir = root.join(".storkit/work/3_qa");
        fs::create_dir_all(&qa_dir).unwrap();
        fs::write(qa_dir.join("40_story_test.md"), "test").unwrap();

        move_story_to_merge(root, "40_story_test").unwrap();

        assert!(!qa_dir.join("40_story_test.md").exists());
        assert!(root.join(".storkit/work/4_merge/40_story_test.md").exists());
    }

    // Re-running the move when already staged must be a silent no-op.
    #[test]
    fn move_story_to_merge_idempotent_when_already_in_merge() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let merge_dir = root.join(".storkit/work/4_merge");
        fs::create_dir_all(&merge_dir).unwrap();
        fs::write(merge_dir.join("21_story_test.md"), "test").unwrap();

        move_story_to_merge(root, "21_story_test").unwrap();
        assert!(merge_dir.join("21_story_test.md").exists());
    }

    // A missing item produces the documented error message.
    #[test]
    fn move_story_to_merge_errors_when_not_in_current_or_qa() {
        let tmp = tempfile::tempdir().unwrap();
        let result = move_story_to_merge(tmp.path(), "99_nonexistent");
        assert!(result.unwrap_err().contains("not found in work/2_current/ or work/3_qa/"));
    }

    // ── move_story_to_qa tests ────────────────────────────────────────────────

    // Happy path: 2_current/ → 3_qa/.
    #[test]
    fn move_story_to_qa_moves_file() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current).unwrap();
        fs::write(current.join("30_story_qa.md"), "test").unwrap();

        move_story_to_qa(root, "30_story_qa").unwrap();

        assert!(!current.join("30_story_qa.md").exists());
        assert!(root.join(".storkit/work/3_qa/30_story_qa.md").exists());
    }

    // Re-running when already in QA must succeed without moving anything.
    #[test]
    fn move_story_to_qa_idempotent_when_already_in_qa() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let qa_dir = root.join(".storkit/work/3_qa");
        fs::create_dir_all(&qa_dir).unwrap();
        fs::write(qa_dir.join("31_story_test.md"), "test").unwrap();

        move_story_to_qa(root, "31_story_test").unwrap();
        assert!(qa_dir.join("31_story_test.md").exists());
    }

    // Only 2_current/ is a valid source for QA; anything else errors.
    #[test]
    fn move_story_to_qa_errors_when_not_in_current() {
        let tmp = tempfile::tempdir().unwrap();
        let result = move_story_to_qa(tmp.path(), "99_nonexistent");
        assert!(result.unwrap_err().contains("not found in work/2_current/"));
    }

    // ── move_story_to_archived tests ──────────────────────────────────────────

    // Acceptance also works when the story is waiting in 4_merge/.
    #[test]
    fn move_story_to_archived_finds_in_merge_dir() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let merge_dir = root.join(".storkit/work/4_merge");
        fs::create_dir_all(&merge_dir).unwrap();
        fs::write(merge_dir.join("22_story_test.md"), "test").unwrap();

        move_story_to_archived(root, "22_story_test").unwrap();

        assert!(!merge_dir.join("22_story_test.md").exists());
        assert!(root.join(".storkit/work/5_done/22_story_test.md").exists());
    }

    // Error message must name 4_merge so operators know where was searched.
    #[test]
    fn move_story_to_archived_error_when_not_in_current_or_merge() {
        let tmp = tempfile::tempdir().unwrap();
        let result = move_story_to_archived(tmp.path(), "99_nonexistent");
        assert!(result.unwrap_err().contains("4_merge"));
    }

    // ── feature_branch_has_unmerged_changes tests ────────────────────────────

    // Helper: create a git repo with identity config and one empty root commit.
    // NOTE(review): the branch tests below check out "master", which assumes
    // `git init` creates a master branch — this depends on the machine's
    // init.defaultBranch setting; verify in CI.
    fn init_git_repo(repo: &std::path::Path) {
        Command::new("git")
            .args(["init"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["config", "user.email", "test@test.com"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["config", "user.name", "Test"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["commit", "--allow-empty", "-m", "init"])
            .current_dir(repo)
            .output()
            .unwrap();
    }

    /// Bug 226: feature_branch_has_unmerged_changes returns true when the
    /// feature branch has commits not on master.
    #[test]
    fn feature_branch_has_unmerged_changes_detects_unmerged_code() {
        use std::fs;
        use tempfile::tempdir;

        let tmp = tempdir().unwrap();
        let repo = tmp.path();
        init_git_repo(repo);

        // Create a feature branch with a code commit.
        Command::new("git")
            .args(["checkout", "-b", "feature/story-50_story_test"])
            .current_dir(repo)
            .output()
            .unwrap();
        fs::write(repo.join("feature.rs"), "fn main() {}").unwrap();
        Command::new("git")
            .args(["add", "."])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["commit", "-m", "add feature"])
            .current_dir(repo)
            .output()
            .unwrap();
        Command::new("git")
            .args(["checkout", "master"])
            .current_dir(repo)
            .output()
            .unwrap();

        assert!(
            feature_branch_has_unmerged_changes(repo, "50_story_test"),
            "should detect unmerged changes on feature branch"
        );
    }

    /// Bug 226: feature_branch_has_unmerged_changes returns false when no
    /// feature branch exists.
    #[test]
    fn feature_branch_has_unmerged_changes_false_when_no_branch() {
        use tempfile::tempdir;

        let tmp = tempdir().unwrap();
        let repo = tmp.path();
        init_git_repo(repo);

        assert!(
            !feature_branch_has_unmerged_changes(repo, "99_nonexistent"),
            "should return false when no feature branch"
        );
    }

    // ── reject_story_from_qa tests ────────────────────────────────────────────

    // Rejection moves the file back, strips review_hold, and appends the notes.
    #[test]
    fn reject_story_from_qa_moves_to_current() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let qa_dir = root.join(".storkit/work/3_qa");
        let current_dir = root.join(".storkit/work/2_current");
        fs::create_dir_all(&qa_dir).unwrap();
        fs::create_dir_all(&current_dir).unwrap();
        fs::write(
            qa_dir.join("50_story_test.md"),
            "---\nname: Test\nreview_hold: true\n---\n# Story\n",
        )
        .unwrap();

        reject_story_from_qa(root, "50_story_test", "Button color wrong").unwrap();

        assert!(!qa_dir.join("50_story_test.md").exists());
        assert!(current_dir.join("50_story_test.md").exists());
        let contents = fs::read_to_string(current_dir.join("50_story_test.md")).unwrap();
        assert!(contents.contains("Button color wrong"));
        assert!(contents.contains("## QA Rejection Notes"));
        assert!(!contents.contains("review_hold"));
    }

    // Rejecting an item that is not in QA is an error.
    #[test]
    fn reject_story_from_qa_errors_when_not_in_qa() {
        let tmp = tempfile::tempdir().unwrap();
        let result = reject_story_from_qa(tmp.path(), "99_nonexistent", "notes");
        assert!(result.unwrap_err().contains("not found in work/3_qa/"));
    }

    // An item already back in 2_current/ makes rejection a no-op.
    #[test]
    fn reject_story_from_qa_idempotent_when_in_current() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current_dir = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current_dir).unwrap();
        fs::write(current_dir.join("51_story_test.md"), "---\nname: Test\n---\n# Story\n").unwrap();

        reject_story_from_qa(root, "51_story_test", "notes").unwrap();
        assert!(current_dir.join("51_story_test.md").exists());
    }

    // ── move_story_to_stage tests ─────────────────────────────

    // Forward move: backlog → current, with the (from, to) pair reported.
    #[test]
    fn move_story_to_stage_moves_from_backlog_to_current() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let backlog = root.join(".storkit/work/1_backlog");
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&backlog).unwrap();
        fs::create_dir_all(&current).unwrap();
        fs::write(backlog.join("60_story_move.md"), "test").unwrap();

        let (from, to) = move_story_to_stage(root, "60_story_move", "current").unwrap();

        assert_eq!(from, "backlog");
        assert_eq!(to, "current");
        assert!(!backlog.join("60_story_move.md").exists());
        assert!(current.join("60_story_move.md").exists());
    }

    // Backward move: current → backlog is equally supported.
    #[test]
    fn move_story_to_stage_moves_from_current_to_backlog() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        let backlog = root.join(".storkit/work/1_backlog");
        fs::create_dir_all(&current).unwrap();
        fs::create_dir_all(&backlog).unwrap();
        fs::write(current.join("61_story_back.md"), "test").unwrap();

        let (from, to) = move_story_to_stage(root, "61_story_back", "backlog").unwrap();

        assert_eq!(from, "current");
        assert_eq!(to, "backlog");
        assert!(!current.join("61_story_back.md").exists());
        assert!(backlog.join("61_story_back.md").exists());
    }

    // Target == current stage: reports (target, target) and moves nothing.
    #[test]
    fn move_story_to_stage_idempotent_when_already_in_target() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".storkit/work/2_current");
        fs::create_dir_all(&current).unwrap();
        fs::write(current.join("62_story_idem.md"), "test").unwrap();

        let (from, to) = move_story_to_stage(root, "62_story_idem", "current").unwrap();

        assert_eq!(from, "current");
        assert_eq!(to, "current");
        assert!(current.join("62_story_idem.md").exists());
    }

    // Unknown stage names are rejected before any filesystem work happens.
    #[test]
    fn move_story_to_stage_invalid_target_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let result = move_story_to_stage(tmp.path(), "1_story_test", "invalid");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Invalid target_stage"));
    }

    // An item absent from every stage directory yields a descriptive error.
    #[test]
    fn move_story_to_stage_not_found_returns_error() {
        let tmp = tempfile::tempdir().unwrap();
        let result = move_story_to_stage(tmp.path(), "99_story_ghost", "current");
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("not found in any pipeline stage"));
    }

    // The search walks all stages, so an item in 3_qa/ can be sent to backlog.
    #[test]
    fn move_story_to_stage_finds_in_qa_dir() {
        use std::fs;
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let qa_dir = root.join(".storkit/work/3_qa");
        let backlog = root.join(".storkit/work/1_backlog");
        fs::create_dir_all(&qa_dir).unwrap();
        fs::create_dir_all(&backlog).unwrap();
        fs::write(qa_dir.join("63_story_qa.md"), "test").unwrap();

        let (from, to) = move_story_to_stage(root, "63_story_qa", "backlog").unwrap();

        assert_eq!(from, "qa");
        assert_eq!(to, "backlog");
        assert!(!qa_dir.join("63_story_qa.md").exists());
        assert!(backlog.join("63_story_qa.md").exists());
    }
}
|
||||||
1696
server/src/agents/merge.rs
Normal file
1696
server/src/agents/merge.rs
Normal file
File diff suppressed because it is too large
Load Diff
222
server/src/agents/mod.rs
Normal file
222
server/src/agents/mod.rs
Normal file
@@ -0,0 +1,222 @@
|
|||||||
|
pub mod gates;
|
||||||
|
pub mod lifecycle;
|
||||||
|
pub mod merge;
|
||||||
|
mod pool;
|
||||||
|
pub(crate) mod pty;
|
||||||
|
pub mod runtime;
|
||||||
|
pub mod token_usage;
|
||||||
|
|
||||||
|
use crate::config::AgentConfig;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
pub use lifecycle::{
|
||||||
|
close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_archived,
|
||||||
|
move_story_to_merge, move_story_to_qa, move_story_to_stage, reject_story_from_qa,
|
||||||
|
};
|
||||||
|
pub use pool::AgentPool;
|
||||||
|
|
||||||
|
/// Events emitted during server startup reconciliation to broadcast real-time
/// progress to connected WebSocket clients.
///
/// Serialized as a plain JSON object (no serde renaming), so the wire field
/// names match the Rust field names below.
#[derive(Debug, Clone, Serialize)]
pub struct ReconciliationEvent {
    /// The story being reconciled, or empty string for the overall "done" event.
    pub story_id: String,
    /// Coarse status: "checking", "gates_running", "advanced", "skipped", "failed", "done"
    pub status: String,
    /// Human-readable details.
    pub message: String,
}
|
||||||
|
|
||||||
|
/// Events streamed from a running agent to SSE clients.
///
/// Serialized as internally-tagged JSON: a `"type"` field carries the
/// snake_case variant name (e.g. `"agent_json"`), so variant and field names
/// are part of the wire format consumed by the frontend.
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum AgentEvent {
    /// Agent status changed.
    Status {
        story_id: String,
        agent_name: String,
        status: String,
    },
    /// Raw text output from the agent process.
    Output {
        story_id: String,
        agent_name: String,
        text: String,
    },
    /// Agent produced a JSON event from `--output-format stream-json`.
    AgentJson {
        story_id: String,
        agent_name: String,
        // Raw, unparsed JSON event as emitted by the CLI.
        data: serde_json::Value,
    },
    /// Agent finished.
    Done {
        story_id: String,
        agent_name: String,
        // Claude Code session id, when the stream reported one.
        session_id: Option<String>,
    },
    /// Agent errored.
    Error {
        story_id: String,
        agent_name: String,
        message: String,
    },
    /// Thinking tokens from an extended-thinking block.
    Thinking {
        story_id: String,
        agent_name: String,
        text: String,
    },
}
|
||||||
|
|
||||||
|
/// Lifecycle state of a supervised agent.
///
/// Serialized in snake_case, which matches the lowercase strings produced by
/// the `Display` impl below.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(rename_all = "snake_case")]
pub enum AgentStatus {
    Pending,
    Running,
    Completed,
    Failed,
}
|
||||||
|
|
||||||
|
impl std::fmt::Display for AgentStatus {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::Pending => write!(f, "pending"),
|
||||||
|
Self::Running => write!(f, "running"),
|
||||||
|
Self::Completed => write!(f, "completed"),
|
||||||
|
Self::Failed => write!(f, "failed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pipeline stages for automatic story advancement.
///
/// Derived from agent configuration or name via `agent_config_stage` /
/// `pipeline_stage` in this module.
#[derive(Debug, Clone, PartialEq)]
pub enum PipelineStage {
    /// Coding agents (coder-1, coder-2, etc.)
    Coder,
    /// QA review agent
    Qa,
    /// Mergemaster agent
    Mergemaster,
    /// Supervisors and unknown agents — no automatic advancement.
    Other,
}
|
||||||
|
|
||||||
|
/// Determine the pipeline stage from an agent name.
|
||||||
|
pub fn pipeline_stage(agent_name: &str) -> PipelineStage {
|
||||||
|
match agent_name {
|
||||||
|
"qa" => PipelineStage::Qa,
|
||||||
|
"mergemaster" => PipelineStage::Mergemaster,
|
||||||
|
name if name.starts_with("coder") => PipelineStage::Coder,
|
||||||
|
_ => PipelineStage::Other,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Determine the pipeline stage for a configured agent.
|
||||||
|
///
|
||||||
|
/// Prefers the explicit `stage` config field (added in Bug 150) over the
|
||||||
|
/// legacy name-based heuristic so that agents with non-standard names
|
||||||
|
/// (e.g. `qa-2`, `coder-opus`) are assigned to the correct stage.
|
||||||
|
pub(crate) fn agent_config_stage(cfg: &AgentConfig) -> PipelineStage {
|
||||||
|
match cfg.stage.as_deref() {
|
||||||
|
Some("coder") => PipelineStage::Coder,
|
||||||
|
Some("qa") => PipelineStage::Qa,
|
||||||
|
Some("mergemaster") => PipelineStage::Mergemaster,
|
||||||
|
Some(_) => PipelineStage::Other,
|
||||||
|
None => pipeline_stage(&cfg.name),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Completion report produced when acceptance gates are run.
///
/// Created automatically by the server when an agent process exits normally,
/// or via the internal `report_completion` method.
#[derive(Debug, Serialize, Clone)]
pub struct CompletionReport {
    // Human-readable summary of the completed run.
    pub summary: String,
    // Whether all acceptance gates passed.
    pub gates_passed: bool,
    // Output captured from the gate run — presumably combined stdout/stderr
    // of the gate commands; confirm against the gates module.
    pub gate_output: String,
}
|
||||||
|
|
||||||
|
/// Token usage from a Claude Code session's `result` event.
///
/// Field names mirror the keys parsed by [`TokenUsage::from_result_event`]:
/// the first four come from the event's `usage` object, the cost from the
/// top-level `total_cost_usd`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct TokenUsage {
    // `usage.input_tokens`
    pub input_tokens: u64,
    // `usage.output_tokens`
    pub output_tokens: u64,
    // `usage.cache_creation_input_tokens`
    pub cache_creation_input_tokens: u64,
    // `usage.cache_read_input_tokens`
    pub cache_read_input_tokens: u64,
    // Top-level `total_cost_usd` (not nested under `usage`).
    pub total_cost_usd: f64,
}
|
||||||
|
|
||||||
|
impl TokenUsage {
|
||||||
|
/// Parse token usage from a Claude Code `result` JSON event.
|
||||||
|
pub fn from_result_event(json: &serde_json::Value) -> Option<Self> {
|
||||||
|
let usage = json.get("usage")?;
|
||||||
|
Some(Self {
|
||||||
|
input_tokens: usage
|
||||||
|
.get("input_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
output_tokens: usage
|
||||||
|
.get("output_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
cache_creation_input_tokens: usage
|
||||||
|
.get("cache_creation_input_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
cache_read_input_tokens: usage
|
||||||
|
.get("cache_read_input_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
total_cost_usd: json
|
||||||
|
.get("total_cost_usd")
|
||||||
|
.and_then(|v| v.as_f64())
|
||||||
|
.unwrap_or(0.0),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Snapshot of a supervised agent session, serialized for API consumers.
#[derive(Debug, Serialize, Clone)]
pub struct AgentInfo {
    pub story_id: String,
    pub agent_name: String,
    pub status: AgentStatus,
    // Claude Code session id captured from the stream, if one was reported.
    pub session_id: Option<String>,
    // NOTE(review): presumably the git worktree the agent runs in and the
    // branch it was based on — confirm how the pool populates these.
    pub worktree_path: Option<String>,
    pub base_branch: Option<String>,
    // Gate results recorded when the agent finished, when gates have run.
    pub completion: Option<CompletionReport>,
    /// UUID identifying the persistent log file for this session.
    pub log_session_id: Option<String>,
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // ── pipeline_stage tests ──────────────────────────────────────────────────
    // These pin the legacy name-based heuristic used when no explicit stage
    // is configured.

    // Any "coder*" name maps to the coding stage.
    #[test]
    fn pipeline_stage_detects_coders() {
        assert_eq!(pipeline_stage("coder-1"), PipelineStage::Coder);
        assert_eq!(pipeline_stage("coder-2"), PipelineStage::Coder);
        assert_eq!(pipeline_stage("coder-3"), PipelineStage::Coder);
    }

    // "qa" is an exact match.
    #[test]
    fn pipeline_stage_detects_qa() {
        assert_eq!(pipeline_stage("qa"), PipelineStage::Qa);
    }

    // "mergemaster" is an exact match.
    #[test]
    fn pipeline_stage_detects_mergemaster() {
        assert_eq!(pipeline_stage("mergemaster"), PipelineStage::Mergemaster);
    }

    // Unknown names get no automatic advancement.
    #[test]
    fn pipeline_stage_supervisor_is_other() {
        assert_eq!(pipeline_stage("supervisor"), PipelineStage::Other);
        assert_eq!(pipeline_stage("default"), PipelineStage::Other);
        assert_eq!(pipeline_stage("unknown"), PipelineStage::Other);
    }
}
|
||||||
1813
server/src/agents/pool/auto_assign.rs
Normal file
1813
server/src/agents/pool/auto_assign.rs
Normal file
File diff suppressed because it is too large
Load Diff
2232
server/src/agents/pool/mod.rs
Normal file
2232
server/src/agents/pool/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
1771
server/src/agents/pool/pipeline.rs
Normal file
1771
server/src/agents/pool/pipeline.rs
Normal file
File diff suppressed because it is too large
Load Diff
591
server/src/agents/pty.rs
Normal file
591
server/src/agents/pty.rs
Normal file
@@ -0,0 +1,591 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::io::{BufRead, BufReader};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use portable_pty::{ChildKiller, CommandBuilder, PtySize, native_pty_system};
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
|
use super::{AgentEvent, TokenUsage};
|
||||||
|
use crate::agent_log::AgentLogWriter;
|
||||||
|
use crate::io::watcher::WatcherEvent;
|
||||||
|
use crate::slog;
|
||||||
|
use crate::slog_warn;
|
||||||
|
|
||||||
|
/// Result from a PTY agent session, containing the session ID and token usage.
pub(in crate::agents) struct PtyResult {
    // Session id captured from "system" events in the JSON stream, if any.
    pub session_id: Option<String>,
    // Usage parsed from the final "result" event, if one arrived.
    pub token_usage: Option<TokenUsage>,
}
|
||||||
|
|
||||||
|
/// Registry key for a (story, agent) pair: `"<story_id>:<agent_name>"`.
fn composite_key(story_id: &str, agent_name: &str) -> String {
    let mut key = String::with_capacity(story_id.len() + 1 + agent_name.len());
    key.push_str(story_id);
    key.push(':');
    key.push_str(agent_name);
    key
}
|
||||||
|
|
||||||
|
/// Guard that removes this session's killer from the shared registry on drop,
/// so the map never retains a killer for a PTY session that has finished.
struct ChildKillerGuard {
    // Shared registry of child killers, keyed by `composite_key(story, agent)`.
    killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    // The key this guard deregisters.
    key: String,
}
|
||||||
|
|
||||||
|
impl Drop for ChildKillerGuard {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
if let Ok(mut killers) = self.killers.lock() {
|
||||||
|
killers.remove(&self.key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawn claude agent in a PTY and stream events through the broadcast channel.
///
/// Clones all borrowed inputs into owned values, then runs the blocking PTY
/// loop (`run_agent_pty_blocking`) on `spawn_blocking` so the async runtime
/// is not stalled by synchronous PTY reads. A panic inside the blocking task
/// is surfaced to the caller as an `Err`.
#[allow(clippy::too_many_arguments)]
pub(in crate::agents) async fn run_agent_pty_streaming(
    story_id: &str,
    agent_name: &str,
    command: &str,
    args: &[String],
    prompt: &str,
    cwd: &str,
    tx: &broadcast::Sender<AgentEvent>,
    event_log: &Arc<Mutex<Vec<AgentEvent>>>,
    log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
    inactivity_timeout_secs: u64,
    child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    watcher_tx: broadcast::Sender<WatcherEvent>,
) -> Result<PtyResult, String> {
    // Owned copies so the 'static closure below can capture them by move.
    let sid = story_id.to_string();
    let aname = agent_name.to_string();
    let cmd = command.to_string();
    let args = args.to_vec();
    let prompt = prompt.to_string();
    let cwd = cwd.to_string();
    let tx = tx.clone();
    let event_log = event_log.clone();

    tokio::task::spawn_blocking(move || {
        run_agent_pty_blocking(
            &sid,
            &aname,
            &cmd,
            &args,
            &prompt,
            &cwd,
            &tx,
            &event_log,
            // Option<Arc<Mutex<_>>> → Option<&Mutex<_>> for the blocking fn.
            log_writer.as_deref(),
            inactivity_timeout_secs,
            &child_killers,
            &watcher_tx,
        )
    })
    .await
    .map_err(|e| format!("Agent task panicked: {e}"))?
}
|
||||||
|
|
||||||
|
/// Dispatch a `stream_event` from Claude Code's `--include-partial-messages` output.
|
||||||
|
///
|
||||||
|
/// Extracts `thinking_delta` and `text_delta` from `content_block_delta` events
|
||||||
|
/// and routes them as `AgentEvent::Thinking` and `AgentEvent::Output` respectively.
|
||||||
|
/// This ensures thinking traces flow through the dedicated `ThinkingBlock` UI
|
||||||
|
/// component rather than appearing as unbounded regular output.
|
||||||
|
fn handle_agent_stream_event(
|
||||||
|
event: &serde_json::Value,
|
||||||
|
story_id: &str,
|
||||||
|
agent_name: &str,
|
||||||
|
tx: &broadcast::Sender<AgentEvent>,
|
||||||
|
event_log: &Mutex<Vec<AgentEvent>>,
|
||||||
|
log_writer: Option<&Mutex<AgentLogWriter>>,
|
||||||
|
) {
|
||||||
|
let event_type = event.get("type").and_then(|t| t.as_str()).unwrap_or("");
|
||||||
|
|
||||||
|
if event_type == "content_block_delta"
|
||||||
|
&& let Some(delta) = event.get("delta")
|
||||||
|
{
|
||||||
|
let delta_type = delta.get("type").and_then(|t| t.as_str()).unwrap_or("");
|
||||||
|
match delta_type {
|
||||||
|
"thinking_delta" => {
|
||||||
|
if let Some(thinking) = delta.get("thinking").and_then(|t| t.as_str()) {
|
||||||
|
emit_event(
|
||||||
|
AgentEvent::Thinking {
|
||||||
|
story_id: story_id.to_string(),
|
||||||
|
agent_name: agent_name.to_string(),
|
||||||
|
text: thinking.to_string(),
|
||||||
|
},
|
||||||
|
tx,
|
||||||
|
event_log,
|
||||||
|
log_writer,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"text_delta" => {
|
||||||
|
if let Some(text) = delta.get("text").and_then(|t| t.as_str()) {
|
||||||
|
emit_event(
|
||||||
|
AgentEvent::Output {
|
||||||
|
story_id: story_id.to_string(),
|
||||||
|
agent_name: agent_name.to_string(),
|
||||||
|
text: text.to_string(),
|
||||||
|
},
|
||||||
|
tx,
|
||||||
|
event_log,
|
||||||
|
log_writer,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Helper to send an event to broadcast, event log, and optional persistent log file.
|
||||||
|
pub(super) fn emit_event(
|
||||||
|
event: AgentEvent,
|
||||||
|
tx: &broadcast::Sender<AgentEvent>,
|
||||||
|
event_log: &Mutex<Vec<AgentEvent>>,
|
||||||
|
log_writer: Option<&Mutex<AgentLogWriter>>,
|
||||||
|
) {
|
||||||
|
if let Ok(mut log) = event_log.lock() {
|
||||||
|
log.push(event.clone());
|
||||||
|
}
|
||||||
|
if let Some(writer) = log_writer
|
||||||
|
&& let Ok(mut w) = writer.lock()
|
||||||
|
&& let Err(e) = w.write_event(&event)
|
||||||
|
{
|
||||||
|
eprintln!("[agent_log] Failed to write event to log file: {e}");
|
||||||
|
}
|
||||||
|
let _ = tx.send(event);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Blocking PTY driver for one agent session.
///
/// Spawns `<command> -p <prompt> [args…] --output-format stream-json --verbose
/// --include-partial-messages --permission-mode bypassPermissions` in a PTY,
/// reads its output line by line, parses each line as a JSON event, and
/// forwards everything through `emit_event`. Returns the captured session id
/// and token usage. An inactivity timeout of 0 disables the deadline;
/// otherwise the child is killed and an `Err` returned when no line arrives
/// within the window.
#[allow(clippy::too_many_arguments)]
fn run_agent_pty_blocking(
    story_id: &str,
    agent_name: &str,
    command: &str,
    args: &[String],
    prompt: &str,
    cwd: &str,
    tx: &broadcast::Sender<AgentEvent>,
    event_log: &Mutex<Vec<AgentEvent>>,
    log_writer: Option<&Mutex<AgentLogWriter>>,
    inactivity_timeout_secs: u64,
    child_killers: &Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    watcher_tx: &broadcast::Sender<WatcherEvent>,
) -> Result<PtyResult, String> {
    let pty_system = native_pty_system();

    // Fixed terminal geometry; NO_COLOR below keeps output parseable.
    let pair = pty_system
        .openpty(PtySize {
            rows: 50,
            cols: 200,
            pixel_width: 0,
            pixel_height: 0,
        })
        .map_err(|e| format!("Failed to open PTY: {e}"))?;

    let mut cmd = CommandBuilder::new(command);

    // -p <prompt> must come first
    cmd.arg("-p");
    cmd.arg(prompt);

    // Add configured args (e.g., --directory /path/to/worktree, --model, etc.)
    for arg in args {
        cmd.arg(arg);
    }

    cmd.arg("--output-format");
    cmd.arg("stream-json");
    cmd.arg("--verbose");
    // Enable partial streaming so we receive thinking_delta and text_delta
    // events in real-time, rather than only complete assistant events.
    // Without this, thinking traces may not appear in the structured output
    // and instead leak as unstructured PTY text.
    cmd.arg("--include-partial-messages");

    // Supervised agents don't need interactive permission prompts
    cmd.arg("--permission-mode");
    cmd.arg("bypassPermissions");

    cmd.cwd(cwd);
    cmd.env("NO_COLOR", "1");

    // Allow spawning Claude Code from within a Claude Code session
    cmd.env_remove("CLAUDECODE");
    cmd.env_remove("CLAUDE_CODE_ENTRYPOINT");

    slog!("[agent:{story_id}:{agent_name}] Spawning {command} in {cwd} with args: {args:?}");

    let mut child = pair
        .slave
        .spawn_command(cmd)
        .map_err(|e| format!("Failed to spawn agent for {story_id}:{agent_name}: {e}"))?;

    // Register the child killer so that kill_all_children() / stop_agent() can
    // terminate this process on server shutdown, even if the blocking thread
    // cannot be interrupted. The ChildKillerGuard deregisters on function exit.
    let killer_key = composite_key(story_id, agent_name);
    {
        let killer = child.clone_killer();
        if let Ok(mut killers) = child_killers.lock() {
            killers.insert(killer_key.clone(), killer);
        }
    }
    let _killer_guard = ChildKillerGuard {
        killers: Arc::clone(child_killers),
        key: killer_key,
    };

    // Close our copy of the slave side so EOF propagates when the child exits.
    drop(pair.slave);

    let reader = pair
        .master
        .try_clone_reader()
        .map_err(|e| format!("Failed to clone PTY reader: {e}"))?;

    drop(pair.master);

    // Spawn a reader thread to collect PTY output lines.
    // We use a channel so the main thread can apply an inactivity deadline
    // via recv_timeout: if no output arrives within the configured window
    // the process is killed and the agent is marked Failed.
    let (line_tx, line_rx) = std::sync::mpsc::channel::<std::io::Result<String>>();
    std::thread::spawn(move || {
        let buf_reader = BufReader::new(reader);
        for line in buf_reader.lines() {
            if line_tx.send(line).is_err() {
                break;
            }
        }
    });

    let timeout_dur = if inactivity_timeout_secs > 0 {
        Some(std::time::Duration::from_secs(inactivity_timeout_secs))
    } else {
        None
    };

    let mut session_id: Option<String> = None;
    let mut token_usage: Option<TokenUsage> = None;

    loop {
        let recv_result = match timeout_dur {
            Some(dur) => line_rx.recv_timeout(dur),
            // No timeout configured: map a plain recv error onto the same
            // error type so one match below handles both paths.
            None => line_rx
                .recv()
                .map_err(|_| std::sync::mpsc::RecvTimeoutError::Disconnected),
        };

        let line = match recv_result {
            Ok(Ok(l)) => l,
            Ok(Err(_)) => {
                // IO error reading from PTY — treat as EOF.
                break;
            }
            Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => {
                // Reader thread exited (EOF from PTY).
                break;
            }
            Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                slog_warn!(
                    "[agent:{story_id}:{agent_name}] Inactivity timeout after \
                     {inactivity_timeout_secs}s with no output. Killing process."
                );
                let _ = child.kill();
                let _ = child.wait();
                return Err(format!(
                    "Agent inactivity timeout: no output received for {inactivity_timeout_secs}s"
                ));
            }
        };

        let trimmed = line.trim();
        if trimmed.is_empty() {
            continue;
        }

        // Try to parse as JSON
        let json: serde_json::Value = match serde_json::from_str(trimmed) {
            Ok(j) => j,
            Err(_) => {
                // Non-JSON output (terminal escapes etc.) — send as raw output
                emit_event(
                    AgentEvent::Output {
                        story_id: story_id.to_string(),
                        agent_name: agent_name.to_string(),
                        text: trimmed.to_string(),
                    },
                    tx,
                    event_log,
                    log_writer,
                );
                continue;
            }
        };

        let event_type = json.get("type").and_then(|t| t.as_str()).unwrap_or("");

        match event_type {
            "system" => {
                session_id = json
                    .get("session_id")
                    .and_then(|s| s.as_str())
                    .map(|s| s.to_string());
            }
            // With --include-partial-messages, thinking and text arrive
            // incrementally via stream_event → content_block_delta. Handle
            // them here for real-time streaming to the frontend.
            "stream_event" => {
                if let Some(event) = json.get("event") {
                    handle_agent_stream_event(
                        event,
                        story_id,
                        agent_name,
                        tx,
                        event_log,
                        log_writer,
                    );
                }
            }
            // Complete assistant events are skipped for content extraction
            // because thinking and text already arrived via stream_event.
            // The raw JSON is still forwarded as AgentJson below.
            "assistant" | "user" => {}
            "rate_limit_event" => {
                slog!(
                    "[agent:{story_id}:{agent_name}] API rate limit warning received"
                );
                let _ = watcher_tx.send(WatcherEvent::RateLimitWarning {
                    story_id: story_id.to_string(),
                    agent_name: agent_name.to_string(),
                });
            }
            "result" => {
                // Extract token usage from the result event.
                if let Some(usage) = TokenUsage::from_result_event(&json) {
                    slog!(
                        "[agent:{story_id}:{agent_name}] Token usage: in={} out={} cache_create={} cache_read={} cost=${:.4}",
                        usage.input_tokens,
                        usage.output_tokens,
                        usage.cache_creation_input_tokens,
                        usage.cache_read_input_tokens,
                        usage.total_cost_usd,
                    );
                    token_usage = Some(usage);
                }
            }
            _ => {}
        }

        // Forward all JSON events
        emit_event(
            AgentEvent::AgentJson {
                story_id: story_id.to_string(),
                agent_name: agent_name.to_string(),
                data: json,
            },
            tx,
            event_log,
            log_writer,
        );
    }

    // Best-effort teardown: the child is normally already dead at EOF.
    let _ = child.kill();
    let _ = child.wait();

    slog!(
        "[agent:{story_id}:{agent_name}] Done. Session: {:?}",
        session_id
    );

    Ok(PtyResult {
        session_id,
        token_usage,
    })
}
|
||||||
|
|
||||||
|
// Tests for the PTY event plumbing: rate-limit forwarding, persistent log
// writing, and stream_event delta routing.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::agents::AgentEvent;
    use crate::io::watcher::WatcherEvent;
    use std::collections::HashMap;
    use std::sync::Arc;

    // ── AC1: pty detects rate_limit_event and emits RateLimitWarning ─────────

    /// Verify that when a `rate_limit_event` JSON line appears in PTY output,
    /// `run_agent_pty_streaming` sends a `WatcherEvent::RateLimitWarning` with
    /// the correct story_id and agent_name.
    ///
    /// The command invoked is: `sh -p -- <script>` where `--` terminates
    /// option parsing so the script path is treated as the operand.
    #[tokio::test]
    async fn rate_limit_event_json_sends_watcher_warning() {
        use std::os::unix::fs::PermissionsExt;

        let tmp = tempfile::tempdir().unwrap();
        let script = tmp.path().join("emit_rate_limit.sh");
        std::fs::write(
            &script,
            "#!/bin/sh\nprintf '%s\\n' '{\"type\":\"rate_limit_event\",\"rate_limit_info\":{\"status\":\"allowed_warning\"}}'\n",
        )
        .unwrap();
        std::fs::set_permissions(&script, std::fs::Permissions::from_mode(0o755)).unwrap();

        let (tx, _rx) = broadcast::channel::<AgentEvent>(64);
        let (watcher_tx, mut watcher_rx) = broadcast::channel::<WatcherEvent>(16);
        let event_log = Arc::new(Mutex::new(Vec::new()));
        let child_killers = Arc::new(Mutex::new(HashMap::new()));

        // sh -p "--" <script>: -p = privileged mode, "--" = end options,
        // then the script path is the file operand.
        let result = run_agent_pty_streaming(
            "365_story_test",
            "coder-1",
            "sh",
            &[script.to_string_lossy().to_string()],
            "--",
            "/tmp",
            &tx,
            &event_log,
            None,
            0,
            child_killers,
            watcher_tx,
        )
        .await;

        assert!(result.is_ok(), "PTY run should succeed: {:?}", result.err());

        let evt = watcher_rx
            .try_recv()
            .expect("Expected a RateLimitWarning to be sent on watcher_tx");
        match evt {
            WatcherEvent::RateLimitWarning {
                story_id,
                agent_name,
            } => {
                assert_eq!(story_id, "365_story_test");
                assert_eq!(agent_name, "coder-1");
            }
            other => panic!("Expected RateLimitWarning, got: {other:?}"),
        }
    }

    // emit_event must mirror every event into both the in-memory log and the
    // persistent on-disk log.
    #[test]
    fn test_emit_event_writes_to_log_writer() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();

        let log_writer =
            AgentLogWriter::new(root, "42_story_foo", "coder-1", "sess-emit").unwrap();
        let log_mutex = Mutex::new(log_writer);

        let (tx, _rx) = broadcast::channel::<AgentEvent>(64);
        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

        let event = AgentEvent::Status {
            story_id: "42_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            status: "running".to_string(),
        };

        emit_event(event, &tx, &event_log, Some(&log_mutex));

        // Verify event was added to in-memory log
        let mem_events = event_log.lock().unwrap();
        assert_eq!(mem_events.len(), 1);
        drop(mem_events);

        // Verify event was written to the log file
        let log_path =
            crate::agent_log::log_file_path(root, "42_story_foo", "coder-1", "sess-emit");
        let entries = crate::agent_log::read_log(&log_path).unwrap();
        assert_eq!(entries.len(), 1);
        assert_eq!(entries[0].event["type"], "status");
        assert_eq!(entries[0].event["status"], "running");
    }

    // ── bug 167: handle_agent_stream_event routes thinking/text correctly ───

    #[test]
    fn stream_event_thinking_delta_emits_thinking_event() {
        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

        let event = serde_json::json!({
            "type": "content_block_delta",
            "delta": {"type": "thinking_delta", "thinking": "Let me analyze this..."}
        });

        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);

        let received = rx.try_recv().unwrap();
        match received {
            AgentEvent::Thinking {
                story_id,
                agent_name,
                text,
            } => {
                assert_eq!(story_id, "s1");
                assert_eq!(agent_name, "coder-1");
                assert_eq!(text, "Let me analyze this...");
            }
            other => panic!("Expected Thinking event, got: {other:?}"),
        }
    }

    #[test]
    fn stream_event_text_delta_emits_output_event() {
        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

        let event = serde_json::json!({
            "type": "content_block_delta",
            "delta": {"type": "text_delta", "text": "Here is the result."}
        });

        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);

        let received = rx.try_recv().unwrap();
        match received {
            AgentEvent::Output {
                story_id,
                agent_name,
                text,
            } => {
                assert_eq!(story_id, "s1");
                assert_eq!(agent_name, "coder-1");
                assert_eq!(text, "Here is the result.");
            }
            other => panic!("Expected Output event, got: {other:?}"),
        }
    }

    #[test]
    fn stream_event_input_json_delta_ignored() {
        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

        let event = serde_json::json!({
            "type": "content_block_delta",
            "delta": {"type": "input_json_delta", "partial_json": "{\"file\":"}
        });

        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);

        // No event should be emitted for tool argument deltas
        assert!(rx.try_recv().is_err());
    }

    #[test]
    fn stream_event_non_delta_type_ignored() {
        let (tx, mut rx) = broadcast::channel::<AgentEvent>(64);
        let event_log: Mutex<Vec<AgentEvent>> = Mutex::new(Vec::new());

        let event = serde_json::json!({
            "type": "message_start",
            "message": {"role": "assistant"}
        });

        handle_agent_stream_event(&event, "s1", "coder-1", &tx, &event_log, None);

        assert!(rx.try_recv().is_err());
    }
}
|
||||||
73
server/src/agents/runtime/claude_code.rs
Normal file
73
server/src/agents/runtime/claude_code.rs
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use portable_pty::ChildKiller;
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
|
use crate::agent_log::AgentLogWriter;
|
||||||
|
use crate::io::watcher::WatcherEvent;
|
||||||
|
|
||||||
|
use super::{AgentEvent, AgentRuntime, RuntimeContext, RuntimeResult, RuntimeStatus};
|
||||||
|
|
||||||
|
/// Agent runtime that spawns the `claude` CLI in a PTY and streams JSON events.
///
/// This is the default runtime (`runtime = "claude-code"` in project.toml).
/// It wraps the existing PTY-based execution logic, preserving all streaming,
/// token tracking, and inactivity timeout behaviour.
pub struct ClaudeCodeRuntime {
    // Shared registry of per-session child killers ("story:agent" keys) so
    // the pool can terminate agents externally.
    child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    // Channel for watcher notifications (e.g. rate-limit warnings raised
    // during PTY streaming).
    watcher_tx: broadcast::Sender<WatcherEvent>,
}
|
||||||
|
|
||||||
|
impl ClaudeCodeRuntime {
|
||||||
|
pub fn new(
|
||||||
|
child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
|
||||||
|
watcher_tx: broadcast::Sender<WatcherEvent>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
child_killers,
|
||||||
|
watcher_tx,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AgentRuntime for ClaudeCodeRuntime {
|
||||||
|
async fn start(
|
||||||
|
&self,
|
||||||
|
ctx: RuntimeContext,
|
||||||
|
tx: broadcast::Sender<AgentEvent>,
|
||||||
|
event_log: Arc<Mutex<Vec<AgentEvent>>>,
|
||||||
|
log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
|
||||||
|
) -> Result<RuntimeResult, String> {
|
||||||
|
let pty_result = super::super::pty::run_agent_pty_streaming(
|
||||||
|
&ctx.story_id,
|
||||||
|
&ctx.agent_name,
|
||||||
|
&ctx.command,
|
||||||
|
&ctx.args,
|
||||||
|
&ctx.prompt,
|
||||||
|
&ctx.cwd,
|
||||||
|
&tx,
|
||||||
|
&event_log,
|
||||||
|
log_writer,
|
||||||
|
ctx.inactivity_timeout_secs,
|
||||||
|
Arc::clone(&self.child_killers),
|
||||||
|
self.watcher_tx.clone(),
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(RuntimeResult {
|
||||||
|
session_id: pty_result.session_id,
|
||||||
|
token_usage: pty_result.token_usage,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn stop(&self) {
|
||||||
|
// Stopping is handled externally by the pool via kill_child_for_key().
|
||||||
|
// The ChildKillerGuard in pty.rs deregisters automatically on process exit.
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_status(&self) -> RuntimeStatus {
|
||||||
|
// Lifecycle status is tracked by the pool; the runtime itself is stateless.
|
||||||
|
RuntimeStatus::Idle
|
||||||
|
}
|
||||||
|
}
|
||||||
809
server/src/agents/runtime/gemini.rs
Normal file
809
server/src/agents/runtime/gemini.rs
Normal file
@@ -0,0 +1,809 @@
|
|||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
|
use crate::agent_log::AgentLogWriter;
|
||||||
|
use crate::slog;
|
||||||
|
|
||||||
|
use super::super::{AgentEvent, TokenUsage};
|
||||||
|
use super::{AgentRuntime, RuntimeContext, RuntimeResult, RuntimeStatus};
|
||||||
|
|
||||||
|
// ── Public runtime struct ────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Agent runtime that drives a Gemini model through the Google AI
|
||||||
|
/// `generateContent` REST API.
|
||||||
|
///
|
||||||
|
/// The runtime:
|
||||||
|
/// 1. Fetches MCP tool definitions from storkit's MCP server.
|
||||||
|
/// 2. Converts them to Gemini function-calling format.
|
||||||
|
/// 3. Sends the agent prompt + tools to the Gemini API.
|
||||||
|
/// 4. Executes any requested function calls via MCP `tools/call`.
|
||||||
|
/// 5. Loops until the model produces a text-only response or an error.
|
||||||
|
/// 6. Tracks token usage from the API response metadata.
|
||||||
|
pub struct GeminiRuntime {
    /// Whether a stop has been requested.
    /// Checked cooperatively in `start`: between conversation turns and
    /// between individual tool calls; set by `stop`.
    cancelled: Arc<AtomicBool>,
}
|
||||||
|
|
||||||
|
impl GeminiRuntime {
    /// Create a runtime with no stop requested yet.
    pub fn new() -> Self {
        Self {
            cancelled: Arc::new(AtomicBool::new(false)),
        }
    }
}
|
||||||
|
|
||||||
|
impl AgentRuntime for GeminiRuntime {
    /// Drive a Gemini model to completion via the `generateContent` REST API.
    ///
    /// Requires the `GOOGLE_AI_API_KEY` environment variable. The model name
    /// is taken from `ctx.command` when it starts with "gemini", otherwise
    /// from a `--model` flag in `ctx.args`, defaulting to "gemini-2.5-pro".
    /// Tool definitions are fetched from storkit's MCP server on
    /// `ctx.mcp_port`, converted to Gemini function declarations, and each
    /// model-requested function call is executed via MCP `tools/call`. The
    /// loop ends when the model produces a text-only response, the turn
    /// limit is hit, cancellation is requested, or the API errors.
    ///
    /// Returns accumulated prompt/candidate token usage; `session_id` is
    /// always `None` for this runtime.
    async fn start(
        &self,
        ctx: RuntimeContext,
        tx: broadcast::Sender<AgentEvent>,
        event_log: Arc<Mutex<Vec<AgentEvent>>>,
        log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
    ) -> Result<RuntimeResult, String> {
        let api_key = std::env::var("GOOGLE_AI_API_KEY").map_err(|_| {
            "GOOGLE_AI_API_KEY environment variable is not set. \
             Set it to your Google AI API key to use the Gemini runtime."
                .to_string()
        })?;

        let model = if ctx.command.starts_with("gemini") {
            // The pool puts the model into `command` for non-CLI runtimes,
            // but also check args for a --model flag.
            ctx.command.clone()
        } else {
            // Fall back to args: look for --model <value>
            ctx.args
                .iter()
                .position(|a| a == "--model")
                .and_then(|i| ctx.args.get(i + 1))
                .cloned()
                .unwrap_or_else(|| "gemini-2.5-pro".to_string())
        };

        let mcp_port = ctx.mcp_port;
        let mcp_base = format!("http://localhost:{mcp_port}/mcp");

        let client = Client::new();
        let cancelled = Arc::clone(&self.cancelled);

        // Step 1: Fetch MCP tool definitions and convert to Gemini format.
        let gemini_tools = fetch_and_convert_mcp_tools(&client, &mcp_base).await?;

        // Step 2: Build the initial conversation contents.
        let system_instruction = build_system_instruction(&ctx);
        let mut contents: Vec<Value> = vec![json!({
            "role": "user",
            "parts": [{ "text": ctx.prompt }]
        })];

        let mut total_usage = TokenUsage {
            input_tokens: 0,
            output_tokens: 0,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
            total_cost_usd: 0.0,
        };

        // Shared emitter: fan out to the broadcast channel, in-memory log,
        // and (when present) the on-disk log writer.
        let emit = |event: AgentEvent| {
            super::super::pty::emit_event(
                event,
                &tx,
                &event_log,
                log_writer.as_ref().map(|w| w.as_ref()),
            );
        };

        emit(AgentEvent::Status {
            story_id: ctx.story_id.clone(),
            agent_name: ctx.agent_name.clone(),
            status: "running".to_string(),
        });

        // Step 3: Conversation loop.
        let mut turn = 0u32;
        let max_turns = 200; // Safety limit

        loop {
            // Cooperative cancellation check at the top of each turn.
            // Cancellation is reported as an Error event but still returns Ok
            // so the accumulated usage is preserved.
            if cancelled.load(Ordering::Relaxed) {
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: "Agent was stopped by user".to_string(),
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            turn += 1;
            if turn > max_turns {
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: format!("Exceeded maximum turns ({max_turns})"),
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            slog!("[gemini] Turn {turn} for {}:{}", ctx.story_id, ctx.agent_name);

            let request_body = build_generate_content_request(
                &system_instruction,
                &contents,
                &gemini_tools,
            );

            let url = format!(
                "https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={api_key}"
            );

            let response = client
                .post(&url)
                .json(&request_body)
                .send()
                .await
                .map_err(|e| format!("Gemini API request failed: {e}"))?;

            // Capture the HTTP status before consuming the body, so error
            // responses can still be parsed for their message.
            let status = response.status();
            let body: Value = response
                .json()
                .await
                .map_err(|e| format!("Failed to parse Gemini API response: {e}"))?;

            if !status.is_success() {
                let error_msg = body["error"]["message"]
                    .as_str()
                    .unwrap_or("Unknown API error");
                let err = format!("Gemini API error ({status}): {error_msg}");
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: err.clone(),
                });
                return Err(err);
            }

            // Accumulate token usage.
            if let Some(usage) = parse_usage_metadata(&body) {
                total_usage.input_tokens += usage.input_tokens;
                total_usage.output_tokens += usage.output_tokens;
            }

            // Extract the candidate response.
            let candidate = body["candidates"]
                .as_array()
                .and_then(|c| c.first())
                .ok_or_else(|| "No candidates in Gemini response".to_string())?;

            let parts = candidate["content"]["parts"]
                .as_array()
                .ok_or_else(|| "No parts in Gemini response candidate".to_string())?;

            // Check finish reason.
            let finish_reason = candidate["finishReason"].as_str().unwrap_or("");

            // Separate text parts and function call parts.
            let mut text_parts: Vec<String> = Vec::new();
            let mut function_calls: Vec<GeminiFunctionCall> = Vec::new();

            for part in parts {
                if let Some(text) = part["text"].as_str() {
                    text_parts.push(text.to_string());
                }
                if let Some(fc) = part.get("functionCall")
                    && let (Some(name), Some(args)) =
                        (fc["name"].as_str(), fc.get("args"))
                {
                    function_calls.push(GeminiFunctionCall {
                        name: name.to_string(),
                        args: args.clone(),
                    });
                }
            }

            // Emit any text output.
            for text in &text_parts {
                if !text.is_empty() {
                    emit(AgentEvent::Output {
                        story_id: ctx.story_id.clone(),
                        agent_name: ctx.agent_name.clone(),
                        text: text.clone(),
                    });
                }
            }

            // If no function calls, the model is done.
            if function_calls.is_empty() {
                emit(AgentEvent::Done {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    session_id: None,
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            // Add the model's response to the conversation.
            let model_parts: Vec<Value> = parts.to_vec();
            contents.push(json!({
                "role": "model",
                "parts": model_parts
            }));

            // Execute function calls via MCP and build response parts.
            let mut response_parts: Vec<Value> = Vec::new();

            for fc in &function_calls {
                // Stop executing remaining tool calls on cancellation; the
                // top-of-loop check on the next turn performs the exit.
                if cancelled.load(Ordering::Relaxed) {
                    break;
                }

                slog!(
                    "[gemini] Calling MCP tool '{}' for {}:{}",
                    fc.name,
                    ctx.story_id,
                    ctx.agent_name
                );

                emit(AgentEvent::Output {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    text: format!("\n[Tool call: {}]\n", fc.name),
                });

                let tool_result =
                    call_mcp_tool(&client, &mcp_base, &fc.name, &fc.args).await;

                // Tool failures are fed back to the model (as an "error"
                // payload) rather than aborting the session.
                let response_value = match &tool_result {
                    Ok(result) => {
                        emit(AgentEvent::Output {
                            story_id: ctx.story_id.clone(),
                            agent_name: ctx.agent_name.clone(),
                            text: format!(
                                "[Tool result: {} chars]\n",
                                result.len()
                            ),
                        });
                        json!({ "result": result })
                    }
                    Err(e) => {
                        emit(AgentEvent::Output {
                            story_id: ctx.story_id.clone(),
                            agent_name: ctx.agent_name.clone(),
                            text: format!("[Tool error: {e}]\n"),
                        });
                        json!({ "error": e })
                    }
                };

                response_parts.push(json!({
                    "functionResponse": {
                        "name": fc.name,
                        "response": response_value
                    }
                }));
            }

            // Add function responses to the conversation.
            contents.push(json!({
                "role": "user",
                "parts": response_parts
            }));

            // If the model indicated it's done despite having function calls,
            // respect the finish reason.
            // NOTE(review): this branch is unreachable — `function_calls` is
            // guaranteed non-empty here (the empty case returned above), so
            // `function_calls.is_empty()` is always false. Consequently the
            // loop only ever exits via the returns above, and the Done emit
            // after the loop below is dead code. Consider removing both.
            if finish_reason == "STOP" && function_calls.is_empty() {
                break;
            }
        }

        emit(AgentEvent::Done {
            story_id: ctx.story_id.clone(),
            agent_name: ctx.agent_name.clone(),
            session_id: None,
        });

        Ok(RuntimeResult {
            session_id: None,
            token_usage: Some(total_usage),
        })
    }

    /// Request cooperative cancellation; `start` notices it between turns
    /// and between tool calls.
    fn stop(&self) {
        self.cancelled.store(true, Ordering::Relaxed);
    }

    /// `Failed` once a stop has been requested, `Idle` otherwise.
    fn get_status(&self) -> RuntimeStatus {
        if self.cancelled.load(Ordering::Relaxed) {
            RuntimeStatus::Failed
        } else {
            RuntimeStatus::Idle
        }
    }
}
|
||||||
|
|
||||||
|
// ── Internal types ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A single function call requested by the model in a response part.
struct GeminiFunctionCall {
    /// Tool name as declared to the model; forwarded to MCP `tools/call`.
    name: String,
    /// JSON arguments object, passed through to the tool unchanged.
    args: Value,
}
|
||||||
|
|
||||||
|
// ── Gemini API types (for serde) ─────────────────────────────────────
|
||||||
|
|
||||||
|
/// One entry of the `functionDeclarations` array in a Gemini
/// `generateContent` request (function-calling tool definition).
#[derive(Debug, Serialize, Deserialize)]
struct GeminiFunctionDeclaration {
    name: String,
    description: String,
    /// OpenAPI-subset parameter schema; omitted from serialization when the
    /// tool takes no parameters.
    #[serde(skip_serializing_if = "Option::is_none")]
    parameters: Option<Value>,
}
|
||||||
|
|
||||||
|
// ── Helper functions ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Build the system instruction content from the RuntimeContext.
|
||||||
|
fn build_system_instruction(ctx: &RuntimeContext) -> Value {
|
||||||
|
// Use system_prompt from args if provided via --append-system-prompt,
|
||||||
|
// otherwise use a sensible default.
|
||||||
|
let system_text = ctx
|
||||||
|
.args
|
||||||
|
.iter()
|
||||||
|
.position(|a| a == "--append-system-prompt")
|
||||||
|
.and_then(|i| ctx.args.get(i + 1))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
format!(
|
||||||
|
"You are an AI coding agent working on story {}. \
|
||||||
|
You have access to tools via function calling. \
|
||||||
|
Use them to complete the task. \
|
||||||
|
Work in the directory: {}",
|
||||||
|
ctx.story_id, ctx.cwd
|
||||||
|
)
|
||||||
|
});
|
||||||
|
|
||||||
|
json!({
|
||||||
|
"parts": [{ "text": system_text }]
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build the full `generateContent` request body.
|
||||||
|
fn build_generate_content_request(
|
||||||
|
system_instruction: &Value,
|
||||||
|
contents: &[Value],
|
||||||
|
gemini_tools: &[GeminiFunctionDeclaration],
|
||||||
|
) -> Value {
|
||||||
|
let mut body = json!({
|
||||||
|
"system_instruction": system_instruction,
|
||||||
|
"contents": contents,
|
||||||
|
"generationConfig": {
|
||||||
|
"temperature": 0.2,
|
||||||
|
"maxOutputTokens": 65536,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if !gemini_tools.is_empty() {
|
||||||
|
body["tools"] = json!([{
|
||||||
|
"functionDeclarations": gemini_tools
|
||||||
|
}]);
|
||||||
|
}
|
||||||
|
|
||||||
|
body
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch MCP tool definitions from storkit's MCP server and convert
/// them to Gemini function declaration format.
///
/// Sends a JSON-RPC `tools/list` request to `mcp_base`. Tools with a
/// missing or empty name are skipped. Returns an `Err` string on
/// transport failure, unparseable response, or a response without a
/// tools array.
async fn fetch_and_convert_mcp_tools(
    client: &Client,
    mcp_base: &str,
) -> Result<Vec<GeminiFunctionDeclaration>, String> {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/list",
        "params": {}
    });

    let response = client
        .post(mcp_base)
        .json(&request)
        .send()
        .await
        .map_err(|e| format!("Failed to fetch MCP tools: {e}"))?;

    let body: Value = response
        .json()
        .await
        .map_err(|e| format!("Failed to parse MCP tools response: {e}"))?;

    let tools = body["result"]["tools"]
        .as_array()
        .ok_or_else(|| "No tools array in MCP response".to_string())?;

    let mut declarations = Vec::new();

    for tool in tools {
        let name = tool["name"].as_str().unwrap_or("").to_string();
        let description = tool["description"].as_str().unwrap_or("").to_string();

        // A nameless tool cannot be declared to the model; skip it.
        if name.is_empty() {
            continue;
        }

        // Convert MCP inputSchema (JSON Schema) to Gemini parameters
        // (OpenAPI-subset schema). They are structurally compatible for
        // simple object schemas.
        let parameters = convert_mcp_schema_to_gemini(tool.get("inputSchema"));

        declarations.push(GeminiFunctionDeclaration {
            name,
            description,
            parameters,
        });
    }

    slog!("[gemini] Loaded {} MCP tools as function declarations", declarations.len());
    Ok(declarations)
}
|
||||||
|
|
||||||
|
/// Convert an MCP inputSchema (JSON Schema) to a Gemini-compatible
|
||||||
|
/// OpenAPI-subset parameter schema.
|
||||||
|
///
|
||||||
|
/// Gemini function calling expects parameters in OpenAPI format, which
|
||||||
|
/// is structurally similar to JSON Schema for simple object types.
|
||||||
|
/// We strip unsupported fields and ensure the type is "object".
|
||||||
|
fn convert_mcp_schema_to_gemini(schema: Option<&Value>) -> Option<Value> {
|
||||||
|
let schema = schema?;
|
||||||
|
|
||||||
|
// If the schema has no properties (empty tool), return None.
|
||||||
|
let properties = schema.get("properties")?;
|
||||||
|
if properties.as_object().is_some_and(|p| p.is_empty()) {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut result = json!({
|
||||||
|
"type": "object",
|
||||||
|
"properties": clean_schema_properties(properties),
|
||||||
|
});
|
||||||
|
|
||||||
|
// Preserve required fields if present.
|
||||||
|
if let Some(required) = schema.get("required") {
|
||||||
|
result["required"] = required.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Recursively clean schema properties to be Gemini-compatible.
/// Removes unsupported JSON Schema keywords (`$schema`,
/// `additionalProperties`) from each property, recursing into nested
/// `properties` maps and shallow-cleaning array `items` schemas.
fn clean_schema_properties(properties: &Value) -> Value {
    // Non-object input passes through unchanged.
    let Some(obj) = properties.as_object() else {
        return properties.clone();
    };

    let mut cleaned = serde_json::Map::new();
    for (key, value) in obj {
        let mut prop = value.clone();
        // Remove JSON Schema keywords not supported by Gemini
        if let Some(p) = prop.as_object_mut() {
            p.remove("$schema");
            p.remove("additionalProperties");

            // Recursively clean nested object properties
            if let Some(nested_props) = p.get("properties").cloned() {
                p.insert(
                    "properties".to_string(),
                    clean_schema_properties(&nested_props),
                );
            }

            // Clean items schema for arrays
            // NOTE(review): only the top level of `items` is cleaned here;
            // keywords nested inside the items' own sub-schemas are left
            // untouched — confirm that's intended for deeply nested arrays.
            if let Some(items) = p.get("items").cloned()
                && let Some(items_obj) = items.as_object()
            {
                let mut cleaned_items = items_obj.clone();
                cleaned_items.remove("$schema");
                cleaned_items.remove("additionalProperties");
                p.insert("items".to_string(), Value::Object(cleaned_items));
            }
        }
        cleaned.insert(key.clone(), prop);
    }
    Value::Object(cleaned)
}
|
||||||
|
|
||||||
|
/// Call an MCP tool via storkit's MCP server.
///
/// Sends a JSON-RPC `tools/call` request and returns the tool's textual
/// output (text content chunks joined with newlines, or the serialized
/// result as a fallback). Transport failures, unparseable responses, and
/// JSON-RPC errors come back as `Err` strings.
async fn call_mcp_tool(
    client: &Client,
    mcp_base: &str,
    tool_name: &str,
    args: &Value,
) -> Result<String, String> {
    let request = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": tool_name,
            "arguments": args
        }
    });

    let response = client
        .post(mcp_base)
        .json(&request)
        .send()
        .await
        .map_err(|e| format!("MCP tool call failed: {e}"))?;

    let body: Value = response
        .json()
        .await
        .map_err(|e| format!("Failed to parse MCP tool response: {e}"))?;

    // A JSON-RPC error object takes precedence over any result payload.
    if let Some(error) = body.get("error") {
        let msg = error["message"].as_str().unwrap_or("Unknown MCP error");
        return Err(format!("MCP tool '{tool_name}' error: {msg}"));
    }

    // MCP tools/call returns { result: { content: [{ type: "text", text: "..." }] } }
    let content = &body["result"]["content"];
    if let Some(arr) = content.as_array() {
        // Collect every content chunk that carries a "text" field.
        let texts: Vec<&str> = arr
            .iter()
            .filter_map(|c| c["text"].as_str())
            .collect();
        if !texts.is_empty() {
            return Ok(texts.join("\n"));
        }
    }

    // Fall back to serializing the entire result.
    Ok(body["result"].to_string())
}
|
||||||
|
|
||||||
|
/// Parse token usage metadata from a Gemini API response.
|
||||||
|
fn parse_usage_metadata(response: &Value) -> Option<TokenUsage> {
|
||||||
|
let metadata = response.get("usageMetadata")?;
|
||||||
|
Some(TokenUsage {
|
||||||
|
input_tokens: metadata
|
||||||
|
.get("promptTokenCount")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
output_tokens: metadata
|
||||||
|
.get("candidatesTokenCount")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
// Gemini doesn't have cache token fields, but we keep the struct uniform.
|
||||||
|
cache_creation_input_tokens: 0,
|
||||||
|
cache_read_input_tokens: 0,
|
||||||
|
// Google AI API doesn't report cost; leave at 0.
|
||||||
|
total_cost_usd: 0.0,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Tests ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // ── convert_mcp_schema_to_gemini ─────────────────────────────────

    // A plain object schema converts with properties and required intact.
    #[test]
    fn convert_mcp_schema_simple_object() {
        let schema = json!({
            "type": "object",
            "properties": {
                "story_id": {
                    "type": "string",
                    "description": "Story identifier"
                }
            },
            "required": ["story_id"]
        });

        let result = convert_mcp_schema_to_gemini(Some(&schema)).unwrap();
        assert_eq!(result["type"], "object");
        assert!(result["properties"]["story_id"].is_object());
        assert_eq!(result["required"][0], "story_id");
    }

    // Parameterless tools (empty property map) yield no parameter schema.
    #[test]
    fn convert_mcp_schema_empty_properties_returns_none() {
        let schema = json!({
            "type": "object",
            "properties": {}
        });

        assert!(convert_mcp_schema_to_gemini(Some(&schema)).is_none());
    }

    #[test]
    fn convert_mcp_schema_none_returns_none() {
        assert!(convert_mcp_schema_to_gemini(None).is_none());
    }

    // Unsupported JSON Schema keywords are stripped from each property.
    #[test]
    fn convert_mcp_schema_strips_additional_properties() {
        let schema = json!({
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "additionalProperties": false,
                    "$schema": "http://json-schema.org/draft-07/schema#"
                }
            }
        });

        let result = convert_mcp_schema_to_gemini(Some(&schema)).unwrap();
        let name_prop = &result["properties"]["name"];
        assert!(name_prop.get("additionalProperties").is_none());
        assert!(name_prop.get("$schema").is_none());
        assert_eq!(name_prop["type"], "string");
    }

    // Nested object properties survive the recursive cleaning pass.
    #[test]
    fn convert_mcp_schema_with_nested_objects() {
        let schema = json!({
            "type": "object",
            "properties": {
                "config": {
                    "type": "object",
                    "properties": {
                        "key": { "type": "string" }
                    }
                }
            }
        });

        let result = convert_mcp_schema_to_gemini(Some(&schema)).unwrap();
        assert!(result["properties"]["config"]["properties"]["key"].is_object());
    }

    // Array `items` schemas get their unsupported keywords removed too.
    #[test]
    fn convert_mcp_schema_with_array_items() {
        let schema = json!({
            "type": "object",
            "properties": {
                "items": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string" }
                        },
                        "additionalProperties": false
                    }
                }
            }
        });

        let result = convert_mcp_schema_to_gemini(Some(&schema)).unwrap();
        let items_schema = &result["properties"]["items"]["items"];
        assert!(items_schema.get("additionalProperties").is_none());
    }

    // ── build_system_instruction ─────────────────────────────────────

    // The --append-system-prompt arg overrides the default instruction.
    #[test]
    fn build_system_instruction_uses_args() {
        let ctx = RuntimeContext {
            story_id: "42_story_test".to_string(),
            agent_name: "coder-1".to_string(),
            command: "gemini-2.5-pro".to_string(),
            args: vec![
                "--append-system-prompt".to_string(),
                "Custom system prompt".to_string(),
            ],
            prompt: "Do the thing".to_string(),
            cwd: "/tmp/wt".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        let instruction = build_system_instruction(&ctx);
        assert_eq!(instruction["parts"][0]["text"], "Custom system prompt");
    }

    // Without the flag, the default mentions story id and cwd.
    #[test]
    fn build_system_instruction_default() {
        let ctx = RuntimeContext {
            story_id: "42_story_test".to_string(),
            agent_name: "coder-1".to_string(),
            command: "gemini-2.5-pro".to_string(),
            args: vec![],
            prompt: "Do the thing".to_string(),
            cwd: "/tmp/wt".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        let instruction = build_system_instruction(&ctx);
        let text = instruction["parts"][0]["text"].as_str().unwrap();
        assert!(text.contains("42_story_test"));
        assert!(text.contains("/tmp/wt"));
    }

    // ── build_generate_content_request ───────────────────────────────

    #[test]
    fn build_generate_content_request_includes_tools() {
        let system = json!({"parts": [{"text": "system"}]});
        let contents = vec![json!({"role": "user", "parts": [{"text": "hello"}]})];
        let tools = vec![GeminiFunctionDeclaration {
            name: "my_tool".to_string(),
            description: "A tool".to_string(),
            parameters: Some(json!({"type": "object", "properties": {"x": {"type": "string"}}})),
        }];

        let body = build_generate_content_request(&system, &contents, &tools);
        assert!(body["tools"][0]["functionDeclarations"].is_array());
        assert_eq!(body["tools"][0]["functionDeclarations"][0]["name"], "my_tool");
    }

    // With no declarations, the `tools` key is omitted entirely.
    #[test]
    fn build_generate_content_request_no_tools() {
        let system = json!({"parts": [{"text": "system"}]});
        let contents = vec![json!({"role": "user", "parts": [{"text": "hello"}]})];
        let tools: Vec<GeminiFunctionDeclaration> = vec![];

        let body = build_generate_content_request(&system, &contents, &tools);
        assert!(body.get("tools").is_none());
    }

    // ── parse_usage_metadata ─────────────────────────────────────────

    #[test]
    fn parse_usage_metadata_valid() {
        let response = json!({
            "usageMetadata": {
                "promptTokenCount": 100,
                "candidatesTokenCount": 50,
                "totalTokenCount": 150
            }
        });

        let usage = parse_usage_metadata(&response).unwrap();
        assert_eq!(usage.input_tokens, 100);
        assert_eq!(usage.output_tokens, 50);
        assert_eq!(usage.cache_creation_input_tokens, 0);
        assert_eq!(usage.total_cost_usd, 0.0);
    }

    #[test]
    fn parse_usage_metadata_missing() {
        let response = json!({"candidates": []});
        assert!(parse_usage_metadata(&response).is_none());
    }

    // ── runtime lifecycle ────────────────────────────────────────────

    // stop() flips the cancellation flag, reflected by get_status().
    #[test]
    fn gemini_runtime_stop_sets_cancelled() {
        let runtime = GeminiRuntime::new();
        assert_eq!(runtime.get_status(), RuntimeStatus::Idle);
        runtime.stop();
        assert_eq!(runtime.get_status(), RuntimeStatus::Failed);
    }

    #[test]
    fn model_extraction_from_command() {
        // When command starts with "gemini", use it as model name
        let ctx = RuntimeContext {
            story_id: "1".to_string(),
            agent_name: "coder".to_string(),
            command: "gemini-2.5-pro".to_string(),
            args: vec![],
            prompt: "test".to_string(),
            cwd: "/tmp".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        // The model extraction logic is inside start(), but we test the
        // condition here.
        assert!(ctx.command.starts_with("gemini"));
    }
}
|
||||||
163
server/src/agents/runtime/mod.rs
Normal file
163
server/src/agents/runtime/mod.rs
Normal file
@@ -0,0 +1,163 @@
|
|||||||
|
mod claude_code;
|
||||||
|
mod gemini;
|
||||||
|
mod openai;
|
||||||
|
|
||||||
|
pub use claude_code::ClaudeCodeRuntime;
|
||||||
|
pub use gemini::GeminiRuntime;
|
||||||
|
pub use openai::OpenAiRuntime;
|
||||||
|
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
|
use crate::agent_log::AgentLogWriter;
|
||||||
|
|
||||||
|
use super::{AgentEvent, TokenUsage};
|
||||||
|
|
||||||
|
/// Context passed to a runtime when launching an agent session.
pub struct RuntimeContext {
    /// Story identifier the session belongs to (e.g. "42_story_foo").
    pub story_id: String,
    /// Name of the agent within the story (e.g. "coder-1").
    pub agent_name: String,
    /// Executable to spawn for CLI runtimes; the pool puts the model name
    /// here for API-based runtimes (e.g. "gemini-2.5-pro").
    pub command: String,
    /// CLI-style arguments. API runtimes also scan these for flags such as
    /// `--model` and `--append-system-prompt`.
    pub args: Vec<String>,
    /// The task prompt given to the agent.
    pub prompt: String,
    /// Working directory for the session.
    pub cwd: String,
    /// Inactivity timeout in seconds, forwarded to the PTY driver by CLI
    /// runtimes.
    pub inactivity_timeout_secs: u64,
    /// Port of the storkit MCP server, used by API-based runtimes (Gemini, OpenAI)
    /// to call back for tool execution.
    pub mcp_port: u16,
}
|
||||||
|
|
||||||
|
/// Result returned by a runtime after the agent session completes.
pub struct RuntimeResult {
    /// Backend session identifier when one is reported (the Gemini runtime
    /// always returns `None`).
    pub session_id: Option<String>,
    /// Aggregated token usage for the session, when available.
    pub token_usage: Option<TokenUsage>,
}
|
||||||
|
|
||||||
|
/// Runtime status reported by the backend.
#[derive(Debug, Clone, PartialEq)]
#[allow(dead_code)]
pub enum RuntimeStatus {
    /// No session in progress. The stateless PTY runtime always reports
    /// this regardless of lifecycle (the pool tracks status instead).
    Idle,
    /// A session is in progress.
    Running,
    /// The last session finished.
    Completed,
    /// The session failed or was cancelled (the Gemini runtime reports
    /// this once `stop` has been called).
    Failed,
}
|
||||||
|
|
||||||
|
/// Abstraction over different agent execution backends.
///
/// Implementations:
/// - [`ClaudeCodeRuntime`]: spawns the `claude` CLI via a PTY (default, `runtime = "claude-code"`)
/// - [`GeminiRuntime`]: drives a Gemini model via the Google AI `generateContent` REST API
/// - [`OpenAiRuntime`]: API-based runtime (see `openai.rs`)
#[allow(dead_code)]
pub trait AgentRuntime: Send + Sync {
    /// Start the agent and drive it to completion, streaming events through
    /// the provided broadcast sender and event log.
    ///
    /// Returns when the agent session finishes (success or error).
    async fn start(
        &self,
        ctx: RuntimeContext,
        tx: broadcast::Sender<AgentEvent>,
        event_log: Arc<Mutex<Vec<AgentEvent>>>,
        log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
    ) -> Result<RuntimeResult, String>;

    /// Stop the running agent.
    ///
    /// Best-effort: the PTY runtime defers to the pool's child-killer
    /// registry (no-op here), while API runtimes set a cancellation flag
    /// that `start` checks cooperatively.
    fn stop(&self);

    /// Get the current runtime status.
    fn get_status(&self) -> RuntimeStatus;

    /// Return any events buffered outside the broadcast channel.
    ///
    /// PTY-based runtimes stream directly to the broadcast channel; this
    /// returns empty by default. API-based runtimes may buffer events here.
    fn stream_events(&self) -> Vec<AgentEvent> {
        vec![]
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    /// A fully-populated `RuntimeContext` round-trips every field.
    #[test]
    fn runtime_context_fields() {
        let context = RuntimeContext {
            story_id: String::from("42_story_foo"),
            agent_name: String::from("coder-1"),
            command: String::from("claude"),
            args: vec![String::from("--model"), String::from("sonnet")],
            prompt: String::from("Do the thing"),
            cwd: String::from("/tmp/wt"),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        assert_eq!(context.story_id, "42_story_foo");
        assert_eq!(context.agent_name, "coder-1");
        assert_eq!(context.command, "claude");
        assert_eq!(context.args.len(), 2);
        assert_eq!(context.prompt, "Do the thing");
        assert_eq!(context.cwd, "/tmp/wt");
        assert_eq!(context.inactivity_timeout_secs, 300);
        assert_eq!(context.mcp_port, 3001);
    }

    /// A `RuntimeResult` carrying a session id and usage exposes both.
    #[test]
    fn runtime_result_fields() {
        let usage = TokenUsage {
            input_tokens: 100,
            output_tokens: 50,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
            total_cost_usd: 0.01,
        };
        let outcome = RuntimeResult {
            session_id: Some(String::from("sess-123")),
            token_usage: Some(usage),
        };

        assert_eq!(outcome.session_id, Some(String::from("sess-123")));
        let recorded = outcome.token_usage.expect("token usage should be set");
        assert_eq!(recorded.input_tokens, 100);
        assert_eq!(recorded.output_tokens, 50);
        assert_eq!(recorded.total_cost_usd, 0.01);
    }

    /// Both optional fields of `RuntimeResult` may be absent.
    #[test]
    fn runtime_result_no_usage() {
        let outcome = RuntimeResult {
            session_id: None,
            token_usage: None,
        };

        assert!(outcome.session_id.is_none());
        assert!(outcome.token_usage.is_none());
    }

    /// Equality/inequality across the `RuntimeStatus` variants.
    #[test]
    fn runtime_status_variants() {
        assert_eq!(RuntimeStatus::Idle, RuntimeStatus::Idle);
        assert_ne!(RuntimeStatus::Running, RuntimeStatus::Completed);
        assert_ne!(RuntimeStatus::Failed, RuntimeStatus::Idle);
    }

    /// A freshly-constructed Claude runtime starts out idle.
    #[test]
    fn claude_code_runtime_get_status_returns_idle() {
        use crate::io::watcher::WatcherEvent;
        use std::collections::HashMap;

        let kill_handles = Arc::new(Mutex::new(HashMap::new()));
        let (watch_tx, _watch_rx) = broadcast::channel::<WatcherEvent>(16);
        let runtime = ClaudeCodeRuntime::new(kill_handles, watch_tx);

        assert_eq!(runtime.get_status(), RuntimeStatus::Idle);
    }

    /// PTY runtimes do not buffer events outside the broadcast channel.
    #[test]
    fn claude_code_runtime_stream_events_empty() {
        use crate::io::watcher::WatcherEvent;
        use std::collections::HashMap;

        let kill_handles = Arc::new(Mutex::new(HashMap::new()));
        let (watch_tx, _watch_rx) = broadcast::channel::<WatcherEvent>(16);
        let runtime = ClaudeCodeRuntime::new(kill_handles, watch_tx);

        assert!(runtime.stream_events().is_empty());
    }
}
|
||||||
704
server/src/agents/runtime/openai.rs
Normal file
704
server/src/agents/runtime/openai.rs
Normal file
@@ -0,0 +1,704 @@
|
|||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use reqwest::Client;
|
||||||
|
use serde_json::{json, Value};
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
|
||||||
|
use crate::agent_log::AgentLogWriter;
|
||||||
|
use crate::slog;
|
||||||
|
|
||||||
|
use super::super::{AgentEvent, TokenUsage};
|
||||||
|
use super::{AgentRuntime, RuntimeContext, RuntimeResult, RuntimeStatus};
|
||||||
|
|
||||||
|
// ── Public runtime struct ────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Agent runtime that drives an OpenAI model (GPT-4o, o3, etc.) through
/// the OpenAI Chat Completions API.
///
/// The runtime:
/// 1. Fetches MCP tool definitions from storkit's MCP server.
/// 2. Converts them to OpenAI function-calling format.
/// 3. Sends the agent prompt + tools to the Chat Completions API.
/// 4. Executes any requested tool calls via MCP `tools/call`.
/// 5. Loops until the model produces a response with no tool calls.
/// 6. Tracks token usage from the API response.
pub struct OpenAiRuntime {
    /// Whether a stop has been requested. Shared with the conversation
    /// loop, which checks it at turn and tool-call boundaries.
    cancelled: Arc<AtomicBool>,
}

impl OpenAiRuntime {
    /// Create a runtime with no stop requested.
    pub fn new() -> Self {
        Self {
            cancelled: Arc::new(AtomicBool::new(false)),
        }
    }
}

// `new()` takes no arguments, so `Default` is the idiomatic companion
// (clippy: `new_without_default`).
impl Default for OpenAiRuntime {
    fn default() -> Self {
        Self::new()
    }
}
|
||||||
|
|
||||||
|
impl AgentRuntime for OpenAiRuntime {
    /// Drive one full agent session against the OpenAI Chat Completions API.
    ///
    /// Requires the `OPENAI_API_KEY` environment variable. Events are
    /// forwarded to `tx`, `event_log`, and (if present) `log_writer` via
    /// the shared `pty::emit_event` helper. Returns `Ok` with accumulated
    /// token usage on normal completion, cancellation, or turn-limit
    /// exhaustion; returns `Err` on API/transport failures.
    async fn start(
        &self,
        ctx: RuntimeContext,
        tx: broadcast::Sender<AgentEvent>,
        event_log: Arc<Mutex<Vec<AgentEvent>>>,
        log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
    ) -> Result<RuntimeResult, String> {
        // Fail fast with an actionable message if the key is missing.
        let api_key = std::env::var("OPENAI_API_KEY").map_err(|_| {
            "OPENAI_API_KEY environment variable is not set. \
            Set it to your OpenAI API key to use the OpenAI runtime."
                .to_string()
        })?;

        // Model resolution: prefer `command` when it looks like an OpenAI
        // model name, else scan args for `--model <value>`, else "gpt-4o".
        let model = if ctx.command.starts_with("gpt") || ctx.command.starts_with("o") {
            // The pool puts the model into `command` for non-CLI runtimes.
            ctx.command.clone()
        } else {
            // Fall back to args: look for --model <value>
            ctx.args
                .iter()
                .position(|a| a == "--model")
                .and_then(|i| ctx.args.get(i + 1))
                .cloned()
                .unwrap_or_else(|| "gpt-4o".to_string())
        };

        let mcp_port = ctx.mcp_port;
        let mcp_base = format!("http://localhost:{mcp_port}/mcp");

        let client = Client::new();
        let cancelled = Arc::clone(&self.cancelled);

        // Step 1: Fetch MCP tool definitions and convert to OpenAI format.
        let openai_tools = fetch_and_convert_mcp_tools(&client, &mcp_base).await?;

        // Step 2: Build the initial conversation messages.
        let system_text = build_system_text(&ctx);
        let mut messages: Vec<Value> = vec![
            json!({ "role": "system", "content": system_text }),
            json!({ "role": "user", "content": ctx.prompt }),
        ];

        // Running total across all turns; only prompt/completion counters
        // are updated (OpenAI reports no cache counters or cost here).
        let mut total_usage = TokenUsage {
            input_tokens: 0,
            output_tokens: 0,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
            total_cost_usd: 0.0,
        };

        // Single funnel for event emission: broadcast + in-memory log +
        // optional persistent writer.
        let emit = |event: AgentEvent| {
            super::super::pty::emit_event(
                event,
                &tx,
                &event_log,
                log_writer.as_ref().map(|w| w.as_ref()),
            );
        };

        emit(AgentEvent::Status {
            story_id: ctx.story_id.clone(),
            agent_name: ctx.agent_name.clone(),
            status: "running".to_string(),
        });

        // Step 3: Conversation loop.
        let mut turn = 0u32;
        let max_turns = 200; // Safety limit

        loop {
            // Cancellation is observed at the top of each turn; a stopped
            // run still returns Ok so accumulated usage is preserved.
            if cancelled.load(Ordering::Relaxed) {
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: "Agent was stopped by user".to_string(),
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            turn += 1;
            if turn > max_turns {
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: format!("Exceeded maximum turns ({max_turns})"),
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            slog!(
                "[openai] Turn {turn} for {}:{}",
                ctx.story_id,
                ctx.agent_name
            );

            let mut request_body = json!({
                "model": model,
                "messages": messages,
                "temperature": 0.2,
            });

            // Only attach "tools" when there are any; an empty array is
            // not sent.
            if !openai_tools.is_empty() {
                request_body["tools"] = json!(openai_tools);
            }

            let response = client
                .post("https://api.openai.com/v1/chat/completions")
                .bearer_auth(&api_key)
                .json(&request_body)
                .send()
                .await
                .map_err(|e| format!("OpenAI API request failed: {e}"))?;

            // Read the body before checking the HTTP status so the API's
            // own error message can be surfaced.
            let status = response.status();
            let body: Value = response
                .json()
                .await
                .map_err(|e| format!("Failed to parse OpenAI API response: {e}"))?;

            if !status.is_success() {
                let error_msg = body["error"]["message"]
                    .as_str()
                    .unwrap_or("Unknown API error");
                let err = format!("OpenAI API error ({status}): {error_msg}");
                emit(AgentEvent::Error {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    message: err.clone(),
                });
                return Err(err);
            }

            // Accumulate token usage.
            if let Some(usage) = parse_usage(&body) {
                total_usage.input_tokens += usage.input_tokens;
                total_usage.output_tokens += usage.output_tokens;
            }

            // Extract the first choice.
            let choice = body["choices"]
                .as_array()
                .and_then(|c| c.first())
                .ok_or_else(|| "No choices in OpenAI response".to_string())?;

            let message = &choice["message"];
            let content = message["content"].as_str().unwrap_or("");

            // Emit any text content.
            if !content.is_empty() {
                emit(AgentEvent::Output {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    text: content.to_string(),
                });
            }

            // Check for tool calls.
            let tool_calls = message["tool_calls"].as_array();

            if tool_calls.is_none() || tool_calls.is_some_and(|tc| tc.is_empty()) {
                // No tool calls — model is done.
                emit(AgentEvent::Done {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    session_id: None,
                });
                return Ok(RuntimeResult {
                    session_id: None,
                    token_usage: Some(total_usage),
                });
            }

            let tool_calls = tool_calls.unwrap();

            // Add the assistant message (with tool_calls) to the conversation.
            messages.push(message.clone());

            // Execute each tool call via MCP and add results.
            for tc in tool_calls {
                // Mid-batch cancellation: remaining tool calls are skipped;
                // the top-of-loop check then terminates the run.
                if cancelled.load(Ordering::Relaxed) {
                    break;
                }

                let call_id = tc["id"].as_str().unwrap_or("");
                let function = &tc["function"];
                let tool_name = function["name"].as_str().unwrap_or("");
                // `arguments` arrives as a JSON-encoded string; malformed
                // arguments degrade to an empty object.
                let arguments_str = function["arguments"].as_str().unwrap_or("{}");

                let args: Value = serde_json::from_str(arguments_str).unwrap_or(json!({}));

                slog!(
                    "[openai] Calling MCP tool '{}' for {}:{}",
                    tool_name,
                    ctx.story_id,
                    ctx.agent_name
                );

                emit(AgentEvent::Output {
                    story_id: ctx.story_id.clone(),
                    agent_name: ctx.agent_name.clone(),
                    text: format!("\n[Tool call: {tool_name}]\n"),
                });

                let tool_result = call_mcp_tool(&client, &mcp_base, tool_name, &args).await;

                // Tool failures are reported back to the model as a result
                // string rather than aborting the run.
                let result_content = match &tool_result {
                    Ok(result) => {
                        emit(AgentEvent::Output {
                            story_id: ctx.story_id.clone(),
                            agent_name: ctx.agent_name.clone(),
                            text: format!("[Tool result: {} chars]\n", result.len()),
                        });
                        result.clone()
                    }
                    Err(e) => {
                        emit(AgentEvent::Output {
                            story_id: ctx.story_id.clone(),
                            agent_name: ctx.agent_name.clone(),
                            text: format!("[Tool error: {e}]\n"),
                        });
                        format!("Error: {e}")
                    }
                };

                // OpenAI expects tool results as role=tool messages with
                // the matching tool_call_id.
                messages.push(json!({
                    "role": "tool",
                    "tool_call_id": call_id,
                    "content": result_content,
                }));
            }
        }
    }

    /// Request cancellation; the conversation loop observes the flag at
    /// its next turn or tool-call boundary.
    fn stop(&self) {
        self.cancelled.store(true, Ordering::Relaxed);
    }

    /// Report `Failed` once a stop has been requested, `Idle` otherwise.
    /// (This runtime does not track a distinct `Running` state here.)
    fn get_status(&self) -> RuntimeStatus {
        if self.cancelled.load(Ordering::Relaxed) {
            RuntimeStatus::Failed
        } else {
            RuntimeStatus::Idle
        }
    }
}
|
||||||
|
|
||||||
|
// ── Helper functions ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Build the system message text from the RuntimeContext.
|
||||||
|
fn build_system_text(ctx: &RuntimeContext) -> String {
|
||||||
|
ctx.args
|
||||||
|
.iter()
|
||||||
|
.position(|a| a == "--append-system-prompt")
|
||||||
|
.and_then(|i| ctx.args.get(i + 1))
|
||||||
|
.cloned()
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
format!(
|
||||||
|
"You are an AI coding agent working on story {}. \
|
||||||
|
You have access to tools via function calling. \
|
||||||
|
Use them to complete the task. \
|
||||||
|
Work in the directory: {}",
|
||||||
|
ctx.story_id, ctx.cwd
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch MCP tool definitions from storkit's MCP server and convert
|
||||||
|
/// them to OpenAI function-calling format.
|
||||||
|
async fn fetch_and_convert_mcp_tools(
|
||||||
|
client: &Client,
|
||||||
|
mcp_base: &str,
|
||||||
|
) -> Result<Vec<Value>, String> {
|
||||||
|
let request = json!({
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": 1,
|
||||||
|
"method": "tools/list",
|
||||||
|
"params": {}
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = client
|
||||||
|
.post(mcp_base)
|
||||||
|
.json(&request)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to fetch MCP tools: {e}"))?;
|
||||||
|
|
||||||
|
let body: Value = response
|
||||||
|
.json()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to parse MCP tools response: {e}"))?;
|
||||||
|
|
||||||
|
let tools = body["result"]["tools"]
|
||||||
|
.as_array()
|
||||||
|
.ok_or_else(|| "No tools array in MCP response".to_string())?;
|
||||||
|
|
||||||
|
let mut openai_tools = Vec::new();
|
||||||
|
|
||||||
|
for tool in tools {
|
||||||
|
let name = tool["name"].as_str().unwrap_or("").to_string();
|
||||||
|
let description = tool["description"].as_str().unwrap_or("").to_string();
|
||||||
|
|
||||||
|
if name.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenAI function calling uses JSON Schema natively for parameters,
|
||||||
|
// so the MCP inputSchema can be used with minimal cleanup.
|
||||||
|
let parameters = convert_mcp_schema_to_openai(tool.get("inputSchema"));
|
||||||
|
|
||||||
|
openai_tools.push(json!({
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": name,
|
||||||
|
"description": description,
|
||||||
|
"parameters": parameters.unwrap_or_else(|| json!({"type": "object", "properties": {}})),
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
slog!(
|
||||||
|
"[openai] Loaded {} MCP tools as function definitions",
|
||||||
|
openai_tools.len()
|
||||||
|
);
|
||||||
|
Ok(openai_tools)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert an MCP inputSchema (JSON Schema) to OpenAI-compatible
|
||||||
|
/// function parameters.
|
||||||
|
///
|
||||||
|
/// OpenAI uses JSON Schema natively, so less transformation is needed
|
||||||
|
/// compared to Gemini. We still strip `$schema` to keep payloads clean.
|
||||||
|
fn convert_mcp_schema_to_openai(schema: Option<&Value>) -> Option<Value> {
|
||||||
|
let schema = schema?;
|
||||||
|
|
||||||
|
let mut result = json!({
|
||||||
|
"type": "object",
|
||||||
|
});
|
||||||
|
|
||||||
|
if let Some(properties) = schema.get("properties") {
|
||||||
|
result["properties"] = clean_schema_properties(properties);
|
||||||
|
} else {
|
||||||
|
result["properties"] = json!({});
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(required) = schema.get("required") {
|
||||||
|
result["required"] = required.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
// OpenAI recommends additionalProperties: false for strict mode.
|
||||||
|
result["additionalProperties"] = json!(false);
|
||||||
|
|
||||||
|
Some(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Recursively clean schema properties, removing unsupported keywords.
|
||||||
|
fn clean_schema_properties(properties: &Value) -> Value {
|
||||||
|
let Some(obj) = properties.as_object() else {
|
||||||
|
return properties.clone();
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut cleaned = serde_json::Map::new();
|
||||||
|
for (key, value) in obj {
|
||||||
|
let mut prop = value.clone();
|
||||||
|
if let Some(p) = prop.as_object_mut() {
|
||||||
|
p.remove("$schema");
|
||||||
|
|
||||||
|
// Recursively clean nested object properties.
|
||||||
|
if let Some(nested_props) = p.get("properties").cloned() {
|
||||||
|
p.insert(
|
||||||
|
"properties".to_string(),
|
||||||
|
clean_schema_properties(&nested_props),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clean items schema for arrays.
|
||||||
|
if let Some(items) = p.get("items").cloned()
|
||||||
|
&& let Some(items_obj) = items.as_object()
|
||||||
|
{
|
||||||
|
let mut cleaned_items = items_obj.clone();
|
||||||
|
cleaned_items.remove("$schema");
|
||||||
|
p.insert("items".to_string(), Value::Object(cleaned_items));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cleaned.insert(key.clone(), prop);
|
||||||
|
}
|
||||||
|
Value::Object(cleaned)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Call an MCP tool via storkit's MCP server.
|
||||||
|
async fn call_mcp_tool(
|
||||||
|
client: &Client,
|
||||||
|
mcp_base: &str,
|
||||||
|
tool_name: &str,
|
||||||
|
args: &Value,
|
||||||
|
) -> Result<String, String> {
|
||||||
|
let request = json!({
|
||||||
|
"jsonrpc": "2.0",
|
||||||
|
"id": 1,
|
||||||
|
"method": "tools/call",
|
||||||
|
"params": {
|
||||||
|
"name": tool_name,
|
||||||
|
"arguments": args
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
let response = client
|
||||||
|
.post(mcp_base)
|
||||||
|
.json(&request)
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("MCP tool call failed: {e}"))?;
|
||||||
|
|
||||||
|
let body: Value = response
|
||||||
|
.json()
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("Failed to parse MCP tool response: {e}"))?;
|
||||||
|
|
||||||
|
if let Some(error) = body.get("error") {
|
||||||
|
let msg = error["message"].as_str().unwrap_or("Unknown MCP error");
|
||||||
|
return Err(format!("MCP tool '{tool_name}' error: {msg}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// MCP tools/call returns { result: { content: [{ type: "text", text: "..." }] } }
|
||||||
|
let content = &body["result"]["content"];
|
||||||
|
if let Some(arr) = content.as_array() {
|
||||||
|
let texts: Vec<&str> = arr
|
||||||
|
.iter()
|
||||||
|
.filter_map(|c| c["text"].as_str())
|
||||||
|
.collect();
|
||||||
|
if !texts.is_empty() {
|
||||||
|
return Ok(texts.join("\n"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fall back to serializing the entire result.
|
||||||
|
Ok(body["result"].to_string())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse token usage from an OpenAI API response.
|
||||||
|
fn parse_usage(response: &Value) -> Option<TokenUsage> {
|
||||||
|
let usage = response.get("usage")?;
|
||||||
|
Some(TokenUsage {
|
||||||
|
input_tokens: usage
|
||||||
|
.get("prompt_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
output_tokens: usage
|
||||||
|
.get("completion_tokens")
|
||||||
|
.and_then(|v| v.as_u64())
|
||||||
|
.unwrap_or(0),
|
||||||
|
cache_creation_input_tokens: 0,
|
||||||
|
cache_read_input_tokens: 0,
|
||||||
|
// OpenAI API doesn't report cost directly; leave at 0.
|
||||||
|
total_cost_usd: 0.0,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Tests ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A plain object schema keeps type/properties/required and gains
    // additionalProperties: false.
    #[test]
    fn convert_mcp_schema_simple_object() {
        let schema = json!({
            "type": "object",
            "properties": {
                "story_id": {
                    "type": "string",
                    "description": "Story identifier"
                }
            },
            "required": ["story_id"]
        });

        let result = convert_mcp_schema_to_openai(Some(&schema)).unwrap();
        assert_eq!(result["type"], "object");
        assert!(result["properties"]["story_id"].is_object());
        assert_eq!(result["required"][0], "story_id");
        assert_eq!(result["additionalProperties"], false);
    }

    // Empty properties survive conversion as an empty object.
    #[test]
    fn convert_mcp_schema_empty_properties() {
        let schema = json!({
            "type": "object",
            "properties": {}
        });

        let result = convert_mcp_schema_to_openai(Some(&schema)).unwrap();
        assert_eq!(result["type"], "object");
        assert!(result["properties"].as_object().unwrap().is_empty());
    }

    // No input schema at all yields None (caller substitutes a default).
    #[test]
    fn convert_mcp_schema_none_returns_none() {
        assert!(convert_mcp_schema_to_openai(None).is_none());
    }

    // The `$schema` meta-keyword is stripped from property schemas.
    #[test]
    fn convert_mcp_schema_strips_dollar_schema() {
        let schema = json!({
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "$schema": "http://json-schema.org/draft-07/schema#"
                }
            }
        });

        let result = convert_mcp_schema_to_openai(Some(&schema)).unwrap();
        let name_prop = &result["properties"]["name"];
        assert!(name_prop.get("$schema").is_none());
        assert_eq!(name_prop["type"], "string");
    }

    // Nested object schemas are cleaned recursively.
    #[test]
    fn convert_mcp_schema_with_nested_objects() {
        let schema = json!({
            "type": "object",
            "properties": {
                "config": {
                    "type": "object",
                    "properties": {
                        "key": { "type": "string" }
                    }
                }
            }
        });

        let result = convert_mcp_schema_to_openai(Some(&schema)).unwrap();
        assert!(result["properties"]["config"]["properties"]["key"].is_object());
    }

    // `$schema` is also removed from array item schemas.
    #[test]
    fn convert_mcp_schema_with_array_items() {
        let schema = json!({
            "type": "object",
            "properties": {
                "items": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": { "type": "string" }
                        },
                        "$schema": "http://json-schema.org/draft-07/schema#"
                    }
                }
            }
        });

        let result = convert_mcp_schema_to_openai(Some(&schema)).unwrap();
        let items_schema = &result["properties"]["items"]["items"];
        assert!(items_schema.get("$schema").is_none());
    }

    // An explicit --append-system-prompt value wins over the default text.
    #[test]
    fn build_system_text_uses_args() {
        let ctx = RuntimeContext {
            story_id: "42_story_test".to_string(),
            agent_name: "coder-1".to_string(),
            command: "gpt-4o".to_string(),
            args: vec![
                "--append-system-prompt".to_string(),
                "Custom system prompt".to_string(),
            ],
            prompt: "Do the thing".to_string(),
            cwd: "/tmp/wt".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        assert_eq!(build_system_text(&ctx), "Custom system prompt");
    }

    // Without the flag, the default prompt embeds story id and cwd.
    #[test]
    fn build_system_text_default() {
        let ctx = RuntimeContext {
            story_id: "42_story_test".to_string(),
            agent_name: "coder-1".to_string(),
            command: "gpt-4o".to_string(),
            args: vec![],
            prompt: "Do the thing".to_string(),
            cwd: "/tmp/wt".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };

        let text = build_system_text(&ctx);
        assert!(text.contains("42_story_test"));
        assert!(text.contains("/tmp/wt"));
    }

    // prompt/completion counters map to input/output tokens; cache and
    // cost fields stay zero.
    #[test]
    fn parse_usage_valid() {
        let response = json!({
            "usage": {
                "prompt_tokens": 100,
                "completion_tokens": 50,
                "total_tokens": 150
            }
        });

        let usage = parse_usage(&response).unwrap();
        assert_eq!(usage.input_tokens, 100);
        assert_eq!(usage.output_tokens, 50);
        assert_eq!(usage.cache_creation_input_tokens, 0);
        assert_eq!(usage.total_cost_usd, 0.0);
    }

    // A response with no `usage` key produces None.
    #[test]
    fn parse_usage_missing() {
        let response = json!({"choices": []});
        assert!(parse_usage(&response).is_none());
    }

    // stop() flips the cancelled flag, which get_status reports as Failed.
    #[test]
    fn openai_runtime_stop_sets_cancelled() {
        let runtime = OpenAiRuntime::new();
        assert_eq!(runtime.get_status(), RuntimeStatus::Idle);
        runtime.stop();
        assert_eq!(runtime.get_status(), RuntimeStatus::Failed);
    }

    // "gpt-…" commands are treated as model names by start()'s heuristic.
    #[test]
    fn model_extraction_from_command_gpt() {
        let ctx = RuntimeContext {
            story_id: "1".to_string(),
            agent_name: "coder".to_string(),
            command: "gpt-4o".to_string(),
            args: vec![],
            prompt: "test".to_string(),
            cwd: "/tmp".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };
        assert!(ctx.command.starts_with("gpt"));
    }

    // "o…" commands (o3 etc.) are likewise treated as model names.
    #[test]
    fn model_extraction_from_command_o3() {
        let ctx = RuntimeContext {
            story_id: "1".to_string(),
            agent_name: "coder".to_string(),
            command: "o3".to_string(),
            args: vec![],
            prompt: "test".to_string(),
            cwd: "/tmp".to_string(),
            inactivity_timeout_secs: 300,
            mcp_port: 3001,
        };
        assert!(ctx.command.starts_with("o"));
    }
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user