Compare commits

...

16 Commits

Author SHA1 Message Date
dave b70ee1aa4b huskies: merge 622_story_wrap_react_state_updates_in_act_to_silence_frontend_test_warnings 2026-04-24 23:02:52 +00:00
dave e1bfbf4232 huskies: merge 619_story_service_common_consolidation_sweep 2026-04-24 21:36:49 +00:00
dave c16d9e471d huskies: merge 618_story_extract_mcp_only_domain_services 2026-04-24 21:16:19 +00:00
dave 360bca45c8 huskies: merge 617_story_split_gateway_into_service_and_transport 2026-04-24 18:43:26 +00:00
dave 271f8ea6a8 huskies: merge 616_story_extract_notifications_service 2026-04-24 18:05:42 +00:00
dave eca0ef792c huskies: merge 615_story_extract_timer_service 2026-04-24 17:43:53 +00:00
dave 62bfaf20f4 huskies: merge 611_story_extract_settings_service 2026-04-24 17:11:55 +00:00
dave da6ae89667 huskies: merge 610_story_extract_wizard_service 2026-04-24 16:46:09 +00:00
dave 60a9c87794 huskies: merge 609_story_extract_oauth_service 2026-04-24 16:19:26 +00:00
dave 2dc2513fac huskies: merge 620_refactor_enforce_test_fixture_discipline_in_service_modules 2026-04-24 16:07:00 +00:00
dave 65c896f07f huskies: merge 608_story_extract_io_and_anthropic_services 2026-04-24 15:54:50 +00:00
dave aba3120388 huskies: merge 607_story_extract_bot_command_service 2026-04-24 15:28:03 +00:00
dave 1910365321 huskies: merge 606_story_extract_project_service 2026-04-24 15:01:04 +00:00
dave d9e883c21d huskies: merge 612_story_extract_ws_service 2026-04-24 14:36:44 +00:00
dave 4a80600e22 huskies: merge 614_bug_gateway_web_ui_has_no_vertical_scrollbars 2026-04-24 14:25:09 +00:00
dave 23890a1d33 huskies: merge 605_story_extract_events_and_health_services 2026-04-24 14:08:39 +00:00
115 changed files with 13908 additions and 7573 deletions
+1
View File
@@ -10,6 +10,7 @@ script/local-release
# App specific (root-level; huskies subdirectory patterns live in .huskies/.gitignore)
store.json
_merge_parsed.json
.huskies_port
.huskies/bot.toml.bak
.huskies/build_hash
+36
View File
@@ -86,6 +86,30 @@ HTTP handlers map service errors to **specific** HTTP status codes:
## 4. Test Pattern
### Chosen default pattern: fixture helpers in `io::test_helpers`
All filesystem setup for tests lives in a `#[cfg(test)] pub mod test_helpers`
block inside `io.rs`. Test blocks in `mod.rs` and topic files call these
helpers instead of importing `std::fs` directly.
**Grep-enforceable check for test code:** The following must NOT appear inside
`#[cfg(test)]` blocks in any `service/<domain>/` file **other than `io.rs`**:
- `std::fs::` (any item)
- `tokio::fs`
- `std::process::` (any item)
- `Command::new`
Run to verify:
```sh
grep -rn --include='*.rs' \
'std::fs::\|tokio::fs\|std::process::\|Command::new' \
server/src/service/ | grep -v '/io\.rs'
```
This must return zero matches (including lines inside `#[cfg(test)]` blocks).
### Pure topic files (`<topic>.rs`)
```rust
@@ -104,6 +128,17 @@ mod tests {
### `io.rs`
```rust
/// Fixture helpers — the ONLY place allowed to call std::fs in tests.
#[cfg(test)]
pub mod test_helpers {
use tempfile::TempDir;
pub fn make_work_dirs(tmp: &TempDir) { ... }
pub fn make_stage_dirs(tmp: &TempDir) { ... }
pub fn make_project_toml(tmp: &TempDir, content: &str) { ... }
pub fn write_story_file(tmp: &TempDir, relative_path: &str, content: &str) { ... }
}
#[cfg(test)]
mod tests {
use super::*;
@@ -122,6 +157,7 @@ mod tests {
#[cfg(test)]
mod tests {
use super::*;
use io::test_helpers::*; // ← fixture helpers; never import std::fs here
// Integration tests compose io + pure layers end-to-end.
// May use tempdirs. Keep the count small — they are integration-level.
-1
View File
@@ -194,7 +194,6 @@ body,
#root {
height: 100%;
margin: 0;
overflow: hidden;
}
/* Agent activity indicator pulse */
+12 -2
View File
@@ -1,8 +1,14 @@
import { fireEvent, render, screen, waitFor } from "@testing-library/react";
import { act, fireEvent, render, screen, waitFor } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { api } from "./api/client";
vi.mock("./api/gateway", () => ({
gatewayApi: {
getServerMode: vi.fn().mockResolvedValue({ mode: "standard" }),
},
}));
vi.mock("./api/client", () => {
const api = {
getCurrentProject: vi.fn(),
@@ -76,7 +82,11 @@ describe("App", () => {
async function renderApp() {
const { default: App } = await import("./App");
return render(<App />);
let result!: ReturnType<typeof render>;
await act(async () => {
result = render(<App />);
});
return result;
}
it("calls getCurrentProject() on mount", async () => {
+33 -33
View File
@@ -165,7 +165,7 @@ describe("Chat message rendering — unified tool call UI", () => {
},
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(messages);
});
@@ -199,7 +199,7 @@ describe("Chat message rendering — unified tool call UI", () => {
{ role: "assistant", content: "The file contains a main function." },
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(messages);
});
@@ -219,7 +219,7 @@ describe("Chat message rendering — unified tool call UI", () => {
{ role: "assistant", content: "Hi there! How can I help?" },
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(messages);
});
@@ -254,7 +254,7 @@ describe("Chat message rendering — unified tool call UI", () => {
},
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(messages);
});
@@ -396,7 +396,7 @@ describe("Chat reconciliation banner", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onReconciliationProgress(
"42_story_test",
"checking",
@@ -417,7 +417,7 @@ describe("Chat reconciliation banner", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onReconciliationProgress(
"42_story_test",
"gates_running",
@@ -435,7 +435,7 @@ describe("Chat reconciliation banner", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onReconciliationProgress(
"42_story_test",
"checking",
@@ -447,7 +447,7 @@ describe("Chat reconciliation banner", () => {
await screen.findByTestId("reconciliation-banner"),
).toBeInTheDocument();
act(() => {
await act(async () => {
capturedWsHandlers?.onReconciliationProgress(
"",
"done",
@@ -504,7 +504,7 @@ describe("Chat localStorage persistence (Story 145)", () => {
{ role: "assistant", content: "Hi there!" },
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(history);
});
@@ -555,7 +555,7 @@ describe("Chat localStorage persistence (Story 145)", () => {
{ role: "assistant", content: "I should survive a reload" },
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(history);
});
@@ -604,7 +604,7 @@ describe("Chat localStorage persistence (Story 145)", () => {
{ role: "user", content: "What is Rust?" },
{ role: "assistant", content: "Rust is a systems programming language." },
];
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate(priorHistory);
});
@@ -692,12 +692,12 @@ describe("Chat activity status indicator (Bug 140)", () => {
});
// Simulate tokens arriving (streamingContent becomes non-empty)
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("I'll read that file for you.");
});
// Now simulate a tool activity event while streamingContent is non-empty
act(() => {
await act(async () => {
capturedWsHandlers?.onActivity("read_file");
});
@@ -742,7 +742,7 @@ describe("Chat activity status indicator (Bug 140)", () => {
});
// Tokens arrive — streamingContent is non-empty, no activity
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Here is my response...");
});
@@ -765,12 +765,12 @@ describe("Chat activity status indicator (Bug 140)", () => {
});
// Simulate tokens arriving
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Let me read that.");
});
// Claude Code sends tool name "Read" (not "read_file")
act(() => {
await act(async () => {
capturedWsHandlers?.onActivity("Read");
});
@@ -792,11 +792,11 @@ describe("Chat activity status indicator (Bug 140)", () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Running tests now.");
});
act(() => {
await act(async () => {
capturedWsHandlers?.onActivity("Bash");
});
@@ -818,11 +818,11 @@ describe("Chat activity status indicator (Bug 140)", () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Working on it.");
});
act(() => {
await act(async () => {
capturedWsHandlers?.onActivity("SomeCustomTool");
});
@@ -899,7 +899,7 @@ describe("Chat message queue (Story 155)", () => {
).toBeInTheDocument();
// Simulate agent response completing (loading → false)
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "First" },
{ role: "assistant", content: "Done." },
@@ -1066,7 +1066,7 @@ describe("Chat message queue (Story 155)", () => {
expect(indicators[1]).toHaveTextContent("Third");
// Simulate first response completing — both "Second" and "Third" are drained at once
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "First" },
{ role: "assistant", content: "Response 1." },
@@ -1145,7 +1145,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
});
// Simulate streaming tokens arriving
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Streaming response text");
});
@@ -1176,7 +1176,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
});
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Some markdown content");
});
@@ -1200,7 +1200,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
});
// Simulate streaming tokens
act(() => {
await act(async () => {
capturedWsHandlers?.onToken("Final response");
});
@@ -1211,7 +1211,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
const streamingStyleAttr = streamingStyledDiv.getAttribute("style") ?? "";
// Transition: onUpdate completes the message
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "Hello" },
{ role: "assistant", content: "Final response" },
@@ -1244,7 +1244,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "Hi" },
{ role: "assistant", content: "Hello there!" },
@@ -1268,7 +1268,7 @@ describe("Remove bubble styling from streaming messages (Story 163)", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "I am a user message" },
{ role: "assistant", content: "I am a response" },
@@ -1310,7 +1310,7 @@ describe("Bug 264: Claude Code session ID persisted across browser refresh", ()
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onSessionId("test-session-abc");
});
@@ -1394,7 +1394,7 @@ describe("Bug 264: Claude Code session ID persisted across browser refresh", ()
render(<Chat projectPath={PROJECT_PATH} onCloseProject={vi.fn()} />);
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onSessionId("my-session");
});
@@ -1595,7 +1595,7 @@ describe("Slash command handling (Story 374)", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
// First add a message so there is history to clear
act(() => {
await act(async () => {
capturedWsHandlers?.onUpdate([
{ role: "user", content: "hello" },
{ role: "assistant", content: "world" },
@@ -1701,7 +1701,7 @@ describe("Bug 450: WebSocket error messages displayed in chat", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onError("Something went wrong on the server.");
});
@@ -1715,7 +1715,7 @@ describe("Bug 450: WebSocket error messages displayed in chat", () => {
await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
act(() => {
await act(async () => {
capturedWsHandlers?.onError(
"OAuth login required. Please visit: https://example.com/oauth/login",
);
@@ -138,7 +138,7 @@ describe("usePathCompletion hook", () => {
expect(result.current.matchList[0].name).toBe("Documents");
});
it("calls setPathInput when acceptMatch is invoked", () => {
it("calls setPathInput when acceptMatch is invoked", async () => {
const setPathInput = vi.fn();
const { result } = renderHook(() =>
@@ -151,7 +151,7 @@ describe("usePathCompletion hook", () => {
}),
);
act(() => {
await act(async () => {
result.current.acceptMatch("/home/user/Documents/");
});
@@ -308,14 +308,14 @@ describe("usePathCompletion hook", () => {
expect(result.current.matchList.length).toBe(2);
});
act(() => {
await act(async () => {
result.current.acceptSelectedMatch();
});
expect(setPathInput).toHaveBeenCalledWith("/home/user/Documents/");
});
it("acceptSelectedMatch does nothing when matchList is empty", () => {
it("acceptSelectedMatch does nothing when matchList is empty", async () => {
const setPathInput = vi.fn();
const { result } = renderHook(() =>
@@ -328,7 +328,7 @@ describe("usePathCompletion hook", () => {
}),
);
act(() => {
await act(async () => {
result.current.acceptSelectedMatch();
});
@@ -352,7 +352,7 @@ describe("usePathCompletion hook", () => {
expect(result.current.matchList.length).toBe(1);
});
act(() => {
await act(async () => {
result.current.closeSuggestions();
});
@@ -450,7 +450,7 @@ describe("usePathCompletion hook", () => {
expect(result.current.matchList.length).toBe(2);
});
act(() => {
await act(async () => {
result.current.setSelectedMatch(1);
});
+31 -31
View File
@@ -19,7 +19,7 @@ function makeMessages(count: number): Message[] {
}));
}
describe("useChatHistory", () => {
describe("useChatHistory", async () => {
beforeEach(() => {
localStorage.clear();
});
@@ -28,7 +28,7 @@ describe("useChatHistory", () => {
localStorage.clear();
});
it("AC1: restores messages from localStorage on mount", () => {
it("AC1: restores messages from localStorage on mount", async () => {
localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));
const { result } = renderHook(() => useChatHistory(PROJECT));
@@ -36,13 +36,13 @@ describe("useChatHistory", () => {
expect(result.current.messages).toEqual(sampleMessages);
});
it("AC1: returns empty array when localStorage has no data", () => {
it("AC1: returns empty array when localStorage has no data", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
expect(result.current.messages).toEqual([]);
});
it("AC1: returns empty array when localStorage contains invalid JSON", () => {
it("AC1: returns empty array when localStorage contains invalid JSON", async () => {
localStorage.setItem(STORAGE_KEY, "not-json{{{");
const { result } = renderHook(() => useChatHistory(PROJECT));
@@ -50,7 +50,7 @@ describe("useChatHistory", () => {
expect(result.current.messages).toEqual([]);
});
it("AC1: returns empty array when localStorage contains a non-array", () => {
it("AC1: returns empty array when localStorage contains a non-array", async () => {
localStorage.setItem(STORAGE_KEY, JSON.stringify({ not: "array" }));
const { result } = renderHook(() => useChatHistory(PROJECT));
@@ -58,10 +58,10 @@ describe("useChatHistory", () => {
expect(result.current.messages).toEqual([]);
});
it("AC2: saves messages to localStorage when setMessages is called with an array", () => {
it("AC2: saves messages to localStorage when setMessages is called with an array", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
act(() => {
await act(async () => {
result.current.setMessages(sampleMessages);
});
@@ -69,10 +69,10 @@ describe("useChatHistory", () => {
expect(stored).toEqual(sampleMessages);
});
it("AC2: saves messages to localStorage when setMessages is called with updater function", () => {
it("AC2: saves messages to localStorage when setMessages is called with updater function", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
act(() => {
await act(async () => {
result.current.setMessages(() => sampleMessages);
});
@@ -80,14 +80,14 @@ describe("useChatHistory", () => {
expect(stored).toEqual(sampleMessages);
});
it("AC3: clearMessages removes messages from state and localStorage", () => {
it("AC3: clearMessages removes messages from state and localStorage", async () => {
localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));
const { result } = renderHook(() => useChatHistory(PROJECT));
expect(result.current.messages).toEqual(sampleMessages);
act(() => {
await act(async () => {
result.current.clearMessages();
});
@@ -95,7 +95,7 @@ describe("useChatHistory", () => {
expect(localStorage.getItem(STORAGE_KEY)).toBeNull();
});
it("AC4: handles localStorage quota errors gracefully", () => {
it("AC4: handles localStorage quota errors gracefully", async () => {
const warnSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
const setItemSpy = vi
.spyOn(Storage.prototype, "setItem")
@@ -106,7 +106,7 @@ describe("useChatHistory", () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
// Should not throw
act(() => {
await act(async () => {
result.current.setMessages(sampleMessages);
});
@@ -121,7 +121,7 @@ describe("useChatHistory", () => {
setItemSpy.mockRestore();
});
it("AC5: scopes storage key to project path", () => {
it("AC5: scopes storage key to project path", async () => {
const projectA = "/projects/a";
const projectB = "/projects/b";
const keyA = `storykit-chat-history:${projectA}`;
@@ -140,12 +140,12 @@ describe("useChatHistory", () => {
expect(resultB.current.messages).toEqual(messagesB);
});
it("AC2: removes localStorage key when messages are set to empty array", () => {
it("AC2: removes localStorage key when messages are set to empty array", async () => {
localStorage.setItem(STORAGE_KEY, JSON.stringify(sampleMessages));
const { result } = renderHook(() => useChatHistory(PROJECT));
act(() => {
await act(async () => {
result.current.setMessages([]);
});
@@ -154,20 +154,20 @@ describe("useChatHistory", () => {
// --- Story 179: Chat history pruning tests ---
it("S179: default limit of 200 is applied when saving to localStorage", () => {
it("S179: default limit of 200 is applied when saving to localStorage", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
expect(result.current.maxMessages).toBe(200);
});
it("S179: messages are pruned from the front when exceeding the limit", () => {
it("S179: messages are pruned from the front when exceeding the limit", async () => {
// Set a small limit to make testing practical
localStorage.setItem(LIMIT_KEY, "3");
const { result } = renderHook(() => useChatHistory(PROJECT));
const fiveMessages = makeMessages(5);
act(() => {
await act(async () => {
result.current.setMessages(fiveMessages);
});
@@ -180,13 +180,13 @@ describe("useChatHistory", () => {
expect(stored[0].content).toBe("Message 3");
});
it("S179: messages under the limit are not pruned", () => {
it("S179: messages under the limit are not pruned", async () => {
localStorage.setItem(LIMIT_KEY, "10");
const { result } = renderHook(() => useChatHistory(PROJECT));
const threeMessages = makeMessages(3);
act(() => {
await act(async () => {
result.current.setMessages(threeMessages);
});
@@ -197,7 +197,7 @@ describe("useChatHistory", () => {
expect(stored).toHaveLength(3);
});
it("S179: limit is configurable via localStorage key", () => {
it("S179: limit is configurable via localStorage key", async () => {
localStorage.setItem(LIMIT_KEY, "5");
const { result } = renderHook(() => useChatHistory(PROJECT));
@@ -205,10 +205,10 @@ describe("useChatHistory", () => {
expect(result.current.maxMessages).toBe(5);
});
it("S179: setMaxMessages updates the limit and persists it", () => {
it("S179: setMaxMessages updates the limit and persists it", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
act(() => {
await act(async () => {
result.current.setMaxMessages(50);
});
@@ -216,13 +216,13 @@ describe("useChatHistory", () => {
expect(localStorage.getItem(LIMIT_KEY)).toBe("50");
});
it("S179: a limit of 0 means unlimited (no pruning)", () => {
it("S179: a limit of 0 means unlimited (no pruning)", async () => {
localStorage.setItem(LIMIT_KEY, "0");
const { result } = renderHook(() => useChatHistory(PROJECT));
const manyMessages = makeMessages(500);
act(() => {
await act(async () => {
result.current.setMessages(manyMessages);
});
@@ -233,11 +233,11 @@ describe("useChatHistory", () => {
expect(stored).toEqual(manyMessages);
});
it("S179: changing the limit re-prunes messages on next save", () => {
it("S179: changing the limit re-prunes messages on next save", async () => {
const { result } = renderHook(() => useChatHistory(PROJECT));
const tenMessages = makeMessages(10);
act(() => {
await act(async () => {
result.current.setMessages(tenMessages);
});
@@ -248,7 +248,7 @@ describe("useChatHistory", () => {
expect(stored).toHaveLength(10);
// Now lower the limit — the effect re-runs and prunes
act(() => {
await act(async () => {
result.current.setMaxMessages(3);
});
@@ -257,7 +257,7 @@ describe("useChatHistory", () => {
expect(stored[0].content).toBe("Message 8");
});
it("S179: invalid limit in localStorage falls back to default", () => {
it("S179: invalid limit in localStorage falls back to default", async () => {
localStorage.setItem(LIMIT_KEY, "not-a-number");
const { result } = renderHook(() => useChatHistory(PROJECT));
@@ -265,7 +265,7 @@ describe("useChatHistory", () => {
expect(result.current.maxMessages).toBe(200);
});
it("S179: negative limit in localStorage falls back to default", () => {
it("S179: negative limit in localStorage falls back to default", async () => {
localStorage.setItem(LIMIT_KEY, "-5");
const { result } = renderHook(() => useChatHistory(PROJECT));
+75
View File
@@ -0,0 +1,75 @@
import { expect, test } from "@playwright/test";
/// Regression test: gateway UI must have vertical scrolling when content
/// overflows the viewport. Verifies the `overflow: hidden` fix on
/// `html / body / #root` — without that fix the page is locked at y=0.
test.describe("Gateway UI scrolling", () => {
test("page scrolls when content exceeds viewport height", async ({
page,
}) => {
// Use a small viewport to guarantee overflow even with modest content.
await page.setViewportSize({ width: 1280, height: 400 });
// --- mock API endpoints ---
// Identify this server as a gateway.
await page.route("/gateway/mode", async (route) => {
await route.fulfill({ json: { mode: "gateway" } });
});
// Return enough agents to push the page past 400 px.
const agents = Array.from({ length: 15 }, (_, i) => ({
id: `agent-${i}`,
label: `Build Agent ${i}`,
address: `10.0.0.${i}:5000`,
registered_at: Date.now() / 1000 - 60,
last_seen: Date.now() / 1000 - 10,
}));
await page.route("/gateway/agents", async (route) => {
await route.fulfill({ json: agents });
});
await page.route("/api/gateway", async (route) => {
await route.fulfill({ json: { active: "", projects: [] } });
});
await page.route("/api/gateway/pipeline", async (route) => {
await route.fulfill({ json: { active: "", projects: {} } });
});
// Non-gateway APIs called by App.tsx on startup — respond quickly so the
// loading gate (`isCheckingProject`) clears and the gateway panel renders.
await page.route("/api/project", async (route) => {
await route.fulfill({ json: null });
});
await page.route("/api/projects", async (route) => {
await route.fulfill({ json: [] });
});
await page.route("/oauth/status", async (route) => {
await route.fulfill({ json: { authenticated: false } });
});
await page.route("/api/home", async (route) => {
await route.fulfill({ json: "/home/test" });
});
await page.goto("/");
// Wait until the gateway panel is visible.
await page.waitForSelector('[data-testid="add-agent-button"]');
// The scrolling element should be taller than the visible viewport.
const isOverflowing = await page.evaluate(() => {
const el =
document.scrollingElement ?? document.documentElement;
return el.scrollHeight > el.clientHeight;
});
expect(isOverflowing).toBe(true);
// Scrolling must actually move the viewport.
await page.evaluate(() => window.scrollBy(0, 300));
const scrollY = await page.evaluate(
() => document.scrollingElement?.scrollTop ?? window.scrollY,
);
expect(scrollY).toBeGreaterThan(0);
});
});
+8 -1138
View File
File diff suppressed because it is too large Load Diff
@@ -1,8 +1,8 @@
//! Matrix bot context — shared state for the Matrix bot (rooms, history, permissions).
use crate::agents::AgentPool;
use crate::chat::ChatTransport;
use crate::chat::timer::TimerStore;
use crate::http::context::{PermissionDecision, PermissionForward};
use crate::service::timer::TimerStore;
use matrix_sdk::ruma::{OwnedEventId, OwnedRoomId, OwnedUserId};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::path::PathBuf;
@@ -177,7 +177,7 @@ mod tests {
"test-token".to_string(),
"pipeline_notification".to_string(),
)),
timer_store: Arc::new(crate::chat::timer::TimerStore::load(
timer_store: Arc::new(crate::service::timer::TimerStore::load(
std::path::PathBuf::from("/tmp/timers.json"),
)),
gateway_active_project: None,
@@ -215,7 +215,7 @@ mod tests {
"test-token".to_string(),
"pipeline_notification".to_string(),
)),
timer_store: Arc::new(crate::chat::timer::TimerStore::load(
timer_store: Arc::new(crate::service::timer::TimerStore::load(
std::path::PathBuf::from("/tmp/timers.json"),
)),
gateway_active_project: Some(Arc::clone(&active)),
@@ -256,7 +256,7 @@ mod tests {
"test-token".to_string(),
"pipeline_notification".to_string(),
)),
timer_store: Arc::new(crate::chat::timer::TimerStore::load(
timer_store: Arc::new(crate::service::timer::TimerStore::load(
std::path::PathBuf::from("/tmp/timers.json"),
)),
gateway_active_project: Some(Arc::clone(&active)),
@@ -306,7 +306,7 @@ mod tests {
"test-token".to_string(),
"pipeline_notification".to_string(),
)),
timer_store: Arc::new(crate::chat::timer::TimerStore::load(
timer_store: Arc::new(crate::service::timer::TimerStore::load(
std::path::PathBuf::from("/tmp/timers.json"),
)),
gateway_active_project: None,
@@ -572,13 +572,13 @@ pub(super) async fn on_room_message(
// Check for the timer command, which requires async file I/O and cannot
// be handled by the sync command registry.
if let Some(timer_cmd) = crate::chat::timer::extract_timer_command(
if let Some(timer_cmd) = crate::service::timer::extract_timer_command(
&user_message,
&ctx.bot_name,
ctx.bot_user_id.as_str(),
) {
slog!("[matrix-bot] Handling timer command from {sender}: {timer_cmd:?}");
let response = crate::chat::timer::handle_timer_command(
let response = crate::service::timer::handle_timer_command(
timer_cmd,
&ctx.timer_store,
&ctx.project_root,
+6 -3
View File
@@ -228,11 +228,14 @@ pub async fn run_bot(
.unwrap_or_else(|| "Assistant".to_string());
let announce_bot_name = bot_name.clone();
let timer_store = Arc::new(crate::chat::timer::TimerStore::load(
let timer_store = Arc::new(crate::service::timer::TimerStore::load(
project_root.join(".huskies").join("timers.json"),
));
// Auto-schedule timers when an agent hits a hard rate limit.
crate::chat::timer::spawn_rate_limit_auto_scheduler(Arc::clone(&timer_store), watcher_rx_auto);
crate::service::timer::spawn_rate_limit_auto_scheduler(
Arc::clone(&timer_store),
watcher_rx_auto,
);
let ctx = BotContext {
bot_user_id,
@@ -269,7 +272,7 @@ pub async fn run_bot(
// Spawn the stage-transition notification listener before entering the
// sync loop so it starts receiving watcher events immediately.
let notif_room_id_strings: Vec<String> = notif_room_ids.iter().map(|r| r.to_string()).collect();
super::super::notifications::spawn_notification_listener(
crate::service::notifications::spawn_notification_listener(
Arc::clone(&transport),
move || notif_room_id_strings.clone(),
watcher_rx,
-1
View File
@@ -21,7 +21,6 @@ pub mod commands;
pub(crate) mod config;
pub mod delete;
pub mod htop;
pub mod notifications;
pub mod rebuild;
pub mod reset;
pub mod rmtree;
+212 -2876
View File
File diff suppressed because it is too large Load Diff
+46 -98
View File
@@ -1,50 +1,10 @@
//! Anthropic API proxy — forwards model listing and key-validation requests to Anthropic.
//! Anthropic API proxy — thin adapter over `service::anthropic`.
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::llm::chat;
use crate::store::StoreOps;
use crate::service::anthropic::{self as svc, ModelSummary};
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use reqwest::header::{HeaderMap, HeaderValue};
use serde::{Deserialize, Serialize};
use serde::Deserialize;
use std::sync::Arc;
const ANTHROPIC_MODELS_URL: &str = "https://api.anthropic.com/v1/models";
const ANTHROPIC_VERSION: &str = "2023-06-01";
const KEY_ANTHROPIC_API_KEY: &str = "anthropic_api_key";
#[derive(Deserialize)]
struct AnthropicModelsResponse {
data: Vec<AnthropicModelInfo>,
}
#[derive(Deserialize)]
struct AnthropicModelInfo {
id: String,
context_window: u64,
}
#[derive(Serialize, Object)]
struct AnthropicModelSummary {
id: String,
context_window: u64,
}
fn get_anthropic_api_key(ctx: &AppContext) -> Result<String, String> {
match ctx.store.get(KEY_ANTHROPIC_API_KEY) {
Some(value) => {
if let Some(key) = value.as_str() {
if key.is_empty() {
Err("Anthropic API key is empty. Please set your API key.".to_string())
} else {
Ok(key.to_string())
}
} else {
Err("Stored API key is not a string".to_string())
}
}
None => Err("Anthropic API key not found. Please set your API key.".to_string()),
}
}
#[derive(Deserialize, Object)]
struct ApiKeyPayload {
api_key: String,
@@ -79,8 +39,8 @@ impl AnthropicApi {
/// Returns `true` if a non-empty key is present, otherwise `false`.
#[oai(path = "/anthropic/key/exists", method = "get")]
async fn get_anthropic_api_key_exists(&self) -> OpenApiResult<Json<bool>> {
let exists =
chat::get_anthropic_api_key_exists(self.ctx.store.as_ref()).map_err(bad_request)?;
let exists = svc::get_api_key_exists(self.ctx.store.as_ref())
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(exists))
}
@@ -92,74 +52,62 @@ impl AnthropicApi {
&self,
payload: Json<ApiKeyPayload>,
) -> OpenApiResult<Json<bool>> {
chat::set_anthropic_api_key(self.ctx.store.as_ref(), payload.0.api_key)
.map_err(bad_request)?;
svc::set_api_key(self.ctx.store.as_ref(), payload.0.api_key)
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(true))
}
/// List available Anthropic models.
#[oai(path = "/anthropic/models", method = "get")]
async fn list_anthropic_models(&self) -> OpenApiResult<Json<Vec<AnthropicModelSummary>>> {
self.list_anthropic_models_from(ANTHROPIC_MODELS_URL).await
}
}
impl AnthropicApi {
async fn list_anthropic_models_from(
&self,
url: &str,
) -> OpenApiResult<Json<Vec<AnthropicModelSummary>>> {
let api_key = get_anthropic_api_key(self.ctx.as_ref()).map_err(bad_request)?;
let client = reqwest::Client::new();
let mut headers = HeaderMap::new();
headers.insert(
"x-api-key",
HeaderValue::from_str(&api_key).map_err(|e| bad_request(e.to_string()))?,
);
headers.insert(
"anthropic-version",
HeaderValue::from_static(ANTHROPIC_VERSION),
);
let response = client
.get(url)
.headers(headers)
.send()
async fn list_anthropic_models(&self) -> OpenApiResult<Json<Vec<ModelSummary>>> {
let models = svc::list_models(self.ctx.store.as_ref())
.await
.map_err(|e| bad_request(e.to_string()))?;
if !response.status().is_success() {
let status = response.status();
let error_text = response
.text()
.await
.unwrap_or_else(|_| "Unknown error".to_string());
return Err(bad_request(format!(
"Anthropic API error {status}: {error_text}"
)));
}
let body = response
.json::<AnthropicModelsResponse>()
.await
.map_err(|e| bad_request(e.to_string()))?;
let models = body
.data
.into_iter()
.map(|m| AnthropicModelSummary {
id: m.id,
context_window: m.context_window,
})
.collect();
Ok(Json(models))
}
}
#[cfg(test)]
impl AnthropicApi {
/// List models from an injectable URL (used in tests to avoid real network calls).
async fn list_anthropic_models_from(
&self,
url: &str,
) -> OpenApiResult<Json<Vec<ModelSummary>>> {
let models = svc::list_models_from(self.ctx.store.as_ref(), url)
.await
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(models))
}
}
// Private helper retained for backward compatibility with tests that call it directly.
#[cfg(test)]
fn get_anthropic_api_key(ctx: &AppContext) -> Result<String, String> {
svc::get_api_key(ctx.store.as_ref()).map_err(|e| e.to_string())
}
// Private types retained so existing tests that deserialise them directly continue to compile.
#[cfg(test)]
#[derive(serde::Deserialize)]
struct AnthropicModelsResponse {
data: Vec<AnthropicModelInfo>,
}
#[cfg(test)]
#[derive(serde::Deserialize)]
struct AnthropicModelInfo {
id: String,
context_window: u64,
}
#[cfg(test)]
mod tests {
use super::*;
use crate::http::context::AppContext;
use crate::http::test_helpers::{make_api, test_ctx};
use crate::store::StoreOps;
const KEY_ANTHROPIC_API_KEY: &str = "anthropic_api_key";
use serde_json::json;
use tempfile::TempDir;
+35 -248
View File
@@ -3,19 +3,16 @@
//! `POST /api/bot/command` lets the web UI invoke the same deterministic bot
//! commands available in Matrix without going through the LLM.
//!
//! Synchronous commands (status, git, cost, move, show, overview, help) are
//! dispatched directly through the matrix command registry.
//! Asynchronous commands (assign, start, delete, rebuild) are dispatched to
//! their dedicated async handlers. The `reset` command is handled by the frontend
//! (it clears local session state and message history) and is not routed here.
//! Dispatches to [`crate::service::bot_command::execute`], which owns all
//! parsing and routing logic. This handler is a thin OpenAPI adapter: it
//! receives JSON, calls the service, and maps typed errors to HTTP status codes.
use crate::chat::commands::CommandDispatch;
use crate::http::context::{AppContext, OpenApiResult};
use crate::service::bot_command as svc;
use poem::http::StatusCode;
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
#[derive(Tags)]
enum BotCommandTags {
@@ -50,6 +47,11 @@ impl BotCommandApi {
/// Dispatches to the same handlers used by the Matrix and Slack bots.
/// Returns a markdown-formatted response that the frontend can display
/// directly in the chat panel.
///
/// # Errors
/// - `400 Bad Request` — project root not set, or invalid command arguments.
/// - `404 Not Found` — unrecognised command keyword.
/// - `500 Internal Server Error` — command execution failed.
#[oai(path = "/bot/command", method = "post")]
async fn run_command(
&self,
@@ -63,221 +65,23 @@ impl BotCommandApi {
let cmd = body.command.trim().to_ascii_lowercase();
let args = body.args.trim();
let response = dispatch_command(&cmd, args, &project_root, &self.ctx.agents).await;
let response = svc::execute(&cmd, args, &project_root, &self.ctx.agents)
.await
.map_err(|e| match e {
svc::Error::UnknownCommand(msg) => {
poem::Error::from_string(msg, StatusCode::NOT_FOUND)
}
svc::Error::BadArgs(msg) => poem::Error::from_string(msg, StatusCode::BAD_REQUEST),
svc::Error::CommandFailed(msg) => {
poem::Error::from_string(msg, StatusCode::INTERNAL_SERVER_ERROR)
}
})?;
Ok(Json(BotCommandResponse { response }))
}
}
/// Dispatch a command keyword + args to the appropriate handler.
async fn dispatch_command(
cmd: &str,
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
match cmd {
"assign" => dispatch_assign(args, project_root, agents).await,
"start" => dispatch_start(args, project_root, agents).await,
"delete" => dispatch_delete(args, project_root, agents).await,
"rebuild" => dispatch_rebuild(project_root, agents).await,
"rmtree" => dispatch_rmtree(args, project_root, agents).await,
"timer" => dispatch_timer(args, project_root).await,
"htop" => dispatch_htop(args, agents).await,
// All other commands go through the synchronous command registry.
_ => dispatch_sync(cmd, args, project_root, agents),
}
}
fn dispatch_sync(
cmd: &str,
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
let ambient_rooms: Arc<Mutex<HashSet<String>>> = Arc::new(Mutex::new(HashSet::new()));
// Use a synthetic bot name/id so strip_bot_mention passes through.
let bot_name = "__web_ui__";
let bot_user_id = "@__web_ui__:localhost";
let room_id = "__web_ui__";
let dispatch = CommandDispatch {
bot_name,
bot_user_id,
project_root,
agents,
ambient_rooms: &ambient_rooms,
room_id,
};
// Build a synthetic message that the registry can parse.
let synthetic = if args.is_empty() {
format!("{bot_name} {cmd}")
} else {
format!("{bot_name} {cmd} {args}")
};
match crate::chat::commands::try_handle_command(&dispatch, &synthetic) {
Some(response) => response,
None => {
// Command exists in the registry but its fallback handler returns None
// (start, delete, rebuild, reset, htop — handled elsewhere or in
// the frontend). Should not be reached for those since we intercept
// them above. For genuinely unknown commands, tell the user.
format!("Unknown command: `/{cmd}`. Type `/help` to see available commands.")
}
}
}
async fn dispatch_assign(
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
// args: "<number> <model>"
let mut parts = args.splitn(2, char::is_whitespace);
let number_str = parts.next().unwrap_or("").trim();
let model_str = parts.next().unwrap_or("").trim();
if number_str.is_empty()
|| !number_str.chars().all(|c| c.is_ascii_digit())
|| model_str.is_empty()
{
return "Usage: `/assign <number> <model>` (e.g. `/assign 42 opus`)".to_string();
}
crate::chat::transport::matrix::assign::handle_assign(
"web-ui",
number_str,
model_str,
project_root,
agents,
)
.await
}
async fn dispatch_start(
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
// args: "<number>" or "<number> <model_hint>"
let mut parts = args.splitn(2, char::is_whitespace);
let number_str = parts.next().unwrap_or("").trim();
let hint_str = parts.next().unwrap_or("").trim();
if number_str.is_empty() || !number_str.chars().all(|c| c.is_ascii_digit()) {
return "Usage: `/start <number>` or `/start <number> <model>` (e.g. `/start 42 opus`)"
.to_string();
}
let agent_hint = if hint_str.is_empty() {
None
} else {
Some(hint_str)
};
crate::chat::transport::matrix::start::handle_start(
"web-ui",
number_str,
agent_hint,
project_root,
agents,
)
.await
}
async fn dispatch_delete(
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
let number_str = args.trim();
if number_str.is_empty() || !number_str.chars().all(|c| c.is_ascii_digit()) {
return "Usage: `/delete <number>` (e.g. `/delete 42`)".to_string();
}
crate::chat::transport::matrix::delete::handle_delete(
"web-ui",
number_str,
project_root,
agents,
)
.await
}
async fn dispatch_rmtree(
args: &str,
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
let number_str = args.trim();
if number_str.is_empty() || !number_str.chars().all(|c| c.is_ascii_digit()) {
return "Usage: `/rmtree <number>` (e.g. `/rmtree 42`)".to_string();
}
crate::chat::transport::matrix::rmtree::handle_rmtree(
"web-ui",
number_str,
project_root,
agents,
)
.await
}
async fn dispatch_rebuild(
project_root: &std::path::Path,
agents: &Arc<crate::agents::AgentPool>,
) -> String {
crate::chat::transport::matrix::rebuild::handle_rebuild("web-ui", project_root, agents).await
}
async fn dispatch_timer(args: &str, project_root: &std::path::Path) -> String {
// Re-use the existing parser by constructing a synthetic message that
// looks like a bot-addressed timer command.
let synthetic = format!("__web_ui__ timer {args}");
let timer_cmd = match crate::chat::timer::extract_timer_command(
&synthetic,
"__web_ui__",
"@__web_ui__:localhost",
) {
Some(cmd) => cmd,
None => {
return "Usage: `/timer list`, `/timer <number> <HH:MM>`, or `/timer cancel <number>`"
.to_string();
}
};
let store =
crate::chat::timer::TimerStore::load(project_root.join(".huskies").join("timers.json"));
crate::chat::timer::handle_timer_command(timer_cmd, &store, project_root).await
}
/// Handle the `htop` command from the web UI.
///
/// The web UI uses a one-shot HTTP request, so live updates are not possible
/// here. Returns a static snapshot of the process dashboard. For `htop stop`,
/// returns a helpful message (no persistent session state exists in the web UI).
async fn dispatch_htop(args: &str, agents: &Arc<crate::agents::AgentPool>) -> String {
use crate::chat::transport::matrix::htop::{HtopCommand, build_htop_message};
// Re-use the existing parser by constructing a synthetic message.
let synthetic = if args.is_empty() {
"__web_ui__ htop".to_string()
} else {
format!("__web_ui__ htop {args}")
};
match crate::chat::transport::matrix::htop::extract_htop_command(
&synthetic,
"__web_ui__",
"@__web_ui__:localhost",
) {
Some(HtopCommand::Stop) => "No active htop session in the web UI. \
Live sessions are only supported in chat transports (Matrix, Slack, Discord)."
.to_string(),
Some(HtopCommand::Start { duration_secs }) => build_htop_message(agents, 0, duration_secs),
None => build_htop_message(agents, 0, 300),
}
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
@@ -316,13 +120,7 @@ mod tests {
args: String::new(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
let resp = result.unwrap().0;
assert!(
resp.response.contains("Unknown command"),
"expected 'Unknown command' in: {}",
resp.response
);
assert!(result.is_err(), "unknown command should return HTTP 404");
}
#[tokio::test]
@@ -334,13 +132,7 @@ mod tests {
args: String::new(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
let resp = result.unwrap().0;
assert!(
resp.response.contains("Usage"),
"expected usage hint in: {}",
resp.response
);
assert!(result.is_err(), "start with no args should return HTTP 400");
}
#[tokio::test]
@@ -352,12 +144,9 @@ mod tests {
args: String::new(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
let resp = result.unwrap().0;
assert!(
resp.response.contains("Usage"),
"expected usage hint in: {}",
resp.response
result.is_err(),
"delete with no args should return HTTP 400"
);
}
@@ -388,7 +177,11 @@ mod tests {
args: "list".to_string(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
assert!(
result.is_ok(),
"timer list should succeed, got err: {:?}",
result.err().map(|e| e.to_string())
);
let resp = result.unwrap().0;
assert!(
!resp.response.contains("Unknown command"),
@@ -469,12 +262,9 @@ mod tests {
args: String::new(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
let resp = result.unwrap().0;
assert!(
resp.response.contains("Usage"),
"expected usage hint for bare /rmtree: {}",
resp.response
result.is_err(),
"rmtree with no args should return HTTP 400"
);
}
@@ -487,12 +277,9 @@ mod tests {
args: "foo".to_string(),
};
let result = api.run_command(Json(body)).await;
assert!(result.is_ok());
let resp = result.unwrap().0;
assert!(
resp.response.contains("Usage"),
"expected usage hint for /rmtree foo: {}",
resp.response
result.is_err(),
"rmtree with non-numeric arg should return HTTP 400"
);
}
+1 -1
View File
@@ -1,8 +1,8 @@
//! Application context — shared state (`AppContext`) threaded through all HTTP handlers.
use crate::agents::{AgentPool, ReconciliationEvent};
use crate::chat::timer::TimerStore;
use crate::io::watcher::WatcherEvent;
use crate::rebuild::{BotShutdownNotifier, ShutdownReason};
use crate::service::timer::TimerStore;
use crate::state::SessionState;
use crate::store::JsonFileStore;
use crate::workflow::WorkflowState;
+12 -155
View File
@@ -4,163 +4,20 @@
//! server to aggregate cross-project pipeline notifications into a single
//! gateway chat channel. Each project server buffers up to 500 events in
//! memory and serves them via this endpoint.
//!
//! Domain logic lives in `service::events`; this module is a thin HTTP
//! adapter: extract query params → call service → shape response.
#[cfg(test)]
pub use crate::service::events::StoredEvent;
pub use crate::service::events::{EventBuffer, subscribe_to_watcher};
// MAX_BUFFER_SIZE is used in tests via `use super::*`.
#[cfg(test)]
pub use crate::service::events::MAX_BUFFER_SIZE;
use crate::io::watcher::WatcherEvent;
use poem::web::{Data, Query};
use poem::{Response, handler, http::StatusCode};
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use tokio::sync::broadcast;
/// Maximum number of events retained in the in-memory buffer.
const MAX_BUFFER_SIZE: usize = 500;
/// A pipeline event stored in the event buffer with a timestamp.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum StoredEvent {
/// A work item transitioned between pipeline stages.
StageTransition {
/// Work item ID (e.g. `"42_story_my_feature"`).
story_id: String,
/// The stage the item moved FROM (display name, e.g. `"Current"`).
from_stage: String,
/// The stage the item moved TO (directory key, e.g. `"3_qa"`).
to_stage: String,
/// Unix timestamp in milliseconds when this event was recorded.
timestamp_ms: u64,
},
/// A merge operation failed for a story.
MergeFailure {
/// Work item ID (e.g. `"42_story_my_feature"`).
story_id: String,
/// Human-readable description of the failure.
reason: String,
/// Unix timestamp in milliseconds when this event was recorded.
timestamp_ms: u64,
},
/// A story was blocked (e.g. retry limit exceeded).
StoryBlocked {
/// Work item ID (e.g. `"42_story_my_feature"`).
story_id: String,
/// Human-readable reason the story was blocked.
reason: String,
/// Unix timestamp in milliseconds when this event was recorded.
timestamp_ms: u64,
},
}
impl StoredEvent {
/// Returns the `timestamp_ms` field common to all event variants.
pub fn timestamp_ms(&self) -> u64 {
match self {
StoredEvent::StageTransition { timestamp_ms, .. } => *timestamp_ms,
StoredEvent::MergeFailure { timestamp_ms, .. } => *timestamp_ms,
StoredEvent::StoryBlocked { timestamp_ms, .. } => *timestamp_ms,
}
}
}
/// Shared, thread-safe ring buffer of recent pipeline events.
///
/// Wrapped in `Arc` so it can be shared between the background subscriber
/// task and the HTTP handler. The inner `Mutex` guards the `VecDeque`.
#[derive(Clone, Debug)]
pub struct EventBuffer(Arc<Mutex<VecDeque<StoredEvent>>>);
impl EventBuffer {
/// Create a new, empty event buffer.
pub fn new() -> Self {
EventBuffer(Arc::new(Mutex::new(VecDeque::new())))
}
/// Append an event to the buffer, evicting the oldest entry if the buffer
/// exceeds [`MAX_BUFFER_SIZE`].
pub fn push(&self, event: StoredEvent) {
let mut buf = self.0.lock().unwrap();
if buf.len() >= MAX_BUFFER_SIZE {
buf.pop_front();
}
buf.push_back(event);
}
/// Return all events whose `timestamp_ms` is strictly greater than `since_ms`.
pub fn events_since(&self, since_ms: u64) -> Vec<StoredEvent> {
let buf = self.0.lock().unwrap();
buf.iter()
.filter(|e| e.timestamp_ms() > since_ms)
.cloned()
.collect()
}
}
impl Default for EventBuffer {
fn default() -> Self {
Self::new()
}
}
/// Returns the current Unix timestamp in milliseconds.
fn now_ms() -> u64 {
std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_millis() as u64)
.unwrap_or(0)
}
/// Spawn a background task that consumes [`WatcherEvent`] broadcasts and
/// stores relevant events in `buffer`.
///
/// Only [`WatcherEvent::WorkItem`] (with a known `from_stage`),
/// [`WatcherEvent::MergeFailure`], and [`WatcherEvent::StoryBlocked`]
/// variants are stored. All other variants are silently ignored.
pub fn subscribe_to_watcher(buffer: EventBuffer, mut rx: broadcast::Receiver<WatcherEvent>) {
tokio::spawn(async move {
loop {
match rx.recv().await {
Ok(WatcherEvent::WorkItem {
stage,
item_id,
from_stage,
..
}) => {
// Only store genuine transitions (from_stage is known).
if let Some(from) = from_stage {
buffer.push(StoredEvent::StageTransition {
story_id: item_id,
from_stage: from,
to_stage: stage,
timestamp_ms: now_ms(),
});
}
}
Ok(WatcherEvent::MergeFailure { story_id, reason }) => {
buffer.push(StoredEvent::MergeFailure {
story_id,
reason,
timestamp_ms: now_ms(),
});
}
Ok(WatcherEvent::StoryBlocked { story_id, reason }) => {
buffer.push(StoredEvent::StoryBlocked {
story_id,
reason,
timestamp_ms: now_ms(),
});
}
Ok(_) => {} // Ignore all other event types.
Err(broadcast::error::RecvError::Lagged(n)) => {
crate::slog!("[events] Subscriber lagged, skipped {n} events");
}
Err(broadcast::error::RecvError::Closed) => {
crate::slog!("[events] Watcher channel closed; stopping event subscriber");
break;
}
}
}
});
}
use serde::Deserialize;
/// Query parameters for `GET /api/events`.
#[derive(Deserialize)]
@@ -181,7 +38,7 @@ pub fn events_handler(
Query(params): Query<EventsQuery>,
Data(buffer): Data<&EventBuffer>,
) -> Response {
let events = buffer.events_since(params.since);
let events = crate::service::events::events_since(buffer, params.since);
let body = serde_json::to_vec(&events).unwrap_or_else(|_| b"[]".to_vec());
Response::builder()
.status(StatusCode::OK)
File diff suppressed because it is too large Load Diff
+10 -11
View File
@@ -1,7 +1,13 @@
//! Health check endpoint — returns a static "ok" response.
//! Health check endpoint — thin HTTP adapter over `service::health`.
//!
//! Domain logic (the `HealthStatus` type and check function) lives in
//! `service::health`; this module is a thin adapter: call service → shape
//! response.
pub use crate::service::health::HealthStatus;
use poem::handler;
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use serde::Serialize;
use poem_openapi::{OpenApi, Tags, payload::Json};
/// Health check endpoint.
///
@@ -16,11 +22,6 @@ enum HealthTags {
Health,
}
#[derive(Serialize, Object)]
pub struct HealthStatus {
status: String,
}
pub struct HealthApi;
#[OpenApi(tag = "HealthTags::Health")]
@@ -30,9 +31,7 @@ impl HealthApi {
/// Returns a JSON status object to confirm the server is running.
#[oai(path = "/health", method = "get")]
async fn health(&self) -> Json<HealthStatus> {
Json(HealthStatus {
status: "ok".to_string(),
})
Json(crate::service::health::check())
}
}
+24 -25
View File
@@ -1,6 +1,6 @@
//! HTTP I/O endpoints — REST API for file and directory operations.
//! HTTP I/O endpoints — thin adapters over `service::file_io`.
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::io::fs as io_fs;
use crate::service::file_io::{self as svc, FileEntry};
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use serde::Deserialize;
use std::sync::Arc;
@@ -46,18 +46,18 @@ impl IoApi {
/// Read a file from the currently open project and return its contents.
#[oai(path = "/io/fs/read", method = "post")]
async fn read_file(&self, payload: Json<FilePathPayload>) -> OpenApiResult<Json<String>> {
let content = io_fs::read_file(payload.0.path, &self.ctx.state)
let content = svc::read_file(payload.0.path, &self.ctx.state)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(content))
}
/// Write a file to the currently open project, creating parent directories if needed.
#[oai(path = "/io/fs/write", method = "post")]
async fn write_file(&self, payload: Json<WriteFilePayload>) -> OpenApiResult<Json<bool>> {
io_fs::write_file(payload.0.path, payload.0.content, &self.ctx.state)
svc::write_file(payload.0.path, payload.0.content, &self.ctx.state)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(true))
}
@@ -66,10 +66,10 @@ impl IoApi {
async fn list_directory(
&self,
payload: Json<FilePathPayload>,
) -> OpenApiResult<Json<Vec<io_fs::FileEntry>>> {
let entries = io_fs::list_directory(payload.0.path, &self.ctx.state)
) -> OpenApiResult<Json<Vec<FileEntry>>> {
let entries = svc::list_directory(payload.0.path, &self.ctx.state)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(entries))
}
@@ -78,10 +78,10 @@ impl IoApi {
async fn list_directory_absolute(
&self,
payload: Json<FilePathPayload>,
) -> OpenApiResult<Json<Vec<io_fs::FileEntry>>> {
let entries = io_fs::list_directory_absolute(payload.0.path)
) -> OpenApiResult<Json<Vec<FileEntry>>> {
let entries = svc::list_directory_absolute(payload.0.path)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(entries))
}
@@ -91,25 +91,25 @@ impl IoApi {
&self,
payload: Json<CreateDirectoryPayload>,
) -> OpenApiResult<Json<bool>> {
io_fs::create_directory_absolute(payload.0.path)
svc::create_directory_absolute(payload.0.path)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(true))
}
/// Get the user's home directory.
#[oai(path = "/io/fs/home", method = "get")]
async fn get_home_directory(&self) -> OpenApiResult<Json<String>> {
let home = io_fs::get_home_directory().map_err(bad_request)?;
let home = svc::get_home_directory().map_err(|e| bad_request(e.to_string()))?;
Ok(Json(home))
}
/// List all files in the project recursively, respecting .gitignore.
#[oai(path = "/io/fs/files", method = "get")]
async fn list_project_files(&self) -> OpenApiResult<Json<Vec<String>>> {
let files = io_fs::list_project_files(&self.ctx.state)
let files = svc::list_project_files(&self.ctx.state)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(files))
}
@@ -118,10 +118,10 @@ impl IoApi {
async fn search_files(
&self,
payload: Json<SearchPayload>,
) -> OpenApiResult<Json<Vec<crate::io::search::SearchResult>>> {
let results = crate::io::search::search_files(payload.0.query, &self.ctx.state)
) -> OpenApiResult<Json<Vec<crate::service::file_io::SearchResult>>> {
let results = svc::search_files(payload.0.query, &self.ctx.state)
.await
.map_err(bad_request)?;
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(results))
}
@@ -130,11 +130,10 @@ impl IoApi {
async fn exec_shell(
&self,
payload: Json<ExecShellPayload>,
) -> OpenApiResult<Json<crate::io::shell::CommandOutput>> {
let output =
crate::io::shell::exec_shell(payload.0.command, payload.0.args, &self.ctx.state)
.await
.map_err(bad_request)?;
) -> OpenApiResult<Json<crate::service::file_io::CommandOutput>> {
let output = svc::exec_shell(payload.0.command, payload.0.args, &self.ctx.state)
.await
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(output))
}
}
+2 -2
View File
@@ -2,7 +2,7 @@
use crate::agents::PipelineStage;
use crate::config::ProjectConfig;
use crate::http::context::AppContext;
use crate::http::settings::get_editor_command_from_store;
use crate::service::settings::get_editor_command;
use crate::slog_warn;
use crate::worktree;
use serde_json::{Value, json};
@@ -414,7 +414,7 @@ pub(super) fn tool_get_editor_command(args: &Value, ctx: &AppContext) -> Result<
.and_then(|v| v.as_str())
.ok_or("Missing required argument: worktree_path")?;
let editor = get_editor_command_from_store(ctx)
let editor = get_editor_command(&*ctx.store)
.ok_or_else(|| "No editor configured. Set one via PUT /api/settings/editor.".to_string())?;
Ok(format!("{editor} {worktree_path}"))
+5 -88
View File
@@ -1,10 +1,15 @@
//! MCP diagnostic tools — server logs, CRDT dump, and story movement helpers.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::diagnostics` for all business logic, and serialises responses.
use crate::agents::move_story_to_stage;
use crate::http::context::AppContext;
use crate::log_buffer;
use crate::service::diagnostics::{add_permission_rule, generate_permission_rule};
use crate::slog;
use crate::slog_warn;
use serde_json::{Value, json};
#[allow(unused_imports)]
use std::fs;
pub(super) fn tool_get_server_logs(args: &Value) -> Result<String, String> {
@@ -44,94 +49,6 @@ pub(super) async fn tool_rebuild_and_restart(ctx: &AppContext) -> Result<String,
crate::rebuild::rebuild_and_restart(&ctx.agents, &project_root, notifier).await
}
/// Generate a Claude Code permission rule string for the given tool name and input.
///
/// - `Edit` / `Write` / `Read` / `Grep` / `Glob` etc. → just the tool name
/// - `Bash` → `Bash(first_word *)` derived from the `command` field in `tool_input`
/// - `mcp__*` → the full tool name (e.g. `mcp__huskies__create_story`)
fn generate_permission_rule(tool_name: &str, tool_input: &Value) -> String {
if tool_name == "Bash" {
// Extract command from tool_input.command and use first word as prefix
let command_str = tool_input
.get("command")
.and_then(|v| v.as_str())
.unwrap_or("");
let first_word = command_str.split_whitespace().next().unwrap_or("unknown");
format!("Bash({first_word} *)")
} else {
// For Edit, Write, Read, Glob, Grep, MCP tools, etc. — use the tool name directly
tool_name.to_string()
}
}
/// Add a permission rule to `.claude/settings.json` in the project root.
/// Does nothing if the rule already exists. Creates the file if missing.
pub(super) fn add_permission_rule(
project_root: &std::path::Path,
rule: &str,
) -> Result<(), String> {
let claude_dir = project_root.join(".claude");
fs::create_dir_all(&claude_dir)
.map_err(|e| format!("Failed to create .claude/ directory: {e}"))?;
let settings_path = claude_dir.join("settings.json");
let mut settings: Value = if settings_path.exists() {
let content = fs::read_to_string(&settings_path)
.map_err(|e| format!("Failed to read settings.json: {e}"))?;
serde_json::from_str(&content).map_err(|e| format!("Failed to parse settings.json: {e}"))?
} else {
json!({ "permissions": { "allow": [] } })
};
let allow_arr = settings
.pointer_mut("/permissions/allow")
.and_then(|v| v.as_array_mut());
let allow = match allow_arr {
Some(arr) => arr,
None => {
// Ensure the structure exists
settings
.as_object_mut()
.unwrap()
.entry("permissions")
.or_insert(json!({ "allow": [] }));
settings
.pointer_mut("/permissions/allow")
.unwrap()
.as_array_mut()
.unwrap()
}
};
// Check for duplicates — exact string match
let rule_value = Value::String(rule.to_string());
if allow.contains(&rule_value) {
return Ok(());
}
// Also check for wildcard coverage: if "mcp__huskies__*" exists, don't add
// a more specific "mcp__huskies__create_story".
let dominated = allow.iter().any(|existing| {
if let Some(pat) = existing.as_str()
&& let Some(prefix) = pat.strip_suffix('*')
{
return rule.starts_with(prefix);
}
false
});
if dominated {
return Ok(());
}
allow.push(rule_value);
let pretty =
serde_json::to_string_pretty(&settings).map_err(|e| format!("Failed to serialize: {e}"))?;
fs::write(&settings_path, pretty).map_err(|e| format!("Failed to write settings.json: {e}"))?;
Ok(())
}
/// MCP tool called by Claude Code via `--permission-prompt-tool`.
///
/// Forwards the permission request through the shared channel to the active
+16 -71
View File
@@ -1,68 +1,34 @@
//! MCP git tools — status, diff, add, commit, and log operations on agent worktrees.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::git_ops` for all business logic, and serialises responses.
use crate::http::context::AppContext;
use serde_json::{Value, json};
use std::path::PathBuf;
/// Validates that `worktree_path` exists and is inside the project's
/// `.huskies/worktrees/` directory. Returns the canonicalized path.
///
/// Thin wrapper that obtains the project root from `ctx` and delegates to
/// `service::git_ops::io::validate_worktree_path`.
fn validate_worktree_path(worktree_path: &str, ctx: &AppContext) -> Result<PathBuf, String> {
let wd = PathBuf::from(worktree_path);
if !wd.is_absolute() {
return Err("worktree_path must be an absolute path".to_string());
}
if !wd.exists() {
return Err(format!("worktree_path does not exist: {worktree_path}"));
}
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let worktrees_root = project_root.join(".huskies").join("worktrees");
let canonical_wd = wd
.canonicalize()
.map_err(|e| format!("Cannot canonicalize worktree_path: {e}"))?;
let canonical_wt = if worktrees_root.exists() {
worktrees_root
.canonicalize()
.map_err(|e| format!("Cannot canonicalize worktrees root: {e}"))?
} else {
return Err("No worktrees directory found in project".to_string());
};
if !canonical_wd.starts_with(&canonical_wt) {
return Err(format!(
"worktree_path must be inside .huskies/worktrees/. Got: {worktree_path}"
));
}
Ok(canonical_wd)
crate::service::git_ops::io::validate_worktree_path(worktree_path, &project_root)
.map_err(|e| e.to_string())
}
/// Run a git command in the given directory and return its output.
async fn run_git(args: Vec<&'static str>, dir: PathBuf) -> Result<std::process::Output, String> {
tokio::task::spawn_blocking(move || {
std::process::Command::new("git")
.args(&args)
.current_dir(&dir)
.output()
})
.await
.map_err(|e| format!("Task join error: {e}"))?
.map_err(|e| format!("Failed to run git: {e}"))
crate::service::git_ops::io::run_git(args, dir)
.await
.map_err(|e| e.to_string())
}
/// Run a git command with owned args in the given directory.
async fn run_git_owned(args: Vec<String>, dir: PathBuf) -> Result<std::process::Output, String> {
tokio::task::spawn_blocking(move || {
std::process::Command::new("git")
.args(&args)
.current_dir(&dir)
.output()
})
.await
.map_err(|e| format!("Task join error: {e}"))?
.map_err(|e| format!("Failed to run git: {e}"))
crate::service::git_ops::io::run_git_owned(args, dir)
.await
.map_err(|e| e.to_string())
}
/// git_status — returns working tree status (staged, unstaged, untracked files).
@@ -86,29 +52,8 @@ pub(super) async fn tool_git_status(args: &Value, ctx: &AppContext) -> Result<St
));
}
let mut staged: Vec<String> = Vec::new();
let mut unstaged: Vec<String> = Vec::new();
let mut untracked: Vec<String> = Vec::new();
for line in stdout.lines() {
if line.len() < 3 {
continue;
}
let x = line.chars().next().unwrap_or(' ');
let y = line.chars().nth(1).unwrap_or(' ');
let path = line[3..].to_string();
match (x, y) {
('?', '?') => untracked.push(path),
(' ', _) => unstaged.push(path),
(_, ' ') => staged.push(path),
_ => {
// Both staged and unstaged modifications
staged.push(path.clone());
unstaged.push(path);
}
}
}
let (staged, unstaged, untracked) =
crate::service::git_ops::parse_git_status_porcelain(&stdout);
serde_json::to_string_pretty(&json!({
"staged": staged,
+7 -80
View File
@@ -1,8 +1,12 @@
//! MCP QA tools — request, approve, and reject QA reviews for stories.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::qa` for all business logic, and serialises responses.
use crate::agents::{
move_story_to_done, move_story_to_merge, move_story_to_qa, reject_story_from_qa,
};
use crate::http::context::AppContext;
use crate::service::qa::{find_free_port, is_spike, merge_spike_branch_to_master};
use crate::slog;
use crate::slog_warn;
use serde_json::{Value, json};
@@ -57,8 +61,7 @@ pub(super) async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result<St
let _ = crate::io::story_metadata::clear_front_matter_field(&qa_path, "review_hold");
}
let item_type = crate::agents::lifecycle::item_type_from_id(story_id);
if item_type == "spike" {
if is_spike(story_id) {
// Spikes skip the merge stage entirely: merge the feature branch to master
// directly (fast-forward or simple merge), then move straight to done.
let branch = format!("feature/story-{story_id}");
@@ -68,7 +71,8 @@ pub(super) async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result<St
let merge_ok =
tokio::task::spawn_blocking(move || merge_spike_branch_to_master(&root, &br, &sid))
.await
.map_err(|e| format!("Merge task panicked: {e}"))??;
.map_err(|e| format!("Merge task panicked: {e}"))?
.map_err(|e| e.to_string())?;
move_story_to_done(&project_root, story_id)?;
@@ -115,73 +119,6 @@ pub(super) async fn tool_approve_qa(args: &Value, ctx: &AppContext) -> Result<St
}
}
/// Merge a spike's feature branch into master using a fast-forward or simple merge.
///
/// Unlike the squash-merge pipeline used for stories, spikes skip quality gates
/// and preserve their commit history. Returns `true` if a merge was performed,
/// `false` if the branch had no unmerged commits.
fn merge_spike_branch_to_master(
project_root: &std::path::Path,
branch: &str,
story_id: &str,
) -> Result<bool, String> {
use std::process::Command;
// Check the branch exists and has unmerged changes.
if !crate::agents::lifecycle::feature_branch_has_unmerged_changes(project_root, story_id) {
slog!("[qa] Spike '{story_id}': feature branch has no unmerged changes, skipping merge.");
return Ok(false);
}
// Ensure we are on master.
let checkout = Command::new("git")
.args(["checkout", "master"])
.current_dir(project_root)
.output()
.map_err(|e| format!("git checkout master failed: {e}"))?;
if !checkout.status.success() {
return Err(format!(
"Failed to checkout master: {}",
String::from_utf8_lossy(&checkout.stderr)
));
}
// Try fast-forward first, then fall back to a regular merge.
let ff = Command::new("git")
.args(["merge", "--ff-only", branch])
.current_dir(project_root)
.output()
.map_err(|e| format!("git merge --ff-only failed: {e}"))?;
if ff.status.success() {
slog!("[qa] Spike '{story_id}': fast-forward merged '{branch}' into master.");
return Ok(true);
}
// Fast-forward failed (diverged history) — fall back to a regular merge.
let merge = Command::new("git")
.args([
"merge",
"--no-ff",
branch,
"-m",
&format!("Merge spike branch '{branch}' into master"),
])
.current_dir(project_root)
.output()
.map_err(|e| format!("git merge failed: {e}"))?;
if merge.status.success() {
slog!("[qa] Spike '{story_id}': merged '{branch}' into master (no-ff).");
Ok(true)
} else {
Err(format!(
"Failed to merge spike branch '{branch}' into master: {}",
String::from_utf8_lossy(&merge.stderr)
))
}
}
pub(super) async fn tool_reject_qa(args: &Value, ctx: &AppContext) -> Result<String, String> {
let story_id = args
.get("story_id")
@@ -294,16 +231,6 @@ pub(super) async fn tool_launch_qa_app(args: &Value, ctx: &AppContext) -> Result
.map_err(|e| format!("Serialization error: {e}"))
}
/// Find a free TCP port starting from `start`.
pub(super) fn find_free_port(start: u16) -> u16 {
for port in start..start + 100 {
if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() {
return port;
}
}
start // fallback
}
#[cfg(test)]
mod tests {
use super::*;
+10 -127
View File
@@ -1,5 +1,10 @@
//! MCP shell tools — run commands, execute tests, and stream output via MCP.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::shell` for all business logic, and serialises responses.
use crate::http::context::AppContext;
#[allow(unused_imports)]
use crate::service::shell::{extract_count, is_dangerous, parse_test_counts, truncate_output};
use bytes::Bytes;
use futures::StreamExt;
use poem::{Body, Response};
@@ -11,92 +16,15 @@ const MAX_TIMEOUT_SECS: u64 = 600;
const TEST_TIMEOUT_SECS: u64 = 1200;
const MAX_OUTPUT_LINES: usize = 100;
/// Patterns that are unconditionally blocked regardless of context.
static BLOCKED_PATTERNS: &[&str] = &[
"rm -rf /",
"rm -fr /",
"rm -rf /*",
"rm -fr /*",
"rm --no-preserve-root",
":(){ :|:& };:",
"> /dev/sda",
"dd if=/dev",
];
/// Binaries that are unconditionally blocked.
static BLOCKED_BINARIES: &[&str] = &[
"sudo", "su", "shutdown", "reboot", "halt", "poweroff", "mkfs",
];
/// Returns an error message if the command matches a blocked pattern or binary.
fn is_dangerous(command: &str) -> Option<String> {
let trimmed = command.trim();
// Check each blocked pattern (substring match)
for &pattern in BLOCKED_PATTERNS {
if trimmed.contains(pattern) {
return Some(format!(
"Command blocked: dangerous pattern '{pattern}' detected"
));
}
}
// Check first token of the command against blocked binaries
if let Some(first_token) = trimmed.split_whitespace().next() {
let binary = std::path::Path::new(first_token)
.file_name()
.and_then(|n| n.to_str())
.unwrap_or(first_token);
if BLOCKED_BINARIES.contains(&binary) {
return Some(format!("Command blocked: '{binary}' is not permitted"));
}
}
None
}
/// Validates that `working_dir` exists and is inside the project's
/// `.huskies/worktrees/` directory. Returns the canonicalized path.
///
/// Thin wrapper that obtains the project root from `ctx` and delegates to
/// `service::shell::io::validate_working_dir`.
fn validate_working_dir(working_dir: &str, ctx: &AppContext) -> Result<PathBuf, String> {
let wd = PathBuf::from(working_dir);
if !wd.is_absolute() {
return Err("working_dir must be an absolute path".to_string());
}
if !wd.exists() {
return Err(format!("working_dir does not exist: {working_dir}"));
}
let project_root = ctx.agents.get_project_root(&ctx.state)?;
let worktrees_root = project_root.join(".huskies").join("worktrees");
let canonical_wd = wd
.canonicalize()
.map_err(|e| format!("Cannot canonicalize working_dir: {e}"))?;
// If worktrees_root doesn't exist yet, we can't allow anything
let canonical_wt = if worktrees_root.exists() {
worktrees_root
.canonicalize()
.map_err(|e| format!("Cannot canonicalize worktrees root: {e}"))?
} else {
return Err("No worktrees directory found in project".to_string());
};
// Also allow the merge workspace so mergemaster can fix conflicts.
let merge_workspace = project_root.join(".huskies").join("merge_workspace");
let canonical_mw = merge_workspace.canonicalize().unwrap_or_default();
let in_worktrees = canonical_wd.starts_with(&canonical_wt);
let in_merge_ws =
!canonical_mw.as_os_str().is_empty() && canonical_wd.starts_with(&canonical_mw);
if !in_worktrees && !in_merge_ws {
return Err(format!(
"working_dir must be inside .huskies/worktrees/ or .huskies/merge_workspace/. Got: {working_dir}"
));
}
Ok(canonical_wd)
crate::service::shell::io::validate_working_dir(working_dir, &project_root)
.map_err(|e| e.to_string())
}
/// Regular (non-SSE) run_command: runs the bash command to completion and
@@ -328,51 +256,6 @@ pub(super) fn handle_run_command_sse(
.body(Body::from_bytes_stream(stream.map(|r| r.map(Bytes::from))))
}
/// Truncate output to at most `max_lines` lines, keeping the tail.
fn truncate_output(output: &str, max_lines: usize) -> String {
let lines: Vec<&str> = output.lines().collect();
if lines.len() <= max_lines {
return output.to_string();
}
let omitted = lines.len() - max_lines;
let tail = lines[lines.len() - max_lines..].join("\n");
format!("[... {omitted} lines omitted ...]\n{tail}")
}
/// Parse cumulative passed/failed counts from `cargo test` output lines like:
/// `"test result: ok. 5 passed; 0 failed; ..."`
fn parse_test_counts(output: &str) -> (u64, u64) {
let mut total_passed = 0u64;
let mut total_failed = 0u64;
for line in output.lines() {
if line.contains("test result:") {
if let Some(p) = extract_count(line, "passed") {
total_passed += p;
}
if let Some(f) = extract_count(line, "failed") {
total_failed += f;
}
}
}
(total_passed, total_failed)
}
/// Extract a count immediately before `label` in `line` (e.g. `"5 passed"` → 5).
fn extract_count(line: &str, label: &str) -> Option<u64> {
let pos = line.find(label)?;
let before = line[..pos].trim_end();
let num_str: String = before
.chars()
.rev()
.take_while(|c| c.is_ascii_digit())
.collect();
if num_str.is_empty() {
return None;
}
let num_str: String = num_str.chars().rev().collect();
num_str.parse().ok()
}
/// Run the project's test suite (`script/test`) and block until complete.
///
/// Spawns the test process, then polls every second server-side until the
+6 -40
View File
@@ -1,4 +1,8 @@
//! MCP story tools — create, update, move, and manage stories, bugs, and refactors via MCP.
//!
//! This file is a thin adapter: it deserialises MCP payloads, delegates to
//! `crate::service::story` and `crate::http::workflow` for business logic,
//! and serialises responses.
use crate::agents::{
close_bug_to_archive, feature_branch_has_unmerged_changes, move_story_to_done,
};
@@ -12,7 +16,9 @@ use crate::http::workflow::{
use crate::io::story_metadata::{
check_archived_deps, check_archived_deps_from_list, parse_front_matter, parse_unchecked_todos,
};
use crate::service::story::parse_test_cases;
use crate::slog_warn;
#[allow(unused_imports)]
use crate::workflow::{TestCaseResult, TestStatus, evaluate_acceptance_with_coverage};
use serde_json::{Value, json};
use std::collections::HashMap;
@@ -702,46 +708,6 @@ pub(super) fn tool_list_refactors(ctx: &AppContext) -> Result<String, String> {
.map_err(|e| format!("Serialization error: {e}"))
}
pub(super) fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResult>, String> {
let arr = match value {
Some(Value::Array(a)) => a,
Some(Value::Null) | None => return Ok(Vec::new()),
_ => return Err("Expected array for test cases".to_string()),
};
arr.iter()
.map(|item| {
let name = item
.get("name")
.and_then(|v| v.as_str())
.ok_or("Test case missing 'name'")?
.to_string();
let status_str = item
.get("status")
.and_then(|v| v.as_str())
.ok_or("Test case missing 'status'")?;
let status = match status_str {
"pass" => TestStatus::Pass,
"fail" => TestStatus::Fail,
other => {
return Err(format!(
"Invalid test status '{other}'. Use 'pass' or 'fail'."
));
}
};
let details = item
.get("details")
.and_then(|v| v.as_str())
.map(String::from);
Ok(TestCaseResult {
name,
status,
details,
})
})
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
+39 -414
View File
@@ -12,106 +12,60 @@
//! 5. `wizard_retry` — discard staged content and regenerate from scratch
use crate::http::context::AppContext;
use crate::io::wizard::{StepStatus, WizardState, WizardStep, format_wizard_state};
use crate::io::wizard::WizardStep;
use crate::service::wizard::state_machine;
use crate::service::wizard::{self as svc};
use serde_json::Value;
use std::fs;
use std::path::Path;
// ── helpers ───────────────────────────────────────────────────────────────────
// ── Thin adapters (kept for callers in chat/commands/setup.rs) ────────────────
/// Return the filesystem path (relative to `project_root`) for a step's output.
/// Return the filesystem path for a step's output file.
///
/// Returns `None` for `Scaffold` since that step has no single output file — it
/// creates the full `.huskies/` directory structure and is handled by
/// `huskies init` before the server starts.
/// Pure path concatenation — delegates to `service::wizard::state_machine`.
pub(crate) fn step_output_path(
project_root: &Path,
step: WizardStep,
) -> Option<std::path::PathBuf> {
match step {
WizardStep::Context => Some(
project_root
.join(".huskies")
.join("specs")
.join("00_CONTEXT.md"),
),
WizardStep::Stack => Some(
project_root
.join(".huskies")
.join("specs")
.join("tech")
.join("STACK.md"),
),
WizardStep::TestScript => Some(project_root.join("script").join("test")),
WizardStep::BuildScript => Some(project_root.join("script").join("build")),
WizardStep::LintScript => Some(project_root.join("script").join("lint")),
WizardStep::ReleaseScript => Some(project_root.join("script").join("release")),
WizardStep::TestCoverage => Some(project_root.join("script").join("test_coverage")),
WizardStep::Scaffold => None,
}
state_machine::step_output_path(project_root, step)
}
/// Return true when `step` produces an executable script file.
pub(crate) fn is_script_step(step: WizardStep) -> bool {
matches!(
step,
WizardStep::TestScript
| WizardStep::BuildScript
| WizardStep::LintScript
| WizardStep::ReleaseScript
| WizardStep::TestCoverage
)
state_machine::is_script_step(step)
}
/// Write `content` to `path`, skipping if the file already exists with real
/// (non-template) content.
/// Write `content` to `path`, skipping if the file already has real content.
///
/// Scaffold template files (those containing [`TEMPLATE_SENTINEL`]) are treated
/// as placeholders and will be overwritten with the wizard-generated content.
/// Files with real user content are never overwritten. For script steps the
/// file is also made executable after writing.
/// Delegates to `service::wizard::write_step_file`.
pub(crate) fn write_if_missing(
path: &Path,
content: &str,
executable: bool,
) -> Result<bool, String> {
use crate::io::onboarding::TEMPLATE_SENTINEL;
if path.exists() {
// Overwrite scaffold template placeholders; preserve real user content.
let is_template = std::fs::read_to_string(path)
.map(|s| s.contains(TEMPLATE_SENTINEL))
.unwrap_or(false);
if !is_template {
return Ok(false); // real content already present — skip
}
}
if let Some(parent) = path.parent() {
fs::create_dir_all(parent)
.map_err(|e| format!("Failed to create directory {}: {e}", parent.display()))?;
}
fs::write(path, content).map_err(|e| format!("Failed to write {}: {e}", path.display()))?;
if executable {
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = fs::metadata(path)
.map_err(|e| format!("Failed to read permissions: {e}"))?
.permissions();
perms.set_mode(0o755);
fs::set_permissions(path, perms)
.map_err(|e| format!("Failed to set permissions: {e}"))?;
}
}
Ok(true)
svc::write_step_file(path, content, executable).map_err(|e| e.to_string())
}
/// Serialise a `WizardStep` to its snake_case string (e.g. `"test_script"`).
fn step_slug(step: WizardStep) -> String {
serde_json::to_value(step)
.ok()
.and_then(|v| v.as_str().map(String::from))
.unwrap_or_default()
/// Return true when the project directory has no meaningful source files.
///
/// Delegates to `service::wizard::state_machine::is_bare_project` after
/// reading directory entries via `service::wizard::io`.
#[cfg(test)]
fn is_bare_project(project_root: &Path) -> bool {
use crate::service::wizard::io as wio;
let names = wio::list_dir_names(project_root);
state_machine::is_bare_project(&names)
}
/// Return a generation hint for `step` based on the project at `project_root`.
///
/// Reads filesystem state then delegates pure logic to `state_machine`.
pub(crate) fn generation_hint(step: WizardStep, project_root: &Path) -> String {
use crate::service::wizard::io as wio;
let names = wio::list_dir_names(project_root);
let tools = wio::detect_project_tools(project_root);
let is_bare = state_machine::is_bare_project(&names);
state_machine::generation_hint(step, is_bare, &tools)
}
// ── MCP tool handlers ─────────────────────────────────────────────────────────
@@ -119,9 +73,7 @@ fn step_slug(step: WizardStep) -> String {
/// `wizard_status` — return current wizard state as a human-readable summary.
pub(super) fn tool_wizard_status(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let state =
WizardState::load(&root).ok_or("No wizard active. Run `huskies init` to begin setup.")?;
Ok(format_wizard_state(&state))
svc::status(&root).map_err(|e| e.to_string())
}
/// `wizard_generate` — mark the current step as generating or stage content.
@@ -133,245 +85,8 @@ pub(super) fn tool_wizard_status(ctx: &AppContext) -> Result<String, String> {
/// until `wizard_confirm` is called.
pub(super) fn tool_wizard_generate(args: &Value, ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
// If content is provided, stage it for confirmation.
if let Some(content) = args.get("content").and_then(|v| v.as_str()) {
state.set_step_status(
step,
StepStatus::AwaitingConfirmation,
Some(content.to_string()),
);
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
return Ok(format!(
"Content staged for '{}'. Run `wizard_confirm` to write it to disk, `wizard_retry` to regenerate, or `wizard_skip` to skip.",
step.label()
));
}
// No content provided — mark as generating and return a hint.
state.set_step_status(step, StepStatus::Generating, None);
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
let hint = generation_hint(step, &root);
let slug = step_slug(step);
Ok(format!(
"Step '{}' marked as generating.\n\n{hint}\n\nOnce you have the content, call `wizard_generate` again with a `content` argument (or PUT /wizard/step/{slug}/content). Then call `wizard_confirm` to write it to disk.",
step.label(),
))
}
/// Return true if the project directory has no meaningful source files.
pub(crate) fn is_bare_project(project_root: &Path) -> bool {
std::fs::read_dir(project_root)
.ok()
.map(|entries| {
let names: Vec<String> = entries
.filter_map(|e| e.ok())
.map(|e| e.file_name().to_string_lossy().to_string())
.collect();
// A bare project only has huskies scaffolding and no real code
names.iter().all(|n| {
n.starts_with('.')
|| n == "CLAUDE.md"
|| n == "LICENSE"
|| n == "README.md"
|| n == "script"
})
})
.unwrap_or(true)
}
/// Return a generation hint for a step based on the project root.
pub(crate) fn generation_hint(step: WizardStep, project_root: &Path) -> String {
let bare = is_bare_project(project_root);
match step {
WizardStep::Context => {
if bare {
"This is a bare project with no existing code. Ask the user what they want \
to build the project's purpose, goals, target users, and key features. \
Then generate `.huskies/specs/00_CONTEXT.md` from their answers covering:\n\
- High-level goal of the project\n\
- Core features\n\
- Domain concepts and entities\n\
- Glossary of abbreviations and technical terms"
.to_string()
} else {
"Read the project source tree and generate a `.huskies/specs/00_CONTEXT.md` describing:\n\
- High-level goal of the project\n\
- Core features\n\
- Domain concepts and entities\n\
- Glossary of abbreviations and technical terms".to_string()
}
}
WizardStep::Stack => {
if bare {
"This is a bare project with no existing code. Ask the user what language, \
frameworks, and tools they plan to use. Then generate `.huskies/specs/tech/STACK.md` \
from their answers covering:\n\
- Language, frameworks, and runtimes\n\
- Coding standards and linting rules\n\
- Quality gates (commands that must pass before merging)\n\
- Approved libraries and their purpose".to_string()
} else {
"Read the project source tree and generate a `.huskies/specs/tech/STACK.md` describing:\n\
- Language, frameworks, and runtimes\n\
- Coding standards and linting rules\n\
- Quality gates (commands that must pass before merging)\n\
- Approved libraries and their purpose".to_string()
}
}
WizardStep::TestScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate test commands for their chosen language and framework."
.to_string()
} else {
let has_cargo = project_root.join("Cargo.toml").exists();
let has_pkg = project_root.join("package.json").exists();
let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
let mut cmds = Vec::new();
if has_cargo {
cmds.push("cargo nextest run");
}
if has_pkg {
cmds.push(if has_pnpm { "pnpm test" } else { "npm test" });
}
if cmds.is_empty() {
"Generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs the project's test suite.".to_string()
} else {
format!(
"Generate a `script/test` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
cmds.join(", ")
)
}
}
}
WizardStep::BuildScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate build commands for their chosen language and framework."
.to_string()
} else {
let has_cargo = project_root.join("Cargo.toml").exists();
let has_pkg = project_root.join("package.json").exists();
let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
let has_frontend_subdir =
project_root.join("frontend").join("package.json").exists()
|| project_root.join("client").join("package.json").exists();
let has_go = project_root.join("go.mod").exists();
let mut cmds = Vec::new();
if has_cargo {
cmds.push("cargo build --release");
}
if has_pkg {
cmds.push(if has_pnpm {
"pnpm run build"
} else {
"npm run build"
});
}
if has_frontend_subdir {
cmds.push("(cd frontend && npm run build)");
}
if has_go {
cmds.push("go build ./...");
}
if cmds.is_empty() {
"Generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) that builds the project.".to_string()
} else {
format!(
"Generate a `script/build` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
cmds.join(", ")
)
}
}
}
WizardStep::LintScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate lint commands for their chosen language and framework."
.to_string()
} else {
let has_cargo = project_root.join("Cargo.toml").exists();
let has_pkg = project_root.join("package.json").exists();
let has_pnpm = project_root.join("pnpm-lock.yaml").exists();
let has_python = project_root.join("pyproject.toml").exists()
|| project_root.join("requirements.txt").exists();
let has_go = project_root.join("go.mod").exists();
let mut cmds = Vec::new();
if has_cargo {
cmds.push("cargo fmt --all --check");
cmds.push("cargo clippy -- -D warnings");
}
if has_pkg {
cmds.push(if has_pnpm {
"pnpm run lint"
} else {
"npm run lint"
});
}
if has_python {
cmds.push("flake8 . (or ruff check . if ruff is configured)");
}
if has_go {
cmds.push("go vet ./...");
}
if cmds.is_empty() {
"Generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs the project's linters.".to_string()
} else {
format!(
"Generate a `script/lint` shell script (#!/usr/bin/env bash, set -euo pipefail) that runs: {}",
cmds.join(", ")
)
}
}
}
WizardStep::ReleaseScript => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/release` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate build/release commands for their chosen language and framework."
.to_string()
} else {
"Generate a `script/release` shell script (#!/usr/bin/env bash, set -euo pipefail) that builds and releases the project (e.g. `cargo build --release` or `npm run build`).".to_string()
}
}
WizardStep::TestCoverage => {
if bare {
"This is a bare project with no existing code. Read the STACK.md generated \
in the previous step (or ask the user about their stack if it was skipped) \
and generate a `script/test_coverage` shell script (#!/usr/bin/env bash, set -euo pipefail) \
with appropriate test coverage commands for their chosen language and framework."
.to_string()
} else {
"Generate a `script/test_coverage` shell script (#!/usr/bin/env bash, set -euo pipefail) that generates a test coverage report (e.g. `cargo llvm-cov nextest` or `npm run coverage`).".to_string()
}
}
WizardStep::Scaffold => {
"Scaffold step is handled automatically by `huskies init`.".to_string()
}
}
let content = args.get("content").and_then(|v| v.as_str());
svc::generate(&root, content).map_err(|e| e.to_string())
}
/// `wizard_confirm` — confirm the current step and write its content to disk.
@@ -382,111 +97,20 @@ pub(crate) fn generation_hint(step: WizardStep, project_root: &Path) -> String {
/// advances to the next pending step.
pub(super) fn tool_wizard_confirm(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
let content = state.steps[current_idx].content.clone();
// Write content to disk (only if a file path exists and the file is absent).
let write_msg = if let (Some(c), Some(ref path)) = (&content, step_output_path(&root, step)) {
let executable = is_script_step(step);
match write_if_missing(path, c, executable)? {
true => format!(" File written: `{}`.", path.display()),
false => format!(" File `{}` already exists — skipped.", path.display()),
}
} else {
String::new()
};
state
.confirm_step(step)
.map_err(|e| format!("Cannot confirm step: {e}"))?;
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
let next_idx = state.current_step_index();
if state.completed {
Ok(format!(
"Step '{}' confirmed.{write_msg}\n\nSetup wizard complete! All steps done.",
step.label()
))
} else {
let next = &state.steps[next_idx];
Ok(format!(
"Step '{}' confirmed.{write_msg}\n\nNext: {} — run `wizard_generate` to begin.",
step.label(),
next.step.label()
))
}
svc::confirm(&root).map_err(|e| e.to_string())
}
/// `wizard_skip` — skip the current step without writing any file.
pub(super) fn tool_wizard_skip(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
state
.skip_step(step)
.map_err(|e| format!("Cannot skip step: {e}"))?;
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
let next_idx = state.current_step_index();
if state.completed {
Ok(format!(
"Step '{}' skipped. Setup wizard complete!",
step.label()
))
} else {
let next = &state.steps[next_idx];
Ok(format!(
"Step '{}' skipped.\n\nNext: {} — run `wizard_generate` to begin.",
step.label(),
next.step.label()
))
}
svc::skip(&root).map_err(|e| e.to_string())
}
/// `wizard_retry` — discard staged content and reset the current step to
/// `Pending` so it can be regenerated.
pub(super) fn tool_wizard_retry(ctx: &AppContext) -> Result<String, String> {
let root = ctx.state.get_project_root()?;
let mut state = WizardState::load(&root).ok_or("No wizard active.")?;
if state.completed {
return Ok("Wizard is already complete.".to_string());
}
let current_idx = state.current_step_index();
let step = state.steps[current_idx].step;
// Clear content and reset to pending.
if let Some(s) = state.steps.iter_mut().find(|s| s.step == step) {
s.status = StepStatus::Pending;
s.content = None;
}
state
.save(&root)
.map_err(|e| format!("Failed to save wizard state: {e}"))?;
Ok(format!(
"Step '{}' reset to pending. Run `wizard_generate` to regenerate content.",
step.label()
))
svc::retry(&root).map_err(|e| e.to_string())
}
// ── tests ─────────────────────────────────────────────────────────────────────
@@ -495,6 +119,7 @@ pub(super) fn tool_wizard_retry(ctx: &AppContext) -> Result<String, String> {
mod tests {
use super::*;
use crate::http::context::AppContext;
use crate::io::wizard::{StepStatus, WizardState, format_wizard_state};
use tempfile::TempDir;
fn setup(dir: &TempDir) -> AppContext {
+1
View File
@@ -18,6 +18,7 @@ pub mod settings;
pub(crate) mod test_helpers;
pub mod workflow;
pub mod gateway;
pub mod project;
pub mod wizard;
pub mod ws;
+77 -283
View File
@@ -1,102 +1,23 @@
//! OAuth endpoints — Anthropic OAuth callback and token exchange flow.
use crate::llm::oauth;
//! OAuth endpoints — thin HTTP adapters over `service::oauth`.
//!
//! Business logic lives in `service::oauth`. These handlers only:
//! 1. Extract parameters from the HTTP request.
//! 2. Call the service layer.
//! 3. Map service errors to HTTP responses.
use crate::service::oauth as svc;
use crate::slog;
use poem::handler;
use poem::http::StatusCode;
use poem::web::{Data, Query, Redirect};
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
/// Anthropic OAuth configuration.
const CLIENT_ID: &str = "9d1c250a-e61b-44d9-88ed-5944d1962f5e";
/// Claude.ai authorize URL (for Max/Pro subscriptions).
const AUTHORIZE_URL: &str = "https://claude.com/cai/oauth/authorize";
const TOKEN_ENDPOINT: &str = "https://platform.claude.com/v1/oauth/token";
const SCOPES: &str =
"user:inference user:profile user:mcp_servers user:sessions:claude_code user:file_upload";
/// In-memory store for pending PKCE flows, keyed by state parameter.
#[derive(Clone)]
pub struct OAuthState {
/// Maps state → (code_verifier, redirect_uri)
pending: Arc<Mutex<HashMap<String, PendingFlow>>>,
/// The port the server is listening on (for building redirect_uri).
port: u16,
}
struct PendingFlow {
code_verifier: String,
redirect_uri: String,
}
impl OAuthState {
pub fn new(port: u16) -> Self {
Self {
pending: Arc::new(Mutex::new(HashMap::new())),
port,
}
}
fn callback_url(&self) -> String {
format!("http://localhost:{}/callback", self.port)
}
}
/// Generate a random alphanumeric string of the given length.
fn random_string(len: usize) -> String {
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hasher};
let mut s = String::with_capacity(len);
let chars = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
for _ in 0..len {
let hasher = RandomState::new().build_hasher();
let idx = hasher.finish() as usize % chars.len();
s.push(chars[idx] as char);
}
s
}
/// Compute the S256 PKCE code challenge from a code verifier.
fn compute_code_challenge(verifier: &str) -> String {
use sha2::{Digest, Sha256};
let hash = Sha256::digest(verifier.as_bytes());
base64url_encode(&hash)
}
/// Base64url-encode without padding (RFC 7636).
fn base64url_encode(data: &[u8]) -> String {
// Standard base64 then convert to base64url
const CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
let mut result = String::new();
let mut i = 0;
while i < data.len() {
let b0 = data[i] as u32;
let b1 = if i + 1 < data.len() {
data[i + 1] as u32
} else {
0
};
let b2 = if i + 2 < data.len() {
data[i + 2] as u32
} else {
0
};
let triple = (b0 << 16) | (b1 << 8) | b2;
result.push(CHARS[((triple >> 18) & 0x3F) as usize] as char);
result.push(CHARS[((triple >> 12) & 0x3F) as usize] as char);
if i + 1 < data.len() {
result.push(CHARS[((triple >> 6) & 0x3F) as usize] as char);
}
if i + 2 < data.len() {
result.push(CHARS[(triple & 0x3F) as usize] as char);
}
i += 3;
}
// Convert to base64url: replace + with -, / with _
result.replace('+', "-").replace('/', "_")
}
// Re-export service types so that existing tests in this file continue to
// compile unchanged (they use `use super::*` and call these by name).
pub(crate) use svc::OAuthState;
// Re-exported for tests only (tests use `use super::*` to call these by name).
#[cfg(test)]
pub(crate) use svc::pkce::{base64url_encode, compute_code_challenge, random_string};
/// `GET /oauth/authorize` — Initiates the OAuth flow.
///
@@ -104,35 +25,11 @@ fn base64url_encode(data: &[u8]) -> String {
/// Anthropic's authorization page.
#[handler]
pub async fn oauth_authorize(state: Data<&Arc<OAuthState>>) -> Redirect {
let code_verifier = random_string(128);
let code_challenge = compute_code_challenge(&code_verifier);
let csrf_state = random_string(32);
let redirect_uri = state.callback_url();
slog!("[oauth] Starting OAuth flow, state={}", csrf_state);
// Store the pending flow
state.pending.lock().unwrap().insert(
csrf_state.clone(),
PendingFlow {
code_verifier,
redirect_uri: redirect_uri.clone(),
},
);
let authorize_url = format!(
"{}?code=true&client_id={}&response_type=code&redirect_uri={}&scope={}&code_challenge={}&code_challenge_method=S256&state={}",
AUTHORIZE_URL,
CLIENT_ID,
percent_encode(&redirect_uri),
percent_encode(SCOPES),
percent_encode(&code_challenge),
percent_encode(&csrf_state),
);
Redirect::temporary(authorize_url)
let (_, url) = svc::initiate_flow(&state);
Redirect::temporary(url)
}
/// Query parameters received on the OAuth callback URL.
#[derive(Deserialize)]
pub struct CallbackParams {
code: Option<String>,
@@ -141,18 +38,6 @@ pub struct CallbackParams {
error_description: Option<String>,
}
/// Response from the Anthropic OAuth token endpoint.
#[derive(Deserialize)]
struct TokenResponse {
    // Bearer token used for subsequent authenticated API calls.
    access_token: String,
    // May be absent in the response; the caller substitutes an empty
    // string via `unwrap_or_default()` when building the credentials file.
    refresh_token: Option<String>,
    // Token lifetime in seconds; the caller converts it to an absolute
    // millisecond deadline (`now_ms + expires_in * 1000`).
    expires_in: u64,
    // Returned by the endpoint but not consumed by this client.
    #[allow(dead_code)]
    token_type: Option<String>,
    // Returned by the endpoint but not consumed by this client.
    #[allow(dead_code)]
    scope: Option<String>,
}
/// `GET /oauth/callback` — Handles the OAuth redirect from Anthropic.
///
/// Exchanges the authorization code for tokens and writes them to
@@ -162,7 +47,7 @@ pub async fn oauth_callback(
state: Data<&Arc<OAuthState>>,
Query(params): Query<CallbackParams>,
) -> poem::Response {
// Handle errors from Anthropic
// Handle provider-side errors (e.g. user denied access).
if let Some(err) = &params.error {
let desc = params
.error_description
@@ -177,7 +62,7 @@ pub async fn oauth_callback(
}
let code = match &params.code {
Some(c) => c,
Some(c) => c.clone(),
None => {
return html_response(
StatusCode::BAD_REQUEST,
@@ -188,7 +73,7 @@ pub async fn oauth_callback(
};
let csrf_state = match &params.state {
Some(s) => s,
Some(s) => s.clone(),
None => {
return html_response(
StatusCode::BAD_REQUEST,
@@ -198,163 +83,72 @@ pub async fn oauth_callback(
}
};
// Look up and remove the pending flow
let pending = state.pending.lock().unwrap().remove(csrf_state);
let flow = match pending {
Some(f) => f,
None => {
slog!("[oauth] Unknown state parameter: {}", csrf_state);
return html_response(
StatusCode::BAD_REQUEST,
"Invalid State",
"Unknown or expired state parameter. Please try logging in again.",
);
}
};
slog!("[oauth] Received callback, exchanging code for tokens");
// Exchange the authorization code for tokens
let client = reqwest::Client::new();
let resp = client
.post(TOKEN_ENDPOINT)
.header("Content-Type", "application/json")
.json(&serde_json::json!({
"grant_type": "authorization_code",
"code": code,
"client_id": CLIENT_ID,
"redirect_uri": &flow.redirect_uri,
"code_verifier": &flow.code_verifier,
"state": csrf_state,
}))
.send()
.await;
let resp = match resp {
Ok(r) => r,
Err(e) => {
slog!("[oauth] Token exchange request failed: {}", e);
return html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Token Exchange Failed",
&format!("Failed to contact Anthropic: {e}"),
);
}
};
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
slog!(
"[oauth] Token exchange response (HTTP {}): {}",
status,
body
);
if !status.is_success() {
return html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Token Exchange Failed",
&format!("Anthropic returned HTTP {status}. Please try again."),
);
match svc::exchange_code(&state, &code, &csrf_state).await {
Ok(()) => html_response(
StatusCode::OK,
"Authenticated!",
"Claude OAuth login successful. You can close this tab and return to Huskies.",
),
Err(e) => map_service_error(e),
}
let token_resp: TokenResponse = match serde_json::from_str(&body) {
Ok(t) => t,
Err(e) => {
slog!("[oauth] Failed to parse token response: {}", e);
return html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Token Parse Failed",
"Received an unexpected response from Anthropic.",
);
}
};
let now_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_millis() as u64)
.unwrap_or(0);
let creds = oauth::CredentialsFile {
claude_ai_oauth: oauth::OAuthCredentials {
access_token: token_resp.access_token,
refresh_token: token_resp.refresh_token.unwrap_or_default(),
expires_at: now_ms + (token_resp.expires_in * 1000),
scopes: SCOPES.split(' ').map(|s| s.to_string()).collect(),
subscription_type: None,
rate_limit_tier: None,
},
};
if let Err(e) = oauth::write_credentials(&creds) {
slog!("[oauth] Failed to write credentials: {}", e);
return html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Credential Write Failed",
&format!("Tokens received but failed to save: {e}"),
);
}
slog!("[oauth] Successfully authenticated and saved credentials");
html_response(
StatusCode::OK,
"Authenticated!",
"Claude OAuth login successful. You can close this tab and return to Huskies.",
)
}
/// Check whether valid (non-expired) OAuth credentials exist.
/// `GET /oauth/status` — Check whether valid (non-expired) OAuth credentials exist.
#[handler]
pub async fn oauth_status() -> poem::Response {
match oauth::read_credentials() {
Ok(creds) => {
let now_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_millis() as u64)
.unwrap_or(0);
let expired = now_ms > creds.claude_ai_oauth.expires_at;
let body = serde_json::json!({
"authenticated": true,
"expired": expired,
"expires_at": creds.claude_ai_oauth.expires_at,
"has_refresh_token": !creds.claude_ai_oauth.refresh_token.is_empty(),
});
poem::Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(body.to_string())
}
Err(_) => {
let body = serde_json::json!({
"authenticated": false,
"expired": false,
"expires_at": 0,
"has_refresh_token": false,
});
poem::Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(body.to_string())
}
}
let status = svc::check_status();
let body = serde_json::json!({
"authenticated": status.authenticated,
"expired": status.expired,
"expires_at": status.expires_at,
"has_refresh_token": status.has_refresh_token,
});
poem::Response::builder()
.status(StatusCode::OK)
.header("Content-Type", "application/json")
.body(body.to_string())
}
/// Percent-encode a string for use in URL query parameters.
fn percent_encode(input: &str) -> String {
let mut encoded = String::with_capacity(input.len() * 3);
for byte in input.bytes() {
match byte {
b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
encoded.push(byte as char);
}
_ => {
encoded.push_str(&format!("%{byte:02X}"));
}
// ── Private helpers ───────────────────────────────────────────────────────────
/// Map a service-layer `Error` to an HTML HTTP response.
fn map_service_error(e: svc::Error) -> poem::Response {
use svc::Error;
match e {
Error::MissingCode => html_response(
StatusCode::BAD_REQUEST,
"Missing Code",
"No authorization code received.",
),
Error::MissingState => html_response(
StatusCode::BAD_REQUEST,
"Missing State",
"No state parameter received.",
),
Error::InvalidState(msg) => html_response(StatusCode::BAD_REQUEST, "Invalid State", &msg),
Error::AuthorizationDenied(msg) => {
html_response(StatusCode::BAD_REQUEST, "Authentication Failed", &msg)
}
Error::InvalidGrant(msg) => {
html_response(StatusCode::BAD_REQUEST, "Token Exchange Failed", &msg)
}
Error::Network(msg) => html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Token Exchange Failed",
&msg,
),
Error::TokenExpired(msg) => html_response(StatusCode::UNAUTHORIZED, "Token Expired", &msg),
Error::TokenStorage(msg) => html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Credential Write Failed",
&msg,
),
Error::Parse(msg) => html_response(
StatusCode::INTERNAL_SERVER_ERROR,
"Token Parse Failed",
&msg,
),
}
encoded
}
fn html_response(status: StatusCode, title: &str, message: &str) -> poem::Response {
+24 -10
View File
@@ -1,6 +1,7 @@
//! HTTP project endpoints — REST API for project initialization and context management.
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::io::fs;
//! HTTP project endpoints — thin adapters over `service::project`.
use crate::http::context::{AppContext, OpenApiResult, bad_request, not_found};
use crate::service::project::{self as svc, Error as ProjectError};
use poem::http::StatusCode;
use poem_openapi::{Object, OpenApi, Tags, payload::Json};
use serde::Deserialize;
use std::sync::Arc;
@@ -15,6 +16,17 @@ struct PathPayload {
path: String,
}
/// Map a typed [`ProjectError`] to a `poem::Error` with the appropriate HTTP status.
fn map_project_error(e: ProjectError) -> poem::Error {
    match e {
        // Unknown path: routed through the shared `not_found` helper.
        ProjectError::PathNotFound(msg) => not_found(msg),
        // Path exists but is not a directory: a caller error, routed
        // through the shared `bad_request` helper.
        ProjectError::NotADirectory(msg) => bad_request(msg),
        // Anything else is a server-side failure -> 500.
        ProjectError::Internal(msg) => {
            poem::Error::from_string(msg, StatusCode::INTERNAL_SERVER_ERROR)
        }
    }
}
pub struct ProjectApi {
pub ctx: Arc<AppContext>,
}
@@ -26,8 +38,8 @@ impl ProjectApi {
/// Returns null when no project is open.
#[oai(path = "/project", method = "get")]
async fn get_current_project(&self) -> OpenApiResult<Json<Option<String>>> {
let result = fs::get_current_project(&self.ctx.state, self.ctx.store.as_ref())
.map_err(bad_request)?;
let result = svc::get_current_project(&self.ctx.state, self.ctx.store.as_ref())
.map_err(map_project_error)?;
Ok(Json(result))
}
@@ -36,14 +48,14 @@ impl ProjectApi {
/// Persists the selected path for later sessions.
#[oai(path = "/project", method = "post")]
async fn open_project(&self, payload: Json<PathPayload>) -> OpenApiResult<Json<String>> {
let confirmed = fs::open_project(
let confirmed = svc::open_project(
payload.0.path,
&self.ctx.state,
self.ctx.store.as_ref(),
self.ctx.agents.port(),
)
.await
.map_err(bad_request)?;
.map_err(map_project_error)?;
Ok(Json(confirmed))
}
@@ -55,21 +67,23 @@ impl ProjectApi {
"[MERGE-DEBUG] DELETE /project called! \
Backtrace: this is the only code path that clears project_root."
);
fs::close_project(&self.ctx.state, self.ctx.store.as_ref()).map_err(bad_request)?;
svc::close_project(&self.ctx.state, self.ctx.store.as_ref()).map_err(map_project_error)?;
Ok(Json(true))
}
/// List known projects from the store.
#[oai(path = "/projects", method = "get")]
async fn list_known_projects(&self) -> OpenApiResult<Json<Vec<String>>> {
let projects = fs::get_known_projects(self.ctx.store.as_ref()).map_err(bad_request)?;
let projects =
svc::get_known_projects(self.ctx.store.as_ref()).map_err(map_project_error)?;
Ok(Json(projects))
}
/// Forget a known project path.
#[oai(path = "/projects/forget", method = "post")]
async fn forget_known_project(&self, payload: Json<PathPayload>) -> OpenApiResult<Json<bool>> {
fs::forget_known_project(payload.0.path, self.ctx.store.as_ref()).map_err(bad_request)?;
svc::forget_known_project(payload.0.path, self.ctx.store.as_ref())
.map_err(map_project_error)?;
Ok(Json(true))
}
}
+35 -194
View File
@@ -1,179 +1,38 @@
//! HTTP settings endpoints — REST API for user preferences and editor configuration.
use crate::config::ProjectConfig;
use crate::http::context::{AppContext, OpenApiResult, bad_request};
use crate::service::settings as svc;
use crate::store::StoreOps;
use poem_openapi::{Object, OpenApi, Tags, param::Query, payload::Json};
use serde::{Deserialize, Serialize};
use serde::Serialize;
use serde_json::json;
#[cfg(test)]
use std::path::Path;
use std::sync::Arc;
const EDITOR_COMMAND_KEY: &str = "editor_command";
// Re-export service types so the test module (which does `use super::*`) can
// access them without modification.
pub use svc::EDITOR_COMMAND_KEY;
pub use svc::ProjectSettings;
#[cfg(test)]
pub use svc::settings_from_config;
/// Project-level settings exposed via `GET /api/settings` and `PUT /api/settings`.
///
/// Only contains the scalar fields of `ProjectConfig` — array sections
/// (`[[component]]`, `[[agent]]`, `[watcher]`) are preserved in the TOML file
/// and are not editable through this API.
#[derive(Debug, Object, Serialize, Deserialize)]
struct ProjectSettings {
/// Project-wide default QA mode: "server", "agent", or "human". Default: "server".
default_qa: String,
/// Default model for coder-stage agents (e.g. "sonnet"). When set, only agents whose
/// model matches this value are used for auto-assignment.
default_coder_model: Option<String>,
/// Maximum number of concurrent coder-stage agents. When set, stories wait in
/// 2_current/ until a slot is free.
max_coders: Option<u32>,
/// Maximum retries per story per pipeline stage before marking as blocked. Default: 2.
max_retries: u32,
/// Optional base branch name (e.g. "main", "master"). Overrides auto-detection.
base_branch: Option<String>,
/// Whether to send RateLimitWarning chat notifications. Default: true.
rate_limit_notifications: bool,
/// IANA timezone name (e.g. "Europe/London"). Timer inputs are interpreted in this tz.
timezone: Option<String>,
/// WebSocket URL of a remote huskies node to sync CRDT state with.
rendezvous: Option<String>,
/// How often (seconds) to check 5_done/ for items to archive. Default: 60.
watcher_sweep_interval_secs: u64,
/// How long (seconds) an item must remain in 5_done/ before archiving. Default: 14400.
watcher_done_retention_secs: u64,
}
/// Load `ProjectSettings` from `ProjectConfig`.
fn settings_from_config(cfg: &ProjectConfig) -> ProjectSettings {
ProjectSettings {
default_qa: cfg.default_qa.clone(),
default_coder_model: cfg.default_coder_model.clone(),
max_coders: cfg.max_coders.map(|v| v as u32),
max_retries: cfg.max_retries,
base_branch: cfg.base_branch.clone(),
rate_limit_notifications: cfg.rate_limit_notifications,
timezone: cfg.timezone.clone(),
rendezvous: cfg.rendezvous.clone(),
watcher_sweep_interval_secs: cfg.watcher.sweep_interval_secs,
watcher_done_retention_secs: cfg.watcher.done_retention_secs,
}
}
/// Validate the incoming `ProjectSettings` before writing.
/// Thin wrapper — delegates to [`svc::validate_project_settings`] and maps
/// the typed error to `String` so existing tests calling `.unwrap_err()` can
/// call `.contains()` directly.
fn validate_project_settings(s: &ProjectSettings) -> Result<(), String> {
match s.default_qa.as_str() {
"server" | "agent" | "human" => {}
other => {
return Err(format!(
"Invalid default_qa value '{other}'. Must be one of: server, agent, human"
));
}
}
Ok(())
svc::validate_project_settings(s).map_err(|e| e.to_string())
}
/// Write only the scalar settings from `s` into the project.toml at the given root.
/// Array sections (`[[component]]`, `[[agent]]`) are preserved unchanged.
/// Thin wrapper — delegates to [`svc::write_project_settings`] and maps the
/// typed error to `String` so existing tests can call `.unwrap()` unchanged.
#[cfg(test)]
fn write_project_settings(project_root: &Path, s: &ProjectSettings) -> Result<(), String> {
let config_path = project_root.join(".huskies/project.toml");
svc::write_project_settings(project_root, s).map_err(|e| e.to_string())
}
let content = if config_path.exists() {
std::fs::read_to_string(&config_path).map_err(|e| format!("Read config: {e}"))?
} else {
String::new()
};
let mut val: toml::Value = if content.trim().is_empty() {
toml::Value::Table(toml::map::Map::new())
} else {
toml::from_str(&content).map_err(|e| format!("Parse config: {e}"))?
};
let table = val
.as_table_mut()
.ok_or_else(|| "Config is not a TOML table".to_string())?;
// Scalar root fields
table.insert(
"default_qa".to_string(),
toml::Value::String(s.default_qa.clone()),
);
table.insert(
"max_retries".to_string(),
toml::Value::Integer(s.max_retries as i64),
);
table.insert(
"rate_limit_notifications".to_string(),
toml::Value::Boolean(s.rate_limit_notifications),
);
// Optional scalar fields
match &s.default_coder_model {
Some(v) => {
table.insert(
"default_coder_model".to_string(),
toml::Value::String(v.clone()),
);
}
None => {
table.remove("default_coder_model");
}
}
match s.max_coders {
Some(v) => {
table.insert("max_coders".to_string(), toml::Value::Integer(v as i64));
}
None => {
table.remove("max_coders");
}
}
match &s.base_branch {
Some(v) => {
table.insert("base_branch".to_string(), toml::Value::String(v.clone()));
}
None => {
table.remove("base_branch");
}
}
match &s.timezone {
Some(v) => {
table.insert("timezone".to_string(), toml::Value::String(v.clone()));
}
None => {
table.remove("timezone");
}
}
match &s.rendezvous {
Some(v) => {
table.insert("rendezvous".to_string(), toml::Value::String(v.clone()));
}
None => {
table.remove("rendezvous");
}
}
// [watcher] sub-table
let watcher_entry = table
.entry("watcher".to_string())
.or_insert_with(|| toml::Value::Table(toml::map::Map::new()));
if let toml::Value::Table(wt) = watcher_entry {
wt.insert(
"sweep_interval_secs".to_string(),
toml::Value::Integer(s.watcher_sweep_interval_secs as i64),
);
wt.insert(
"done_retention_secs".to_string(),
toml::Value::Integer(s.watcher_done_retention_secs as i64),
);
}
// Ensure .huskies/ directory exists
if let Some(parent) = config_path.parent() {
std::fs::create_dir_all(parent).map_err(|e| format!("Create .huskies dir: {e}"))?;
}
let new_content = toml::to_string_pretty(&val).map_err(|e| format!("Serialize config: {e}"))?;
std::fs::write(&config_path, new_content).map_err(|e| format!("Write config: {e}"))?;
Ok(())
/// Return the configured editor command from the store, or `None` if not set.
pub fn get_editor_command_from_store(ctx: &AppContext) -> Option<String> {
svc::get_editor_command(&*ctx.store)
}
#[derive(Tags)]
@@ -205,11 +64,7 @@ impl SettingsApi {
/// Get the configured editor command (e.g. "zed", "code", "cursor"), or null if not set.
#[oai(path = "/settings/editor", method = "get")]
async fn get_editor(&self) -> OpenApiResult<Json<EditorCommandResponse>> {
let editor_command = self
.ctx
.store
.get(EDITOR_COMMAND_KEY)
.and_then(|v| v.as_str().map(|s| s.to_string()));
let editor_command = get_editor_command_from_store(&self.ctx);
Ok(Json(EditorCommandResponse { editor_command }))
}
@@ -223,19 +78,8 @@ impl SettingsApi {
path: Query<String>,
line: Query<Option<u32>>,
) -> OpenApiResult<Json<OpenFileResponse>> {
let editor_command = get_editor_command_from_store(&self.ctx)
.ok_or_else(|| bad_request("No editor configured".to_string()))?;
let file_ref = match line.0 {
Some(l) => format!("{}:{}", path.0, l),
None => path.0.clone(),
};
std::process::Command::new(&editor_command)
.arg(&file_ref)
.spawn()
.map_err(|e| bad_request(format!("Failed to open editor: {e}")))?;
svc::open_file_in_editor(&*self.ctx.store, &path.0, line.0)
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(OpenFileResponse { success: true }))
}
@@ -243,8 +87,9 @@ impl SettingsApi {
#[oai(path = "/settings", method = "get")]
async fn get_settings(&self) -> OpenApiResult<Json<ProjectSettings>> {
let project_root = self.ctx.state.get_project_root().map_err(bad_request)?;
let config = ProjectConfig::load(&project_root).map_err(bad_request)?;
Ok(Json(settings_from_config(&config)))
let s =
svc::load_project_settings(&project_root).map_err(|e| bad_request(e.to_string()))?;
Ok(Json(s))
}
/// Update project.toml scalar settings. Array sections (component, agent) are preserved.
@@ -257,10 +102,12 @@ impl SettingsApi {
) -> OpenApiResult<Json<ProjectSettings>> {
validate_project_settings(&payload.0).map_err(bad_request)?;
let project_root = self.ctx.state.get_project_root().map_err(bad_request)?;
write_project_settings(&project_root, &payload.0).map_err(bad_request)?;
svc::write_project_settings(&project_root, &payload.0)
.map_err(|e| bad_request(e.to_string()))?;
// Re-read to confirm what was written
let config = ProjectConfig::load(&project_root).map_err(bad_request)?;
Ok(Json(settings_from_config(&config)))
let s =
svc::load_project_settings(&project_root).map_err(|e| bad_request(e.to_string()))?;
Ok(Json(s))
}
/// Set the preferred editor command (e.g. "zed", "code", "cursor").
@@ -294,12 +141,6 @@ impl SettingsApi {
}
}
pub fn get_editor_command_from_store(ctx: &AppContext) -> Option<String> {
ctx.store
.get(EDITOR_COMMAND_KEY)
.and_then(|v| v.as_str().map(|s| s.to_string()))
}
#[cfg(test)]
impl From<std::sync::Arc<AppContext>> for SettingsApi {
fn from(ctx: std::sync::Arc<AppContext>) -> Self {
@@ -556,7 +397,7 @@ mod tests {
// ── /api/settings GET/PUT ──────────────────────────────────────────────
fn default_project_settings() -> ProjectSettings {
let cfg = ProjectConfig::default();
let cfg = crate::config::ProjectConfig::default();
settings_from_config(&cfg)
}
@@ -709,7 +550,7 @@ path = "."
write_project_settings(dir.path(), &s).unwrap();
let config = ProjectConfig::load(dir.path()).unwrap();
let config = crate::config::ProjectConfig::load(dir.path()).unwrap();
let loaded = settings_from_config(&config);
assert_eq!(loaded.default_qa, "agent");
@@ -763,7 +604,7 @@ path = "."
};
write_project_settings(dir.path(), &s_clear).unwrap();
let config = ProjectConfig::load(dir.path()).unwrap();
let config = crate::config::ProjectConfig::load(dir.path()).unwrap();
let loaded = settings_from_config(&config);
assert!(loaded.default_coder_model.is_none());
assert!(loaded.max_coders.is_none());
+11 -31
View File
@@ -1,6 +1,7 @@
//! HTTP wizard endpoints — REST API for the project setup wizard.
use crate::http::context::{AppContext, OpenApiResult, bad_request, not_found};
use crate::io::wizard::{StepStatus, WizardState, WizardStep};
use crate::io::wizard::{WizardState, WizardStep};
use crate::service::wizard as svc;
use poem_openapi::{Object, OpenApi, Tags, param::Path, payload::Json};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
@@ -80,8 +81,7 @@ impl WizardApi {
#[oai(path = "/wizard", method = "get")]
async fn get_wizard_state(&self) -> OpenApiResult<Json<WizardResponse>> {
let root = self.ctx.state.get_project_root().map_err(bad_request)?;
let state =
WizardState::load(&root).ok_or_else(|| not_found("No wizard active".to_string()))?;
let state = svc::get_state(&root).map_err(|_| not_found("No wizard active".to_string()))?;
Ok(Json(WizardResponse::from(&state)))
}
@@ -97,16 +97,8 @@ impl WizardApi {
) -> OpenApiResult<Json<WizardResponse>> {
let root = self.ctx.state.get_project_root().map_err(bad_request)?;
let wizard_step = parse_step(&step.0)?;
let mut state =
WizardState::load(&root).ok_or_else(|| not_found("No wizard active".to_string()))?;
state.set_step_status(
wizard_step,
StepStatus::AwaitingConfirmation,
payload.0.content,
);
state.save(&root).map_err(bad_request)?;
let state = svc::set_step_content(&root, wizard_step, payload.0.content)
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(WizardResponse::from(&state)))
}
@@ -117,12 +109,8 @@ impl WizardApi {
async fn confirm_step(&self, step: Path<String>) -> OpenApiResult<Json<WizardResponse>> {
let root = self.ctx.state.get_project_root().map_err(bad_request)?;
let wizard_step = parse_step(&step.0)?;
let mut state =
WizardState::load(&root).ok_or_else(|| not_found("No wizard active".to_string()))?;
state.confirm_step(wizard_step).map_err(bad_request)?;
state.save(&root).map_err(bad_request)?;
let state =
svc::mark_step_confirmed(&root, wizard_step).map_err(|e| bad_request(e.to_string()))?;
Ok(Json(WizardResponse::from(&state)))
}
@@ -133,12 +121,8 @@ impl WizardApi {
async fn skip_step(&self, step: Path<String>) -> OpenApiResult<Json<WizardResponse>> {
let root = self.ctx.state.get_project_root().map_err(bad_request)?;
let wizard_step = parse_step(&step.0)?;
let mut state =
WizardState::load(&root).ok_or_else(|| not_found("No wizard active".to_string()))?;
state.skip_step(wizard_step).map_err(bad_request)?;
state.save(&root).map_err(bad_request)?;
let state =
svc::mark_step_skipped(&root, wizard_step).map_err(|e| bad_request(e.to_string()))?;
Ok(Json(WizardResponse::from(&state)))
}
@@ -147,12 +131,8 @@ impl WizardApi {
async fn mark_generating(&self, step: Path<String>) -> OpenApiResult<Json<WizardResponse>> {
let root = self.ctx.state.get_project_root().map_err(bad_request)?;
let wizard_step = parse_step(&step.0)?;
let mut state =
WizardState::load(&root).ok_or_else(|| not_found("No wizard active".to_string()))?;
state.set_step_status(wizard_step, StepStatus::Generating, None);
state.save(&root).map_err(bad_request)?;
let state = svc::mark_step_generating(&root, wizard_step)
.map_err(|e| bad_request(e.to_string()))?;
Ok(Json(WizardResponse::from(&state)))
}
}
+49 -971
View File
File diff suppressed because it is too large Load Diff
+1 -3
View File
@@ -11,6 +11,4 @@ pub use files::{
};
pub use paths::{find_story_kit_root, get_home_directory, resolve_cli_path};
pub use preferences::{get_model_preference, set_model_preference};
pub use project::{
close_project, forget_known_project, get_current_project, get_known_projects, open_project,
};
pub use project::open_project;
+4
View File
@@ -84,6 +84,7 @@ pub async fn open_project(
Ok(path)
}
#[allow(dead_code)]
pub fn close_project(state: &SessionState, store: &dyn StoreOps) -> Result<(), String> {
{
// TRACE:MERGE-DEBUG — remove once root cause is found
@@ -98,6 +99,7 @@ pub fn close_project(state: &SessionState, store: &dyn StoreOps) -> Result<(), S
Ok(())
}
#[allow(dead_code)]
pub fn get_current_project(
state: &SessionState,
store: &dyn StoreOps,
@@ -131,6 +133,7 @@ pub fn get_current_project(
Ok(None)
}
#[allow(dead_code)]
pub fn get_known_projects(store: &dyn StoreOps) -> Result<Vec<String>, String> {
let projects = store
.get(KEY_KNOWN_PROJECTS)
@@ -143,6 +146,7 @@ pub fn get_known_projects(store: &dyn StoreOps) -> Result<Vec<String>, String> {
Ok(projects)
}
#[allow(dead_code)]
pub fn forget_known_project(path: String, store: &dyn StoreOps) -> Result<(), String> {
let mut known_projects = get_known_projects(store)?;
let original_len = known_projects.len();
-168
View File
@@ -31,35 +31,6 @@ pub struct ChatResult {
pub session_id: Option<String>,
}
/// Return `true` when a non-empty string API key is stored under
/// `KEY_ANTHROPIC_API_KEY`.
fn get_anthropic_api_key_exists_impl(store: &dyn StoreOps) -> bool {
    match store.get(KEY_ANTHROPIC_API_KEY) {
        // A non-string value or an empty string both count as "no key".
        Some(value) => value.as_str().map(|k| !k.is_empty()).unwrap_or(false),
        None => false,
    }
}
/// Persist `api_key` in the store, then read it back to verify the write
/// round-trips (guards against silent save/serialization failures).
///
/// Returns `Err` with a human-readable message when `store.save()` fails or
/// when the read-back value is missing, non-string, or differs from the input.
fn set_anthropic_api_key_impl(store: &dyn StoreOps, api_key: &str) -> Result<(), String> {
    store.set(KEY_ANTHROPIC_API_KEY, json!(api_key));
    store.save()?;
    // Read-back verification: the stored value must be exactly what we wrote.
    match store.get(KEY_ANTHROPIC_API_KEY) {
        Some(value) => {
            if let Some(retrieved) = value.as_str() {
                if retrieved != api_key {
                    return Err("Retrieved key does not match saved key".to_string());
                }
            } else {
                return Err("Stored value is not a string".to_string());
            }
        }
        None => {
            return Err("API key was saved but cannot be retrieved".to_string());
        }
    }
    Ok(())
}
fn get_anthropic_api_key_impl(store: &dyn StoreOps) -> Result<String, String> {
match store.get(KEY_ANTHROPIC_API_KEY) {
Some(value) => {
@@ -172,14 +143,6 @@ pub async fn get_ollama_models(base_url: Option<String>) -> Result<Vec<String>,
OllamaProvider::get_models(&url).await
}
pub fn get_anthropic_api_key_exists(store: &dyn StoreOps) -> Result<bool, String> {
Ok(get_anthropic_api_key_exists_impl(store))
}
pub fn set_anthropic_api_key(store: &dyn StoreOps, api_key: String) -> Result<(), String> {
set_anthropic_api_key_impl(store, &api_key)
}
/// Build a prompt for Claude Code that includes prior conversation history.
///
/// When a Claude Code session cannot be resumed (no session_id), we embed
@@ -627,22 +590,6 @@ mod tests {
save_should_fail: false,
}
}
fn with_save_error() -> Self {
Self {
data: Mutex::new(HashMap::new()),
save_should_fail: true,
}
}
fn with_entry(key: &str, value: serde_json::Value) -> Self {
let mut map = HashMap::new();
map.insert(key.to_string(), value);
Self {
data: Mutex::new(map),
save_should_fail: false,
}
}
}
impl StoreOps for MockStore {
@@ -695,121 +642,6 @@ mod tests {
assert!(result.is_ok());
}
// ---------------------------------------------------------------------------
// get_anthropic_api_key_exists_impl
// ---------------------------------------------------------------------------
#[test]
fn api_key_exists_when_key_is_present_and_non_empty() {
let store = MockStore::with_entry("anthropic_api_key", json!("sk-test-key"));
assert!(get_anthropic_api_key_exists_impl(&store));
}
#[test]
fn api_key_exists_returns_false_when_key_is_empty_string() {
let store = MockStore::with_entry("anthropic_api_key", json!(""));
assert!(!get_anthropic_api_key_exists_impl(&store));
}
#[test]
fn api_key_exists_returns_false_when_key_absent() {
let store = MockStore::new();
assert!(!get_anthropic_api_key_exists_impl(&store));
}
#[test]
fn api_key_exists_returns_false_when_value_is_not_string() {
let store = MockStore::with_entry("anthropic_api_key", json!(42));
assert!(!get_anthropic_api_key_exists_impl(&store));
}
// ---------------------------------------------------------------------------
// get_anthropic_api_key_impl
// ---------------------------------------------------------------------------
#[test]
fn get_api_key_returns_key_when_present() {
let store = MockStore::with_entry("anthropic_api_key", json!("sk-test-key"));
let result = get_anthropic_api_key_impl(&store);
assert_eq!(result.unwrap(), "sk-test-key");
}
#[test]
fn get_api_key_errors_when_empty() {
let store = MockStore::with_entry("anthropic_api_key", json!(""));
let result = get_anthropic_api_key_impl(&store);
assert!(result.is_err());
assert!(result.unwrap_err().contains("empty"));
}
#[test]
fn get_api_key_errors_when_absent() {
let store = MockStore::new();
let result = get_anthropic_api_key_impl(&store);
assert!(result.is_err());
assert!(result.unwrap_err().contains("not found"));
}
#[test]
fn get_api_key_errors_when_value_not_string() {
let store = MockStore::with_entry("anthropic_api_key", json!(123));
let result = get_anthropic_api_key_impl(&store);
assert!(result.is_err());
assert!(result.unwrap_err().contains("not a string"));
}
// ---------------------------------------------------------------------------
// set_anthropic_api_key_impl
// ---------------------------------------------------------------------------
#[test]
fn set_api_key_stores_and_returns_ok() {
let store = MockStore::new();
let result = set_anthropic_api_key_impl(&store, "sk-my-key");
assert!(result.is_ok());
assert_eq!(store.get("anthropic_api_key"), Some(json!("sk-my-key")));
}
#[test]
fn set_api_key_returns_error_when_save_fails() {
let store = MockStore::with_save_error();
let result = set_anthropic_api_key_impl(&store, "sk-my-key");
assert!(result.is_err());
assert!(result.unwrap_err().contains("mock save error"));
}
// ---------------------------------------------------------------------------
// Public wrappers: get_anthropic_api_key_exists / set_anthropic_api_key
// ---------------------------------------------------------------------------
#[test]
fn public_api_key_exists_returns_ok_bool() {
let store = MockStore::with_entry("anthropic_api_key", json!("sk-abc"));
let result = get_anthropic_api_key_exists(&store);
assert_eq!(result, Ok(true));
}
#[test]
fn public_api_key_exists_false_when_absent() {
let store = MockStore::new();
let result = get_anthropic_api_key_exists(&store);
assert_eq!(result, Ok(false));
}
#[test]
fn public_set_api_key_succeeds() {
let store = MockStore::new();
let result = set_anthropic_api_key(&store, "sk-xyz".to_string());
assert!(result.is_ok());
}
#[test]
fn public_set_api_key_propagates_save_error() {
let store = MockStore::with_save_error();
let result = set_anthropic_api_key(&store, "sk-xyz".to_string());
assert!(result.is_err());
}
// ---------------------------------------------------------------------------
// get_tool_definitions
// ---------------------------------------------------------------------------
+5 -5
View File
@@ -780,7 +780,7 @@ async fn main() -> Result<(), std::io::Error> {
// in `chat::transport::matrix::bot::run::spawn_bot`. Refactor to consume this
// shared instance via `AppContext.timer_store` so cancellations from MCP
// tools and the bot's tick loop see the same in-memory state.
let timer_store = std::sync::Arc::new(crate::chat::timer::TimerStore::load(
let timer_store = std::sync::Arc::new(crate::service::timer::TimerStore::load(
startup_root
.as_ref()
.map(|r| r.join(".huskies").join("timers.json"))
@@ -844,7 +844,7 @@ async fn main() -> Result<(), std::io::Error> {
// Timer: fire due timers every second.
if let Some(ref root) = tick_root {
let result =
crate::chat::timer::tick_once(&tick_timer, &tick_agents, root).await;
crate::service::timer::tick_once(&tick_timer, &tick_agents, root).await;
if let Err(msg) = result {
crate::slog_error!("[tick] Timer tick panicked: {msg}");
}
@@ -893,7 +893,7 @@ async fn main() -> Result<(), std::io::Error> {
// These mirror the listener that the Matrix bot spawns internally.
if let (Some(ctx), Some(root)) = (&whatsapp_ctx, &startup_root) {
let ambient_rooms = Arc::clone(&ctx.ambient_rooms);
chat::transport::matrix::notifications::spawn_notification_listener(
crate::service::notifications::spawn_notification_listener(
Arc::clone(&ctx.transport),
move || ambient_rooms.lock().unwrap().iter().cloned().collect(),
watcher_rx_for_whatsapp,
@@ -904,7 +904,7 @@ async fn main() -> Result<(), std::io::Error> {
}
if let (Some(ctx), Some(root)) = (&slack_ctx, &startup_root) {
let channel_ids: Vec<String> = ctx.channel_ids.iter().cloned().collect();
chat::transport::matrix::notifications::spawn_notification_listener(
crate::service::notifications::spawn_notification_listener(
Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
move || channel_ids.clone(),
watcher_rx_for_slack,
@@ -919,7 +919,7 @@ async fn main() -> Result<(), std::io::Error> {
// Spawn stage-transition notification listener for Discord.
let channel_ids: Vec<String> = ctx.channel_ids.iter().cloned().collect();
chat::transport::matrix::notifications::spawn_notification_listener(
crate::service::notifications::spawn_notification_listener(
Arc::clone(&ctx.transport) as Arc<dyn crate::chat::ChatTransport>,
move || channel_ids.clone(),
watcher_rx_for_discord,
+50
View File
@@ -92,6 +92,56 @@ pub fn read_work_item_from_stage(
}
}
/// Test-fixture helpers that may call `std::fs` — kept here so that
/// `mod.rs` and topic-file `#[cfg(test)]` blocks never need to import
/// `std::fs`, `tokio::fs`, or `std::process` directly.
///
/// All helpers `unwrap()` on I/O failure: these run only under `#[cfg(test)]`
/// against a fresh `TempDir`, so a failure is a broken test environment, not
/// a recoverable condition.
#[cfg(test)]
pub mod test_helpers {
    use tempfile::TempDir;

    /// Create the `.huskies/` directory.
    pub fn make_huskies_dir(tmp: &TempDir) {
        std::fs::create_dir_all(tmp.path().join(".huskies")).unwrap();
    }

    /// Create the `5_done` and `6_archived` work-stage directories.
    pub fn make_work_dirs(tmp: &TempDir) {
        for stage in &["5_done", "6_archived"] {
            std::fs::create_dir_all(tmp.path().join(".huskies").join("work").join(stage)).unwrap();
        }
    }

    /// Create all six pipeline stage directories under `.huskies/work/`.
    pub fn make_stage_dirs(tmp: &TempDir) {
        for stage in &[
            "1_backlog",
            "2_current",
            "3_qa",
            "4_merge",
            "5_done",
            "6_archived",
        ] {
            std::fs::create_dir_all(tmp.path().join(".huskies").join("work").join(stage)).unwrap();
        }
    }

    /// Write `.huskies/project.toml` with the given TOML content.
    pub fn make_project_toml(tmp: &TempDir, content: &str) {
        let sk_dir = tmp.path().join(".huskies");
        std::fs::create_dir_all(&sk_dir).unwrap();
        std::fs::write(sk_dir.join("project.toml"), content).unwrap();
    }

    /// Write a fixture file at `relative_path` (relative to the tmp root).
    /// Missing parent directories are created on demand.
    pub fn write_story_file(tmp: &TempDir, relative_path: &str, content: &str) {
        let path = tmp.path().join(relative_path);
        if let Some(parent) = path.parent() {
            std::fs::create_dir_all(parent).unwrap();
        }
        std::fs::write(path, content).unwrap();
    }
}
#[cfg(test)]
mod tests {
use super::*;
+10 -35
View File
@@ -290,6 +290,7 @@ fn config_to_entries(config: &ProjectConfig) -> Vec<AgentConfigEntry> {
mod tests {
use super::*;
use crate::agents::AgentStatus;
use io::test_helpers::*;
use std::sync::Arc;
use tempfile::TempDir;
@@ -301,43 +302,17 @@ mod tests {
Arc::new(pool)
}
fn make_work_dirs(tmp: &TempDir) {
for stage in &["5_done", "6_archived"] {
std::fs::create_dir_all(tmp.path().join(".huskies").join("work").join(stage)).unwrap();
}
}
fn make_stage_dirs(tmp: &TempDir) {
for stage in &[
"1_backlog",
"2_current",
"3_qa",
"4_merge",
"5_done",
"6_archived",
] {
std::fs::create_dir_all(tmp.path().join(".huskies").join("work").join(stage)).unwrap();
}
}
fn make_project_toml(tmp: &TempDir, content: &str) {
let sk_dir = tmp.path().join(".huskies");
std::fs::create_dir_all(&sk_dir).unwrap();
std::fs::write(sk_dir.join("project.toml"), content).unwrap();
}
// ── list_agents ───────────────────────────────────────────────────────────
#[tokio::test]
async fn list_agents_excludes_archived_stories() {
let tmp = TempDir::new().unwrap();
make_work_dirs(&tmp);
std::fs::write(
tmp.path()
.join(".huskies/work/6_archived/79_story_archived.md"),
write_story_file(
&tmp,
".huskies/work/6_archived/79_story_archived.md",
"---\nname: archived\n---\n",
)
.unwrap();
);
let pool = make_pool(&tmp);
pool.inject_test_agent("79_story_archived", "coder-1", AgentStatus::Completed);
@@ -363,7 +338,7 @@ mod tests {
#[test]
fn get_agent_config_returns_default_when_no_toml() {
let tmp = TempDir::new().unwrap();
std::fs::create_dir_all(tmp.path().join(".huskies")).unwrap();
make_huskies_dir(&tmp);
let entries = get_agent_config(tmp.path()).unwrap();
assert_eq!(entries.len(), 1);
assert_eq!(entries[0].name, "default");
@@ -405,11 +380,11 @@ max_budget_usd = 5.0
fn get_work_item_content_reads_from_backlog() {
let tmp = TempDir::new().unwrap();
make_stage_dirs(&tmp);
std::fs::write(
tmp.path().join(".huskies/work/1_backlog/42_story_foo.md"),
write_story_file(
&tmp,
".huskies/work/1_backlog/42_story_foo.md",
"---\nname: \"Foo Story\"\n---\n\nSome content.",
)
.unwrap();
);
let item = get_work_item_content(tmp.path(), "42_story_foo").unwrap();
assert!(item.content.contains("Some content."));
assert_eq!(item.stage, "backlog");
+100
View File
@@ -0,0 +1,100 @@
//! Anthropic I/O — the ONLY place in `service/anthropic/` that may perform
//! network requests or store operations.
//!
//! Every function here is a thin adapter that converts lower-level errors
//! into the typed [`super::Error`] variants. No business logic or branching
//! lives here; that belongs in `mod.rs`.
use super::{Error, ModelSummary, ModelsResponse};
use crate::store::StoreOps;
use reqwest::header::{HeaderMap, HeaderValue};
/// Store key for the Anthropic API key — shared with `llm::chat`.
pub(crate) const KEY_ANTHROPIC_API_KEY: &str = "anthropic_api_key";
const ANTHROPIC_VERSION: &str = "2023-06-01";
/// Return whether a non-empty, string-valued API key is stored.
///
/// Non-string values and missing entries both count as "no key".
pub(super) fn api_key_exists(store: &dyn StoreOps) -> bool {
    store
        .get(KEY_ANTHROPIC_API_KEY)
        .and_then(|value| value.as_str().map(|key| !key.is_empty()))
        .unwrap_or(false)
}
/// Read the stored API key, returning a typed error when absent or invalid.
pub(super) fn get_api_key(store: &dyn StoreOps) -> Result<String, Error> {
match store.get(KEY_ANTHROPIC_API_KEY) {
Some(value) => {
if let Some(key) = value.as_str() {
if key.is_empty() {
Err(Error::Validation(
"Anthropic API key is empty. Please set your API key.".to_string(),
))
} else {
Ok(key.to_string())
}
} else {
Err(Error::Validation(
"Stored API key is not a string".to_string(),
))
}
}
None => Err(Error::Validation(
"Anthropic API key not found. Please set your API key.".to_string(),
)),
}
}
/// Persist a new API key to the store.
///
/// Writes the key under [`KEY_ANTHROPIC_API_KEY`] and then flushes; any
/// failure is the `Err(String)` reported by `store.save()`.
pub(super) fn save_api_key(store: &dyn StoreOps, api_key: &str) -> Result<(), String> {
    store.set(KEY_ANTHROPIC_API_KEY, serde_json::json!(api_key));
    store.save()
}
/// Fetch models from the Anthropic API at `url`.
///
/// Performs one authenticated GET and maps the JSON body into
/// [`ModelSummary`] values. Error mapping:
/// - invalid header characters in the key → [`Error::Validation`]
/// - transport failure or non-2xx status → [`Error::UpstreamApi`]
/// - JSON deserialization failure → [`Error::Internal`]
pub(super) async fn fetch_models(api_key: &str, url: &str) -> Result<Vec<ModelSummary>, Error> {
    let client = reqwest::Client::new();
    let mut headers = HeaderMap::new();
    headers.insert(
        "x-api-key",
        HeaderValue::from_str(api_key)
            .map_err(|e| Error::Validation(format!("Invalid API key header value: {e}")))?,
    );
    headers.insert(
        "anthropic-version",
        HeaderValue::from_static(ANTHROPIC_VERSION),
    );
    let response = client
        .get(url)
        .headers(headers)
        .send()
        .await
        .map_err(|e| Error::UpstreamApi(e.to_string()))?;
    if !response.status().is_success() {
        // Capture the status before consuming the body; reading the error
        // body is best-effort and falls back to a placeholder.
        let status = response.status();
        let error_text = response
            .text()
            .await
            .unwrap_or_else(|_| "Unknown error".to_string());
        return Err(Error::UpstreamApi(format!(
            "Anthropic API error {status}: {error_text}"
        )));
    }
    let body = response
        .json::<ModelsResponse>()
        .await
        .map_err(|e| Error::Internal(format!("Failed to parse response: {e}")))?;
    // Project the raw API entries down to the two fields the service exposes.
    Ok(body
        .data
        .into_iter()
        .map(|m| ModelSummary {
            id: m.id,
            context_window: m.context_window,
        })
        .collect())
}
+178
View File
@@ -0,0 +1,178 @@
//! Anthropic service — public API for Anthropic API-key management and model listing.
//!
//! Exposes functions to check, store, and use the Anthropic API key, and to
//! list available models. HTTP handlers call these functions instead of
//! talking to `llm::chat` or making HTTP requests directly.
//!
//! Conventions: `docs/architecture/service-modules.md`
pub(super) mod io;
use crate::store::StoreOps;
use serde::{Deserialize, Serialize};
const ANTHROPIC_MODELS_URL: &str = "https://api.anthropic.com/v1/models";
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::anthropic` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::UpstreamApi`] → 502 Bad Gateway (or 400 for invalid keys)
/// - [`Error::Internal`] → 500 Internal Server Error
///
/// The `Display` impl prefixes each variant's message with a stable label,
/// so handlers can pass `err.to_string()` straight to clients.
#[derive(Debug)]
pub enum Error {
    /// The request was invalid (e.g. missing, empty, or malformed API key).
    Validation(String),
    /// The upstream Anthropic API returned an error or was unreachable.
    UpstreamApi(String),
    /// An internal error occurred (JSON parse failure, store I/O error, etc.).
    Internal(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Pair each variant with its stable, user-visible label, then emit
        // a single "label: message" line.
        let (label, msg) = match self {
            Self::Validation(m) => ("Validation error", m),
            Self::UpstreamApi(m) => ("Upstream API error", m),
            Self::Internal(m) => ("Internal error", m),
        };
        write!(f, "{label}: {msg}")
    }
}
// ── Types ─────────────────────────────────────────────────────────────────────
/// A summary of an Anthropic model as returned by the `/v1/models` endpoint.
///
/// Public projection of [`ModelInfo`]; derives `poem_openapi::Object` so
/// HTTP handlers can return it directly in API responses.
#[derive(Serialize, Deserialize, Debug, PartialEq, poem_openapi::Object)]
pub struct ModelSummary {
    pub id: String,
    pub context_window: u64,
}

/// Raw response shape from the Anthropic `/v1/models` endpoint.
#[derive(Deserialize)]
pub(super) struct ModelsResponse {
    pub data: Vec<ModelInfo>,
}

/// A single model entry in the Anthropic API response.
///
/// Only the fields this service consumes are declared; serde's default
/// behavior ignores any extra fields in the upstream payload.
#[derive(Deserialize)]
pub(super) struct ModelInfo {
    pub id: String,
    pub context_window: u64,
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Return whether a non-empty Anthropic API key is currently stored.
///
/// Infallible in practice; the `Result` keeps the signature uniform with
/// the other service entry points.
pub fn get_api_key_exists(store: &dyn StoreOps) -> Result<bool, Error> {
    Ok(io::api_key_exists(store))
}

/// Read the stored Anthropic API key.
///
/// Returns [`Error::Validation`] when the key is absent, empty, or not a string.
pub fn get_api_key(store: &dyn StoreOps) -> Result<String, Error> {
    io::get_api_key(store)
}

/// Store or replace the Anthropic API key.
///
/// Store save failures surface as [`Error::Internal`].
pub fn set_api_key(store: &dyn StoreOps, api_key: String) -> Result<(), Error> {
    io::save_api_key(store, &api_key).map_err(Error::Internal)
}

/// List available Anthropic models from the production endpoint.
pub async fn list_models(store: &dyn StoreOps) -> Result<Vec<ModelSummary>, Error> {
    list_models_from(store, ANTHROPIC_MODELS_URL).await
}

/// List available Anthropic models from `url` (injectable for tests).
///
/// Fails fast with a validation error — before any network call — when no
/// usable API key is stored.
pub async fn list_models_from(store: &dyn StoreOps, url: &str) -> Result<Vec<ModelSummary>, Error> {
    let api_key = get_api_key(store)?;
    io::fetch_models(&api_key, url).await
}
/// Parse a raw JSON string from the Anthropic `/v1/models` endpoint into model summaries.
///
/// Pure function for unit testing; production code uses [`list_models`].
#[cfg(test)]
pub fn parse_models_response(json: &str) -> Result<Vec<ModelSummary>, Error> {
    match serde_json::from_str::<ModelsResponse>(json) {
        Err(e) => Err(Error::Internal(format!(
            "Failed to parse models response: {e}"
        ))),
        Ok(response) => {
            let summaries = response
                .data
                .into_iter()
                .map(|m| ModelSummary {
                    id: m.id,
                    context_window: m.context_window,
                })
                .collect();
            Ok(summaries)
        }
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    // Pure unit tests for response parsing — no tempdir, no network.

    #[test]
    fn parse_models_response_parses_single_model() {
        let json = r#"{"data":[{"id":"claude-opus-4-5","context_window":200000}]}"#;
        let models = parse_models_response(json).unwrap();
        assert_eq!(models.len(), 1);
        assert_eq!(models[0].id, "claude-opus-4-5");
        assert_eq!(models[0].context_window, 200000);
    }

    #[test]
    fn parse_models_response_parses_multiple_models() {
        // Whitespace inside the payload exercises serde's tolerance of
        // pretty-printed JSON.
        let json = r#"{"data":[
            {"id":"claude-opus-4-5","context_window":200000},
            {"id":"claude-haiku-4-5-20251001","context_window":100000}
        ]}"#;
        let models = parse_models_response(json).unwrap();
        assert_eq!(models.len(), 2);
        assert_eq!(models[0].id, "claude-opus-4-5");
        assert_eq!(models[1].context_window, 100000);
    }

    #[test]
    fn parse_models_response_returns_empty_for_empty_data() {
        let json = r#"{"data":[]}"#;
        let models = parse_models_response(json).unwrap();
        assert!(models.is_empty());
    }

    #[test]
    fn parse_models_response_returns_internal_error_for_invalid_json() {
        let result = parse_models_response("not json at all");
        assert!(matches!(result, Err(Error::Internal(_))));
    }

    #[test]
    fn parse_models_response_returns_error_for_missing_data_field() {
        // `data` is a required field; serde reports its absence as an error.
        let result = parse_models_response(r#"{"wrong_field":[]}"#);
        assert!(matches!(result, Err(Error::Internal(_))));
    }

    // Display formatting: each variant's payload must survive into the
    // user-visible string.

    #[test]
    fn error_display_validation() {
        let e = Error::Validation("no key".to_string());
        assert!(e.to_string().contains("no key"));
    }

    #[test]
    fn error_display_upstream_api() {
        let e = Error::UpstreamApi("500 Server Error".to_string());
        assert!(e.to_string().contains("500 Server Error"));
    }

    #[test]
    fn error_display_internal() {
        let e = Error::Internal("parse failed".to_string());
        assert!(e.to_string().contains("parse failed"));
    }
}
+158
View File
@@ -0,0 +1,158 @@
//! Bot command I/O — the ONLY place in `service/bot_command/` that may call
//! transport handlers, load stores, spawn tasks, or interact with the agent
//! pool.
//!
//! Every function here is a thin adapter over the underlying matrix/timer/htop
//! handlers. No argument parsing or business logic lives here — that belongs in
//! `parse.rs` or `mod.rs`.
use crate::agents::AgentPool;
use std::path::Path;
use std::sync::Arc;
use super::parse::{AssignArgs, StartArgs};
/// Call the Matrix `assign` handler with pre-validated arguments.
///
/// `"web-ui"` is the synthetic sender identity — presumably the acting-user
/// name the handler records; confirm against `handle_assign`.
pub(super) async fn call_assign(
    args: &AssignArgs,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> String {
    crate::chat::transport::matrix::assign::handle_assign(
        "web-ui",
        &args.number,
        &args.model,
        project_root,
        agents,
    )
    .await
}

/// Call the Matrix `start` handler with pre-validated arguments.
///
/// The optional model hint is forwarded as `Option<&str>` via `as_deref`.
pub(super) async fn call_start(
    args: &StartArgs,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> String {
    crate::chat::transport::matrix::start::handle_start(
        "web-ui",
        &args.number,
        args.hint.as_deref(),
        project_root,
        agents,
    )
    .await
}

/// Call the Matrix `delete` handler with a pre-validated story number.
pub(super) async fn call_delete(
    number: &str,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> String {
    crate::chat::transport::matrix::delete::handle_delete("web-ui", number, project_root, agents)
        .await
}

/// Call the Matrix `rmtree` handler with a pre-validated story number.
pub(super) async fn call_rmtree(
    number: &str,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> String {
    crate::chat::transport::matrix::rmtree::handle_rmtree("web-ui", number, project_root, agents)
        .await
}

/// Call the Matrix `rebuild` handler (takes no additional arguments).
pub(super) async fn call_rebuild(project_root: &Path, agents: &Arc<AgentPool>) -> String {
    crate::chat::transport::matrix::rebuild::handle_rebuild("web-ui", project_root, agents).await
}
/// Parse and execute a `timer` command.
///
/// Returns `Err` with a usage string if the timer arguments cannot be parsed.
///
/// A synthetic bot-addressed message (`__web_ui__ timer <args>`) is built so
/// the shared extractor parses web-UI input exactly like chat input. Note
/// that a fresh `TimerStore` is loaded from `.huskies/timers.json` on every
/// call rather than a shared in-memory instance.
pub(super) async fn call_timer(args: &str, project_root: &Path) -> Result<String, String> {
    let synthetic = format!("__web_ui__ timer {args}");
    let timer_cmd = match crate::service::timer::extract_timer_command(
        &synthetic,
        "__web_ui__",
        "@__web_ui__:localhost",
    ) {
        Some(cmd) => cmd,
        None => {
            return Err(
                "Usage: `/timer list`, `/timer <number> <HH:MM>`, or `/timer cancel <number>`"
                    .to_string(),
            );
        }
    };
    let store =
        crate::service::timer::TimerStore::load(project_root.join(".huskies").join("timers.json"));
    Ok(crate::service::timer::handle_timer_command(timer_cmd, &store, project_root).await)
}
/// Build an `htop` snapshot for the web UI.
///
/// The web UI uses one-shot HTTP requests, so live-updating sessions are not
/// supported. `htop stop` returns a helpful explanation instead of an error.
///
/// Snapshots are built with `elapsed = 0`; an unparseable command falls back
/// to the default 300-second duration.
pub(super) fn call_htop(args: &str, agents: &Arc<AgentPool>) -> String {
    use crate::chat::transport::matrix::htop::{HtopCommand, build_htop_message};

    // Reconstruct a bot-addressed message so the shared extractor sees the
    // same shape it would receive from a chat transport.
    let synthetic = if args.is_empty() {
        "__web_ui__ htop".to_string()
    } else {
        format!("__web_ui__ htop {args}")
    };
    match crate::chat::transport::matrix::htop::extract_htop_command(
        &synthetic,
        "__web_ui__",
        "@__web_ui__:localhost",
    ) {
        Some(HtopCommand::Stop) => "No active htop session in the web UI. \
        Live sessions are only supported in chat transports (Matrix, Slack, Discord)."
            .to_string(),
        Some(HtopCommand::Start { duration_secs }) => build_htop_message(agents, 0, duration_secs),
        None => build_htop_message(agents, 0, 300),
    }
}
/// Dispatch through the synchronous command registry.
///
/// Returns `Some(response)` if the command keyword is registered, or `None`
/// if the keyword is unknown.
///
/// The web UI keeps no room state between requests, so a throwaway
/// `ambient_rooms` set and fixed `__web_ui__` identities are supplied just to
/// satisfy the [`CommandDispatch`] contract.
pub(super) fn call_sync(
    cmd: &str,
    args: &str,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> Option<String> {
    use crate::chat::commands::CommandDispatch;
    use std::collections::HashSet;
    use std::sync::Mutex;

    let ambient_rooms: Arc<Mutex<HashSet<String>>> = Arc::new(Mutex::new(HashSet::new()));
    let bot_name = "__web_ui__";
    let bot_user_id = "@__web_ui__:localhost";
    let room_id = "__web_ui__";
    let dispatch = CommandDispatch {
        bot_name,
        bot_user_id,
        project_root,
        agents,
        ambient_rooms: &ambient_rooms,
        room_id,
    };
    // Build a synthetic bot-addressed message so the registry parses it
    // identically to messages from chat transports.
    let synthetic = if args.is_empty() {
        format!("{bot_name} {cmd}")
    } else {
        format!("{bot_name} {cmd} {args}")
    };
    crate::chat::commands::try_handle_command(&dispatch, &synthetic)
}
+97
View File
@@ -0,0 +1,97 @@
//! Bot command service — domain logic for dispatching slash commands.
//!
//! Extracted from `http/bot_command.rs` so that argument parsing and dispatch
//! are independently testable without an HTTP layer.
//!
//! Conventions: `docs/architecture/service-modules.md`
//!
//! # Structure
//! - `mod.rs` (this file) — public API and typed `Error` type
//! - `parse.rs` — pure argument parsing, no I/O
//! - `io.rs` — all side-effectful calls (transport handlers, stores, agent pool)
pub(super) mod io;
pub mod parse;
use crate::agents::AgentPool;
use std::path::Path;
use std::sync::Arc;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::bot_command::execute`.
///
/// HTTP handlers map these to specific status codes:
/// - [`Error::UnknownCommand`] → 404 Not Found
/// - [`Error::BadArgs`] → 400 Bad Request
/// - [`Error::CommandFailed`] → 500 Internal Server Error
#[derive(Debug)]
#[allow(dead_code)] // CommandFailed is part of the public API contract; not yet reachable
pub enum Error {
    /// The command keyword does not match any registered command.
    UnknownCommand(String),
    /// The command exists but the provided arguments are invalid.
    BadArgs(String),
    /// The command ran but failed with an internal error.
    /// Currently never constructed (see the `allow` above).
    CommandFailed(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Every variant carries a ready-made, user-visible message, so the
        // display form is just the payload with no label prefix.
        let msg = match self {
            Self::UnknownCommand(m) => m,
            Self::BadArgs(m) => m,
            Self::CommandFailed(m) => m,
        };
        write!(f, "{msg}")
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Execute a bot command and return the markdown response.
///
/// Dispatches to the same handlers used by the Matrix and Slack bots. The
/// `cmd` argument is the lower-cased command keyword (e.g. `"status"`,
/// `"start"`). The `args` argument is any text after the keyword, already
/// trimmed.
///
/// # Errors
/// - [`Error::UnknownCommand`] if the command keyword is not registered.
/// - [`Error::BadArgs`] if the arguments fail validation.
/// - [`Error::CommandFailed`] if command execution raises an internal error.
pub async fn execute(
    cmd: &str,
    args: &str,
    project_root: &Path,
    agents: &Arc<AgentPool>,
) -> Result<String, Error> {
    match cmd {
        // Structured-argument commands: parse first (pure), then perform
        // the side-effectful call through `io`.
        "assign" => {
            let parsed = parse::parse_assign(args).map_err(Error::BadArgs)?;
            Ok(io::call_assign(&parsed, project_root, agents).await)
        }
        "start" => {
            let parsed = parse::parse_start(args).map_err(Error::BadArgs)?;
            Ok(io::call_start(&parsed, project_root, agents).await)
        }
        "delete" => {
            let number = parse::parse_number("delete", args).map_err(Error::BadArgs)?;
            Ok(io::call_delete(&number, project_root, agents).await)
        }
        "rmtree" => {
            let number = parse::parse_number("rmtree", args).map_err(Error::BadArgs)?;
            Ok(io::call_rmtree(&number, project_root, agents).await)
        }
        // `rebuild` takes no arguments; any trailing text is ignored.
        "rebuild" => Ok(io::call_rebuild(project_root, agents).await),
        // `timer` parses its own arguments inside `io::call_timer`.
        "timer" => io::call_timer(args, project_root)
            .await
            .map_err(Error::BadArgs),
        "htop" => Ok(io::call_htop(args, agents)),
        // Everything else goes through the synchronous registry; `None`
        // means the keyword is unknown.
        _ => match io::call_sync(cmd, args, project_root, agents) {
            Some(response) => Ok(response),
            None => Err(Error::UnknownCommand(format!(
                "Unknown command: `/{cmd}`. Type `/help` to see available commands."
            ))),
        },
    }
}
+216
View File
@@ -0,0 +1,216 @@
//! Pure argument parsing for bot commands.
//!
//! Every function in this module is synchronous and free of I/O. All
//! filesystem, network, and agent-pool access belongs in `io.rs`.
// ── Parsed argument types ─────────────────────────────────────────────────────
/// Parsed arguments for the `assign` command.
#[derive(Debug)]
pub struct AssignArgs {
    /// The numeric story identifier (as a string, e.g. `"42"`).
    pub number: String,
    /// The model / agent name (e.g. `"opus"`, `"coder-sonnet"`).
    pub model: String,
}

/// Parsed arguments for the `start` command.
#[derive(Debug)]
pub struct StartArgs {
    /// The numeric story identifier.
    pub number: String,
    /// Optional model hint (e.g. `"opus"` → resolved to `"coder-opus"`).
    pub hint: Option<String>,
}

// ── Parsing functions ─────────────────────────────────────────────────────────

/// True when `candidate` is a non-empty, all-ASCII-digit story number.
fn is_story_number(candidate: &str) -> bool {
    !candidate.is_empty() && candidate.bytes().all(|b| b.is_ascii_digit())
}

/// Split `args` at the first whitespace character; when there is no
/// whitespace the whole input is the first part and the rest is empty.
fn split_first_word(args: &str) -> (&str, &str) {
    args.split_once(char::is_whitespace).unwrap_or((args, ""))
}

/// Parse `assign` arguments: `<number> <model>`.
///
/// Returns `Err` with a user-visible usage string if the arguments are missing
/// or invalid (non-numeric number, empty model).
pub fn parse_assign(args: &str) -> Result<AssignArgs, String> {
    let (first, rest) = split_first_word(args);
    let number = first.trim();
    let model = rest.trim();
    if !is_story_number(number) || model.is_empty() {
        return Err("Usage: `/assign <number> <model>` (e.g. `/assign 42 opus`)".to_string());
    }
    Ok(AssignArgs {
        number: number.to_string(),
        model: model.to_string(),
    })
}

/// Parse `start` arguments: `<number>` or `<number> <model_hint>`.
///
/// Returns `Err` with a user-visible usage string if the number is missing
/// or non-numeric.
pub fn parse_start(args: &str) -> Result<StartArgs, String> {
    let (first, rest) = split_first_word(args);
    let number = first.trim();
    if !is_story_number(number) {
        return Err(
            "Usage: `/start <number>` or `/start <number> <model>` (e.g. `/start 42 opus`)"
                .to_string(),
        );
    }
    let hint_str = rest.trim();
    let hint = if hint_str.is_empty() {
        None
    } else {
        Some(hint_str.to_string())
    };
    Ok(StartArgs {
        number: number.to_string(),
        hint,
    })
}

/// Parse a single numeric argument for commands like `delete` and `rmtree`.
///
/// `cmd_name` is used only in the error message (e.g. `"delete"` or `"rmtree"`).
/// Returns `Err` with a user-visible usage string if the argument is missing
/// or non-numeric.
pub fn parse_number(cmd_name: &str, args: &str) -> Result<String, String> {
    let number = args.trim();
    if is_story_number(number) {
        Ok(number.to_string())
    } else {
        Err(format!(
            "Usage: `/{cmd_name} <number>` (e.g. `/{cmd_name} 42`)"
        ))
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    // Pure parsing tests — no I/O, no agent pool.

    // -- parse_assign ----------------------------------------------------------

    #[test]
    fn assign_valid() {
        let r = parse_assign("42 opus").unwrap();
        assert_eq!(r.number, "42");
        assert_eq!(r.model, "opus");
    }

    #[test]
    fn assign_valid_model_with_spaces() {
        // Everything after the first whitespace goes into `model`.
        let r = parse_assign("42 claude-opus-4").unwrap();
        assert_eq!(r.number, "42");
        assert_eq!(r.model, "claude-opus-4");
    }

    #[test]
    fn assign_missing_all_args() {
        assert!(parse_assign("").is_err());
    }

    #[test]
    fn assign_missing_model() {
        let err = parse_assign("42").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
    }

    #[test]
    fn assign_non_numeric_number() {
        let err = parse_assign("foo opus").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
    }

    #[test]
    fn assign_number_with_letters_is_invalid() {
        // "42x" mixes digits and letters — rejected, not truncated to "42".
        assert!(parse_assign("42x opus").is_err());
    }

    // -- parse_start -----------------------------------------------------------

    #[test]
    fn start_valid_number_only() {
        let r = parse_start("42").unwrap();
        assert_eq!(r.number, "42");
        assert!(r.hint.is_none());
    }

    #[test]
    fn start_valid_with_hint() {
        let r = parse_start("42 opus").unwrap();
        assert_eq!(r.number, "42");
        assert_eq!(r.hint.as_deref(), Some("opus"));
    }

    #[test]
    fn start_missing_number() {
        let err = parse_start("").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
    }

    #[test]
    fn start_non_numeric_number() {
        let err = parse_start("foo").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
    }

    #[test]
    fn start_non_numeric_with_hint() {
        assert!(parse_start("foo opus").is_err());
    }

    // -- parse_number ----------------------------------------------------------

    #[test]
    fn number_valid() {
        assert_eq!(parse_number("delete", "99").unwrap(), "99");
    }

    #[test]
    fn number_missing() {
        let err = parse_number("delete", "").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
        assert!(
            err.contains("delete"),
            "error should mention the command: {err}"
        );
    }

    #[test]
    fn number_non_numeric() {
        let err = parse_number("delete", "abc").unwrap_err();
        assert!(
            err.contains("Usage"),
            "error should contain usage hint: {err}"
        );
    }

    #[test]
    fn number_usage_contains_cmd_name() {
        let err = parse_number("rmtree", "").unwrap_err();
        assert!(
            err.contains("rmtree"),
            "usage should mention the command: {err}"
        );
    }

    #[test]
    fn number_whitespace_only_is_invalid() {
        // Whitespace trims down to an empty string, which is not a number.
        assert!(parse_number("delete", " ").is_err());
    }
}
+70
View File
@@ -0,0 +1,70 @@
//! Pure helpers for pipeline item ID parsing.
//!
//! Pipeline item IDs share the format `{number}_{type}_{slug}`, e.g.
//! `"42_story_foo"`, `"7_bug_bar"`, `"100_refactor_baz"`. The functions here
//! extract or validate the leading numeric segment without performing any I/O.
/// Extract the numeric prefix from a pipeline item ID.
///
/// IDs look like `"42_story_foo"`; the leading digit run (`"42"`) is
/// returned as a borrowed slice. Returns `None` when the segment before the
/// first `_` is empty or contains any non-digit character.
pub fn extract_item_number(item_id: &str) -> Option<&str> {
    // `split` always yields at least one segment, so `?` never fires here;
    // the validity checks below do the real filtering.
    let prefix = item_id.split('_').next()?;
    if !prefix.is_empty() && prefix.bytes().all(|b| b.is_ascii_digit()) {
        Some(prefix)
    } else {
        None
    }
}

/// Return `true` if `item_id` has a valid `{digits}_` prefix format.
///
/// Valid: `"42_story_foo"`, `"1_bug_bar"`.
/// Invalid: `"story_without_number"`, `""`, `"abc_story"`.
#[allow(dead_code)]
pub fn has_valid_id_prefix(item_id: &str) -> bool {
    extract_item_number(item_id).is_some()
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    // Pure tests — no filesystem, no fixtures.

    #[test]
    fn extract_item_number_extracts_prefix() {
        assert_eq!(extract_item_number("42_story_foo"), Some("42"));
        assert_eq!(extract_item_number("1_bug_bar"), Some("1"));
        assert_eq!(extract_item_number("100_refactor_baz"), Some("100"));
        assert_eq!(
            extract_item_number("261_story_bot_notifications"),
            Some("261")
        );
        assert_eq!(extract_item_number("1_spike_research"), Some("1"));
    }

    #[test]
    fn extract_item_number_returns_none_for_no_numeric_prefix() {
        assert_eq!(extract_item_number("story_without_number"), None);
        assert_eq!(extract_item_number("abc_story"), None);
        assert_eq!(extract_item_number("abc_story_thing"), None);
        assert_eq!(extract_item_number(""), None);
    }

    #[test]
    fn extract_item_number_returns_none_for_empty_first_segment() {
        // Leading underscore: first segment is "".
        assert_eq!(extract_item_number("_story_thing"), None);
    }

    #[test]
    fn has_valid_id_prefix_returns_true_for_valid_ids() {
        assert!(has_valid_id_prefix("42_story_foo"));
        assert!(has_valid_id_prefix("1_bug_bar"));
    }

    #[test]
    fn has_valid_id_prefix_returns_false_for_invalid_ids() {
        assert!(!has_valid_id_prefix("story_no_number"));
        assert!(!has_valid_id_prefix(""));
    }
}
+6
View File
@@ -0,0 +1,6 @@
//! Shared pure helpers used by multiple service modules.
//!
//! All sub-modules here are pure (no I/O, no side effects). Any helper that
//! duplicates logic across two or more service modules belongs here; anything
//! used by only one service stays in that service.
pub mod item_id;
+72
View File
@@ -0,0 +1,72 @@
//! Diagnostics I/O — the ONLY place in `service::diagnostics/` that may perform side effects.
//!
//! Side effects here include: reading and writing `.claude/settings.json` via `std::fs`.
//! Pure permission-rule logic (pattern derivation, wildcard domination checks) lives in
//! `permission.rs`.
use serde_json::{Value, json};
use std::fs;
use std::path::Path;
/// Add a permission rule to `.claude/settings.json` in the project root.
///
/// Does nothing if the rule already exists (exact match) or is already covered
/// by a wildcard pattern in the allow list. Creates the file and any missing
/// parent directories if they do not yet exist.
///
/// # Errors
/// Returns `Err(String)` if the directory cannot be created, the file cannot be
/// read or written, the JSON cannot be parsed or serialised, or the existing
/// settings document has an unexpected shape (non-object root, non-object
/// `permissions`, or non-array `permissions.allow`).
pub fn add_permission_rule(project_root: &Path, rule: &str) -> Result<(), String> {
    let claude_dir = project_root.join(".claude");
    fs::create_dir_all(&claude_dir)
        .map_err(|e| format!("Failed to create .claude/ directory: {e}"))?;
    let settings_path = claude_dir.join("settings.json");

    let mut settings: Value = if settings_path.exists() {
        let content = fs::read_to_string(&settings_path)
            .map_err(|e| format!("Failed to read settings.json: {e}"))?;
        serde_json::from_str(&content).map_err(|e| format!("Failed to parse settings.json: {e}"))?
    } else {
        json!({ "permissions": { "allow": [] } })
    };

    // Normalize the document so `/permissions/allow` is guaranteed to be an
    // array before we touch it. The previous implementation unwrapped here
    // and panicked when the on-disk file had e.g. a non-object root, a
    // `permissions` object without an `allow` key, or a non-array `allow`.
    let root = settings
        .as_object_mut()
        .ok_or_else(|| "settings.json root is not a JSON object".to_string())?;
    let permissions = root
        .entry("permissions")
        .or_insert_with(|| json!({ "allow": [] }));
    let permissions_obj = permissions
        .as_object_mut()
        .ok_or_else(|| "settings.json `permissions` is not a JSON object".to_string())?;
    let allow = permissions_obj
        .entry("allow")
        .or_insert_with(|| json!([]))
        .as_array_mut()
        .ok_or_else(|| "settings.json `permissions.allow` is not a JSON array".to_string())?;

    let rule_value = Value::String(rule.to_string());
    // Exact duplicate check.
    if allow.contains(&rule_value) {
        return Ok(());
    }
    // Wildcard-coverage check: if "mcp__huskies__*" exists, skip more-specific rules.
    if super::permission::is_dominated_by_wildcard(rule, allow) {
        return Ok(());
    }
    allow.push(rule_value);

    let pretty =
        serde_json::to_string_pretty(&settings).map_err(|e| format!("Failed to serialize: {e}"))?;
    fs::write(&settings_path, pretty).map_err(|e| format!("Failed to write settings.json: {e}"))?;
    Ok(())
}
+89
View File
@@ -0,0 +1,89 @@
//! Diagnostics service — server logs, CRDT dump, permission management, and story movement.
//!
//! Extracted from `http/mcp/diagnostics.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects (filesystem reads/writes)
//! - `permission.rs` — pure permission-rule generation and wildcard checks
pub mod io;
pub mod permission;
pub use io::add_permission_rule;
pub use permission::generate_permission_rule;
#[allow(unused_imports)]
pub use permission::is_dominated_by_wildcard;
// ── Error type ────────────────────────────────────────────────────────────────

/// Typed errors returned by `service::diagnostics` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::NotFound`] → 404 Not Found
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Conflict`] → 409 Conflict
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::UpstreamFailure`] → 500 Internal Server Error
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// The requested resource was not found.
    NotFound(String),
    /// A required argument is missing or has an invalid value.
    Validation(String),
    /// The operation cannot proceed due to a conflicting state.
    Conflict(String),
    /// A filesystem read or write operation failed.
    Io(String),
    /// An upstream dependency returned an unexpected error.
    UpstreamFailure(String),
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Every variant renders as "<prefix>: <detail>".
        let (prefix, detail) = match self {
            Self::NotFound(msg) => ("Not found", msg),
            Self::Validation(msg) => ("Validation error", msg),
            Self::Conflict(msg) => ("Conflict", msg),
            Self::Io(msg) => ("I/O error", msg),
            Self::UpstreamFailure(msg) => ("Upstream failure", msg),
        };
        write!(f, "{prefix}: {detail}")
    }
}

// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn error_display_not_found() {
        assert!(
            Error::NotFound("log file missing".to_string())
                .to_string()
                .contains("Not found")
        );
    }

    #[test]
    fn error_display_validation() {
        assert!(
            Error::Validation("invalid filter".to_string())
                .to_string()
                .contains("Validation error")
        );
    }

    #[test]
    fn error_display_conflict() {
        assert!(
            Error::Conflict("story in wrong stage".to_string())
                .to_string()
                .contains("Conflict")
        );
    }

    #[test]
    fn error_display_io() {
        assert!(
            Error::Io("settings.json write failed".to_string())
                .to_string()
                .contains("I/O error")
        );
    }

    #[test]
    fn error_display_upstream_failure() {
        assert!(
            Error::UpstreamFailure("rebuild failed".to_string())
                .to_string()
                .contains("Upstream failure")
        );
    }
}
@@ -0,0 +1,105 @@
//! Pure permission-rule generation for `service::diagnostics`.
//!
//! These functions produce Claude Code permission-rule strings from tool call
//! metadata. No I/O: they take `&str` / `&Value` and return `String`.
use serde_json::Value;
/// Generate a Claude Code permission rule string for the given tool name and input.
///
/// - `Bash` tools → `Bash(first_word *)` derived from the `command` field.
/// - All other tools → the tool name verbatim (e.g. `Edit`, `mcp__huskies__create_story`).
pub fn generate_permission_rule(tool_name: &str, tool_input: &Value) -> String {
if tool_name == "Bash" {
let command_str = tool_input
.get("command")
.and_then(|v| v.as_str())
.unwrap_or("");
let first_word = command_str.split_whitespace().next().unwrap_or("unknown");
format!("Bash({first_word} *)")
} else {
tool_name.to_string()
}
}
/// Return `true` if `rule` is already covered by an existing wildcard in `allow_list`.
///
/// For example, if `allow_list` contains `"mcp__huskies__*"`, then the more
/// specific rule `"mcp__huskies__create_story"` is already covered.
pub fn is_dominated_by_wildcard(rule: &str, allow_list: &[Value]) -> bool {
allow_list.iter().any(|existing| {
if let Some(pat) = existing.as_str()
&& let Some(prefix) = pat.strip_suffix('*')
{
return rule.starts_with(prefix);
}
false
})
}
// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn generate_rule_for_edit_tool() {
        assert_eq!(generate_permission_rule("Edit", &json!({})), "Edit");
    }

    #[test]
    fn generate_rule_for_write_tool() {
        assert_eq!(generate_permission_rule("Write", &json!({})), "Write");
    }

    #[test]
    fn generate_rule_for_bash_git() {
        assert_eq!(
            generate_permission_rule("Bash", &json!({"command": "git status"})),
            "Bash(git *)"
        );
    }

    #[test]
    fn generate_rule_for_bash_cargo() {
        assert_eq!(
            generate_permission_rule("Bash", &json!({"command": "cargo test --all"})),
            "Bash(cargo *)"
        );
    }

    #[test]
    fn generate_rule_for_bash_empty_command() {
        // No "command" key at all still yields the "unknown" placeholder.
        assert_eq!(generate_permission_rule("Bash", &json!({})), "Bash(unknown *)");
    }

    #[test]
    fn generate_rule_for_mcp_tool() {
        assert_eq!(
            generate_permission_rule("mcp__huskies__create_story", &json!({"name": "foo"})),
            "mcp__huskies__create_story"
        );
    }

    #[test]
    fn is_dominated_by_exact_wildcard() {
        let allow = [json!("mcp__huskies__*")];
        assert!(is_dominated_by_wildcard("mcp__huskies__create_story", &allow));
    }

    #[test]
    fn is_not_dominated_by_different_prefix() {
        let allow = [json!("mcp__other__*")];
        assert!(!is_dominated_by_wildcard("mcp__huskies__create_story", &allow));
    }

    #[test]
    fn is_not_dominated_when_list_is_empty() {
        assert!(!is_dominated_by_wildcard("Edit", &[]));
    }
}
+184
View File
@@ -0,0 +1,184 @@
//! Pure event-buffer types — no side effects.
//!
//! `StoredEvent` and `EventBuffer` contain only data-transformation and
//! structural logic; all I/O (clocks, spawned tasks) lives in `io.rs`.
use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
/// Maximum number of events retained in the in-memory buffer.
pub const MAX_BUFFER_SIZE: usize = 500;

/// A pipeline event stored in the event buffer with a timestamp.
///
/// Serialised with an adjacent `"type"` tag in `snake_case` (e.g.
/// `"stage_transition"`), so the JSON shape is part of the wire contract.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum StoredEvent {
    /// A work item transitioned between pipeline stages.
    StageTransition {
        /// Work item ID (e.g. `"42_story_my_feature"`).
        story_id: String,
        /// The stage the item moved FROM (display name, e.g. `"Current"`).
        from_stage: String,
        /// The stage the item moved TO (directory key, e.g. `"3_qa"`).
        to_stage: String,
        /// Unix timestamp in milliseconds when this event was recorded.
        timestamp_ms: u64,
    },
    /// A merge operation failed for a story.
    MergeFailure {
        /// Work item ID (e.g. `"42_story_my_feature"`).
        story_id: String,
        /// Human-readable description of the failure.
        reason: String,
        /// Unix timestamp in milliseconds when this event was recorded.
        timestamp_ms: u64,
    },
    /// A story was blocked (e.g. retry limit exceeded).
    StoryBlocked {
        /// Work item ID (e.g. `"42_story_my_feature"`).
        story_id: String,
        /// Human-readable reason the story was blocked.
        reason: String,
        /// Unix timestamp in milliseconds when this event was recorded.
        timestamp_ms: u64,
    },
}

impl StoredEvent {
    /// Returns the `timestamp_ms` field common to all event variants.
    pub fn timestamp_ms(&self) -> u64 {
        // All variants carry the field, so a single or-pattern covers them.
        match *self {
            Self::StageTransition { timestamp_ms, .. }
            | Self::MergeFailure { timestamp_ms, .. }
            | Self::StoryBlocked { timestamp_ms, .. } => timestamp_ms,
        }
    }
}
/// Shared, thread-safe ring buffer of recent pipeline events.
///
/// Cloning an `EventBuffer` clones the inner `Arc`, so every clone observes
/// the same queue — this is how the background subscriber task and the HTTP
/// handler share state. The `Mutex` guards the `VecDeque`.
#[derive(Clone, Debug)]
pub struct EventBuffer(Arc<Mutex<VecDeque<StoredEvent>>>);

impl EventBuffer {
    /// Create a new, empty event buffer.
    pub fn new() -> Self {
        Self(Arc::new(Mutex::new(VecDeque::new())))
    }

    /// Append `event`, evicting the oldest entries so the buffer never
    /// exceeds [`MAX_BUFFER_SIZE`].
    pub fn push(&self, event: StoredEvent) {
        let mut queue = self.0.lock().unwrap();
        while queue.len() >= MAX_BUFFER_SIZE {
            queue.pop_front();
        }
        queue.push_back(event);
    }

    /// Return all events whose `timestamp_ms` is strictly greater than `since_ms`.
    pub fn events_since(&self, since_ms: u64) -> Vec<StoredEvent> {
        self.0
            .lock()
            .unwrap()
            .iter()
            .filter(|event| event.timestamp_ms() > since_ms)
            .cloned()
            .collect()
    }
}

impl Default for EventBuffer {
    fn default() -> Self {
        Self::new()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn push_and_retrieve_events() {
        let buffer = EventBuffer::new();
        buffer.push(StoredEvent::MergeFailure {
            story_id: "42_story_x".to_string(),
            reason: "conflict".to_string(),
            timestamp_ms: 1000,
        });
        buffer.push(StoredEvent::StoryBlocked {
            story_id: "43_story_y".to_string(),
            reason: "retry limit".to_string(),
            timestamp_ms: 2000,
        });
        assert_eq!(buffer.events_since(0).len(), 2);
        let newer = buffer.events_since(1000);
        assert_eq!(newer.len(), 1);
        assert!(matches!(newer[0], StoredEvent::StoryBlocked { .. }));
    }

    #[test]
    fn evicts_oldest_when_full() {
        let buffer = EventBuffer::new();
        // One more push than the capacity forces a single eviction.
        for i in 0..=MAX_BUFFER_SIZE {
            buffer.push(StoredEvent::MergeFailure {
                story_id: format!("{i}_story_x"),
                reason: "x".to_string(),
                timestamp_ms: i as u64,
            });
        }
        let kept = buffer.events_since(0);
        assert_eq!(kept.len(), MAX_BUFFER_SIZE);
        assert!(kept.iter().all(|e| e.timestamp_ms() > 0));
    }

    #[test]
    fn timestamp_ms_accessor_for_all_variants() {
        let variants = [
            StoredEvent::StageTransition {
                story_id: "1".to_string(),
                from_stage: "2_current".to_string(),
                to_stage: "3_qa".to_string(),
                timestamp_ms: 100,
            },
            StoredEvent::MergeFailure {
                story_id: "2".to_string(),
                reason: "x".to_string(),
                timestamp_ms: 200,
            },
            StoredEvent::StoryBlocked {
                story_id: "3".to_string(),
                reason: "y".to_string(),
                timestamp_ms: 300,
            },
        ];
        for (event, expected) in variants.iter().zip([100u64, 200, 300]) {
            assert_eq!(event.timestamp_ms(), expected);
        }
    }

    #[test]
    fn events_since_filters_by_timestamp() {
        let buffer = EventBuffer::new();
        for ts in [100u64, 200, 300] {
            buffer.push(StoredEvent::MergeFailure {
                story_id: "x".to_string(),
                reason: "r".to_string(),
                timestamp_ms: ts,
            });
        }
        // The bound is exclusive: an event at exactly 100 is filtered out.
        let newer = buffer.events_since(100);
        assert_eq!(newer.len(), 2);
        assert!(newer.iter().all(|e| e.timestamp_ms() > 100));
    }

    #[test]
    fn default_creates_empty_buffer() {
        assert!(EventBuffer::default().events_since(0).is_empty());
    }
}
+67
View File
@@ -0,0 +1,67 @@
//! Events I/O wrappers — the ONLY place in `service/events/` that may perform
//! side effects such as reading the system clock or spawning async tasks.
use crate::io::watcher::WatcherEvent;
use tokio::sync::broadcast;
use super::buffer::{EventBuffer, StoredEvent};
/// Returns the current Unix timestamp in milliseconds.
///
/// Clamps to `0` if the system clock reports a time before the Unix epoch.
pub(super) fn now_ms() -> u64 {
    use std::time::{SystemTime, UNIX_EPOCH};
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_or(0, |d| d.as_millis() as u64)
}
/// Spawn a background task that consumes [`WatcherEvent`] broadcasts and
/// stores relevant events in `buffer`.
///
/// Only [`WatcherEvent::WorkItem`] (with a known `from_stage`),
/// [`WatcherEvent::MergeFailure`], and [`WatcherEvent::StoryBlocked`]
/// variants are stored. All other variants are silently ignored. The task
/// exits when the watcher channel closes; lagging only logs and continues.
pub fn subscribe_to_watcher(buffer: EventBuffer, mut rx: broadcast::Receiver<WatcherEvent>) {
    tokio::spawn(async move {
        loop {
            // First resolve channel-level outcomes, then dispatch on the event.
            let event = match rx.recv().await {
                Ok(event) => event,
                Err(broadcast::error::RecvError::Lagged(n)) => {
                    crate::slog!("[events] Subscriber lagged, skipped {n} events");
                    continue;
                }
                Err(broadcast::error::RecvError::Closed) => {
                    crate::slog!("[events] Watcher channel closed; stopping event subscriber");
                    break;
                }
            };
            match event {
                // Only transitions with a known origin stage are recorded.
                WatcherEvent::WorkItem {
                    stage,
                    item_id,
                    from_stage: Some(from),
                    ..
                } => {
                    buffer.push(StoredEvent::StageTransition {
                        story_id: item_id,
                        from_stage: from,
                        to_stage: stage,
                        timestamp_ms: now_ms(),
                    });
                }
                WatcherEvent::MergeFailure { story_id, reason } => {
                    buffer.push(StoredEvent::MergeFailure {
                        story_id,
                        reason,
                        timestamp_ms: now_ms(),
                    });
                }
                WatcherEvent::StoryBlocked { story_id, reason } => {
                    buffer.push(StoredEvent::StoryBlocked {
                        story_id,
                        reason,
                        timestamp_ms: now_ms(),
                    });
                }
                _ => {}
            }
        }
    });
}
+45
View File
@@ -0,0 +1,45 @@
//! Events service — public API for the events domain.
//!
//! This module re-exports the pure buffer types from `buffer.rs` and the
//! side-effectful watcher subscription from `io.rs`. HTTP handlers call
//! these exports instead of containing the logic inline.
//!
//! Conventions: `docs/architecture/service-modules.md`
pub mod buffer;
pub(super) mod io;
pub use buffer::{EventBuffer, StoredEvent};
// Re-exported for tests (http::events uses it via `use super::*`).
#[allow(unused_imports)]
pub use buffer::MAX_BUFFER_SIZE;
pub use io::subscribe_to_watcher;
// ── Error type ────────────────────────────────────────────────────────────────

/// Typed errors returned by `service::events` functions.
///
/// Operations on the in-memory buffer are currently infallible; this enum
/// exists to satisfy the service-module convention and leaves room for
/// future failure modes (e.g. persistence).
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// A serialisation or internal error occurred.
    Internal(String),
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum, so the pattern is irrefutable.
        let Self::Internal(msg) = self;
        write!(f, "Events error: {msg}")
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Return all events in `buffer` recorded after `since_ms` milliseconds.
pub fn events_since(buffer: &EventBuffer, since_ms: u64) -> Vec<StoredEvent> {
buffer.events_since(since_ms)
}
+84
View File
@@ -0,0 +1,84 @@
//! File I/O — the ONLY place in `service/file_io/` that may perform
//! filesystem reads, writes, shell execution, or other side effects.
//!
//! Every function here is a thin adapter that converts lower-level
//! `String` errors into the typed [`super::Error`] variants.
use super::Error;
use crate::io::fs::FileEntry;
use crate::io::search::SearchResult;
use crate::io::shell::CommandOutput;
use crate::state::SessionState;
/// Read a project-relative file via `io::fs`; failures become [`Error::Filesystem`].
pub(super) async fn read_file(path: String, state: &SessionState) -> Result<String, Error> {
    crate::io::fs::read_file(path, state)
        .await
        .map_err(Error::Filesystem)
}

/// Write `content` to a project-relative path via `io::fs`.
pub(super) async fn write_file(
    path: String,
    content: String,
    state: &SessionState,
) -> Result<(), Error> {
    crate::io::fs::write_file(path, content, state)
        .await
        .map_err(Error::Filesystem)
}

/// List directory entries at a project-relative path via `io::fs`.
pub(super) async fn list_directory(
    path: String,
    state: &SessionState,
) -> Result<Vec<FileEntry>, Error> {
    crate::io::fs::list_directory(path, state)
        .await
        .map_err(Error::Filesystem)
}

/// List directory entries at an absolute path (not project-scoped).
pub(super) async fn list_directory_absolute(path: String) -> Result<Vec<FileEntry>, Error> {
    crate::io::fs::list_directory_absolute(path)
        .await
        .map_err(Error::Filesystem)
}

/// Create a directory (and parents) at an absolute path, discarding the
/// underlying call's success value.
pub(super) async fn create_directory_absolute(path: String) -> Result<(), Error> {
    crate::io::fs::create_directory_absolute(path)
        .await
        .map_err(Error::Filesystem)
        .map(|_| ())
}

/// Return the current user's home directory path via `io::fs`.
pub(super) fn get_home_directory() -> Result<String, Error> {
    crate::io::fs::get_home_directory().map_err(Error::Filesystem)
}

/// Recursively list project files via `io::fs`.
pub(super) async fn list_project_files(state: &SessionState) -> Result<Vec<String>, Error> {
    crate::io::fs::list_project_files(state)
        .await
        .map_err(Error::Filesystem)
}

/// Search project file contents for `query` via `io::search`.
pub(super) async fn search_files(
    query: String,
    state: &SessionState,
) -> Result<Vec<SearchResult>, Error> {
    crate::io::search::search_files(query, state)
        .await
        .map_err(Error::Filesystem)
}

/// Execute a shell command via `io::shell`.
///
/// Allowlist rejections are classified as [`Error::Validation`] (→ 400) by
/// sniffing the error text; everything else becomes [`Error::Filesystem`].
/// NOTE(review): the "not in the allowlist" substring match couples this to
/// the exact wording of `io::shell`'s error message — confirm before changing
/// that message.
pub(super) async fn exec_shell(
    command: String,
    args: Vec<String>,
    state: &SessionState,
) -> Result<CommandOutput, Error> {
    crate::io::shell::exec_shell(command, args, state)
        .await
        .map_err(|e| {
            if e.contains("not in the allowlist") {
                Error::Validation(e)
            } else {
                Error::Filesystem(e)
            }
        })
}
+183
View File
@@ -0,0 +1,183 @@
//! File I/O service — public API for filesystem and shell operations.
//!
//! Exposes functions for reading, writing, and listing files scoped to the
//! active project root, plus utilities for absolute-path and shell operations.
//! HTTP handlers call these functions instead of touching `io::fs` directly.
//!
//! Conventions: `docs/architecture/service-modules.md`
pub(super) mod io;
use crate::state::SessionState;
/// Re-export the canonical filesystem entry type so HTTP handlers don't need
/// to import from `io::fs` directly.
pub use crate::io::fs::FileEntry;
/// Re-export the search result type.
pub use crate::io::search::SearchResult;
/// Re-export the shell output type.
pub use crate::io::shell::CommandOutput;
// ── Error type ────────────────────────────────────────────────────────────────

/// Typed errors returned by `service::file_io` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Filesystem`] → 400 Bad Request (or 404 when appropriate)
#[derive(Debug)]
pub enum Error {
    /// The request was invalid (e.g. path traversal attempt, command not allowlisted).
    Validation(String),
    /// A filesystem or shell operation failed (file not found, permission denied, etc.).
    Filesystem(String),
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (label, msg) = match self {
            Self::Validation(msg) => ("Validation", msg),
            Self::Filesystem(msg) => ("Filesystem", msg),
        };
        write!(f, "{label} error: {msg}")
    }
}

// ── Path validation ───────────────────────────────────────────────────────────

/// Validate a relative path, rejecting directory traversal attempts.
///
/// Any literal `..` anywhere in the string is rejected — including inside
/// otherwise-valid names such as `a/..hidden`.
///
/// # Errors
/// Returns [`Error::Validation`] when the path contains `..`.
pub fn validate_path(path: &str) -> Result<(), Error> {
    if !path.contains("..") {
        return Ok(());
    }
    Err(Error::Validation(
        "Security Violation: Directory traversal ('..') is not allowed.".to_string(),
    ))
}
// ── Public API ────────────────────────────────────────────────────────────────

/// Read a file from the project root.
///
/// # Errors
/// [`Error::Validation`] when `path` contains `..`; [`Error::Filesystem`]
/// when the underlying read fails.
pub async fn read_file(path: String, state: &SessionState) -> Result<String, Error> {
    validate_path(&path)?;
    io::read_file(path, state).await
}

/// Write a file to the project root, creating parent directories as needed.
///
/// # Errors
/// [`Error::Validation`] when `path` contains `..`; [`Error::Filesystem`]
/// when the underlying write fails.
pub async fn write_file(path: String, content: String, state: &SessionState) -> Result<(), Error> {
    validate_path(&path)?;
    io::write_file(path, content, state).await
}

/// List directory entries at a project-relative path.
///
/// NOTE(review): unlike `read_file`/`write_file`, this does not call
/// `validate_path` — confirm that `io::fs::list_directory` scopes the path
/// itself, or `..` segments could escape the project root.
pub async fn list_directory(path: String, state: &SessionState) -> Result<Vec<FileEntry>, Error> {
    io::list_directory(path, state).await
}

/// List directory entries at an absolute path (not scoped to the project root).
pub async fn list_directory_absolute(path: String) -> Result<Vec<FileEntry>, Error> {
    io::list_directory_absolute(path).await
}

/// Create a directory (and all parents) at an absolute path.
pub async fn create_directory_absolute(path: String) -> Result<(), Error> {
    io::create_directory_absolute(path).await
}

/// Return the current user's home directory path.
pub fn get_home_directory() -> Result<String, Error> {
    io::get_home_directory()
}

/// List all files in the project recursively, respecting `.gitignore`.
pub async fn list_project_files(state: &SessionState) -> Result<Vec<String>, Error> {
    io::list_project_files(state).await
}

/// Search the project for files whose contents contain `query`.
pub async fn search_files(query: String, state: &SessionState) -> Result<Vec<SearchResult>, Error> {
    io::search_files(query, state).await
}

/// Execute an allowlisted shell command in the project root directory.
///
/// # Errors
/// [`Error::Validation`] when the command is not allowlisted;
/// [`Error::Filesystem`] for other execution failures.
pub async fn exec_shell(
    command: String,
    args: Vec<String>,
    state: &SessionState,
) -> Result<CommandOutput, Error> {
    io::exec_shell(command, args, state).await
}
// ── Tests ─────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    // Pure unit tests for path validation and error formatting — no tempdir,
    // no network.

    #[test]
    fn validate_path_accepts_simple_relative_path() {
        assert!(validate_path("src/main.rs").is_ok());
    }

    #[test]
    fn validate_path_accepts_dot_path() {
        assert!(validate_path(".").is_ok());
    }

    #[test]
    fn validate_path_accepts_root_relative() {
        assert!(validate_path("subdir/file.txt").is_ok());
    }

    #[test]
    fn validate_path_rejects_parent_traversal() {
        assert!(matches!(
            validate_path("../etc/passwd"),
            Err(Error::Validation(_))
        ));
    }

    #[test]
    fn validate_path_rejects_embedded_traversal() {
        assert!(matches!(
            validate_path("src/../../../etc/passwd"),
            Err(Error::Validation(_))
        ));
    }

    #[test]
    fn validate_path_rejects_double_dot_only() {
        assert!(matches!(validate_path(".."), Err(Error::Validation(_))));
    }

    #[test]
    fn validate_path_accepts_file_with_single_dots_in_name() {
        // Filenames like "config.dev.toml" have single dots — must be accepted.
        assert!(validate_path("config.dev.toml").is_ok());
    }

    #[test]
    fn validate_path_rejects_traversal_with_url_encoding_lookalike() {
        // A literal ".." sequence anywhere in the string is rejected.
        assert!(matches!(
            validate_path("valid/..hidden"),
            Err(Error::Validation(_))
        ));
    }

    #[test]
    fn error_display_validation() {
        assert!(Error::Validation("bad path".to_string())
            .to_string()
            .contains("bad path"));
    }

    #[test]
    fn error_display_filesystem() {
        assert!(Error::Filesystem("file not found".to_string())
            .to_string()
            .contains("file not found"));
    }

    #[test]
    fn error_display_filesystem_contains_message() {
        assert!(Error::Filesystem("task panic".to_string())
            .to_string()
            .contains("task panic"));
    }
}
+136
View File
@@ -0,0 +1,136 @@
//! Gateway aggregation — pure functions for cross-project pipeline status.
//!
//! Formats aggregated pipeline data into compact text suitable for chat
//! transports (Matrix, Slack). Uses `service::pipeline::aggregate_pipeline_counts`
//! for per-project parsing.
use serde_json::Value;
use std::collections::BTreeMap;
/// Format an aggregated status map as a compact, one-line-per-project string
/// suitable for Matrix/Slack messages.
///
/// Healthy projects: `🟢 **name** — B:5 C:2 Q:1 M:0 D:12`
/// Blocked items appended on the same line: `| blocked: 42 [story]`
/// Unreachable projects: `🔴 **name** — UNREACHABLE`
pub fn format_aggregate_status_compact(statuses: &BTreeMap<String, Value>) -> String {
let mut lines: Vec<String> = Vec::new();
for (name, status) in statuses {
if let Some(err) = status.get("error").and_then(|e| e.as_str()) {
lines.push(format!("\u{1F534} **{name}** — UNREACHABLE: {err}"));
} else {
let counts = status.get("counts");
let b = counts
.and_then(|c| c.get("backlog"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let c = counts
.and_then(|c| c.get("current"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let q = counts
.and_then(|c| c.get("qa"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let m = counts
.and_then(|c| c.get("merge"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let d = counts
.and_then(|c| c.get("done"))
.and_then(|n| n.as_u64())
.unwrap_or(0);
let blocked_arr = status
.get("blocked")
.and_then(|a| a.as_array())
.cloned()
.unwrap_or_default();
let indicator = if blocked_arr.is_empty() {
"\u{1F7E2}" // 🟢
} else {
"\u{1F7E0}" // 🟠
};
let mut line = format!("{indicator} **{name}** — B:{b} C:{c} Q:{q} M:{m} D:{d}");
if !blocked_arr.is_empty() {
let ids: Vec<String> = blocked_arr
.iter()
.filter_map(|item| item.get("story_id").and_then(|s| s.as_str()))
.map(|s| s.to_string())
.collect();
line.push_str(&format!(" | blocked: {}", ids.join(", ")));
}
lines.push(line);
}
}
if lines.is_empty() {
return "No projects registered.".to_string();
}
format!("**All Projects**\n\n{}", lines.join("\n\n"))
}
// ── Tests ────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn format_healthy_project() {
        let mut statuses = BTreeMap::new();
        statuses.insert(
            "huskies".to_string(),
            json!({
                "counts": { "backlog": 5, "current": 2, "qa": 1, "merge": 0, "done": 12 },
                "blocked": []
            }),
        );
        let rendered = format_aggregate_status_compact(&statuses);
        for expected in ["huskies", "B:5", "C:2", "Q:1", "D:12"] {
            assert!(rendered.contains(expected), "missing {expected}");
        }
        assert!(!rendered.contains("blocked:"));
    }

    #[test]
    fn format_unreachable_project() {
        let mut statuses = BTreeMap::new();
        statuses.insert("broken".to_string(), json!({ "error": "connection refused" }));
        let rendered = format_aggregate_status_compact(&statuses);
        for expected in ["broken", "UNREACHABLE", "connection refused"] {
            assert!(rendered.contains(expected), "missing {expected}");
        }
    }

    #[test]
    fn format_blocked_items_shown() {
        let mut statuses = BTreeMap::new();
        statuses.insert(
            "myproj".to_string(),
            json!({
                "counts": { "backlog": 0, "current": 1, "qa": 0, "merge": 0, "done": 0 },
                "blocked": [{ "story_id": "42_story_x", "name": "X", "stage": "current", "reason": "blocked" }]
            }),
        );
        let rendered = format_aggregate_status_compact(&statuses);
        assert!(rendered.contains("blocked:"));
        assert!(rendered.contains("42_story_x"));
    }

    #[test]
    fn format_empty_projects() {
        assert_eq!(
            format_aggregate_status_compact(&BTreeMap::new()),
            "No projects registered."
        );
    }
}
+191
View File
@@ -0,0 +1,191 @@
//! Gateway configuration types — pure parsing and validation.
//!
//! Contains `ProjectEntry`, `GatewayConfig`, and validation logic.
//! All filesystem I/O (loading from disk) lives in `io.rs`.
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
/// A single project entry in `projects.toml`.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProjectEntry {
    /// Base URL of the project's huskies container (e.g. `http://localhost:3001`).
    pub url: String,
}

/// Top-level `projects.toml` config.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct GatewayConfig {
    /// Map of project name → container URL. A `BTreeMap` keeps iteration in
    /// alphabetical order; `#[serde(default)]` makes a missing `[projects]`
    /// table parse as an empty map rather than a deserialisation error.
    #[serde(default)]
    pub projects: BTreeMap<String, ProjectEntry>,
}
/// Validate that a gateway config has at least one project.
///
/// Returns the name of the first project (alphabetically, since the map is a
/// `BTreeMap`) on success, or an error message if the config is empty.
pub fn validate_config(config: &GatewayConfig) -> Result<String, String> {
    config
        .projects
        .keys()
        .next()
        .cloned()
        .ok_or_else(|| "projects.toml must define at least one project".to_string())
}
/// Validate that a project name exists in the given project map.
///
/// Returns the project's URL on success; on failure the error message lists
/// every available project name.
pub fn validate_project_exists(
    projects: &BTreeMap<String, ProjectEntry>,
    name: &str,
) -> Result<String, String> {
    match projects.get(name) {
        Some(entry) => Ok(entry.url.clone()),
        None => {
            let available = projects
                .keys()
                .map(String::as_str)
                .collect::<Vec<_>>()
                .join(", ");
            Err(format!("unknown project '{name}'. Available: {available}"))
        }
    }
}
/// Escape a string as a TOML basic (double-quoted) string.
///
/// Escapes backslashes and double quotes; other characters pass through.
pub fn toml_string(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for ch in s.chars() {
        match ch {
            '\\' => out.push_str("\\\\"),
            '"' => out.push_str("\\\""),
            other => out.push(other),
        }
    }
    out.push('"');
    out
}

/// Serialize a `bot.toml` content string from the given fields.
///
/// `transport` selects the layout: `"slack"` emits Slack credentials; any
/// other value falls back to the Matrix layout. Missing optional fields are
/// written as empty strings.
pub fn serialize_bot_config(
    transport: &str,
    homeserver: Option<&str>,
    username: Option<&str>,
    password: Option<&str>,
    slack_bot_token: Option<&str>,
    slack_signing_secret: Option<&str>,
) -> String {
    if transport == "slack" {
        format!(
            "enabled = true\ntransport = \"slack\"\n\nslack_bot_token = {}\nslack_signing_secret = {}\nslack_channel_ids = []\n",
            toml_string(slack_bot_token.unwrap_or_default()),
            toml_string(slack_signing_secret.unwrap_or_default()),
        )
    } else {
        format!(
            "enabled = true\ntransport = \"matrix\"\n\nhomeserver = {}\nusername = {}\npassword = {}\nroom_ids = []\nallowed_users = []\n",
            toml_string(homeserver.unwrap_or_default()),
            toml_string(username.unwrap_or_default()),
            toml_string(password.unwrap_or_default()),
        )
    }
}
// ── Tests ────────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    /// Shorthand for building a `ProjectEntry` fixture.
    fn entry(url: &str) -> ProjectEntry {
        ProjectEntry { url: url.into() }
    }

    #[test]
    fn parse_valid_projects_toml() {
        let toml_str = r#"
            [projects.huskies]
            url = "http://localhost:3001"

            [projects.robot-studio]
            url = "http://localhost:3002"
        "#;
        let config: GatewayConfig = toml::from_str(toml_str).unwrap();
        assert_eq!(config.projects.len(), 2);
        assert_eq!(config.projects["huskies"].url, "http://localhost:3001");
        assert_eq!(config.projects["robot-studio"].url, "http://localhost:3002");
    }

    #[test]
    fn parse_empty_projects_toml() {
        let config: GatewayConfig = toml::from_str("[projects]\n").unwrap();
        assert!(config.projects.is_empty());
    }

    #[test]
    fn validate_config_rejects_empty() {
        let config = GatewayConfig {
            projects: BTreeMap::new(),
        };
        assert!(validate_config(&config).is_err());
    }

    #[test]
    fn validate_config_returns_first_project_name() {
        let mut projects = BTreeMap::new();
        projects.insert("beta".to_string(), entry("http://b"));
        projects.insert("alpha".to_string(), entry("http://a"));
        let config = GatewayConfig { projects };
        // BTreeMap orders keys, so "alpha" wins even though "beta" was inserted first.
        assert_eq!(validate_config(&config).unwrap(), "alpha");
    }

    #[test]
    fn validate_project_exists_succeeds() {
        let mut projects = BTreeMap::new();
        projects.insert("p1".to_string(), entry("http://p1"));
        assert_eq!(validate_project_exists(&projects, "p1").unwrap(), "http://p1");
    }

    #[test]
    fn validate_project_exists_fails() {
        let projects = BTreeMap::new();
        assert!(validate_project_exists(&projects, "missing").is_err());
    }

    #[test]
    fn toml_string_escapes_quotes() {
        assert_eq!(toml_string(r#"a"b"#), r#""a\"b""#);
    }

    #[test]
    fn toml_string_escapes_backslashes() {
        assert_eq!(toml_string(r"a\b"), r#""a\\b""#);
    }

    #[test]
    fn serialize_bot_config_matrix() {
        let content = serialize_bot_config(
            "matrix",
            Some("https://mx.io"),
            Some("@bot:mx.io"),
            Some("pass"),
            None,
            None,
        );
        assert!(content.contains("transport = \"matrix\""));
        assert!(content.contains("homeserver = \"https://mx.io\""));
    }

    #[test]
    fn serialize_bot_config_slack() {
        let content =
            serialize_bot_config("slack", None, None, None, Some("xoxb-123"), Some("secret"));
        assert!(content.contains("transport = \"slack\""));
        assert!(content.contains("slack_bot_token = \"xoxb-123\""));
    }
}
+407
View File
@@ -0,0 +1,407 @@
//! Gateway I/O — the ONLY place in `service/gateway/` that may perform side effects.
//!
//! Side effects here include: reading/writing config and agent state files,
//! HTTP requests to project containers (proxying, health checks, polling),
//! spawning the Matrix bot task, and the notification poller background task.
use super::config::{GatewayConfig, ProjectEntry};
use super::registration::JoinedAgent;
pub use reqwest::Client;
use serde_json::{Value, json};
use std::collections::{BTreeMap, HashMap};
use std::path::Path;
// ── Config I/O ───────────────────────────────────────────────────────────────

/// Load gateway config from a `projects.toml` file.
///
/// # Errors
/// Returns a message when the file cannot be read or is not valid TOML.
pub fn load_config(path: &Path) -> Result<GatewayConfig, String> {
    std::fs::read_to_string(path)
        .map_err(|e| format!("cannot read {}: {e}", path.display()))
        .and_then(|contents| {
            toml::from_str(&contents).map_err(|e| format!("invalid projects.toml: {e}"))
        })
}
/// Load persisted agents from `<config_dir>/gateway_agents.json`.
/// Returns an empty list if the file does not exist or cannot be parsed.
pub fn load_agents(config_dir: &Path) -> Vec<JoinedAgent> {
    let path = config_dir.join("gateway_agents.json");
    std::fs::read(&path)
        .ok()
        .and_then(|data| serde_json::from_slice(&data).ok())
        .unwrap_or_default()
}
/// Persist the current projects map to `<config_dir>/projects.toml`.
/// Silently ignores serialisation/write errors; skipped entirely when
/// `config_dir` is empty.
pub async fn save_config(projects: &BTreeMap<String, ProjectEntry>, config_dir: &Path) {
    if config_dir.as_os_str().is_empty() {
        return;
    }
    let config = GatewayConfig {
        projects: projects.clone(),
    };
    let Ok(data) = toml::to_string_pretty(&config) else {
        return;
    };
    let _ = tokio::fs::write(config_dir.join("projects.toml"), data).await;
}
/// Persist the current agent list to `<config_dir>/gateway_agents.json`.
/// Silently ignores serialisation/write errors; skipped entirely when
/// `config_dir` is empty.
pub async fn save_agents(agents: &[JoinedAgent], config_dir: &Path) {
    // Same empty-dir guard as `save_config`. (The previous
    // `config_dir == Path::new("")` comparison was behaviourally equivalent
    // but inconsistent with its sibling.)
    if config_dir.as_os_str().is_empty() {
        return;
    }
    let path = config_dir.join("gateway_agents.json");
    if let Ok(data) = serde_json::to_vec_pretty(agents) {
        let _ = tokio::fs::write(&path, data).await;
    }
}
// ── Bot config I/O ──────────────────────────────────────────────────────────
/// Read the current raw bot.toml as key/value pairs for the configuration UI.
/// Returns `None` values if the file does not exist.
pub fn read_bot_config_raw(config_dir: &Path) -> BotConfigFields {
    let path = config_dir.join(".huskies").join("bot.toml");
    // A missing or unparsable file yields all-default fields.
    let Ok(content) = std::fs::read_to_string(&path) else {
        return BotConfigFields::default();
    };
    let Ok(table) = toml::from_str::<toml::Value>(&content) else {
        return BotConfigFields::default();
    };
    // Look up an optional top-level string key.
    let field = |key: &str| -> Option<String> {
        table
            .get(key)
            .and_then(toml::Value::as_str)
            .map(str::to_string)
    };
    BotConfigFields {
        transport: field("transport").unwrap_or_else(|| "matrix".to_string()),
        homeserver: field("homeserver"),
        username: field("username"),
        password: field("password"),
        slack_bot_token: field("slack_bot_token"),
        slack_signing_secret: field("slack_signing_secret"),
    }
}
/// Raw bot.toml fields for the configuration UI.
///
/// Produced by [`read_bot_config_raw`]; `Default` yields an empty transport
/// string and `None` for every other field.
#[derive(Default)]
pub struct BotConfigFields {
    /// Transport name; `read_bot_config_raw` falls back to `"matrix"` when absent.
    pub transport: String,
    /// `homeserver` value from bot.toml, if present.
    pub homeserver: Option<String>,
    /// `username` value from bot.toml, if present.
    pub username: Option<String>,
    /// `password` value from bot.toml, if present.
    pub password: Option<String>,
    /// `slack_bot_token` value from bot.toml, if present.
    pub slack_bot_token: Option<String>,
    /// `slack_signing_secret` value from bot.toml, if present.
    pub slack_signing_secret: Option<String>,
}
/// Write a `bot.toml` from the given content string.
///
/// Creates `<config_dir>/.huskies/` if needed, then writes the file,
/// returning a human-readable error string on failure.
pub fn write_bot_config(config_dir: &Path, content: &str) -> Result<(), String> {
    let huskies_dir = config_dir.join(".huskies");
    std::fs::create_dir_all(&huskies_dir)
        .map_err(|e| format!("cannot create .huskies dir: {e}"))?;
    std::fs::write(huskies_dir.join("bot.toml"), content)
        .map_err(|e| format!("cannot write bot.toml: {e}"))
}
// ── MCP proxy I/O ───────────────────────────────────────────────────────────
/// Proxy a raw MCP request body to the given project URL.
///
/// Posts `request_bytes` to `<base_url>/mcp` and returns the raw response
/// body, or a human-readable error string on transport failure.
pub async fn proxy_mcp_call(
    client: &Client,
    base_url: &str,
    request_bytes: &[u8],
) -> Result<Vec<u8>, String> {
    let mcp_url = format!("{}/mcp", base_url.trim_end_matches('/'));
    let response = client
        .post(&mcp_url)
        .header("Content-Type", "application/json")
        .body(request_bytes.to_vec())
        .send()
        .await
        .map_err(|e| format!("failed to reach {mcp_url}: {e}"))?;
    let body = response
        .bytes()
        .await
        .map_err(|e| format!("failed to read response from {mcp_url}: {e}"))?;
    Ok(body.to_vec())
}
/// Fetch tools/list from a project's MCP endpoint.
///
/// Issues a JSON-RPC `tools/list` call and returns the decoded JSON reply.
pub async fn fetch_tools_list(client: &Client, base_url: &str) -> Result<Value, String> {
    let mcp_url = format!("{}/mcp", base_url.trim_end_matches('/'));
    let rpc_body = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/list",
        "params": {}
    });
    let request = client.post(&mcp_url).json(&rpc_body);
    let response = request
        .send()
        .await
        .map_err(|e| format!("failed to reach {mcp_url}: {e}"))?;
    response
        .json()
        .await
        .map_err(|e| format!("invalid JSON from upstream: {e}"))
}
/// Fetch and aggregate pipeline status for a single project URL.
pub async fn fetch_one_project_pipeline_status(url: &str, client: &Client) -> Value {
let mcp_url = format!("{}/mcp", url.trim_end_matches('/'));
let rpc_body = json!({
"jsonrpc": "2.0",
"id": 1,
"method": "tools/call",
"params": {
"name": "get_pipeline_status",
"arguments": {}
}
});
match client.post(&mcp_url).json(&rpc_body).send().await {
Ok(resp) => match resp.json::<Value>().await {
Ok(upstream) => {
if let Some(text) = upstream
.get("result")
.and_then(|r| r.get("content"))
.and_then(|c| c.get(0))
.and_then(|c| c.get("text"))
.and_then(|t| t.as_str())
{
match serde_json::from_str::<Value>(text) {
Ok(pipeline) => {
crate::service::pipeline::aggregate_pipeline_counts(&pipeline)
}
Err(_) => json!({ "error": "invalid pipeline JSON" }),
}
} else {
json!({ "error": "unexpected response shape" })
}
}
Err(e) => json!({ "error": format!("invalid response: {e}") }),
},
Err(e) => json!({ "error": format!("unreachable: {e}") }),
}
}
/// Fetch `get_pipeline_status` from every registered project URL in parallel.
///
/// Returns a map from project name to its aggregated status (or inline
/// error JSON, as produced by [`fetch_one_project_pipeline_status`]).
pub async fn fetch_all_project_pipeline_statuses(
    project_urls: &BTreeMap<String, String>,
    client: &Client,
) -> BTreeMap<String, Value> {
    use futures::future::join_all;
    // Each future owns its own name/url/client clones so they can run concurrently.
    let tasks = project_urls.iter().map(|(name, url)| {
        let (name, url, client) = (name.clone(), url.clone(), client.clone());
        async move { (name, fetch_one_project_pipeline_status(&url, &client).await) }
    });
    join_all(tasks).await.into_iter().collect()
}
/// Fetch the pipeline status from a single project for the `gateway_status` tool.
///
/// Unlike [`fetch_one_project_pipeline_status`], this returns the raw
/// upstream JSON-RPC envelope without unwrapping or aggregating it.
pub async fn fetch_pipeline_status_for_project(
    client: &Client,
    base_url: &str,
) -> Result<Value, String> {
    let mcp_url = format!("{}/mcp", base_url.trim_end_matches('/'));
    let rpc_body = json!({
        "jsonrpc": "2.0",
        "id": 1,
        "method": "tools/call",
        "params": {
            "name": "get_pipeline_status",
            "arguments": {}
        }
    });
    let request = client.post(&mcp_url).json(&rpc_body);
    let resp = request
        .send()
        .await
        .map_err(|e| format!("failed to reach {mcp_url}: {e}"))?;
    resp.json()
        .await
        .map_err(|e| format!("invalid upstream response: {e}"))
}
/// Check health of a single project URL.
///
/// Returns `Ok(true)` for a 2xx response from `<base_url>/health`,
/// `Ok(false)` for any other status, and `Err` if the host is unreachable.
pub async fn check_project_health(client: &Client, base_url: &str) -> Result<bool, String> {
    let health_url = format!("{}/health", base_url.trim_end_matches('/'));
    client
        .get(&health_url)
        .send()
        .await
        .map(|resp| resp.status().is_success())
        .map_err(|e| format!("unreachable: {e}"))
}
// ── Gateway MCP JSON ────────────────────────────────────────────────────────
/// Write (or overwrite) a `.mcp.json` in `config_dir` that points Claude Code
/// CLI at the gateway's own `/mcp` endpoint.
pub fn write_gateway_mcp_json(config_dir: &Path, port: u16) -> Result<(), std::io::Error> {
let host = std::env::var("HUSKIES_HOST").unwrap_or_else(|_| "127.0.0.1".to_string());
let url = format!("http://{host}:{port}/mcp");
let content = json!({
"mcpServers": {
"huskies": {
"type": "http",
"url": url
}
}
});
let path = config_dir.join(".mcp.json");
std::fs::write(&path, serde_json::to_string_pretty(&content).unwrap())?;
crate::slog!("[gateway] Wrote {} pointing to {}", path.display(), url);
Ok(())
}
// ── Init project I/O ────────────────────────────────────────────────────────
/// Check if a path already has a `.huskies/` directory.
pub fn has_huskies_dir(path: &Path) -> bool {
    let marker = path.join(".huskies");
    marker.exists()
}
/// Create a directory (and parents) if it does not exist.
///
/// `create_dir_all` is already a no-op for an existing directory, so the
/// previous `exists()` pre-check was redundant and raced (TOCTOU). It also
/// silently returned `Ok` when `path` existed as a *file*; now that case
/// surfaces as an error from `create_dir_all`.
pub fn ensure_directory(path: &Path) -> Result<(), String> {
    std::fs::create_dir_all(path)
        .map_err(|e| format!("failed to create directory '{}': {e}", path.display()))
}
/// Scaffold a huskies project at the given path.
///
/// Thin wrapper over the shared scaffold helper.
/// NOTE(review): port 3001 is hard-coded here — presumably the default
/// project container port; confirm against `io::fs::scaffold`.
pub fn scaffold_project(path: &Path) -> Result<(), String> {
    crate::io::fs::scaffold::scaffold_story_kit(path, 3001)
}
/// Initialise wizard state at the given path.
///
/// Delegates to `WizardState::init_if_missing`; per its name this is
/// expected to be a no-op when state already exists — confirm in `io::wizard`.
pub fn init_wizard_state(path: &Path) {
    crate::io::wizard::WizardState::init_if_missing(path);
}
// ── Notification poller ─────────────────────────────────────────────────────
/// Spawn a background task that polls events from all project servers.
///
/// Every `poll_interval_secs` (clamped to a minimum of 1s) the task fetches
/// `/api/events?since=<ts>` from each project and forwards each new event to
/// every room in `room_ids` via the chat transport. A per-project high-water
/// timestamp prevents re-delivering events already seen.
pub fn spawn_gateway_notification_poller(
    transport: std::sync::Arc<dyn crate::chat::ChatTransport>,
    room_ids: Vec<String>,
    project_urls: BTreeMap<String, String>,
    poll_interval_secs: u64,
) {
    tokio::spawn(async move {
        // Dedicated client with a 10s timeout so one slow project cannot
        // stall a polling cycle indefinitely.
        let client = Client::builder()
            .timeout(std::time::Duration::from_secs(10))
            .build()
            .unwrap_or_else(|_| Client::new());
        let interval = std::time::Duration::from_secs(poll_interval_secs.max(1));
        // Highest event timestamp seen per project; starts at 0 so the first
        // poll requests everything the project has stored.
        let mut last_ts: HashMap<String, u64> = project_urls
            .keys()
            .map(|name| (name.clone(), 0u64))
            .collect();
        loop {
            for (project_name, base_url) in &project_urls {
                let since = last_ts.get(project_name).copied().unwrap_or(0);
                let url = format!("{base_url}/api/events?since={since}");
                // Unreachable projects are skipped this cycle and retried next tick.
                let response = match client.get(&url).send().await {
                    Ok(r) => r,
                    Err(e) => {
                        crate::slog!(
                            "[gateway-poller] {project_name}: unreachable ({e}); skipping"
                        );
                        continue;
                    }
                };
                let events: Vec<crate::service::events::StoredEvent> = match response.json().await {
                    Ok(v) => v,
                    Err(e) => {
                        crate::slog!(
                            "[gateway-poller] {project_name}: failed to parse events: {e}"
                        );
                        continue;
                    }
                };
                for event in &events {
                    // Advance the high-water mark before sending, so a send
                    // failure does not cause the same event to be re-sent forever.
                    let ts = event.timestamp_ms();
                    if ts > *last_ts.get(project_name).unwrap_or(&0) {
                        last_ts.insert(project_name.clone(), ts);
                    }
                    let (plain, html) = super::polling::format_gateway_event(project_name, event);
                    // Fan out to every configured room; failures are logged per room.
                    for room_id in &room_ids {
                        if let Err(e) = transport.send_message(room_id, &plain, &html).await {
                            crate::slog!(
                                "[gateway-poller] Failed to send notification to {room_id}: {e}"
                            );
                        }
                    }
                }
            }
            tokio::time::sleep(interval).await;
        }
    });
}
// ── Gateway bot spawn ───────────────────────────────────────────────────────
/// Re-export type alias for the active project lock.
pub type ActiveProject = std::sync::Arc<tokio::sync::RwLock<String>>;
/// Attempt to spawn the Matrix bot against the gateway config directory.
///
/// Wires up a fresh watcher broadcast channel, a permission channel (whose
/// sender is discarded — the gateway never grants permissions this way), an
/// agent pool, and a shutdown watch channel, then delegates to
/// `matrix::spawn_bot` and returns whatever abort handle it yields.
pub fn spawn_gateway_bot(
    config_dir: &Path,
    active_project: ActiveProject,
    gateway_projects: Vec<String>,
    gateway_project_urls: BTreeMap<String, String>,
    port: u16,
) -> Option<tokio::task::AbortHandle> {
    use crate::agents::AgentPool;
    use tokio::sync::{broadcast, mpsc};
    let (watcher_tx, _) = broadcast::channel(16);
    let (_perm_tx, perm_rx) = mpsc::unbounded_channel();
    let perm_rx = std::sync::Arc::new(tokio::sync::Mutex::new(perm_rx));
    let (shutdown_tx, shutdown_rx) =
        tokio::sync::watch::channel::<Option<crate::rebuild::ShutdownReason>>(None);
    // Intentionally leak the shutdown sender so the watch channel never
    // closes: this bot is stopped by aborting its task, not by signalling.
    std::mem::forget(shutdown_tx);
    let agents = std::sync::Arc::new(AgentPool::new(port, watcher_tx.clone()));
    crate::chat::transport::matrix::spawn_bot(
        config_dir,
        watcher_tx,
        perm_rx,
        agents,
        shutdown_rx,
        Some(active_project),
        gateway_projects,
        gateway_project_urls,
    )
}
+580
View File
@@ -0,0 +1,580 @@
//! Gateway service — domain logic for the multi-project gateway.
//!
//! Follows the conventions in `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration, `GatewayState`
//! - `io.rs` — the ONLY place that performs side effects (filesystem, network, process spawn)
//! - `config.rs` — pure config types and validation
//! - `registration.rs` — pure agent registration logic
//! - `aggregation.rs` — pure cross-project pipeline formatting
//! - `polling.rs` — pure notification event formatting
pub mod aggregation;
pub mod config;
pub(crate) mod io;
pub mod polling;
pub mod registration;
pub use aggregation::format_aggregate_status_compact;
pub use config::{GatewayConfig, ProjectEntry};
pub use io::{fetch_all_project_pipeline_statuses, spawn_gateway_notification_poller};
pub use registration::JoinedAgent;
use io::Client;
use std::collections::{BTreeMap, HashMap};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::sync::Mutex as TokioMutex;
use tokio::sync::RwLock;
// ── Error type ──────────────────────────────────────────────────────────────
/// Typed errors returned by `service::gateway` functions.
///
/// Each variant carries a human-readable detail string; the [`std::fmt::Display`]
/// impl prefixes it with the variant's category.
///
/// HTTP handlers map these to appropriate status codes:
/// - [`Error::ProjectNotFound`] → 404 Not Found
/// - [`Error::UnreachableProject`] → 502 Bad Gateway
/// - [`Error::DuplicateToken`] → 409 Conflict
/// - [`Error::InvalidAgent`] → 404 Not Found / 400 Bad Request
/// - [`Error::Config`] → 400 Bad Request
/// - [`Error::Upstream`] → 502 Bad Gateway
#[derive(Debug)]
pub enum Error {
    /// A referenced project does not exist in the gateway config.
    ProjectNotFound(String),
    /// A project container is unreachable.
    UnreachableProject(String),
    /// A join token has already been consumed or a project name is taken.
    DuplicateToken(String),
    /// An agent ID is invalid or not found.
    InvalidAgent(String),
    /// A configuration value is invalid.
    Config(String),
    /// An upstream project container returned an unexpected response.
    Upstream(String),
}
impl std::fmt::Display for Error {
    /// Render the error as `"<category>: <detail>"`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (label, msg) = match self {
            Self::ProjectNotFound(m) => ("Project not found", m),
            Self::UnreachableProject(m) => ("Unreachable project", m),
            Self::DuplicateToken(m) => ("Duplicate token", m),
            Self::InvalidAgent(m) => ("Invalid agent", m),
            Self::Config(m) => ("Config error", m),
            Self::Upstream(m) => ("Upstream error", m),
        };
        write!(f, "{label}: {msg}")
    }
}
// ── Gateway state ───────────────────────────────────────────────────────────
/// A one-time join token that has been generated but not yet consumed.
pub(crate) struct PendingToken {
    /// Unix timestamp (seconds, as f64) when the token was issued.
    /// Currently never read — presumably kept for future expiry logic.
    #[allow(dead_code)]
    pub(crate) created_at: f64,
}
/// Shared gateway state threaded through HTTP handlers.
///
/// `Clone` is cheap: every mutable field is behind an `Arc`, so clones share
/// the same underlying locks and maps.
#[derive(Clone)]
pub struct GatewayState {
    /// The live set of registered projects (initially loaded from `projects.toml`).
    pub projects: Arc<RwLock<BTreeMap<String, ProjectEntry>>>,
    /// The currently active project name.
    pub active_project: Arc<RwLock<String>>,
    /// HTTP client for proxying requests to project containers.
    pub client: Client,
    /// Build agents that have joined this gateway.
    pub joined_agents: Arc<RwLock<Vec<JoinedAgent>>>,
    /// One-time join tokens that have been issued but not yet consumed.
    pub(crate) pending_tokens: Arc<RwLock<HashMap<String, PendingToken>>>,
    /// Directory containing `projects.toml` and the `.huskies/` subfolder.
    pub config_dir: PathBuf,
    /// HTTP port the gateway is listening on.
    pub port: u16,
    /// Abort handle for the running Matrix bot task (if any).
    pub bot_handle: Arc<TokioMutex<Option<tokio::task::AbortHandle>>>,
}
impl GatewayState {
/// Create a new gateway state from a config and config directory.
///
/// The first project in the config becomes the active project by default.
/// Previously registered agents are loaded from `gateway_agents.json`.
pub fn new(
gateway_config: GatewayConfig,
config_dir: PathBuf,
port: u16,
) -> Result<Self, String> {
let first = config::validate_config(&gateway_config)?;
let agents = io::load_agents(&config_dir);
Ok(Self {
projects: Arc::new(RwLock::new(gateway_config.projects)),
active_project: Arc::new(RwLock::new(first)),
client: Client::new(),
joined_agents: Arc::new(RwLock::new(agents)),
pending_tokens: Arc::new(RwLock::new(HashMap::new())),
config_dir,
port,
bot_handle: Arc::new(TokioMutex::new(None)),
})
}
/// Get the URL of the currently active project.
pub async fn active_url(&self) -> Result<String, Error> {
let name = self.active_project.read().await.clone();
self.projects
.read()
.await
.get(&name)
.map(|p| p.url.clone())
.ok_or_else(|| {
Error::ProjectNotFound(format!("active project '{name}' not found in config"))
})
}
}
// ── Public API ──────────────────────────────────────────────────────────────
/// Switch the active project. Returns the project's URL on success.
pub async fn switch_project(state: &GatewayState, project: &str) -> Result<String, Error> {
if project.is_empty() {
return Err(Error::Config("missing required parameter: project".into()));
}
let url = {
let projects = state.projects.read().await;
config::validate_project_exists(&projects, project).map_err(Error::ProjectNotFound)?
};
*state.active_project.write().await = project.to_string();
Ok(url)
}
/// Generate a one-time join token. Returns the token string.
///
/// The token is recorded in `pending_tokens` until consumed by
/// [`register_agent`].
pub async fn generate_join_token(state: &GatewayState) -> String {
    let token = uuid::Uuid::new_v4().to_string();
    let pending = PendingToken {
        created_at: chrono::Utc::now().timestamp() as f64,
    };
    state
        .pending_tokens
        .write()
        .await
        .insert(token.clone(), pending);
    crate::slog!("[gateway] Generated join token {:.8}…", &token);
    token
}
/// Register a build agent with a join token.
///
/// Consumes the token (one-time use), creates the agent, appends it to the
/// joined list, and persists the list while still holding the write lock.
///
/// # Errors
/// Returns [`Error::DuplicateToken`] when the token is unknown or already used.
pub async fn register_agent(
    state: &GatewayState,
    token: &str,
    label: String,
    address: String,
) -> Result<JoinedAgent, Error> {
    // Validate and consume the token in a single map operation
    // (the original did a contains_key check followed by remove — two lookups).
    if state.pending_tokens.write().await.remove(token).is_none() {
        return Err(Error::DuplicateToken(
            "invalid or already-used join token".into(),
        ));
    }
    let now = chrono::Utc::now().timestamp() as f64;
    let agent = registration::create_agent(uuid::Uuid::new_v4().to_string(), label, address, now);
    crate::slog!(
        "[gateway] Agent '{}' registered (id={})",
        agent.label,
        agent.id
    );
    {
        let mut agents = state.joined_agents.write().await;
        agents.push(agent.clone());
        // Persist under the write lock so the saved file matches memory.
        io::save_agents(&agents, &state.config_dir).await;
    }
    Ok(agent)
}
/// Remove a registered agent by ID. Returns `true` if found and removed.
///
/// Persists the updated list only when something was actually removed.
pub async fn remove_agent(state: &GatewayState, id: &str) -> bool {
    let mut agents = state.joined_agents.write().await;
    if !registration::remove_agent(&mut agents, id) {
        return false;
    }
    io::save_agents(&agents, &state.config_dir).await;
    crate::slog!("[gateway] Removed agent id={id}");
    true
}
/// Assign or unassign an agent to a project.
///
/// An empty or absent `project` unassigns the agent. The updated list is
/// persisted before the write lock is released.
///
/// # Errors
/// Propagates [`Error::ProjectNotFound`] / [`Error::InvalidAgent`] from
/// the pure registration logic.
pub async fn assign_agent(
    state: &GatewayState,
    id: &str,
    project: Option<String>,
) -> Result<JoinedAgent, Error> {
    // Treat an empty string the same as "no project" (unassign).
    let project_clean = project.filter(|p| !p.is_empty());
    let updated = {
        let projects = state.projects.read().await;
        let mut agents = state.joined_agents.write().await;
        let updated = registration::assign_agent(&mut agents, id, project_clean, &projects)?;
        // Persist while still holding the write lock so a concurrent mutation
        // cannot slip in between the in-memory update and the save. This
        // matches register_agent/remove_agent and removes the original's
        // re-read + full clone of the agent list after dropping the lock.
        io::save_agents(&agents, &state.config_dir).await;
        updated
    };
    crate::slog!(
        "[gateway] Agent '{}' (id={}) assigned to {:?}",
        updated.label,
        updated.id,
        updated.assigned_project
    );
    Ok(updated)
}
/// Update an agent's heartbeat. Returns `true` if found.
pub async fn heartbeat_agent(state: &GatewayState, id: &str) -> bool {
    let now = chrono::Utc::now().timestamp() as f64;
    let mut guard = state.joined_agents.write().await;
    registration::heartbeat(guard.as_mut_slice(), id, now)
}
/// Add a new project to the gateway config.
///
/// # Errors
/// - [`Error::Config`] when name or url is empty (after trimming)
/// - [`Error::DuplicateToken`] when the project name is already registered
pub async fn add_project(state: &GatewayState, name: &str, url: &str) -> Result<(), Error> {
    let name = name.trim().to_string();
    let url = url.trim().to_string();
    if name.is_empty() {
        return Err(Error::Config("project name must not be empty".into()));
    }
    if url.is_empty() {
        return Err(Error::Config("project url must not be empty".into()));
    }
    // Take the persistence snapshot while still holding the write lock; the
    // original dropped the lock and re-read, letting a concurrent change
    // slip into (or out of) the saved file.
    let snapshot = {
        let mut projects = state.projects.write().await;
        if projects.contains_key(&name) {
            return Err(Error::DuplicateToken(format!(
                "project '{name}' already exists"
            )));
        }
        projects.insert(name.clone(), ProjectEntry { url: url.clone() });
        projects.clone()
    };
    io::save_config(&snapshot, &state.config_dir).await;
    crate::slog!("[gateway] Added project '{name}' ({url})");
    Ok(())
}
/// Remove a project from the gateway config.
///
/// If the removed project was active, the first remaining project becomes
/// active.
///
/// # Errors
/// - [`Error::ProjectNotFound`] when the project is not registered
/// - [`Error::Config`] when it is the last remaining project
pub async fn remove_project(state: &GatewayState, name: &str) -> Result<(), Error> {
    let active = state.active_project.read().await.clone();
    // Mutate and snapshot under one write-lock scope; the original dropped
    // the lock and re-read before saving, so the persisted file could
    // reflect a concurrent mutation instead of this removal.
    let snapshot = {
        let mut projects = state.projects.write().await;
        if !projects.contains_key(name) {
            return Err(Error::ProjectNotFound(format!(
                "project '{name}' not found"
            )));
        }
        if projects.len() == 1 {
            return Err(Error::Config("cannot remove the last project".into()));
        }
        projects.remove(name);
        projects.clone()
    };
    io::save_config(&snapshot, &state.config_dir).await;
    // If the removed project was active, switch to the first remaining one,
    // chosen from the same snapshot we just persisted.
    if active == name {
        if let Some(new_active) = snapshot.keys().next().cloned() {
            *state.active_project.write().await = new_active;
        }
    }
    crate::slog!("[gateway] Removed project '{name}'");
    Ok(())
}
/// Initialise a new huskies project at the given path.
///
/// Steps: refuse paths that already contain `.huskies/`, create the
/// directory, scaffold the project, seed wizard state, and — only when both
/// `name` and `url` are non-empty — register the project in the gateway's
/// project map and persist it.
///
/// Returns the registered project name, or `None` if registration was not
/// requested.
pub async fn init_project(
    state: &GatewayState,
    path_str: &str,
    name: Option<&str>,
    url: Option<&str>,
) -> Result<Option<String>, Error> {
    let path_str = path_str.trim();
    if path_str.is_empty() {
        return Err(Error::Config("missing required parameter: path".into()));
    }
    let project_path = std::path::Path::new(path_str);
    // Guard against double-initialisation of an existing huskies project.
    if io::has_huskies_dir(project_path) {
        return Err(Error::Config(format!(
            "path '{}' is already a huskies project (.huskies/ exists). \
             Use wizard_status to check setup progress.",
            project_path.display()
        )));
    }
    io::ensure_directory(project_path).map_err(Error::Config)?;
    io::scaffold_project(project_path)
        .map_err(|e| Error::Config(format!("scaffold failed: {e}")))?;
    io::init_wizard_state(project_path);
    // Optionally register in projects.toml.
    let registered_name: Option<String> = match (name, url) {
        // Both fields must be present and non-blank for registration.
        (Some(n), Some(u)) if !n.trim().is_empty() && !u.trim().is_empty() => {
            let n = n.trim();
            let u = u.trim();
            let mut projects = state.projects.write().await;
            if projects.contains_key(n) {
                return Err(Error::DuplicateToken(format!(
                    "project '{n}' is already registered. Choose a different name or use switch_project."
                )));
            }
            projects.insert(n.to_string(), ProjectEntry { url: u.to_string() });
            // Saved while the write lock is still held, so the file matches memory.
            io::save_config(&projects, &state.config_dir).await;
            crate::slog!("[gateway] init_project: registered '{n}' ({u})");
            Some(n.to_string())
        }
        _ => None,
    };
    Ok(registered_name)
}
/// Fetch aggregated health status across all projects.
///
/// Returns `(all_healthy, per-project status)` where each status is either
/// `"ok"` or `"error"`. An unreachable project counts as `"error"`.
pub async fn health_check_all(state: &GatewayState) -> (bool, BTreeMap<String, &'static str>) {
    // Snapshot (name, url) pairs so the lock is not held across the checks.
    let project_entries: Vec<(String, String)> = state
        .projects
        .read()
        .await
        .iter()
        .map(|(n, e)| (n.clone(), e.url.clone()))
        .collect();
    let mut statuses = BTreeMap::new();
    for (name, url) in &project_entries {
        let healthy = matches!(
            io::check_project_health(&state.client, url).await,
            Ok(true)
        );
        statuses.insert(name.clone(), if healthy { "ok" } else { "error" });
    }
    let all_healthy = statuses.values().all(|s| *s == "ok");
    (all_healthy, statuses)
}
/// Save bot config and restart the bot.
///
/// Writes `bot.toml`, aborts any running bot task, and spawns a fresh one
/// against the current project set.
///
/// # Errors
/// Returns [`Error::Config`] when the config file cannot be written.
pub async fn save_bot_config_and_restart(state: &GatewayState, content: &str) -> Result<(), Error> {
    io::write_bot_config(&state.config_dir, content).map_err(Error::Config)?;
    // Abort existing bot task and spawn a fresh one.
    {
        let mut handle = state.bot_handle.lock().await;
        if let Some(h) = handle.take() {
            h.abort();
        }
        // Snapshot names and URLs under a single read-lock acquisition. The
        // original took the lock twice, so the two views could diverge if a
        // project was added/removed in between.
        let gateway_project_urls: BTreeMap<String, String> = state
            .projects
            .read()
            .await
            .iter()
            .map(|(name, entry)| (name.clone(), entry.url.clone()))
            .collect();
        let gateway_projects: Vec<String> = gateway_project_urls.keys().cloned().collect();
        *handle = io::spawn_gateway_bot(
            &state.config_dir,
            Arc::clone(&state.active_project),
            gateway_projects,
            gateway_project_urls,
            state.port,
        );
    }
    crate::slog!("[gateway] Bot configuration saved; bot restarted");
    Ok(())
}
// ── Tests ────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    /// Build a `GatewayConfig` from `(name, url)` pairs for test fixtures.
    fn make_config(names: &[(&str, &str)]) -> GatewayConfig {
        let mut projects = BTreeMap::new();
        for (name, url) in names {
            projects.insert(
                name.to_string(),
                ProjectEntry {
                    url: url.to_string(),
                },
            );
        }
        GatewayConfig { projects }
    }
    #[test]
    fn gateway_state_rejects_empty_config() {
        let config = GatewayConfig {
            projects: BTreeMap::new(),
        };
        assert!(GatewayState::new(config, PathBuf::from("."), 3000).is_err());
    }
    #[test]
    fn gateway_state_sets_first_project_active() {
        let config = make_config(&[("alpha", "http://a:3001"), ("beta", "http://b:3002")]);
        let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
        // blocking_read is fine here: this test runs outside a tokio runtime.
        let active = state.active_project.blocking_read().clone();
        assert_eq!(active, "alpha");
    }
    #[tokio::test]
    async fn switch_project_to_known_project() {
        let config = make_config(&[("alpha", "http://a:3001"), ("beta", "http://b:3002")]);
        let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
        let url = switch_project(&state, "beta").await.unwrap();
        assert_eq!(url, "http://b:3002");
        assert_eq!(*state.active_project.read().await, "beta");
    }
    #[tokio::test]
    async fn switch_project_to_unknown_fails() {
        let config = make_config(&[("alpha", "http://a:3001")]);
        let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
        assert!(switch_project(&state, "nonexistent").await.is_err());
    }
    #[tokio::test]
    async fn switch_project_empty_name_fails() {
        let config = make_config(&[("alpha", "http://a:3001")]);
        let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
        assert!(switch_project(&state, "").await.is_err());
    }
    #[tokio::test]
    async fn active_url_returns_correct_url() {
        let config = make_config(&[("myproj", "http://my:3001")]);
        let state = GatewayState::new(config, PathBuf::from("."), 3000).unwrap();
        let url = state.active_url().await.unwrap();
        assert_eq!(url, "http://my:3001");
    }
    // One assertion per Error variant's Display prefix.
    #[test]
    fn error_display_variants() {
        assert!(
            Error::ProjectNotFound("x".into())
                .to_string()
                .contains("Project not found")
        );
        assert!(
            Error::UnreachableProject("x".into())
                .to_string()
                .contains("Unreachable")
        );
        assert!(
            Error::DuplicateToken("x".into())
                .to_string()
                .contains("Duplicate")
        );
        assert!(
            Error::InvalidAgent("x".into())
                .to_string()
                .contains("Invalid agent")
        );
        assert!(
            Error::Config("x".into())
                .to_string()
                .contains("Config error")
        );
        assert!(Error::Upstream("x".into()).to_string().contains("Upstream"));
    }
    #[tokio::test]
    async fn generate_and_register_agent() {
        let config = make_config(&[("test", "http://test:3001")]);
        // Empty config_dir → persistence helpers become no-ops.
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let token = generate_join_token(&state).await;
        let agent = register_agent(&state, &token, "test-agent".into(), "ws://a".into())
            .await
            .unwrap();
        assert_eq!(agent.label, "test-agent");
        // The token must be consumed and the agent stored.
        assert!(state.pending_tokens.read().await.is_empty());
        assert_eq!(state.joined_agents.read().await.len(), 1);
    }
    #[tokio::test]
    async fn register_agent_invalid_token_fails() {
        let config = make_config(&[("test", "http://test:3001")]);
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let result = register_agent(&state, "bad-token", "a".into(), "ws://a".into()).await;
        assert!(result.is_err());
    }
    #[tokio::test]
    async fn remove_agent_success() {
        let config = make_config(&[("test", "http://test:3001")]);
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let token = generate_join_token(&state).await;
        let agent = register_agent(&state, &token, "a".into(), "ws://a".into())
            .await
            .unwrap();
        assert!(remove_agent(&state, &agent.id).await);
        assert!(state.joined_agents.read().await.is_empty());
    }
    #[tokio::test]
    async fn heartbeat_agent_updates_timestamp() {
        let config = make_config(&[("test", "http://test:3001")]);
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let token = generate_join_token(&state).await;
        let agent = register_agent(&state, &token, "a".into(), "ws://a".into())
            .await
            .unwrap();
        let old_ts = agent.last_seen;
        // Small sleep to ensure timestamp differs.
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
        assert!(heartbeat_agent(&state, &agent.id).await);
        let agents = state.joined_agents.read().await;
        assert!(agents[0].last_seen >= old_ts);
    }
    #[tokio::test]
    async fn init_project_scaffolds_directory() {
        let dir = tempfile::tempdir().unwrap();
        let config = make_config(&[("test", "http://test:3001")]);
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let result = init_project(&state, dir.path().to_str().unwrap(), None, None).await;
        assert!(result.is_ok());
        assert!(dir.path().join(".huskies").exists());
    }
    #[tokio::test]
    async fn init_project_already_exists_fails() {
        let dir = tempfile::tempdir().unwrap();
        // Pre-existing .huskies/ marks the path as already initialised.
        std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
        let config = make_config(&[("test", "http://test:3001")]);
        let state = GatewayState::new(config, PathBuf::new(), 3000).unwrap();
        let result = init_project(&state, dir.path().to_str().unwrap(), None, None).await;
        assert!(result.is_err());
    }
}
+91
View File
@@ -0,0 +1,91 @@
//! Gateway notification polling — pure event formatting.
//!
//! Formats pipeline events from project containers into gateway notifications
//! with `[project-name]` prefixes. The actual I/O (HTTP polling, spawning
//! tasks, sending messages) lives in `io.rs`.
use crate::service::events::StoredEvent;
use crate::service::notifications::{
format_blocked_notification, format_error_notification, format_stage_notification,
stage_display_name,
};
/// Format a [`StoredEvent`] from a project into a gateway notification.
///
/// Prefixes the message with `[project-name]` so users can distinguish which
/// project emitted the event. Returns `(plain, html)` variants.
pub fn format_gateway_event(project_name: &str, event: &StoredEvent) -> (String, String) {
    let prefix = format!("[{project_name}] ");
    // Delegate body formatting per event kind, then apply the prefix once.
    let (plain, html) = match event {
        StoredEvent::StageTransition {
            story_id,
            from_stage,
            to_stage,
            ..
        } => format_stage_notification(
            story_id,
            None,
            stage_display_name(from_stage),
            stage_display_name(to_stage),
        ),
        StoredEvent::MergeFailure {
            story_id, reason, ..
        } => format_error_notification(story_id, None, reason),
        StoredEvent::StoryBlocked {
            story_id, reason, ..
        } => format_blocked_notification(story_id, None, reason),
    };
    (format!("{prefix}{plain}"), format!("{prefix}{html}"))
}
// ── Tests ────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn stage_transition_prefixes_project_name() {
        let event = StoredEvent::StageTransition {
            story_id: "42_story_my_feature".to_string(),
            from_stage: "2_current".to_string(),
            to_stage: "3_qa".to_string(),
            timestamp_ms: 1000,
        };
        let (plain, html) = format_gateway_event("huskies", &event);
        // Both variants carry the project prefix; the body contains the
        // display names of the from/to stages.
        assert!(plain.starts_with("[huskies] "));
        assert!(html.starts_with("[huskies] "));
        assert!(plain.contains("Current"));
        assert!(plain.contains("QA"));
    }
    #[test]
    fn merge_failure_prefixes_project_name() {
        let event = StoredEvent::MergeFailure {
            story_id: "42_story_my_feature".to_string(),
            reason: "merge conflict".to_string(),
            timestamp_ms: 1000,
        };
        let (plain, _html) = format_gateway_event("robot-studio", &event);
        assert!(plain.starts_with("[robot-studio] "));
        // The failure reason is passed through verbatim.
        assert!(plain.contains("merge conflict"));
    }
    #[test]
    fn story_blocked_prefixes_project_name() {
        let event = StoredEvent::StoryBlocked {
            story_id: "43_story_bar".to_string(),
            reason: "retry limit exceeded".to_string(),
            timestamp_ms: 2000,
        };
        let (plain, _html) = format_gateway_event("huskies", &event);
        assert!(plain.starts_with("[huskies] "));
        assert!(plain.contains("BLOCKED"));
    }
}
+165
View File
@@ -0,0 +1,165 @@
//! Gateway agent registration — pure logic for managing build agents.
//!
//! Contains `JoinedAgent` and functions that validate and manipulate agent
//! state in memory. All persistence (disk I/O) lives in `io.rs`.
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use super::config::ProjectEntry;
/// A build agent that has registered with this gateway.
///
/// Serialized to `gateway_agents.json`; `last_seen` and `assigned_project`
/// carry serde defaults so files written by older versions still load.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct JoinedAgent {
    /// Unique ID assigned by the gateway on registration.
    pub id: String,
    /// Human-readable label provided by the agent (e.g. `build-agent-abc123`).
    pub label: String,
    /// The agent's CRDT-sync WebSocket address (e.g. `ws://host:3001/crdt-sync`).
    pub address: String,
    /// Unix timestamp when the agent registered.
    pub registered_at: f64,
    /// Unix timestamp of the last heartbeat from this agent.
    #[serde(default)]
    pub last_seen: f64,
    /// Project this agent is assigned to, if any.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub assigned_project: Option<String>,
}
/// Create a new `JoinedAgent` from registration data.
///
/// `now` seeds both `registered_at` and `last_seen`, so a freshly registered
/// agent counts as just-seen; `assigned_project` starts out unset.
pub fn create_agent(id: String, label: String, address: String, now: f64) -> JoinedAgent {
    JoinedAgent {
        id,
        label,
        address,
        registered_at: now,
        last_seen: now,
        assigned_project: None,
    }
}
/// Remove an agent by ID from the list. Returns `true` if found and removed.
pub fn remove_agent(agents: &mut Vec<JoinedAgent>, id: &str) -> bool {
    let original_len = agents.len();
    agents.retain(|agent| agent.id != id);
    original_len != agents.len()
}
/// Assign (or unassign) an agent to a project.
///
/// Returns the updated agent on success, or an error if the agent or project
/// is not found.
pub fn assign_agent(
agents: &mut [JoinedAgent],
id: &str,
project: Option<String>,
projects: &BTreeMap<String, ProjectEntry>,
) -> Result<JoinedAgent, super::Error> {
// Validate project exists if assigning.
if let Some(ref p) = project
&& !projects.contains_key(p.as_str())
{
return Err(super::Error::ProjectNotFound(format!(
"unknown project '{p}'"
)));
}
match agents.iter_mut().find(|a| a.id == id) {
None => Err(super::Error::InvalidAgent(format!("agent not found: {id}"))),
Some(a) => {
a.assigned_project = project;
Ok(a.clone())
}
}
}
/// Update an agent's last-seen timestamp. Returns `true` if the agent was found.
pub fn heartbeat(agents: &mut [JoinedAgent], id: &str, now: f64) -> bool {
    if let Some(agent) = agents.iter_mut().find(|a| a.id == id) {
        agent.last_seen = now;
        true
    } else {
        false
    }
}
// ── Tests ────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn create_agent_sets_fields() {
        // Both timestamps are seeded from the registration time.
        let agent = create_agent("id-1".into(), "lbl".into(), "ws://a".into(), 100.0);
        assert_eq!(agent.id, "id-1");
        assert_eq!(agent.label, "lbl");
        assert_eq!(agent.address, "ws://a");
        assert_eq!(agent.registered_at, 100.0);
        assert_eq!(agent.last_seen, 100.0);
        assert!(agent.assigned_project.is_none());
    }
    #[test]
    fn remove_agent_by_id() {
        let mut agents = vec![
            create_agent("a".into(), "A".into(), "ws://a".into(), 0.0),
            create_agent("b".into(), "B".into(), "ws://b".into(), 0.0),
        ];
        // Removing "a" leaves only "b" behind.
        assert!(remove_agent(&mut agents, "a"));
        assert_eq!(agents.len(), 1);
        assert_eq!(agents[0].id, "b");
    }
    #[test]
    fn remove_agent_missing_returns_false() {
        let mut agents = vec![];
        assert!(!remove_agent(&mut agents, "x"));
    }
    #[test]
    fn assign_agent_to_valid_project() {
        let mut projects = BTreeMap::new();
        projects.insert(
            "proj".into(),
            ProjectEntry {
                url: "http://p".into(),
            },
        );
        let mut agents = vec![create_agent("a".into(), "A".into(), "ws://a".into(), 0.0)];
        let result = assign_agent(&mut agents, "a", Some("proj".into()), &projects);
        assert!(result.is_ok());
        // The returned clone reflects the new assignment.
        assert_eq!(result.unwrap().assigned_project, Some("proj".into()));
    }
    #[test]
    fn assign_agent_to_unknown_project_fails() {
        let projects = BTreeMap::new();
        let mut agents = vec![create_agent("a".into(), "A".into(), "ws://a".into(), 0.0)];
        let result = assign_agent(&mut agents, "a", Some("nope".into()), &projects);
        assert!(result.is_err());
    }
    #[test]
    fn assign_agent_unknown_id_fails() {
        let projects = BTreeMap::new();
        let mut agents: Vec<JoinedAgent> = vec![];
        let result = assign_agent(&mut agents, "x", None, &projects);
        assert!(result.is_err());
    }
    #[test]
    fn heartbeat_updates_last_seen() {
        let mut agents = vec![create_agent("a".into(), "A".into(), "ws://a".into(), 0.0)];
        assert!(heartbeat(&mut agents, "a", 999.0));
        assert_eq!(agents[0].last_seen, 999.0);
    }
    #[test]
    fn heartbeat_unknown_id_returns_false() {
        let mut agents: Vec<JoinedAgent> = vec![];
        assert!(!heartbeat(&mut agents, "x", 1.0));
    }
}
+90
View File
@@ -0,0 +1,90 @@
//! Git I/O — the ONLY place in `service::git_ops/` that may perform side effects.
//!
//! Side effects here include: spawning git processes via `std::process::Command`
//! (wrapped in `tokio::task::spawn_blocking`), and filesystem existence and
//! canonicalization checks for path validation.
//! All pure logic (path-prefix checks, porcelain parsing) lives in `path_guard.rs`
//! and `porcelain.rs`.
use super::Error;
use std::path::{Path, PathBuf};
use std::process::Output;
/// Validate that `worktree_path` is an absolute path that exists on disk and
/// lies inside the project's `.huskies/worktrees/` directory. Returns the
/// canonicalized path on success.
///
/// # Errors
/// - [`Error::Validation`] if the path is relative or does not exist.
/// - [`Error::PathNotAllowed`] if the path is outside `.huskies/worktrees/`.
/// - [`Error::Io`] if canonicalization fails.
pub fn validate_worktree_path(worktree_path: &str, project_root: &Path) -> Result<PathBuf, Error> {
let wd = PathBuf::from(worktree_path);
if !wd.is_absolute() {
return Err(Error::Validation(
"worktree_path must be an absolute path".to_string(),
));
}
if !wd.exists() {
return Err(Error::Validation(format!(
"worktree_path does not exist: {worktree_path}"
)));
}
let worktrees_root = project_root.join(".huskies").join("worktrees");
let canonical_wd = wd
.canonicalize()
.map_err(|e| Error::Io(format!("Cannot canonicalize worktree_path: {e}")))?;
let canonical_wt = if worktrees_root.exists() {
worktrees_root
.canonicalize()
.map_err(|e| Error::Io(format!("Cannot canonicalize worktrees root: {e}")))?
} else {
return Err(Error::PathNotAllowed(
"No worktrees directory found in project".to_string(),
));
};
if !super::path_guard::is_under_root(&canonical_wd, &canonical_wt) {
return Err(Error::PathNotAllowed(format!(
"worktree_path must be inside .huskies/worktrees/. Got: {worktree_path}"
)));
}
Ok(canonical_wd)
}
/// Run a git command with static arg slices in `dir` and return the process output.
///
/// The process is spawned on the blocking thread pool via
/// `tokio::task::spawn_blocking` so it does not stall the async runtime.
///
/// # Errors
/// - [`Error::UpstreamFailure`] if the blocking task panics (join error).
/// - [`Error::Io`] if the `git` process cannot be spawned.
pub async fn run_git(args: Vec<&'static str>, dir: PathBuf) -> Result<Output, Error> {
    tokio::task::spawn_blocking(move || {
        std::process::Command::new("git")
            .args(&args)
            .current_dir(&dir)
            .output()
    })
    .await
    .map_err(|e| Error::UpstreamFailure(format!("Task join error: {e}")))?
    .map_err(|e| Error::Io(format!("Failed to run git: {e}")))
}
/// Run a git command with owned `String` args in `dir` and return the process output.
///
/// Identical to `run_git` except for the argument type; spawned on the
/// blocking thread pool via `tokio::task::spawn_blocking`.
///
/// # Errors
/// - [`Error::UpstreamFailure`] if the blocking task panics (join error).
/// - [`Error::Io`] if the `git` process cannot be spawned.
pub async fn run_git_owned(args: Vec<String>, dir: PathBuf) -> Result<Output, Error> {
    tokio::task::spawn_blocking(move || {
        std::process::Command::new("git")
            .args(&args)
            .current_dir(&dir)
            .output()
    })
    .await
    .map_err(|e| Error::UpstreamFailure(format!("Task join error: {e}")))?
    .map_err(|e| Error::Io(format!("Failed to run git: {e}")))
}
+100
View File
@@ -0,0 +1,100 @@
//! Git operations service — worktree path validation and git command execution.
//!
//! Extracted from `http/mcp/git_tools.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects (git processes, filesystem)
//! - `path_guard.rs` — pure path-prefix safety checks
//! - `porcelain.rs` — pure git porcelain output parsers
pub mod io;
pub mod path_guard;
pub mod porcelain;
#[allow(unused_imports)]
pub use path_guard::is_under_root;
pub use porcelain::parse_git_status_porcelain;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::git_ops` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::NotFound`] → 404 Not Found
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Conflict`] → 409 Conflict
/// - [`Error::PathNotAllowed`] → 400 Bad Request (sandbox violation)
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::UpstreamFailure`] → 500 Internal Server Error
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// The requested worktree or path does not exist.
    NotFound(String),
    /// A required argument is missing or has an invalid value.
    Validation(String),
    /// The git operation cannot proceed due to a conflicting state.
    Conflict(String),
    /// The path is outside the allowed sandbox.
    PathNotAllowed(String),
    /// A filesystem or git I/O operation failed.
    Io(String),
    /// An upstream git command returned an unexpected error.
    UpstreamFailure(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Pair each variant with its user-facing label, then format once so
        // the "<label>: <detail>" shape stays consistent across variants.
        let (label, detail) = match self {
            Self::NotFound(msg) => ("Not found", msg),
            Self::Validation(msg) => ("Validation error", msg),
            Self::Conflict(msg) => ("Conflict", msg),
            Self::PathNotAllowed(msg) => ("Path not allowed", msg),
            Self::Io(msg) => ("I/O error", msg),
            Self::UpstreamFailure(msg) => ("Upstream failure", msg),
        };
        write!(f, "{label}: {detail}")
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // Each test pins the stable prefix of one variant's Display output,
    // which HTTP handlers surface to clients.
    #[test]
    fn error_display_not_found() {
        let e = Error::NotFound("worktree missing".to_string());
        assert!(e.to_string().contains("Not found"));
    }
    #[test]
    fn error_display_validation() {
        let e = Error::Validation("relative path".to_string());
        assert!(e.to_string().contains("Validation error"));
    }
    #[test]
    fn error_display_conflict() {
        let e = Error::Conflict("uncommitted changes".to_string());
        assert!(e.to_string().contains("Conflict"));
    }
    #[test]
    fn error_display_path_not_allowed() {
        let e = Error::PathNotAllowed("outside sandbox".to_string());
        assert!(e.to_string().contains("Path not allowed"));
    }
    #[test]
    fn error_display_io() {
        let e = Error::Io("permission denied".to_string());
        assert!(e.to_string().contains("I/O error"));
    }
    #[test]
    fn error_display_upstream_failure() {
        let e = Error::UpstreamFailure("git not found".to_string());
        assert!(e.to_string().contains("Upstream failure"));
    }
}
+58
View File
@@ -0,0 +1,58 @@
//! Pure path-guard helpers for `service::git_ops`.
//!
//! These functions are free of side effects — they operate on already-resolved
//! `Path` values and perform no filesystem I/O. Path existence checks and
//! canonicalization belong in `io.rs`.
use std::path::Path;
/// Return `true` if `canonical_path` starts with (i.e. is under) `root`.
///
/// Both paths must already be canonicalized so that symlinks, `.`, and `..`
/// components do not cause false negatives. The comparison is component-wise,
/// so `/foo/bar-extra` is NOT under `/foo/bar`.
pub fn is_under_root(canonical_path: &Path, root: &Path) -> bool {
    // Walking the ancestor chain (the path itself, then each parent) and
    // comparing component-wise is equivalent to `Path::starts_with`.
    canonical_path.ancestors().any(|ancestor| ancestor == root)
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use std::path::PathBuf;
    // These tests use literal (non-canonicalized) paths; the contract only
    // requires canonical inputs at the call site in `io.rs`.
    #[test]
    fn is_under_root_returns_true_for_child() {
        let root = PathBuf::from("/project/.huskies/worktrees");
        let child = PathBuf::from("/project/.huskies/worktrees/42_story_foo");
        assert!(is_under_root(&child, &root));
    }
    #[test]
    fn is_under_root_returns_false_for_sibling() {
        let root = PathBuf::from("/project/.huskies/worktrees");
        let sibling = PathBuf::from("/project/.huskies/other");
        assert!(!is_under_root(&sibling, &root));
    }
    #[test]
    fn is_under_root_returns_false_for_parent() {
        let root = PathBuf::from("/project/.huskies/worktrees");
        let parent = PathBuf::from("/project/.huskies");
        assert!(!is_under_root(&parent, &root));
    }
    #[test]
    fn is_under_root_returns_true_for_exact_match() {
        let root = PathBuf::from("/project/.huskies/worktrees");
        assert!(is_under_root(&root, &root));
    }
    #[test]
    fn is_under_root_returns_false_for_path_with_shared_prefix_but_not_child() {
        // /foo/bar-extra is NOT under /foo/bar
        let root = PathBuf::from("/foo/bar");
        let other = PathBuf::from("/foo/bar-extra");
        assert!(!is_under_root(&other, &root));
    }
}
+107
View File
@@ -0,0 +1,107 @@
//! Pure git porcelain output parsers for `service::git_ops`.
//!
//! These functions parse the text output of `git status --porcelain=v1`
//! and similar commands. No I/O: they take `&str` and return structured data.
/// Parse `git status --porcelain=v1 -u` output into three file lists.
///
/// Returns `(staged, unstaged, untracked)` where each entry is the file path
/// string from the porcelain line. Lines too short to carry a status prefix,
/// or whose byte offset 3 is not a valid UTF-8 boundary (malformed input),
/// are skipped rather than panicking.
pub fn parse_git_status_porcelain(stdout: &str) -> (Vec<String>, Vec<String>, Vec<String>) {
    let mut staged: Vec<String> = Vec::new();
    let mut unstaged: Vec<String> = Vec::new();
    let mut untracked: Vec<String> = Vec::new();
    for line in stdout.lines() {
        if line.len() < 3 {
            continue;
        }
        let x = line.chars().next().unwrap_or(' ');
        let y = line.chars().nth(1).unwrap_or(' ');
        // Use a checked slice: byte offset 3 is only guaranteed to be a char
        // boundary when the first three bytes are ASCII (as in well-formed
        // porcelain output). `line[3..]` would panic on malformed multi-byte
        // input; skip such lines instead.
        let Some(path) = line.get(3..) else {
            continue;
        };
        let path = path.to_string();
        match (x, y) {
            ('?', '?') => untracked.push(path),
            (' ', _) => unstaged.push(path),
            (_, ' ') => staged.push(path),
            _ => {
                // Both staged and unstaged modifications.
                staged.push(path.clone());
                unstaged.push(path);
            }
        }
    }
    (staged, unstaged, untracked)
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn parse_empty_output_returns_empty_vecs() {
        let (s, u, t) = parse_git_status_porcelain("");
        assert!(s.is_empty());
        assert!(u.is_empty());
        assert!(t.is_empty());
    }
    #[test]
    fn parse_untracked_file() {
        // "??" in both status columns marks an untracked file.
        let output = "?? new_file.txt\n";
        let (staged, unstaged, untracked) = parse_git_status_porcelain(output);
        assert!(staged.is_empty());
        assert!(unstaged.is_empty());
        assert_eq!(untracked, vec!["new_file.txt"]);
    }
    #[test]
    fn parse_staged_file() {
        // Status letter in the first column only = staged change.
        let output = "A staged.txt\n";
        let (staged, unstaged, untracked) = parse_git_status_porcelain(output);
        assert_eq!(staged, vec!["staged.txt"]);
        assert!(unstaged.is_empty());
        assert!(untracked.is_empty());
    }
    #[test]
    fn parse_unstaged_modified_file() {
        // 'M' in second column = unstaged modification
        let output = " M modified.txt\n";
        let (staged, unstaged, untracked) = parse_git_status_porcelain(output);
        assert!(staged.is_empty());
        assert_eq!(unstaged, vec!["modified.txt"]);
        assert!(untracked.is_empty());
    }
    #[test]
    fn parse_both_staged_and_unstaged() {
        // 'MM' = staged + unstaged in same file
        let output = "MM both.txt\n";
        let (staged, unstaged, untracked) = parse_git_status_porcelain(output);
        assert_eq!(staged, vec!["both.txt"]);
        assert_eq!(unstaged, vec!["both.txt"]);
        assert!(untracked.is_empty());
    }
    #[test]
    fn parse_mixed_output() {
        let output = "A staged.rs\n M unstaged.rs\n?? untracked.rs\n";
        let (staged, unstaged, untracked) = parse_git_status_porcelain(output);
        assert_eq!(staged, vec!["staged.rs"]);
        assert_eq!(unstaged, vec!["unstaged.rs"]);
        assert_eq!(untracked, vec!["untracked.rs"]);
    }
    #[test]
    fn parse_skips_short_lines() {
        // Lines shorter than 3 chars should be skipped.
        let output = "A \nMM both.txt\n";
        let (staged, _unstaged, _untracked) = parse_git_status_porcelain(output);
        // Only "both.txt" should appear — the 2-char "A " line is skipped.
        assert_eq!(staged, vec!["both.txt"]);
    }
}
+38
View File
@@ -0,0 +1,38 @@
//! Pure health-check logic — no side effects.
use poem_openapi::Object;
use serde::Serialize;
/// The JSON payload returned by the health check endpoint.
///
/// Derives `Object` (poem_openapi) so the type appears in the generated
/// OpenAPI schema, and `Serialize` for plain JSON output.
#[derive(Serialize, Object)]
pub struct HealthStatus {
    /// Human-readable status string, always `"ok"` when the server is healthy.
    pub status: String,
}
/// Return a healthy status response (`status == "ok"`).
pub fn ok() -> HealthStatus {
    HealthStatus {
        status: String::from("ok"),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn ok_returns_status_ok() {
        let s = ok();
        assert_eq!(s.status, "ok");
    }
    #[test]
    fn health_status_serializes() {
        // Round-trip through serde_json to confirm the field serializes
        // under the key `status`.
        let s = HealthStatus {
            status: "ok".to_string(),
        };
        let json = serde_json::to_value(&s).unwrap();
        assert_eq!(json["status"], "ok");
    }
}
+4
View File
@@ -0,0 +1,4 @@
//! Health I/O wrappers.
//!
//! Health has no side effects; this file exists to satisfy the
//! service-module convention (`docs/architecture/service-modules.md`).
+39
View File
@@ -0,0 +1,39 @@
//! Health service — public API for the health domain.
//!
//! Exposes a single `check()` function that returns a [`HealthStatus`].
//! HTTP handlers call this instead of constructing the response inline.
//!
//! Conventions: `docs/architecture/service-modules.md`
pub mod check;
pub(super) mod io;
pub use check::HealthStatus;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::health` functions.
///
/// Health checks are currently infallible; this enum satisfies the module
/// convention and accommodates future error cases (e.g. dependency checks).
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// An internal error occurred during the health check.
    Internal(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Only one variant exists today, so destructure irrefutably rather
        // than matching.
        let Self::Internal(msg) = self;
        write!(f, "Health error: {msg}")
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Perform a health check and return the status.
///
/// Currently infallible: delegates to [`check::ok`], which always reports a
/// healthy status.
pub fn check() -> HealthStatus {
    check::ok()
}
+5
View File
@@ -0,0 +1,5 @@
//! Merge I/O — the ONLY place in `service::merge/` that may perform side effects.
//!
//! Currently, the bulk of the merge I/O is handled by `crate::agents::merge`
//! and `crate::io::story_metadata`. This file is the designated home for any
//! future I/O helpers that are extracted from merge-related MCP handlers.
+87
View File
@@ -0,0 +1,87 @@
//! Merge service — domain logic for merging agent work to master.
//!
//! Extracted from `http/mcp/merge_tools.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects
//! - `status.rs` — pure merge-status message formatting
pub mod io;
pub mod status;
#[allow(unused_imports)]
pub use status::format_merge_status_message;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::merge` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::NotFound`] → 404 Not Found
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Conflict`] → 409 Conflict
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::UpstreamFailure`] → 500 Internal Server Error
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// The requested story or merge job was not found.
    NotFound(String),
    /// A required argument is missing or has an invalid value.
    Validation(String),
    /// The merge cannot proceed due to a conflicting state.
    Conflict(String),
    /// A filesystem or process I/O operation failed.
    Io(String),
    /// An upstream dependency (agents, git) returned an unexpected error.
    UpstreamFailure(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Pair each variant with its user-facing label, then format once so
        // the "<label>: <detail>" shape stays consistent across variants.
        let (label, detail) = match self {
            Self::NotFound(msg) => ("Not found", msg),
            Self::Validation(msg) => ("Validation error", msg),
            Self::Conflict(msg) => ("Conflict", msg),
            Self::Io(msg) => ("I/O error", msg),
            Self::UpstreamFailure(msg) => ("Upstream failure", msg),
        };
        write!(f, "{label}: {detail}")
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // Each test pins the stable prefix of one variant's Display output,
    // which HTTP handlers surface to clients.
    #[test]
    fn error_display_not_found() {
        let e = Error::NotFound("merge job missing".to_string());
        assert!(e.to_string().contains("Not found"));
    }
    #[test]
    fn error_display_validation() {
        let e = Error::Validation("story_id required".to_string());
        assert!(e.to_string().contains("Validation error"));
    }
    #[test]
    fn error_display_conflict() {
        let e = Error::Conflict("story already merged".to_string());
        assert!(e.to_string().contains("Conflict"));
    }
    #[test]
    fn error_display_io() {
        let e = Error::Io("write failed".to_string());
        assert!(e.to_string().contains("I/O error"));
    }
    #[test]
    fn error_display_upstream_failure() {
        let e = Error::UpstreamFailure("git crashed".to_string());
        assert!(e.to_string().contains("Upstream failure"));
    }
}
+89
View File
@@ -0,0 +1,89 @@
//! Pure merge-status message formatting for `service::merge`.
//!
//! These functions transform a completed merge report into human-readable
//! status messages. No I/O: they are pure functions over plain data.
use crate::agents::merge::MergeReport;
#[allow(dead_code)]
/// Derive a human-readable status message from a completed [`MergeReport`].
///
/// The message explains what happened and (on failure) what the caller
/// should do next.
pub fn format_merge_status_message(report: &MergeReport) -> &'static str {
    // Decision table over the four outcome flags; arm order mirrors the
    // priority of the original if/else chain (resolved success first,
    // unresolved conflicts before gate failures).
    match (
        report.success,
        report.gates_passed,
        report.had_conflicts,
        report.conflicts_resolved,
    ) {
        (true, true, _, true) => {
            "Merge complete: conflicts were auto-resolved and all quality gates passed. Story moved to done and worktree cleaned up."
        }
        (true, true, _, false) => {
            "Merge complete: all quality gates passed. Story moved to done and worktree cleaned up."
        }
        (_, _, true, false) => {
            "Merge failed: conflicts detected that could not be auto-resolved. Merge was aborted — master is untouched. Call report_merge_failure with the conflict details so the human can resolve them. Do NOT manually move the story file or call accept_story."
        }
        (true, false, _, _) => {
            "Merge committed but quality gates failed. Review gate_output and fix issues before re-running."
        }
        _ => {
            "Merge failed. Review gate_output for details. Call report_merge_failure to record the failure. Do NOT manually move the story file or call accept_story."
        }
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // Fixture: builds a MergeReport carrying only the four flags that drive
    // message selection; all other fields are left empty/false.
    fn report(
        success: bool,
        had_conflicts: bool,
        conflicts_resolved: bool,
        gates_passed: bool,
    ) -> MergeReport {
        MergeReport {
            story_id: String::new(),
            success,
            had_conflicts,
            conflicts_resolved,
            conflict_details: None,
            gates_passed,
            gate_output: String::new(),
            worktree_cleaned_up: false,
            story_archived: false,
        }
    }
    #[test]
    fn clean_merge_message() {
        let r = report(true, false, false, true);
        let msg = format_merge_status_message(&r);
        assert!(msg.contains("quality gates passed"));
        assert!(msg.contains("done"));
    }
    #[test]
    fn conflicts_resolved_message() {
        let r = report(true, true, true, true);
        let msg = format_merge_status_message(&r);
        assert!(msg.contains("auto-resolved"));
    }
    #[test]
    fn unresolved_conflicts_message() {
        let r = report(false, true, false, false);
        let msg = format_merge_status_message(&r);
        assert!(msg.contains("could not be auto-resolved"));
        assert!(msg.contains("report_merge_failure"));
    }
    #[test]
    fn gates_failed_message() {
        let r = report(true, false, false, false);
        let msg = format_merge_status_message(&r);
        assert!(msg.contains("quality gates failed"));
    }
    #[test]
    fn general_failure_message() {
        let r = report(false, false, false, false);
        let msg = format_merge_status_message(&r);
        assert!(msg.contains("Merge failed"));
        assert!(msg.contains("report_merge_failure"));
    }
}
+21
View File
@@ -6,3 +6,24 @@
//! - `io.rs` is the only file that performs side effects
//! - Topic-named pure files contain branching logic with no I/O
pub mod agents;
pub mod anthropic;
pub mod bot_command;
pub mod common;
pub mod diagnostics;
pub mod events;
pub mod file_io;
pub mod gateway;
pub mod git_ops;
pub mod health;
pub mod merge;
pub mod notifications;
pub mod oauth;
pub mod pipeline;
pub mod project;
pub mod qa;
pub mod settings;
pub mod shell;
pub mod story;
pub mod timer;
pub mod wizard;
pub mod ws;
+119
View File
@@ -0,0 +1,119 @@
//! Event-to-notification mapping.
//!
//! Pure functions that classify [`WatcherEvent`] variants into notification
//! actions, deciding which events produce user-visible messages and which
//! are suppressed or logged server-side only.
use crate::io::watcher::WatcherEvent;
/// The notification action to take in response to a [`WatcherEvent`].
///
/// `PartialEq` is derived so tests can assert directly on the
/// classification result.
#[derive(Debug, PartialEq)]
pub enum EventAction {
    /// Post a stage-transition notification; the event carries a known source stage.
    StageTransition,
    /// Post a merge-failure error notification.
    MergeFailure,
    /// Post a rate-limit warning (subject to config/debounce suppression).
    RateLimitWarning,
    /// Post a story-blocked notification.
    StoryBlocked,
    /// Log server-side only; do not post to chat (e.g. hard rate-limit blocks).
    LogOnly,
    /// Reload the project configuration.
    ReloadConfig,
    /// Skip silently (synthetic events, unknown variants).
    Skip,
}
/// Classify a [`WatcherEvent`] into the action the notification listener should take.
pub fn classify(event: &WatcherEvent) -> EventAction {
    match event {
        // A known source stage means a real transition worth announcing.
        WatcherEvent::WorkItem {
            from_stage: Some(_),
            ..
        } => EventAction::StageTransition,
        // Synthetic events (creation, reassign) carry no from_stage; posting a
        // notification for them would produce incorrect messages.
        WatcherEvent::WorkItem {
            from_stage: None, ..
        } => EventAction::Skip,
        WatcherEvent::MergeFailure { .. } => EventAction::MergeFailure,
        WatcherEvent::RateLimitWarning { .. } => EventAction::RateLimitWarning,
        WatcherEvent::StoryBlocked { .. } => EventAction::StoryBlocked,
        WatcherEvent::RateLimitHardBlock { .. } => EventAction::LogOnly,
        WatcherEvent::ConfigChanged => EventAction::ReloadConfig,
        _ => EventAction::Skip,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Fixture: a WorkItem event with a configurable source stage.
    fn work_item(from_stage: Option<&str>) -> WatcherEvent {
        WatcherEvent::WorkItem {
            stage: "3_qa".to_string(),
            item_id: "1_story_foo".to_string(),
            action: "qa".to_string(),
            commit_msg: String::new(),
            from_stage: from_stage.map(str::to_string),
        }
    }
    #[test]
    fn work_item_with_from_stage_is_stage_transition() {
        let event = work_item(Some("2_current"));
        assert_eq!(classify(&event), EventAction::StageTransition);
    }
    #[test]
    fn work_item_without_from_stage_is_skip() {
        // Synthetic events (no from_stage) must not produce notifications.
        let event = work_item(None);
        assert_eq!(classify(&event), EventAction::Skip);
    }
    #[test]
    fn merge_failure_is_classified_correctly() {
        let event = WatcherEvent::MergeFailure {
            story_id: "1_story_foo".to_string(),
            reason: "conflict".to_string(),
        };
        assert_eq!(classify(&event), EventAction::MergeFailure);
    }
    #[test]
    fn rate_limit_warning_is_classified_correctly() {
        let event = WatcherEvent::RateLimitWarning {
            story_id: "1_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
        };
        assert_eq!(classify(&event), EventAction::RateLimitWarning);
    }
    #[test]
    fn story_blocked_is_classified_correctly() {
        let event = WatcherEvent::StoryBlocked {
            story_id: "1_story_foo".to_string(),
            reason: "empty diff".to_string(),
        };
        assert_eq!(classify(&event), EventAction::StoryBlocked);
    }
    #[test]
    fn rate_limit_hard_block_is_log_only() {
        // Hard blocks are logged server-side only, never posted to chat.
        let event = WatcherEvent::RateLimitHardBlock {
            story_id: "1_story_foo".to_string(),
            agent_name: "coder-1".to_string(),
            reset_at: chrono::Utc::now(),
        };
        assert_eq!(classify(&event), EventAction::LogOnly);
    }
    #[test]
    fn config_changed_triggers_reload() {
        assert_eq!(
            classify(&WatcherEvent::ConfigChanged),
            EventAction::ReloadConfig
        );
    }
}
@@ -0,0 +1,73 @@
//! Pure filtering and debounce logic for notification suppression.
//!
//! Contains constants and predicates that decide whether a notification
//! should be sent, without performing any I/O.
use std::time::{Duration, Instant};
/// Minimum time between rate-limit notifications for the same agent key.
pub const RATE_LIMIT_DEBOUNCE: Duration = Duration::from_secs(60);
/// Window during which rapid stage transitions for the same item are coalesced
/// into a single notification (only the final stage is announced).
pub const STAGE_TRANSITION_DEBOUNCE: Duration = Duration::from_millis(200);
/// Returns `true` if a rate-limit notification should be sent.
///
/// `last_notified` is the [`Instant`] of the last sent notification for this
/// agent, or `None` if no notification has been sent yet. The boundary is
/// inclusive: exactly [`RATE_LIMIT_DEBOUNCE`] elapsed means "send".
pub fn should_send_rate_limit(last_notified: Option<Instant>, now: Instant) -> bool {
    // Never-notified agents always qualify; otherwise the full debounce
    // window must have elapsed since the previous notification.
    last_notified.map_or(true, |last| now.duration_since(last) >= RATE_LIMIT_DEBOUNCE)
}
#[cfg(test)]
mod tests {
    use super::*;
    // ── should_send_rate_limit ────────────────────────────────────────────────
    #[test]
    fn should_send_when_never_notified() {
        let now = Instant::now();
        assert!(should_send_rate_limit(None, now));
    }
    #[test]
    fn should_not_send_within_debounce_window() {
        let now = Instant::now();
        // Pretend last notification was 10 seconds ago — inside the 60s window.
        let last = now - Duration::from_secs(10);
        assert!(!should_send_rate_limit(Some(last), now));
    }
    #[test]
    fn should_send_after_debounce_window_expires() {
        let now = Instant::now();
        // Pretend last notification was 61 seconds ago — outside the 60s window.
        let last = now - Duration::from_secs(61);
        assert!(should_send_rate_limit(Some(last), now));
    }
    #[test]
    fn should_send_at_exactly_debounce_boundary() {
        // Renamed from `should_not_send_at_exactly_debounce_boundary`: the
        // predicate uses `>=`, so a notification IS sent when exactly
        // RATE_LIMIT_DEBOUNCE has elapsed — the old name contradicted the
        // assertion below.
        let now = Instant::now();
        let last = now - RATE_LIMIT_DEBOUNCE;
        assert!(should_send_rate_limit(Some(last), now));
    }
    // ── constants ─────────────────────────────────────────────────────────────
    #[test]
    fn rate_limit_debounce_is_one_minute() {
        assert_eq!(RATE_LIMIT_DEBOUNCE, Duration::from_secs(60));
    }
    #[test]
    fn stage_transition_debounce_is_200ms() {
        assert_eq!(STAGE_TRANSITION_DEBOUNCE, Duration::from_millis(200));
    }
}
+314
View File
@@ -0,0 +1,314 @@
//! Pure message-formatting functions for pipeline-event notifications.
//!
//! All functions are pure (no I/O, no side effects) and accept only owned
//! or borrowed string data. They return `(plain_text, html)` pairs suitable
//! for `ChatTransport::send_message`.
use crate::service::common::item_id::extract_item_number;
/// Human-readable display name for a pipeline stage directory.
pub fn stage_display_name(stage: &str) -> &'static str {
    // Ordered (directory slug, display name) pairs for the known stages;
    // anything else maps to "Unknown".
    const STAGES: [(&str, &str); 6] = [
        ("1_backlog", "Backlog"),
        ("2_current", "Current"),
        ("3_qa", "QA"),
        ("4_merge", "Merge"),
        ("5_done", "Done"),
        ("6_archived", "Archived"),
    ];
    STAGES
        .iter()
        .find(|&&(slug, _)| slug == stage)
        .map_or("Unknown", |&(_, display)| display)
}
/// Format a stage transition notification message.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_stage_notification(
    item_id: &str,
    story_name: Option<&str>,
    from_stage: &str,
    to_stage: &str,
) -> (String, String) {
    // Fall back to the raw item id when a number or name is unavailable.
    let number = extract_item_number(item_id).unwrap_or(item_id);
    let name = story_name.unwrap_or(item_id);
    // Celebrate arrival in Done with a party emoji; other stages are plain.
    let prefix = match to_stage {
        "Done" => "\u{1f389} ",
        _ => "",
    };
    let transition = format!("{from_stage} \u{2192} {to_stage}");
    let plain = format!("{prefix}#{number} {name} \u{2014} {transition}");
    let html = format!(
        "{prefix}<strong>#{number}</strong> <em>{name}</em> \u{2014} {transition}"
    );
    (plain, html)
}
/// Format an error notification message for a story merge failure.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_error_notification(
    item_id: &str,
    story_name: Option<&str>,
    reason: &str,
) -> (String, String) {
    // Fall back to the raw item id when a number or name is unavailable.
    let number = extract_item_number(item_id).unwrap_or(item_id);
    let name = story_name.unwrap_or(item_id);
    let tail = format!("\u{2014} {reason}");
    let plain = format!("\u{274c} #{number} {name} {tail}");
    let html = format!("\u{274c} <strong>#{number}</strong> <em>{name}</em> {tail}");
    (plain, html)
}
/// Format a blocked-story notification message.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_blocked_notification(
    item_id: &str,
    story_name: Option<&str>,
    reason: &str,
) -> (String, String) {
    // Fall back to the raw item id when a number or name is unavailable.
    let number = extract_item_number(item_id).unwrap_or(item_id);
    let name = story_name.unwrap_or(item_id);
    let tail = format!("\u{2014} BLOCKED: {reason}");
    let plain = format!("\u{1f6ab} #{number} {name} {tail}");
    let html = format!("\u{1f6ab} <strong>#{number}</strong> <em>{name}</em> {tail}");
    (plain, html)
}
/// Format a rate limit warning notification message.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_rate_limit_notification(
    item_id: &str,
    story_name: Option<&str>,
    agent_name: &str,
) -> (String, String) {
    // Fall back to the raw item id when a number or name is unavailable.
    let number = extract_item_number(item_id).unwrap_or(item_id);
    let name = story_name.unwrap_or(item_id);
    let tail = format!("\u{2014} {agent_name} hit an API rate limit");
    let plain = format!("\u{26a0}\u{fe0f} #{number} {name} {tail}");
    let html = format!("\u{26a0}\u{fe0f} <strong>#{number}</strong> <em>{name}</em> {tail}");
    (plain, html)
}
#[cfg(test)]
mod tests {
use super::*;
// ── stage_display_name ────────────────────────────────────────────────────
#[test]
fn stage_display_name_maps_all_known_stages() {
assert_eq!(stage_display_name("1_backlog"), "Backlog");
assert_eq!(stage_display_name("2_current"), "Current");
assert_eq!(stage_display_name("3_qa"), "QA");
assert_eq!(stage_display_name("4_merge"), "Merge");
assert_eq!(stage_display_name("5_done"), "Done");
assert_eq!(stage_display_name("6_archived"), "Archived");
assert_eq!(stage_display_name("unknown"), "Unknown");
}
#[test]
fn stage_display_name_unknown_slug_returns_unknown() {
assert_eq!(stage_display_name("99_future"), "Unknown");
assert_eq!(stage_display_name(""), "Unknown");
}
// ── format_stage_notification ─────────────────────────────────────────────
#[test]
fn format_notification_done_stage_includes_party_emoji() {
let (plain, html) =
format_stage_notification("353_story_done", Some("Done Story"), "Merge", "Done");
assert_eq!(
plain,
"\u{1f389} #353 Done Story \u{2014} Merge \u{2192} Done"
);
assert_eq!(
html,
"\u{1f389} <strong>#353</strong> <em>Done Story</em> \u{2014} Merge \u{2192} Done"
);
}
#[test]
fn format_notification_non_done_stage_has_no_emoji() {
let (plain, _html) =
format_stage_notification("42_story_thing", Some("Some Story"), "Backlog", "Current");
assert!(!plain.contains("\u{1f389}"));
}
#[test]
fn format_notification_with_story_name() {
let (plain, html) = format_stage_notification(
"261_story_bot_notifications",
Some("Bot notifications"),
"Upcoming",
"Current",
);
assert_eq!(
plain,
"#261 Bot notifications \u{2014} Upcoming \u{2192} Current"
);
assert_eq!(
html,
"<strong>#261</strong> <em>Bot notifications</em> \u{2014} Upcoming \u{2192} Current"
);
}
#[test]
fn format_notification_without_story_name_falls_back_to_item_id() {
let (plain, _html) = format_stage_notification("42_bug_fix_thing", None, "Current", "QA");
assert_eq!(plain, "#42 42_bug_fix_thing \u{2014} Current \u{2192} QA");
}
#[test]
fn format_notification_non_numeric_id_uses_full_id() {
let (plain, _html) =
format_stage_notification("abc_story_thing", Some("Some Story"), "QA", "Merge");
assert_eq!(
plain,
"#abc_story_thing Some Story \u{2014} QA \u{2192} Merge"
);
}
#[test]
fn format_stage_notification_long_name_is_preserved() {
let long_name = "A".repeat(300);
let (plain, _html) =
format_stage_notification("1_story_long", Some(&long_name), "Current", "QA");
assert!(plain.contains(&long_name));
}
#[test]
fn format_stage_notification_empty_story_name_falls_back_to_id() {
// Some("") is a valid Some but empty — treat as missing? Currently we use it as-is.
let (plain, _html) = format_stage_notification("42_story_empty", Some(""), "Current", "QA");
// The name slot is empty but the structure is still correct.
assert!(plain.contains("#42"));
assert!(plain.contains("Current \u{2192} QA"));
}
#[test]
fn format_stage_notification_unicode_name() {
let (plain, html) =
format_stage_notification("7_story_i18n", Some("Ünïcödé Ñämé 🎉"), "QA", "Merge");
assert!(plain.contains("Ünïcödé Ñämé 🎉"));
assert!(html.contains("Ünïcödé Ñämé 🎉"));
}
// ── format_error_notification ─────────────────────────────────────────────
#[test]
fn format_error_notification_with_story_name() {
let (plain, html) = format_error_notification(
"262_story_bot_errors",
Some("Bot error notifications"),
"merge conflict in src/main.rs",
);
assert_eq!(
plain,
"\u{274c} #262 Bot error notifications \u{2014} merge conflict in src/main.rs"
);
assert_eq!(
html,
"\u{274c} <strong>#262</strong> <em>Bot error notifications</em> \u{2014} merge conflict in src/main.rs"
);
}
#[test]
fn format_error_notification_without_story_name_falls_back_to_item_id() {
let (plain, _html) = format_error_notification("42_bug_fix_thing", None, "tests failed");
assert_eq!(plain, "\u{274c} #42 42_bug_fix_thing \u{2014} tests failed");
}
#[test]
fn format_error_notification_non_numeric_id_uses_full_id() {
let (plain, _html) =
format_error_notification("abc_story_thing", Some("Some Story"), "clippy errors");
assert_eq!(
plain,
"\u{274c} #abc_story_thing Some Story \u{2014} clippy errors"
);
}
#[test]
fn format_error_notification_long_reason_preserved() {
let long_reason = "x".repeat(500);
let (plain, _html) = format_error_notification("1_story_foo", None, &long_reason);
assert!(plain.contains(&long_reason));
}
#[test]
fn format_error_notification_unicode_reason() {
let (plain, _html) =
format_error_notification("5_story_foo", Some("Foo"), "错误:合并冲突");
assert!(plain.contains("错误:合并冲突"));
}
// ── format_blocked_notification ───────────────────────────────────────────
#[test]
fn format_blocked_notification_with_story_name() {
let (plain, html) = format_blocked_notification(
"425_story_blocking_reason",
Some("Blocking Reason Story"),
"Retry limit exceeded (3/3) at coder stage",
);
assert_eq!(
plain,
"\u{1f6ab} #425 Blocking Reason Story \u{2014} BLOCKED: Retry limit exceeded (3/3) at coder stage"
);
assert_eq!(
html,
"\u{1f6ab} <strong>#425</strong> <em>Blocking Reason Story</em> \u{2014} BLOCKED: Retry limit exceeded (3/3) at coder stage"
);
}
#[test]
fn format_blocked_notification_falls_back_to_item_id() {
let (plain, _html) = format_blocked_notification("42_story_thing", None, "empty diff");
assert_eq!(
plain,
"\u{1f6ab} #42 42_story_thing \u{2014} BLOCKED: empty diff"
);
}
#[test]
fn format_blocked_notification_unicode_reason() {
let (plain, _html) = format_blocked_notification("3_story_x", Some("X"), "理由:空の差分");
assert!(plain.contains("BLOCKED: 理由:空の差分"));
}
// ── format_rate_limit_notification ────────────────────────────────────────
#[test]
fn format_rate_limit_notification_includes_agent_and_story() {
let (plain, html) =
format_rate_limit_notification("365_story_my_feature", Some("My Feature"), "coder-2");
assert_eq!(
plain,
"\u{26a0}\u{fe0f} #365 My Feature \u{2014} coder-2 hit an API rate limit"
);
assert_eq!(
html,
"\u{26a0}\u{fe0f} <strong>#365</strong> <em>My Feature</em> \u{2014} coder-2 hit an API rate limit"
);
}
#[test]
fn format_rate_limit_notification_falls_back_to_item_id() {
let (plain, _html) = format_rate_limit_notification("42_story_thing", None, "coder-1");
assert_eq!(
plain,
"\u{26a0}\u{fe0f} #42 42_story_thing \u{2014} coder-1 hit an API rate limit"
);
}
#[test]
fn format_rate_limit_notification_unicode_agent_name() {
let (plain, _html) = format_rate_limit_notification("9_story_foo", Some("Foo"), "агент-1");
assert!(plain.contains("агент-1"));
assert!(plain.contains("hit an API rate limit"));
}
}
@@ -1,7 +1,8 @@
//! Stage transition notifications for Matrix rooms.
//! I/O side of the notifications service.
//!
//! Subscribes to [`WatcherEvent`] broadcasts and posts a notification to all
//! configured Matrix rooms whenever a work item moves between pipeline stages.
//! This is the **only** file inside `service/notifications/` that may perform
//! side effects: reading from the CRDT content store, loading configuration,
//! and spawning the background listener task.
use crate::chat::ChatTransport;
use crate::config::ProjectConfig;
@@ -11,29 +12,16 @@ use crate::slog;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Instant;
use tokio::sync::broadcast;
/// Human-readable display name for a pipeline stage directory.
pub fn stage_display_name(stage: &str) -> &'static str {
match stage {
"1_backlog" => "Backlog",
"2_current" => "Current",
"3_qa" => "QA",
"4_merge" => "Merge",
"5_done" => "Done",
"6_archived" => "Archived",
_ => "Unknown",
}
}
/// Extract the numeric story number from an item ID like `"261_story_slug"`.
pub fn extract_story_number(item_id: &str) -> Option<&str> {
item_id
.split('_')
.next()
.filter(|s| !s.is_empty() && s.chars().all(|c| c.is_ascii_digit()))
}
use super::events::classify;
use super::filter::{STAGE_TRANSITION_DEBOUNCE, should_send_rate_limit};
use super::format::{
format_blocked_notification, format_error_notification, format_rate_limit_notification,
format_stage_notification, stage_display_name,
};
use super::route::rooms_for_notification;
/// Read the story name from the CRDT content store's YAML front matter.
///
@@ -44,93 +32,13 @@ pub fn read_story_name(_project_root: &Path, _stage: &str, item_id: &str) -> Opt
meta.name
}
/// Format a stage transition notification message.
///
/// Returns `(plain_text, html)` suitable for `RoomMessageEventContent::text_html`.
pub fn format_stage_notification(
item_id: &str,
story_name: Option<&str>,
from_stage: &str,
to_stage: &str,
) -> (String, String) {
let number = extract_story_number(item_id).unwrap_or(item_id);
let name = story_name.unwrap_or(item_id);
let prefix = if to_stage == "Done" { "\u{1f389} " } else { "" };
let plain = format!("{prefix}#{number} {name} \u{2014} {from_stage} \u{2192} {to_stage}");
let html = format!(
"{prefix}<strong>#{number}</strong> <em>{name}</em> \u{2014} {from_stage} \u{2192} {to_stage}"
);
(plain, html)
}
/// Format an error notification message for a story failure.
///
/// Returns `(plain_text, html)` suitable for `RoomMessageEventContent::text_html`.
pub fn format_error_notification(
item_id: &str,
story_name: Option<&str>,
reason: &str,
) -> (String, String) {
let number = extract_story_number(item_id).unwrap_or(item_id);
let name = story_name.unwrap_or(item_id);
let plain = format!("\u{274c} #{number} {name} \u{2014} {reason}");
let html = format!("\u{274c} <strong>#{number}</strong> <em>{name}</em> \u{2014} {reason}");
(plain, html)
}
/// Look up a story name from the CRDT content store.
/// Look up a story name from the CRDT content store regardless of stage.
///
/// Used for events (like rate-limit warnings) that arrive without a known stage.
fn find_story_name_any_stage(project_root: &Path, item_id: &str) -> Option<String> {
read_story_name(project_root, "", item_id)
}
/// Format a blocked-story notification message.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_blocked_notification(
item_id: &str,
story_name: Option<&str>,
reason: &str,
) -> (String, String) {
let number = extract_story_number(item_id).unwrap_or(item_id);
let name = story_name.unwrap_or(item_id);
let plain = format!("\u{1f6ab} #{number} {name} \u{2014} BLOCKED: {reason}");
let html =
format!("\u{1f6ab} <strong>#{number}</strong> <em>{name}</em> \u{2014} BLOCKED: {reason}");
(plain, html)
}
/// Minimum time between rate-limit notifications for the same agent.
const RATE_LIMIT_DEBOUNCE: Duration = Duration::from_secs(60);
/// Window during which rapid stage transitions for the same item are coalesced
/// into a single notification (only the final stage is announced).
const STAGE_TRANSITION_DEBOUNCE: Duration = Duration::from_millis(200);
/// Format a rate limit warning notification message.
///
/// Returns `(plain_text, html)` suitable for `ChatTransport::send_message`.
pub fn format_rate_limit_notification(
item_id: &str,
story_name: Option<&str>,
agent_name: &str,
) -> (String, String) {
let number = extract_story_number(item_id).unwrap_or(item_id);
let name = story_name.unwrap_or(item_id);
let plain =
format!("\u{26a0}\u{fe0f} #{number} {name} \u{2014} {agent_name} hit an API rate limit");
let html = format!(
"\u{26a0}\u{fe0f} <strong>#{number}</strong> <em>{name}</em> \u{2014} \
{agent_name} hit an API rate limit"
);
(plain, html)
}
/// Spawn a background task that listens for watcher events and posts
/// stage-transition notifications to all configured rooms via the
/// [`ChatTransport`] abstraction.
@@ -184,7 +92,7 @@ pub fn spawn_notification_listener(
to_display,
);
slog!("[bot] Sending stage notification: {plain}");
for room_id in &get_room_ids() {
for room_id in &rooms_for_notification(&get_room_ids) {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send notification to {room_id}: {e}");
}
@@ -194,139 +102,11 @@ pub fn spawn_notification_listener(
continue;
}
match recv_result.unwrap() {
Ok(WatcherEvent::WorkItem {
ref stage,
ref item_id,
ref from_stage,
..
}) => {
// Only notify for transitions with a known source stage.
// Synthetic events (reassign, creation) have from_stage=None
// and must be skipped — the old inferred_from_stage fallback
// produced wrong notifications for stories that skipped stages
// (e.g. "QA → Merge" when QA was never entered).
let from_display = from_stage.as_deref().map(stage_display_name);
let Some(from_display) = from_display else {
continue; // creation or unknown transition — skip
};
// Look up the story name in the expected stage directory; fall
// back to a full search so stale events still show the name (AC1).
let story_name = read_story_name(&project_root, stage, item_id)
.or_else(|| find_story_name_any_stage(&project_root, item_id));
// Buffer the transition. If this item_id is already pending (rapid
// succession), update to_stage_key to the latest destination while
// preserving the original from_display (AC2).
pending_transitions
.entry(item_id.clone())
.and_modify(|e| {
e.1 = stage.clone();
if story_name.is_some() {
e.2 = story_name.clone();
}
})
.or_insert_with(|| (from_display.to_string(), stage.clone(), story_name));
// Start or extend the debounce window.
flush_deadline = Some(tokio::time::Instant::now() + STAGE_TRANSITION_DEBOUNCE);
}
Ok(WatcherEvent::MergeFailure {
ref story_id,
ref reason,
}) => {
let story_name = read_story_name(&project_root, "4_merge", story_id);
let (plain, html) =
format_error_notification(story_id, story_name.as_deref(), reason);
slog!("[bot] Sending error notification: {plain}");
for room_id in &get_room_ids() {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send error notification to {room_id}: {e}");
}
}
}
Ok(WatcherEvent::RateLimitWarning {
ref story_id,
ref agent_name,
}) => {
if !config.rate_limit_notifications {
slog!(
"[bot] RateLimitWarning suppressed by config for \
{story_id}:{agent_name}"
);
continue;
}
// Debounce: skip if we sent a notification for this agent
// within the last RATE_LIMIT_DEBOUNCE seconds.
let debounce_key = format!("{story_id}:{agent_name}");
let now = Instant::now();
if let Some(&last) = rate_limit_last_notified.get(&debounce_key)
&& now.duration_since(last) < RATE_LIMIT_DEBOUNCE
{
slog!(
"[bot] Rate-limit notification debounced for \
{story_id}:{agent_name}"
);
continue;
}
rate_limit_last_notified.insert(debounce_key, now);
let story_name = find_story_name_any_stage(&project_root, story_id);
let (plain, html) =
format_rate_limit_notification(story_id, story_name.as_deref(), agent_name);
slog!("[bot] Sending rate-limit notification: {plain}");
for room_id in &get_room_ids() {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!(
"[bot] Failed to send rate-limit notification \
to {room_id}: {e}"
);
}
}
}
Ok(WatcherEvent::StoryBlocked {
ref story_id,
ref reason,
}) => {
let story_name = find_story_name_any_stage(&project_root, story_id);
let (plain, html) =
format_blocked_notification(story_id, story_name.as_deref(), reason);
slog!("[bot] Sending blocked notification: {plain}");
for room_id in &get_room_ids() {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send blocked notification to {room_id}: {e}");
}
}
}
Ok(WatcherEvent::RateLimitHardBlock {
ref story_id,
ref agent_name,
reset_at,
}) => {
// Log server-side for debugging; do NOT post to Matrix.
// Hard-block auto-resume is normal operation — the status
// command already surfaces rate-limit state via emoji.
slog!(
"[bot] Rate-limit hard block for {story_id}/{agent_name}, \
auto-resume at {reset_at}"
);
}
Ok(WatcherEvent::ConfigChanged) => {
// Hot-reload: pick up any changes to rate_limit_notifications.
if let Ok(new_cfg) = ProjectConfig::load(&project_root) {
config = new_cfg;
}
}
Ok(_) => {} // Ignore other events
let event = match recv_result.unwrap() {
Ok(ev) => ev,
Err(broadcast::error::RecvError::Lagged(n)) => {
slog!("[bot] Notification listener lagged, skipped {n} events");
continue;
}
Err(broadcast::error::RecvError::Closed) => {
slog!("[bot] Watcher channel closed, stopping notification listener");
@@ -342,7 +122,7 @@ pub fn spawn_notification_listener(
to_display,
);
slog!("[bot] Sending stage notification: {plain}");
for room_id in &get_room_ids() {
for room_id in &rooms_for_notification(&get_room_ids) {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send notification to {room_id}: {e}");
}
@@ -350,6 +130,143 @@ pub fn spawn_notification_listener(
}
break;
}
};
use super::events::EventAction;
match classify(&event) {
EventAction::StageTransition => {
// WorkItem with a known from_stage — extract the fields.
let WatcherEvent::WorkItem {
ref stage,
ref item_id,
ref from_stage,
..
} = event
else {
continue;
};
let from_display = stage_display_name(from_stage.as_deref().unwrap_or(""));
// Look up the story name in the expected stage directory; fall
// back to a full search so stale events still show the name.
let story_name = read_story_name(&project_root, stage, item_id)
.or_else(|| find_story_name_any_stage(&project_root, item_id));
// Buffer the transition. If this item_id is already pending (rapid
// succession), update to_stage_key to the latest destination while
// preserving the original from_display.
pending_transitions
.entry(item_id.clone())
.and_modify(|e| {
e.1 = stage.clone();
if story_name.is_some() {
e.2 = story_name.clone();
}
})
.or_insert_with(|| (from_display.to_string(), stage.clone(), story_name));
// Start or extend the debounce window.
flush_deadline = Some(tokio::time::Instant::now() + STAGE_TRANSITION_DEBOUNCE);
}
EventAction::MergeFailure => {
let WatcherEvent::MergeFailure {
ref story_id,
ref reason,
} = event
else {
continue;
};
let story_name = read_story_name(&project_root, "4_merge", story_id);
let (plain, html) =
format_error_notification(story_id, story_name.as_deref(), reason);
slog!("[bot] Sending error notification: {plain}");
for room_id in &rooms_for_notification(&get_room_ids) {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send error notification to {room_id}: {e}");
}
}
}
EventAction::RateLimitWarning => {
let WatcherEvent::RateLimitWarning {
ref story_id,
ref agent_name,
} = event
else {
continue;
};
if !config.rate_limit_notifications {
slog!(
"[bot] RateLimitWarning suppressed by config for \
{story_id}:{agent_name}"
);
continue;
}
let debounce_key = format!("{story_id}:{agent_name}");
let now = Instant::now();
if !should_send_rate_limit(
rate_limit_last_notified.get(&debounce_key).copied(),
now,
) {
slog!(
"[bot] Rate-limit notification debounced for \
{story_id}:{agent_name}"
);
continue;
}
rate_limit_last_notified.insert(debounce_key, now);
let story_name = find_story_name_any_stage(&project_root, story_id);
let (plain, html) =
format_rate_limit_notification(story_id, story_name.as_deref(), agent_name);
slog!("[bot] Sending rate-limit notification: {plain}");
for room_id in &rooms_for_notification(&get_room_ids) {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!(
"[bot] Failed to send rate-limit notification \
to {room_id}: {e}"
);
}
}
}
EventAction::StoryBlocked => {
let WatcherEvent::StoryBlocked {
ref story_id,
ref reason,
} = event
else {
continue;
};
let story_name = find_story_name_any_stage(&project_root, story_id);
let (plain, html) =
format_blocked_notification(story_id, story_name.as_deref(), reason);
slog!("[bot] Sending blocked notification: {plain}");
for room_id in &rooms_for_notification(&get_room_ids) {
if let Err(e) = transport.send_message(room_id, &plain, &html).await {
slog!("[bot] Failed to send blocked notification to {room_id}: {e}");
}
}
}
EventAction::LogOnly => {
// Hard-block: log server-side for debugging; do NOT post to chat.
// Hard-block auto-resume is normal operation — the status command
// already surfaces rate-limit state via emoji.
if let WatcherEvent::RateLimitHardBlock {
ref story_id,
ref agent_name,
reset_at,
} = event
{
slog!(
"[bot] Rate-limit hard block for {story_id}/{agent_name}, \
auto-resume at {reset_at}"
);
}
}
EventAction::ReloadConfig => {
if let Ok(new_cfg) = ProjectConfig::load(&project_root) {
config = new_cfg;
}
}
EventAction::Skip => {}
}
}
});
@@ -630,37 +547,6 @@ mod tests {
assert_eq!(calls.len(), 0, "No rooms means no notifications");
}
// ── stage_display_name ──────────────────────────────────────────────────
#[test]
fn stage_display_name_maps_all_known_stages() {
assert_eq!(stage_display_name("1_backlog"), "Backlog");
assert_eq!(stage_display_name("2_current"), "Current");
assert_eq!(stage_display_name("3_qa"), "QA");
assert_eq!(stage_display_name("4_merge"), "Merge");
assert_eq!(stage_display_name("5_done"), "Done");
assert_eq!(stage_display_name("6_archived"), "Archived");
assert_eq!(stage_display_name("unknown"), "Unknown");
}
// ── extract_story_number ────────────────────────────────────────────────
#[test]
fn extract_story_number_parses_numeric_prefix() {
assert_eq!(
extract_story_number("261_story_bot_notifications"),
Some("261")
);
assert_eq!(extract_story_number("42_bug_fix_thing"), Some("42"));
assert_eq!(extract_story_number("1_spike_research"), Some("1"));
}
#[test]
fn extract_story_number_returns_none_for_non_numeric() {
assert_eq!(extract_story_number("abc_story_thing"), None);
assert_eq!(extract_story_number(""), None);
}
// ── read_story_name ─────────────────────────────────────────────────────
#[test]
@@ -699,69 +585,6 @@ mod tests {
assert_eq!(name, None);
}
// ── format_error_notification ────────────────────────────────────────────
#[test]
fn format_error_notification_with_story_name() {
let (plain, html) = format_error_notification(
"262_story_bot_errors",
Some("Bot error notifications"),
"merge conflict in src/main.rs",
);
assert_eq!(
plain,
"\u{274c} #262 Bot error notifications \u{2014} merge conflict in src/main.rs"
);
assert_eq!(
html,
"\u{274c} <strong>#262</strong> <em>Bot error notifications</em> \u{2014} merge conflict in src/main.rs"
);
}
#[test]
fn format_error_notification_without_story_name_falls_back_to_item_id() {
let (plain, _html) = format_error_notification("42_bug_fix_thing", None, "tests failed");
assert_eq!(plain, "\u{274c} #42 42_bug_fix_thing \u{2014} tests failed");
}
#[test]
fn format_error_notification_non_numeric_id_uses_full_id() {
let (plain, _html) =
format_error_notification("abc_story_thing", Some("Some Story"), "clippy errors");
assert_eq!(
plain,
"\u{274c} #abc_story_thing Some Story \u{2014} clippy errors"
);
}
// ── format_blocked_notification ─────────────────────────────────────────
#[test]
fn format_blocked_notification_with_story_name() {
let (plain, html) = format_blocked_notification(
"425_story_blocking_reason",
Some("Blocking Reason Story"),
"Retry limit exceeded (3/3) at coder stage",
);
assert_eq!(
plain,
"\u{1f6ab} #425 Blocking Reason Story \u{2014} BLOCKED: Retry limit exceeded (3/3) at coder stage"
);
assert_eq!(
html,
"\u{1f6ab} <strong>#425</strong> <em>Blocking Reason Story</em> \u{2014} BLOCKED: Retry limit exceeded (3/3) at coder stage"
);
}
#[test]
fn format_blocked_notification_falls_back_to_item_id() {
let (plain, _html) = format_blocked_notification("42_story_thing", None, "empty diff");
assert_eq!(
plain,
"\u{1f6ab} #42 42_story_thing \u{2014} BLOCKED: empty diff"
);
}
// ── spawn_notification_listener: StoryBlocked ───────────────────────────
/// AC1: when a StoryBlocked event arrives, send_message is called with a
@@ -842,88 +665,6 @@ mod tests {
assert_eq!(calls.len(), 0, "No rooms means no notifications");
}
// ── format_rate_limit_notification ─────────────────────────────────────
#[test]
fn format_rate_limit_notification_includes_agent_and_story() {
let (plain, html) =
format_rate_limit_notification("365_story_my_feature", Some("My Feature"), "coder-2");
assert_eq!(
plain,
"\u{26a0}\u{fe0f} #365 My Feature \u{2014} coder-2 hit an API rate limit"
);
assert_eq!(
html,
"\u{26a0}\u{fe0f} <strong>#365</strong> <em>My Feature</em> \u{2014} coder-2 hit an API rate limit"
);
}
#[test]
fn format_rate_limit_notification_falls_back_to_item_id() {
let (plain, _html) = format_rate_limit_notification("42_story_thing", None, "coder-1");
assert_eq!(
plain,
"\u{26a0}\u{fe0f} #42 42_story_thing \u{2014} coder-1 hit an API rate limit"
);
}
// ── format_stage_notification ───────────────────────────────────────────
#[test]
fn format_notification_done_stage_includes_party_emoji() {
let (plain, html) =
format_stage_notification("353_story_done", Some("Done Story"), "Merge", "Done");
assert_eq!(
plain,
"\u{1f389} #353 Done Story \u{2014} Merge \u{2192} Done"
);
assert_eq!(
html,
"\u{1f389} <strong>#353</strong> <em>Done Story</em> \u{2014} Merge \u{2192} Done"
);
}
#[test]
fn format_notification_non_done_stage_has_no_emoji() {
let (plain, _html) =
format_stage_notification("42_story_thing", Some("Some Story"), "Backlog", "Current");
assert!(!plain.contains("\u{1f389}"));
}
#[test]
fn format_notification_with_story_name() {
let (plain, html) = format_stage_notification(
"261_story_bot_notifications",
Some("Bot notifications"),
"Upcoming",
"Current",
);
assert_eq!(
plain,
"#261 Bot notifications \u{2014} Upcoming \u{2192} Current"
);
assert_eq!(
html,
"<strong>#261</strong> <em>Bot notifications</em> \u{2014} Upcoming \u{2192} Current"
);
}
#[test]
fn format_notification_without_story_name_falls_back_to_item_id() {
let (plain, _html) = format_stage_notification("42_bug_fix_thing", None, "Current", "QA");
assert_eq!(plain, "#42 42_bug_fix_thing \u{2014} Current \u{2192} QA");
}
#[test]
fn format_notification_non_numeric_id_uses_full_id() {
let (plain, _html) =
format_stage_notification("abc_story_thing", Some("Some Story"), "QA", "Merge");
assert_eq!(
plain,
"#abc_story_thing Some Story \u{2014} QA \u{2192} Merge"
);
}
// ── rate_limit_notifications config flag ─────────────────────────────────
/// AC1+AC2: when rate_limit_notifications = false in project.toml,
+89
View File
@@ -0,0 +1,89 @@
//! Notifications service — pipeline-event fan-out to chat transports.
//!
//! Subscribes to [`WatcherEvent`] broadcasts and posts human-readable messages
//! to all configured chat rooms whenever a work item moves through the pipeline.
//!
//! Follows service-module conventions:
//! - `mod.rs` (this file) — public API, typed [`Error`] type, orchestration
//! - `io.rs` — the ONLY place that performs side effects (DB reads, config
//! loads, `tokio::spawn`)
//! - `format.rs` — pure: message formatting functions
//! - `filter.rs` — pure: debounce constants and suppression predicates
//! - `events.rs` — pure: WatcherEvent classification / event mapping
//! - `route.rs` — pure: room-routing decisions
pub(super) mod events;
pub(super) mod filter;
pub(super) mod format;
pub(super) mod io;
pub(super) mod route;
pub use format::{
format_blocked_notification, format_error_notification, format_stage_notification,
stage_display_name,
};
pub use io::spawn_notification_listener;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::notifications` operations.
///
/// HTTP handlers and bot commands may map these to user-facing messages.
#[derive(Debug)]
#[allow(dead_code)]
pub enum Error {
/// The incoming event type is not recognised or not supported.
UnknownEvent(String),
/// A message could not be formatted for delivery (e.g. malformed input).
RenderFailure(String),
/// The underlying chat transport rejected the send operation.
TransportSendFailure(String),
/// Required configuration (room IDs, credentials) is absent.
ConfigMissing(String),
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::UnknownEvent(msg) => write!(f, "Unknown event: {msg}"),
Self::RenderFailure(msg) => write!(f, "Render failure: {msg}"),
Self::TransportSendFailure(msg) => write!(f, "Transport send failure: {msg}"),
Self::ConfigMissing(msg) => write!(f, "Config missing: {msg}"),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
// ── Error Display ─────────────────────────────────────────────────────────
#[test]
fn error_unknown_event_display() {
let e = Error::UnknownEvent("bad_event_type".to_string());
assert!(e.to_string().contains("Unknown event"));
assert!(e.to_string().contains("bad_event_type"));
}
#[test]
fn error_render_failure_display() {
let e = Error::RenderFailure("malformed input".to_string());
assert!(e.to_string().contains("Render failure"));
assert!(e.to_string().contains("malformed input"));
}
#[test]
fn error_transport_send_failure_display() {
let e = Error::TransportSendFailure("connection refused".to_string());
assert!(e.to_string().contains("Transport send failure"));
assert!(e.to_string().contains("connection refused"));
}
#[test]
fn error_config_missing_display() {
let e = Error::ConfigMissing("room_id not set".to_string());
assert!(e.to_string().contains("Config missing"));
assert!(e.to_string().contains("room_id not set"));
}
}
+42
View File
@@ -0,0 +1,42 @@
//! Room-routing decisions for notifications.
//!
//! Pure functions that determine which destination room IDs should receive
//! a given notification. Currently all notification kinds are broadcast to
//! all registered rooms; this module is the single location to change that
//! policy if per-event routing is needed in the future.
/// Return the rooms that should receive a notification.
///
/// `get_room_ids` is called once per notification to obtain the current list
/// of destination room IDs. Passing a closure (rather than a static slice)
/// allows callers to use a runtime-mutable set, e.g. WhatsApp ambient senders.
///
/// All currently supported event kinds are broadcast to every room returned
/// by the closure.
pub fn rooms_for_notification(get_room_ids: &impl Fn() -> Vec<String>) -> Vec<String> {
get_room_ids()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn returns_all_rooms_from_closure() {
let rooms = rooms_for_notification(&|| vec!["room1".to_string(), "room2".to_string()]);
assert_eq!(rooms, vec!["room1".to_string(), "room2".to_string()]);
}
#[test]
fn returns_empty_when_no_rooms_registered() {
let rooms = rooms_for_notification(&Vec::new);
assert!(rooms.is_empty());
}
#[test]
fn returns_single_room() {
let rooms = rooms_for_notification(&|| vec!["!abc:example.org".to_string()]);
assert_eq!(rooms.len(), 1);
assert_eq!(rooms[0], "!abc:example.org");
}
}
+124
View File
@@ -0,0 +1,124 @@
//! OAuth flow state types and pure decision logic.
//!
//! All functions here are pure — no I/O, no network, no clocks.
//! Side-effectful operations live exclusively in `io.rs`.
use crate::llm::oauth::CredentialsFile;
/// A pending PKCE flow waiting for an OAuth callback.
pub struct PendingFlow {
/// The PKCE code verifier generated at flow initiation.
pub code_verifier: String,
/// The redirect URI sent to the authorization endpoint.
pub redirect_uri: String,
}
/// Current OAuth credential status, computed without I/O from already-loaded credentials.
#[derive(Debug, Clone)]
pub struct FlowStatus {
/// Whether valid credentials were found on disk.
pub authenticated: bool,
/// Whether the access token is past its expiry timestamp.
pub expired: bool,
/// The Unix-epoch millisecond expiry timestamp (0 when unauthenticated).
pub expires_at: u64,
/// Whether a non-empty refresh token is present.
pub has_refresh_token: bool,
}
/// Determine whether `expires_at` (Unix epoch ms) has passed, given `now_ms`.
///
/// Returns `true` when `now_ms > expires_at`.
pub fn is_token_expired(expires_at: u64, now_ms: u64) -> bool {
now_ms > expires_at
}
/// Build a `FlowStatus` from loaded credentials and the current time.
pub fn build_flow_status(creds: &CredentialsFile, now_ms: u64) -> FlowStatus {
let expires_at = creds.claude_ai_oauth.expires_at;
FlowStatus {
authenticated: true,
expired: is_token_expired(expires_at, now_ms),
expires_at,
has_refresh_token: !creds.claude_ai_oauth.refresh_token.is_empty(),
}
}
/// Return the unauthenticated `FlowStatus` (no credentials on disk).
pub fn unauthenticated_status() -> FlowStatus {
FlowStatus {
authenticated: false,
expired: false,
expires_at: 0,
has_refresh_token: false,
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // ── is_token_expired: boundary behaviour ─────────────────────────────
    #[test]
    fn is_token_expired_when_past_expiry() {
        assert!(is_token_expired(1000, 2000));
    }
    #[test]
    fn is_token_not_expired_when_before_expiry() {
        assert!(!is_token_expired(2000, 1000));
    }
    #[test]
    fn is_token_not_expired_at_exact_boundary() {
        // expires_at == now_ms → not expired
        assert!(!is_token_expired(1000, 1000));
    }
    // ── status constructors ──────────────────────────────────────────────
    #[test]
    fn unauthenticated_status_is_not_authenticated() {
        let s = unauthenticated_status();
        assert!(!s.authenticated);
        assert!(!s.expired);
        assert_eq!(s.expires_at, 0);
        assert!(!s.has_refresh_token);
    }
    #[test]
    fn build_flow_status_authenticated_not_expired() {
        use crate::llm::oauth::{CredentialsFile, OAuthCredentials};
        let creds = CredentialsFile {
            claude_ai_oauth: OAuthCredentials {
                access_token: "tok".to_string(),
                refresh_token: "ref".to_string(),
                expires_at: 5000,
                scopes: vec![],
                subscription_type: None,
                rate_limit_tier: None,
            },
        };
        let status = build_flow_status(&creds, 1000);
        assert!(status.authenticated);
        assert!(!status.expired);
        assert_eq!(status.expires_at, 5000);
        assert!(status.has_refresh_token);
    }
    #[test]
    fn build_flow_status_authenticated_expired() {
        use crate::llm::oauth::{CredentialsFile, OAuthCredentials};
        let creds = CredentialsFile {
            claude_ai_oauth: OAuthCredentials {
                access_token: "tok".to_string(),
                // Empty refresh token → has_refresh_token must come back false.
                refresh_token: String::new(),
                expires_at: 1000,
                scopes: vec![],
                subscription_type: None,
                rate_limit_tier: None,
            },
        };
        let status = build_flow_status(&creds, 9999);
        assert!(status.authenticated);
        assert!(status.expired);
        assert!(!status.has_refresh_token);
    }
}
+112
View File
@@ -0,0 +1,112 @@
//! OAuth I/O — the ONLY place in `service/oauth/` that may perform side effects.
//!
//! Side effects here include: reading the system clock, making HTTP requests to
//! the Anthropic token endpoint, and reading/writing `~/.claude/.credentials.json`.
//! All business logic and branching belong in `mod.rs`, `pkce.rs`, or `flow.rs`.
use super::Error;
use super::flow::FlowStatus;
use super::pkce::SCOPES;
use crate::llm::oauth::{self, CredentialsFile};
use crate::slog;
/// Raw token exchange result returned by the Anthropic OAuth endpoint.
#[derive(serde::Deserialize)]
pub(super) struct TokenExchangeResult {
    /// Bearer access token for subsequent API calls.
    pub access_token: String,
    /// May be absent when the provider issues no refresh token.
    pub refresh_token: Option<String>,
    /// Access-token lifetime in seconds (converted to epoch-ms by `save_credentials`).
    pub expires_in: u64,
    /// Deserialized for completeness; not read by this module yet.
    #[allow(dead_code)]
    pub token_type: Option<String>,
    /// Deserialized for completeness; not read by this module yet.
    #[allow(dead_code)]
    pub scope: Option<String>,
}
/// Return the current Unix-epoch time in milliseconds.
///
/// Falls back to `0` if the system clock reports a time before the epoch.
pub(super) fn current_time_ms() -> u64 {
    std::time::UNIX_EPOCH
        .elapsed()
        .map(|d| d.as_millis() as u64)
        .unwrap_or(0)
}
/// Exchange an authorization code for tokens via the Anthropic token endpoint.
///
/// Returns the raw token response on success. Failures are mapped to typed
/// [`Error`] variants:
/// - [`Error::Network`] — the request could not be sent at all.
/// - [`Error::InvalidGrant`] — the endpoint answered with a non-2xx status.
/// - [`Error::Parse`] — a 2xx body failed to deserialize into [`TokenExchangeResult`].
pub(super) async fn exchange_code_for_tokens(
    code: &str,
    redirect_uri: &str,
    code_verifier: &str,
    csrf_state: &str,
) -> Result<TokenExchangeResult, Error> {
    use super::pkce::CLIENT_ID;
    const TOKEN_ENDPOINT: &str = "https://platform.claude.com/v1/oauth/token";
    slog!("[oauth] Exchanging authorization code for tokens");
    let client = reqwest::Client::new();
    let resp = client
        .post(TOKEN_ENDPOINT)
        .header("Content-Type", "application/json")
        .json(&serde_json::json!({
            "grant_type": "authorization_code",
            "code": code,
            "client_id": CLIENT_ID,
            "redirect_uri": redirect_uri,
            "code_verifier": code_verifier,
            "state": csrf_state,
        }))
        .send()
        .await
        .map_err(|e| Error::Network(format!("Failed to contact Anthropic: {e}")))?;
    let status = resp.status();
    // Read the body before branching so error responses can be logged too.
    let body = resp.text().await.unwrap_or_default();
    // NOTE(review): this logs the full response body, which on success contains
    // the access/refresh tokens — confirm the slog sink is local-only.
    slog!(
        "[oauth] Token exchange response (HTTP {}): {}",
        status,
        body
    );
    if !status.is_success() {
        return Err(Error::InvalidGrant(format!(
            "Anthropic returned HTTP {status}. Please try again."
        )));
    }
    serde_json::from_str(&body)
        .map_err(|e| Error::Parse(format!("Unexpected response from Anthropic: {e}")))
}
/// Persist a token exchange result to `~/.claude/.credentials.json`.
///
/// Builds a [`CredentialsFile`] from the token response and `now_ms`, then
/// delegates to [`oauth::write_credentials`].
///
/// # Errors
/// Returns [`Error::TokenStorage`] when the credentials file cannot be written.
pub(super) fn save_credentials(token: &TokenExchangeResult, now_ms: u64) -> Result<(), Error> {
    // `expires_in` is in seconds; convert to an absolute epoch-ms deadline.
    // Saturate so a pathological/malicious `expires_in` cannot overflow u64
    // (which would panic in debug builds).
    let expires_at = now_ms.saturating_add(token.expires_in.saturating_mul(1000));
    let creds = CredentialsFile {
        claude_ai_oauth: oauth::OAuthCredentials {
            access_token: token.access_token.clone(),
            // Missing refresh token is stored as the empty string.
            refresh_token: token.refresh_token.clone().unwrap_or_default(),
            expires_at,
            scopes: SCOPES.split(' ').map(|s| s.to_string()).collect(),
            subscription_type: None,
            rate_limit_tier: None,
        },
    };
    oauth::write_credentials(&creds).map_err(Error::TokenStorage)
}
/// Load OAuth credentials from disk and compute a [`FlowStatus`].
///
/// Returns the unauthenticated status when the credentials file is missing
/// or unreadable — any `read_credentials` error is treated as "not logged in".
pub(super) fn load_status() -> FlowStatus {
    match oauth::read_credentials() {
        Ok(creds) => {
            let now_ms = current_time_ms();
            super::flow::build_flow_status(&creds, now_ms)
        }
        Err(_) => super::flow::unauthenticated_status(),
    }
}
+269
View File
@@ -0,0 +1,269 @@
//! OAuth service — domain logic for the Anthropic OAuth 2.0 PKCE flow.
//!
//! Extracts business logic from `http/oauth.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed `Error`, `OAuthState`, orchestration
//! - `io.rs` — the ONLY place that performs side effects (HTTP, filesystem, clock)
//! - `pkce.rs` — pure PKCE helpers: generation, challenge, encoding
//! - `flow.rs` — pure flow types and token-expiry decision logic
pub mod flow;
pub(super) mod io;
pub mod pkce;
pub use flow::FlowStatus;
use flow::PendingFlow;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::oauth` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::InvalidGrant`] → 400 Bad Request
/// - [`Error::Network`] → 500 Internal Server Error
/// - [`Error::TokenExpired`] → 401 Unauthorized
/// - [`Error::TokenStorage`] → 500 Internal Server Error
/// - [`Error::InvalidState`] → 400 Bad Request
/// - [`Error::MissingCode`] → 400 Bad Request
/// - [`Error::MissingState`] → 400 Bad Request
/// - [`Error::AuthorizationDenied`] → 400 Bad Request
/// - [`Error::Parse`] → 500 Internal Server Error
#[derive(Debug)]
// NOTE(review): `allow(dead_code)` hides variants that are never constructed;
// remove it once every variant is produced somewhere.
#[allow(dead_code)]
pub enum Error {
    /// The OAuth provider rejected the authorization code (invalid-grant).
    InvalidGrant(String),
    /// A network error occurred communicating with the OAuth provider.
    Network(String),
    /// The access token has expired and cannot be refreshed.
    TokenExpired(String),
    /// Failed to read or write the credential storage file.
    TokenStorage(String),
    /// The CSRF state parameter does not match any pending flow.
    InvalidState(String),
    /// No authorization code was provided in the callback.
    MissingCode,
    /// No state parameter was provided in the callback.
    MissingState,
    /// The OAuth provider returned an explicit error (e.g. user denied access).
    AuthorizationDenied(String),
    /// The token response could not be parsed.
    Parse(String),
}
impl std::fmt::Display for Error {
    /// Render the user-facing message that HTTP handlers surface to clients.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let text = match self {
            Self::InvalidGrant(msg) => format!("Invalid grant: {msg}"),
            Self::Network(msg) => format!("Network error: {msg}"),
            Self::TokenExpired(msg) => format!("Token expired: {msg}"),
            Self::TokenStorage(msg) => format!("Token storage error: {msg}"),
            Self::InvalidState(msg) => format!("Invalid state: {msg}"),
            Self::MissingCode => "Missing authorization code".to_string(),
            Self::MissingState => "Missing state parameter".to_string(),
            Self::AuthorizationDenied(msg) => format!("Authorization denied: {msg}"),
            Self::Parse(msg) => format!("Parse error: {msg}"),
        };
        f.write_str(&text)
    }
}
// ── OAuthState ────────────────────────────────────────────────────────────────
/// In-memory store for pending PKCE flows, keyed by CSRF state parameter.
///
/// Injected into Poem route handlers via `Data<Arc<OAuthState>>`.
/// Cloning is cheap: clones share the same pending map through the `Arc`.
#[derive(Clone)]
pub struct OAuthState {
    /// Maps CSRF state → pending PKCE flow data.
    pending: Arc<Mutex<HashMap<String, PendingFlow>>>,
    /// Server port, used to build the `redirect_uri`.
    port: u16,
}
impl OAuthState {
    /// Create a new `OAuthState` for the server listening on `port`.
    pub fn new(port: u16) -> Self {
        let pending = Arc::new(Mutex::new(HashMap::new()));
        Self { pending, port }
    }
    /// Return the OAuth callback URL for this server instance.
    pub(crate) fn callback_url(&self) -> String {
        format!("http://localhost:{}/callback", self.port)
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Initiate a new OAuth PKCE flow.
///
/// Generates a code verifier, CSRF state token, and PKCE challenge; stores
/// the pending flow; and returns `(csrf_state, authorize_url)` for the caller
/// to redirect the browser to.
pub fn initiate_flow(state: &OAuthState) -> (String, String) {
    use pkce::{build_authorize_url, compute_code_challenge, random_string};
    // 128 chars is the RFC 7636 maximum verifier length; 32 chars for CSRF.
    let code_verifier = random_string(128);
    let code_challenge = compute_code_challenge(&code_verifier);
    let csrf_state = random_string(32);
    let redirect_uri = state.callback_url();
    crate::slog!("[oauth] Starting OAuth flow, state={}", csrf_state);
    // NOTE(review): lock().unwrap() panics if the mutex was poisoned by an
    // earlier panic — confirm that is acceptable for this handler path.
    state.pending.lock().unwrap().insert(
        csrf_state.clone(),
        PendingFlow {
            code_verifier,
            redirect_uri: redirect_uri.clone(),
        },
    );
    let url = build_authorize_url(&redirect_uri, &code_challenge, &csrf_state);
    (csrf_state, url)
}
/// Exchange an authorization code for tokens and persist the credentials.
///
/// Looks up the pending PKCE flow for `csrf_state`, exchanges the code with
/// Anthropic's token endpoint, and writes the result to
/// `~/.claude/.credentials.json`.
///
/// # Errors
/// - [`Error::InvalidState`] if `csrf_state` is unknown or already consumed.
/// - [`Error::Network`] if the token endpoint is unreachable.
/// - [`Error::InvalidGrant`] if Anthropic rejects the code (non-2xx response).
/// - [`Error::Parse`] if the token response cannot be parsed.
/// - [`Error::TokenStorage`] if writing credentials to disk fails.
pub async fn exchange_code(state: &OAuthState, code: &str, csrf_state: &str) -> Result<(), Error> {
    crate::slog!("[oauth] Received callback, exchanging code for tokens");
    // `remove` (not `get`) makes each state token single-use: a replayed
    // callback with the same state fails with InvalidState.
    let pending = state.pending.lock().unwrap().remove(csrf_state);
    let flow = pending.ok_or_else(|| {
        crate::slog!("[oauth] Unknown state parameter: {}", csrf_state);
        Error::InvalidState(
            "Unknown or expired state parameter. Please try logging in again.".to_string(),
        )
    })?;
    let token =
        io::exchange_code_for_tokens(code, &flow.redirect_uri, &flow.code_verifier, csrf_state)
            .await?;
    let now_ms = io::current_time_ms();
    io::save_credentials(&token, now_ms)?;
    crate::slog!("[oauth] Successfully authenticated and saved credentials");
    Ok(())
}
/// Return the current OAuth credential status.
///
/// Delegates to the I/O layer, which reads the credentials file and the
/// system clock; returns an unauthenticated [`FlowStatus`] when no
/// credentials file exists.
pub fn check_status() -> FlowStatus {
    io::load_status()
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // ── OAuthState construction ──────────────────────────────────────────
    #[test]
    fn oauth_state_new_sets_port() {
        let s = OAuthState::new(3001);
        assert_eq!(s.callback_url(), "http://localhost:3001/callback");
    }
    #[test]
    fn oauth_state_different_ports() {
        let s = OAuthState::new(9876);
        assert_eq!(s.callback_url(), "http://localhost:9876/callback");
    }
    // ── Flow initiation ──────────────────────────────────────────────────
    #[test]
    fn initiate_flow_stores_pending_entry() {
        let state = OAuthState::new(3001);
        let (csrf_state, url) = initiate_flow(&state);
        assert!(!csrf_state.is_empty());
        assert!(url.contains(&csrf_state));
        assert!(state.pending.lock().unwrap().contains_key(&csrf_state));
    }
    #[test]
    fn initiate_flow_generates_unique_states() {
        let state = OAuthState::new(3001);
        let (s1, _) = initiate_flow(&state);
        let (s2, _) = initiate_flow(&state);
        assert_ne!(s1, s2);
    }
    // ── Error Display formatting ─────────────────────────────────────────
    #[test]
    fn error_display_invalid_grant() {
        let e = Error::InvalidGrant("bad code".to_string());
        assert_eq!(e.to_string(), "Invalid grant: bad code");
    }
    #[test]
    fn error_display_network_error() {
        let e = Error::Network("timeout".to_string());
        assert_eq!(e.to_string(), "Network error: timeout");
    }
    #[test]
    fn error_display_token_expired() {
        let e = Error::TokenExpired("expired".to_string());
        assert_eq!(e.to_string(), "Token expired: expired");
    }
    #[test]
    fn error_display_token_storage() {
        let e = Error::TokenStorage("disk full".to_string());
        assert_eq!(e.to_string(), "Token storage error: disk full");
    }
    #[test]
    fn error_display_invalid_state() {
        let e = Error::InvalidState("unknown".to_string());
        assert_eq!(e.to_string(), "Invalid state: unknown");
    }
    #[test]
    fn error_display_missing_code() {
        let e = Error::MissingCode;
        assert_eq!(e.to_string(), "Missing authorization code");
    }
    #[test]
    fn error_display_missing_state() {
        let e = Error::MissingState;
        assert_eq!(e.to_string(), "Missing state parameter");
    }
    #[test]
    fn error_display_authorization_denied() {
        let e = Error::AuthorizationDenied("access_denied".to_string());
        assert_eq!(e.to_string(), "Authorization denied: access_denied");
    }
    #[test]
    fn error_display_parse_error() {
        let e = Error::Parse("bad json".to_string());
        assert_eq!(e.to_string(), "Parse error: bad json");
    }
    // ── exchange_code: InvalidState path ─────────────────────────────────
    #[test]
    fn exchange_code_returns_invalid_state_for_unknown_csrf() {
        // Can test the InvalidState path synchronously by driving the pending map directly
        let state = OAuthState::new(3001);
        // No pending flow inserted — exchange_code will find no match
        let rt = tokio::runtime::Runtime::new().unwrap();
        let result = rt.block_on(exchange_code(&state, "somecode", "unknownstate"));
        assert!(matches!(result, Err(Error::InvalidState(_))));
    }
}
+175
View File
@@ -0,0 +1,175 @@
//! PKCE (Proof Key for Code Exchange) helpers — pure functions with no side effects.
//!
//! Covers code verifier/challenge generation, base64url encoding,
//! URL percent-encoding, and authorization URL construction.
use sha2::{Digest, Sha256};
/// The Anthropic authorize endpoint.
const AUTHORIZE_URL: &str = "https://claude.com/cai/oauth/authorize";
/// The OAuth client ID used by Claude Code.
pub(crate) const CLIENT_ID: &str = "9d1c250a-e61b-44d9-88ed-5944d1962f5e";
/// The OAuth scopes requested.
pub const SCOPES: &str =
"user:inference user:profile user:mcp_servers user:sessions:claude_code user:file_upload";
/// Generate a random alphanumeric string of the given length.
///
/// Used to produce PKCE code verifiers (128 chars) and CSRF state tokens (32 chars).
///
/// NOTE(review): entropy comes from `RandomState`'s per-instance hash keys,
/// which is not a vetted CSPRNG. Confirm this meets the threat model for
/// security-sensitive tokens, or switch to a cryptographic RNG.
pub fn random_string(len: usize) -> String {
    use std::collections::hash_map::RandomState;
    use std::hash::{BuildHasher, Hasher};
    const ALPHABET: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
    (0..len)
        .map(|_| {
            // Each RandomState carries fresh random keys; hashing nothing
            // still yields one key-dependent pseudo-random u64 per character.
            let sample = RandomState::new().build_hasher().finish() as usize;
            ALPHABET[sample % ALPHABET.len()] as char
        })
        .collect()
}
/// Compute the S256 PKCE code challenge from a code verifier.
///
/// Returns the base64url-encoded SHA-256 hash of `verifier` (no padding).
pub fn compute_code_challenge(verifier: &str) -> String {
    base64url_encode(&Sha256::digest(verifier.as_bytes()))
}
/// Base64url-encode `data` without padding (RFC 7636 / RFC 4648 §5).
///
/// Uses the URL-safe alphabet (`-` and `_`) directly, instead of encoding
/// with the standard alphabet and rewriting `+`/`/` in two full passes.
/// The output buffer is preallocated to the exact encoded length.
pub fn base64url_encode(data: &[u8]) -> String {
    const CHARS: &[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_";
    let mut result = String::with_capacity((data.len() + 2) / 3 * 4);
    for chunk in data.chunks(3) {
        // Pack up to three bytes into a 24-bit group; missing bytes are zero.
        let b0 = chunk[0] as u32;
        let b1 = chunk.get(1).copied().unwrap_or(0) as u32;
        let b2 = chunk.get(2).copied().unwrap_or(0) as u32;
        let triple = (b0 << 16) | (b1 << 8) | b2;
        result.push(CHARS[((triple >> 18) & 0x3F) as usize] as char);
        result.push(CHARS[((triple >> 12) & 0x3F) as usize] as char);
        // Emit only the symbols covered by real input bytes — no '=' padding.
        if chunk.len() > 1 {
            result.push(CHARS[((triple >> 6) & 0x3F) as usize] as char);
        }
        if chunk.len() > 2 {
            result.push(CHARS[(triple & 0x3F) as usize] as char);
        }
    }
    result
}
/// Percent-encode `input` for use in URL query parameters (RFC 3986 unreserved chars).
///
/// Unreserved bytes (`A–Z a–z 0–9 - _ . ~`) pass through unchanged; every
/// other byte becomes `%XX` with uppercase hex digits.
pub fn percent_encode(input: &str) -> String {
    const HEX: &[u8; 16] = b"0123456789ABCDEF";
    let mut encoded = String::with_capacity(input.len() * 3);
    for byte in input.bytes() {
        match byte {
            b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' | b'~' => {
                encoded.push(byte as char);
            }
            _ => {
                // Emit %XX by hand — avoids one `format!` heap allocation per
                // escaped byte while producing identical output.
                encoded.push('%');
                encoded.push(HEX[(byte >> 4) as usize] as char);
                encoded.push(HEX[(byte & 0x0F) as usize] as char);
            }
        }
    }
    encoded
}
/// Build the full authorization URL to redirect the browser to.
///
/// `redirect_uri` — the callback URL (`http://localhost:<port>/callback`)
/// `code_challenge` — the S256 code challenge
/// `csrf_state` — the random CSRF state token
pub fn build_authorize_url(redirect_uri: &str, code_challenge: &str, csrf_state: &str) -> String {
    // Percent-encode every caller-supplied query value up front.
    let redirect = percent_encode(redirect_uri);
    let scope = percent_encode(SCOPES);
    let challenge = percent_encode(code_challenge);
    let state = percent_encode(csrf_state);
    format!(
        "{AUTHORIZE_URL}?code=true&client_id={CLIENT_ID}&response_type=code&redirect_uri={redirect}&scope={scope}&code_challenge={challenge}&code_challenge_method=S256&state={state}"
    )
}
#[cfg(test)]
mod tests {
    use super::*;
    // ── base64url_encode ─────────────────────────────────────────────────
    #[test]
    fn base64url_encode_basic() {
        assert_eq!(base64url_encode(b"Hello"), "SGVsbG8");
    }
    #[test]
    fn base64url_encode_no_padding() {
        assert!(!base64url_encode(b"a").contains('='));
    }
    #[test]
    fn base64url_encode_no_plus_or_slash() {
        // All 256 byte values together exercise every 6-bit output symbol.
        let data: Vec<u8> = (0..=255).collect();
        let encoded = base64url_encode(&data);
        assert!(!encoded.contains('+'));
        assert!(!encoded.contains('/'));
    }
    // ── compute_code_challenge ───────────────────────────────────────────
    #[test]
    fn compute_code_challenge_returns_nonempty() {
        assert!(!compute_code_challenge("test_verifier").is_empty());
    }
    #[test]
    fn compute_code_challenge_is_deterministic() {
        assert_eq!(
            compute_code_challenge("same"),
            compute_code_challenge("same")
        );
    }
    // ── random_string ────────────────────────────────────────────────────
    #[test]
    fn random_string_length() {
        assert_eq!(random_string(64).len(), 64);
    }
    #[test]
    fn random_string_is_alphanumeric() {
        assert!(
            random_string(100)
                .chars()
                .all(|c| c.is_ascii_alphanumeric())
        );
    }
    // ── percent_encode ───────────────────────────────────────────────────
    #[test]
    fn percent_encode_unreserved_chars_unchanged() {
        assert_eq!(percent_encode("abc-_.~"), "abc-_.~");
    }
    #[test]
    fn percent_encode_space_becomes_percent_20() {
        assert_eq!(percent_encode("hello world"), "hello%20world");
    }
    // ── build_authorize_url ──────────────────────────────────────────────
    #[test]
    fn build_authorize_url_contains_client_id() {
        let url = build_authorize_url("http://localhost:3001/callback", "challenge", "state");
        assert!(url.contains(CLIENT_ID));
    }
    #[test]
    fn build_authorize_url_contains_state() {
        let url = build_authorize_url("http://localhost:3001/callback", "challenge", "mystate");
        assert!(url.contains("mystate"));
    }
}
+155
View File
@@ -0,0 +1,155 @@
//! Pipeline service — shared pipeline-domain logic.
//!
//! Contains pure functions for parsing and aggregating pipeline status data.
//! Used by the gateway service for cross-project aggregation and potentially
//! by other consumers that need to reason about pipeline stage counts.
use serde_json::{Value, json};
/// Parse a `get_pipeline_status` JSON payload and produce aggregated counts
/// plus a list of blocked/failing items.
///
/// Output shape:
/// `{ "counts": { backlog, current, qa, merge, done }, "blocked": [ { story_id, name, stage, reason } ] }`
///
/// Missing or malformed fields degrade gracefully: absent `active` counts as
/// empty, absent `backlog_count` as 0, and an unrecognised `stage` is skipped
/// by the counters but still considered for the blocked list.
pub fn aggregate_pipeline_counts(pipeline: &Value) -> Value {
    let active = pipeline
        .get("active")
        .and_then(|a| a.as_array())
        .cloned()
        .unwrap_or_default();
    let backlog_count = pipeline
        .get("backlog_count")
        .and_then(|n| n.as_u64())
        .unwrap_or(0);
    let mut current = 0u64;
    let mut qa = 0u64;
    let mut merge = 0u64;
    let mut done = 0u64;
    let mut blocked: Vec<Value> = Vec::new();
    for item in &active {
        let stage = item
            .get("stage")
            .and_then(|s| s.as_str())
            .unwrap_or("unknown");
        match stage {
            "current" => current += 1,
            "qa" => qa += 1,
            "merge" => merge += 1,
            "done" => done += 1,
            _ => {}
        }
        let is_blocked = item
            .get("blocked")
            .and_then(|b| b.as_bool())
            .unwrap_or(false);
        let merge_failure = item.get("merge_failure");
        // A merge failure counts unless the field is null or the empty string.
        // NOTE(review): any non-string, non-null value (e.g. `false`) also
        // counts as a failure here — confirm that is intended.
        let has_merge_failure = merge_failure
            .map(|f| !f.is_null() && f != "")
            .unwrap_or(false);
        if is_blocked || has_merge_failure {
            let story_id = item
                .get("story_id")
                .and_then(|s| s.as_str())
                .unwrap_or("?")
                .to_string();
            let story_name = item
                .get("name")
                .and_then(|s| s.as_str())
                .unwrap_or("")
                .to_string();
            // Merge failures take precedence over plain "blocked" in the reason.
            let reason = if has_merge_failure {
                format!(
                    "merge failure: {}",
                    merge_failure.and_then(|f| f.as_str()).unwrap_or("unknown")
                )
            } else {
                let rc = item
                    .get("retry_count")
                    .and_then(|n| n.as_u64())
                    .unwrap_or(0);
                format!("blocked after {rc} retries")
            };
            blocked.push(json!({
                "story_id": story_id,
                "name": story_name,
                "stage": stage,
                "reason": reason,
            }));
        }
    }
    json!({
        "counts": {
            "backlog": backlog_count,
            "current": current,
            "qa": qa,
            "merge": merge,
            "done": done,
        },
        "blocked": blocked,
    })
}
// ── Tests ────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // ── empty input degrades to all-zero counts ──────────────────────────
    #[test]
    fn aggregate_empty_pipeline() {
        let pipeline = json!({ "active": [], "backlog": [], "backlog_count": 0 });
        let result = aggregate_pipeline_counts(&pipeline);
        assert_eq!(result["counts"]["backlog"], 0);
        assert_eq!(result["counts"]["current"], 0);
        assert_eq!(result["counts"]["qa"], 0);
        assert_eq!(result["counts"]["merge"], 0);
        assert_eq!(result["counts"]["done"], 0);
        assert_eq!(result["blocked"].as_array().unwrap().len(), 0);
    }
    // ── per-stage counting ───────────────────────────────────────────────
    #[test]
    fn aggregate_stage_counts_correct() {
        let pipeline = json!({
            "active": [
                { "story_id": "1_story_a", "name": "A", "stage": "current" },
                { "story_id": "2_story_b", "name": "B", "stage": "current" },
                { "story_id": "3_story_c", "name": "C", "stage": "qa" },
                { "story_id": "4_story_d", "name": "D", "stage": "done" },
            ],
            "backlog": [{ "story_id": "5_story_e", "name": "E" }, { "story_id": "6_story_f", "name": "F" }],
            "backlog_count": 2
        });
        let result = aggregate_pipeline_counts(&pipeline);
        assert_eq!(result["counts"]["backlog"], 2);
        assert_eq!(result["counts"]["current"], 2);
        assert_eq!(result["counts"]["qa"], 1);
        assert_eq!(result["counts"]["merge"], 0);
        assert_eq!(result["counts"]["done"], 1);
        assert_eq!(result["blocked"].as_array().unwrap().len(), 0);
    }
    // ── blocked-item extraction and reason text ──────────────────────────
    #[test]
    fn aggregate_blocked_items_captured() {
        let pipeline = json!({
            "active": [
                { "story_id": "10_story_blocked", "name": "Blocked", "stage": "current", "blocked": true, "retry_count": 3 },
                { "story_id": "20_story_ok", "name": "OK", "stage": "qa" },
            ],
            "backlog": [],
            "backlog_count": 0
        });
        let result = aggregate_pipeline_counts(&pipeline);
        let blocked = result["blocked"].as_array().unwrap();
        assert_eq!(blocked.len(), 1);
        assert_eq!(blocked[0]["story_id"], "10_story_blocked");
        assert_eq!(blocked[0]["stage"], "current");
        assert!(
            blocked[0]["reason"]
                .as_str()
                .unwrap()
                .contains("blocked after 3 retries"),
        );
    }
}
+144
View File
@@ -0,0 +1,144 @@
//! Project I/O — the ONLY place in `service/project/` that may perform
//! filesystem reads, state mutations, or store operations.
//!
//! Every function here is a thin adapter that converts lower-level errors
//! into the typed [`super::Error`] for this domain. No business logic lives
//! here; branching belongs in `selection.rs` or `mod.rs`.
use crate::state::SessionState;
use crate::store::StoreOps;
use std::path::{Path, PathBuf};
use super::Error;
const KEY_LAST_PROJECT: &str = "last_project_path";
const KEY_KNOWN_PROJECTS: &str = "known_projects";
/// Validate that `path` exists and is a directory, returning a typed error.
pub(super) async fn validate_path(path: &Path) -> Result<(), Error> {
let p = path.to_path_buf();
tokio::task::spawn_blocking(move || {
if !p.exists() {
return Err(Error::PathNotFound(format!(
"Path does not exist: {}",
p.display()
)));
}
if !p.is_dir() {
return Err(Error::NotADirectory(format!(
"Path is not a directory: {}",
p.display()
)));
}
Ok(())
})
.await
.map_err(|e| Error::Internal(format!("Task failed: {e}")))?
}
/// Ensure the project directory has a `.huskies/` scaffold and an `.mcp.json`.
///
/// Creates the directory if it does not exist. If `.huskies/` is absent,
/// writes the full scaffold. Always rewrites `.mcp.json` with `port`.
///
/// # Errors
/// Returns [`Error::Internal`] when the underlying filesystem helper fails.
pub(super) async fn ensure_scaffold(path: PathBuf, port: u16) -> Result<(), Error> {
    crate::io::fs::project::ensure_project_root_with_story_kit(path, port)
        .await
        .map_err(Error::Internal)
}
/// Set (or clear) the active project root in session state.
pub(super) fn set_project_root(state: &SessionState, path: Option<PathBuf>) -> Result<(), Error> {
    // TRACE:MERGE-DEBUG — remove once root cause is found
    if let Some(p) = &path {
        crate::slog!(
            "[MERGE-DEBUG] open_project: setting project_root to {:?}",
            p
        );
    } else {
        crate::slog!("[MERGE-DEBUG] close_project: setting project_root to None");
    }
    let mut guard = state
        .project_root
        .lock()
        .map_err(|e| Error::Internal(format!("Lock poisoned: {e}")))?;
    *guard = path;
    Ok(())
}
/// Read the active project root from session state.
pub(super) fn get_project_root_from_state(state: &SessionState) -> Result<Option<PathBuf>, Error> {
let root = state
.project_root
.lock()
.map_err(|e| Error::Internal(format!("Lock poisoned: {e}")))?;
Ok(root.clone())
}
/// Persist the last-used project path and known-projects list to the store.
///
/// Sets both keys and flushes in a single `save()` call to minimise writes.
pub(super) fn persist_open_project(
    path: &str,
    known: &[String],
    store: &dyn StoreOps,
) -> Result<(), Error> {
    let last = serde_json::json!(path);
    let list = serde_json::json!(known);
    store.set(KEY_LAST_PROJECT, last);
    store.set(KEY_KNOWN_PROJECTS, list);
    store.save().map_err(Error::Internal)
}
/// Remove the persisted project path from the store and flush.
pub(super) fn clear_project(store: &dyn StoreOps) -> Result<(), Error> {
store.delete(KEY_LAST_PROJECT);
store.save().map_err(Error::Internal)
}
/// Read the known-projects list from the store.
///
/// Non-array values and non-string entries are silently dropped.
pub(super) fn read_known_projects(store: &dyn StoreOps) -> Vec<String> {
    let raw = store
        .get(KEY_KNOWN_PROJECTS)
        .and_then(|val| val.as_array().cloned());
    match raw {
        Some(values) => values
            .iter()
            .filter_map(|val| val.as_str().map(str::to_string))
            .collect(),
        None => Vec::new(),
    }
}
/// Persist the known-projects list to the store and flush.
pub(super) fn save_known_projects(projects: &[String], store: &dyn StoreOps) -> Result<(), Error> {
    let payload = serde_json::json!(projects);
    store.set(KEY_KNOWN_PROJECTS, payload);
    store.save().map_err(Error::Internal)
}
/// Try to restore the project root from the persisted store path.
///
/// If the stored path still exists and is a directory, updates session state
/// and returns the path string. Returns `Ok(None)` when no valid stored path
/// is found.
pub(super) fn restore_from_store(
    state: &SessionState,
    store: &dyn StoreOps,
) -> Result<Option<String>, Error> {
    let last = store
        .get(KEY_LAST_PROJECT)
        .and_then(|val| val.as_str().map(|s| s.to_string()));
    if let Some(path_str) = last {
        let p = PathBuf::from(&path_str);
        // Stale entries (deleted or moved directories) are silently ignored.
        if p.exists() && p.is_dir() {
            // TRACE:MERGE-DEBUG — remove once root cause is found
            crate::slog!(
                "[MERGE-DEBUG] get_current_project: project_root was None, \
                 restoring from store to {:?}",
                p
            );
            let mut root = state
                .project_root
                .lock()
                .map_err(|e| Error::Internal(format!("Lock poisoned: {e}")))?;
            *root = Some(p);
            return Ok(Some(path_str));
        }
    }
    Ok(None)
}
+102
View File
@@ -0,0 +1,102 @@
//! Project service — public API for the project domain.
//!
//! Exposes functions to open, close, query, and manage known projects.
//! HTTP handlers call these functions instead of touching `io::fs` or session
//! state directly.
//!
//! Conventions: `docs/architecture/service-modules.md`
pub(super) mod io;
pub mod selection;
use crate::state::SessionState;
use crate::store::StoreOps;
use std::path::PathBuf;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::project` functions.
///
/// Each variant carries a human-readable detail message, surfaced to clients
/// via the `Display` impl below.
///
/// HTTP handlers map these to specific status codes:
/// - [`Error::PathNotFound`] → 404 Not Found
/// - [`Error::NotADirectory`] → 400 Bad Request
/// - [`Error::Internal`] → 500 Internal Server Error
#[derive(Debug)]
pub enum Error {
    /// The given path does not exist on the filesystem.
    PathNotFound(String),
    /// The given path exists but is not a directory.
    NotADirectory(String),
    /// An internal error occurred (lock poisoned, store I/O failure, task panic).
    Internal(String),
}
impl std::fmt::Display for Error {
    /// Render "<label>: <detail>" for the user-facing error message.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (label, detail) = match self {
            Self::PathNotFound(msg) => ("Project not found", msg),
            Self::NotADirectory(msg) => ("Invalid project path", msg),
            Self::Internal(msg) => ("Internal error", msg),
        };
        write!(f, "{label}: {detail}")
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Open a project, scaffolding it when needed, and persist the selection.
///
/// Ensures the `.huskies/` scaffold exists (creating the directory when
/// missing), validates that `path` is a directory, records it as the active
/// project, and promotes it to the front of the known-projects list.
///
/// Returns the path string exactly as supplied — no canonicalisation is
/// performed.
///
/// # Errors
/// - [`Error::NotADirectory`] if `path` exists but is not a directory.
/// - [`Error::Internal`] on scaffold, lock, or store failures.
pub async fn open_project(
    path: String,
    state: &SessionState,
    store: &dyn StoreOps,
    port: u16,
) -> Result<String, Error> {
    let p = PathBuf::from(&path);
    // NOTE(review): scaffolding runs before validation and creates the
    // directory when missing, so PathNotFound can effectively never fire
    // here — confirm this ordering is intentional.
    io::ensure_scaffold(p.clone(), port).await?;
    io::validate_path(&p).await?;
    io::set_project_root(state, Some(p))?;
    let known = io::read_known_projects(store);
    let updated = selection::promote_to_front(known, &path);
    io::persist_open_project(&path, &updated, store)?;
    Ok(path)
}
/// Close the current project and remove it from the persisted selection.
///
/// Clears the in-memory project root first, then deletes the stored
/// `last_project_path`; the known-projects list is left untouched.
pub fn close_project(state: &SessionState, store: &dyn StoreOps) -> Result<(), Error> {
    io::set_project_root(state, None)?;
    io::clear_project(store)
}
/// Return the currently open project path, if any.
///
/// Checks in-memory state first, then falls back to the store.
/// If the store has a valid path, restores it into state for future calls.
pub fn get_current_project(
    state: &SessionState,
    store: &dyn StoreOps,
) -> Result<Option<String>, Error> {
    match io::get_project_root_from_state(state)? {
        Some(path) => Ok(Some(path.to_string_lossy().into_owned())),
        None => io::restore_from_store(state, store),
    }
}
/// Return all known (previously opened) project paths from the store.
///
/// Infallible in practice; the `Result` keeps the signature uniform with the
/// rest of the public API.
pub fn get_known_projects(store: &dyn StoreOps) -> Result<Vec<String>, Error> {
    let known = io::read_known_projects(store);
    Ok(known)
}
/// Remove a path from the known-projects list.
///
/// Returns `Ok(())` whether or not the path was present (idempotent).
pub fn forget_known_project(path: String, store: &dyn StoreOps) -> Result<(), Error> {
    let mut known = io::read_known_projects(store);
    let original_len = known.len();
    known.retain(|p| p != &path);
    // Nothing removed → skip the store write entirely.
    if known.len() == original_len {
        return Ok(());
    }
    io::save_known_projects(&known, store)
}
+100
View File
@@ -0,0 +1,100 @@
//! Pure project-selection logic — no I/O, no async, no side effects.
//!
//! All functions here are deterministic and depend only on their arguments.
/// Promote a project path to the front of the known-projects list.
///
/// Removes every existing occurrence of `path` and places it at position 0,
/// so the most-recently-opened project is always first.
pub fn promote_to_front(projects: Vec<String>, path: &str) -> Vec<String> {
    let mut result = Vec::with_capacity(projects.len() + 1);
    result.push(path.to_string());
    result.extend(projects.into_iter().filter(|p| p != path));
    result
}
/// Extract the display name for a project from its filesystem path.
///
/// Returns the last non-empty `/`-separated component, or `None` for root,
/// all-slash, or empty input.
#[allow(dead_code)]
pub fn project_name_from_path(path: &str) -> Option<&str> {
    // `rsplit` walks components right-to-left; skipping empty segments already
    // covers trailing slashes, so no separate `trim_end_matches('/')` pass is
    // needed.
    path.rsplit('/').find(|s| !s.is_empty())
}
#[cfg(test)]
mod tests {
    use super::*;
    // ── promote_to_front ─────────────────────────────────────────────────
    #[test]
    fn promote_to_front_inserts_new_path_at_position_zero() {
        let result = promote_to_front(vec!["/a".to_string(), "/b".to_string()], "/c");
        assert_eq!(result, vec!["/c", "/a", "/b"]);
    }
    #[test]
    fn promote_to_front_moves_existing_entry_to_front() {
        let result = promote_to_front(
            vec!["/a".to_string(), "/b".to_string(), "/c".to_string()],
            "/b",
        );
        assert_eq!(result, vec!["/b", "/a", "/c"]);
    }
    #[test]
    fn promote_to_front_is_idempotent_when_already_first() {
        let result = promote_to_front(vec!["/a".to_string(), "/b".to_string()], "/a");
        assert_eq!(result, vec!["/a", "/b"]);
    }
    #[test]
    fn promote_to_front_handles_empty_list() {
        let result = promote_to_front(vec![], "/new");
        assert_eq!(result, vec!["/new"]);
    }
    #[test]
    fn promote_to_front_deduplicates_single_entry() {
        let result = promote_to_front(vec!["/a".to_string()], "/a");
        assert_eq!(result, vec!["/a"]);
    }
    // ── project_name_from_path ───────────────────────────────────────────
    #[test]
    fn project_name_from_path_extracts_last_component() {
        assert_eq!(
            project_name_from_path("/home/user/myproject"),
            Some("myproject")
        );
    }
    #[test]
    fn project_name_from_path_handles_trailing_slash() {
        assert_eq!(
            project_name_from_path("/home/user/myproject/"),
            Some("myproject")
        );
    }
    #[test]
    fn project_name_from_path_returns_none_for_root() {
        assert_eq!(project_name_from_path("/"), None);
    }
    #[test]
    fn project_name_from_path_returns_none_for_empty() {
        assert_eq!(project_name_from_path(""), None);
    }
    #[test]
    fn project_name_from_path_handles_single_component() {
        assert_eq!(project_name_from_path("myproject"), Some("myproject"));
    }
    #[test]
    fn project_name_from_path_handles_deep_path() {
        assert_eq!(
            project_name_from_path("/a/b/c/d/project-name"),
            Some("project-name")
        );
    }
}
+92
View File
@@ -0,0 +1,92 @@
//! QA I/O — the ONLY place in `service::qa/` that may perform side effects.
//!
//! Side effects here include: spawning git processes via `std::process::Command`,
//! binding TCP sockets to discover free ports, and launching the QA app process.
use super::Error;
/// Find a free TCP port by attempting to bind starting from `start`.
///
/// Scans `start` and up to 99 ports above it, returning the first that binds.
/// Falls back to `start` if none are found (unlikely in practice).
///
/// Uses `saturating_add` so a `start` near `u16::MAX` cannot overflow the
/// range bound (which would panic in debug builds); the scan simply clamps
/// at port 65535.
pub fn find_free_port(start: u16) -> u16 {
    let end = start.saturating_add(99);
    for port in start..=end {
        // A successful bind proves availability; the listener is dropped
        // immediately, releasing the port for the caller.
        if std::net::TcpListener::bind(("127.0.0.1", port)).is_ok() {
            return port;
        }
    }
    start
}
/// Merge a spike's feature branch into master using a fast-forward or simple merge.
///
/// Unlike the squash-merge pipeline used for stories, spikes skip quality gates
/// and preserve their commit history. Returns `true` if a merge was performed,
/// `false` if the branch had no unmerged commits (already up to date).
///
/// # Errors
/// - [`Error::Conflict`] if the merge fails due to conflicts.
/// - [`Error::UpstreamFailure`] if a git command cannot be run.
pub fn merge_spike_branch_to_master(
    project_root: &std::path::Path,
    branch: &str,
    story_id: &str,
) -> Result<bool, Error> {
    use std::process::Command;
    // Check the branch exists and has unmerged changes.
    if !crate::agents::lifecycle::feature_branch_has_unmerged_changes(project_root, story_id) {
        crate::slog!(
            "[qa] Spike '{story_id}': feature branch has no unmerged changes, skipping merge."
        );
        return Ok(false);
    }
    // Ensure we are on master.
    // NOTE(review): the target branch name "master" is hard-coded here, while
    // the project settings expose a configurable `base_branch` — confirm that
    // spikes are intentionally always merged to master.
    let checkout = Command::new("git")
        .args(["checkout", "master"])
        .current_dir(project_root)
        .output()
        .map_err(|e| Error::UpstreamFailure(format!("git checkout master failed: {e}")))?;
    if !checkout.status.success() {
        return Err(Error::UpstreamFailure(format!(
            "Failed to checkout master: {}",
            String::from_utf8_lossy(&checkout.stderr)
        )));
    }
    // Try fast-forward first, then fall back to a regular merge.
    let ff = Command::new("git")
        .args(["merge", "--ff-only", branch])
        .current_dir(project_root)
        .output()
        .map_err(|e| Error::UpstreamFailure(format!("git merge --ff-only failed: {e}")))?;
    if ff.status.success() {
        crate::slog!("[qa] Spike '{story_id}': fast-forward merged '{branch}' into master.");
        return Ok(true);
    }
    // Fast-forward failed (diverged history) — fall back to a regular merge.
    // `--no-ff` forces a merge commit so the spike's history stays visible.
    let merge = Command::new("git")
        .args([
            "merge",
            "--no-ff",
            branch,
            "-m",
            &format!("Merge spike branch '{branch}' into master"),
        ])
        .current_dir(project_root)
        .output()
        .map_err(|e| Error::UpstreamFailure(format!("git merge failed: {e}")))?;
    if merge.status.success() {
        crate::slog!("[qa] Spike '{story_id}': merged '{branch}' into master (no-ff).");
        Ok(true)
    } else {
        // A failed merge here is most likely conflicting changes on master;
        // surface git's stderr so the operator can resolve it manually.
        Err(Error::Conflict(format!(
            "Failed to merge spike branch '{branch}' into master: {}",
            String::from_utf8_lossy(&merge.stderr)
        )))
    }
}
+42
View File
@@ -0,0 +1,42 @@
//! Pure QA lifecycle helpers for `service::qa`.
//!
//! These functions classify work items and make routing decisions without
//! performing any I/O. The spike-vs-story distinction determines whether a
//! QA approval goes through the merge pipeline or bypasses it.
use crate::agents::lifecycle::item_type_from_id;
/// Return `true` if `story_id` identifies a spike (e.g. `"42_spike_foo"`).
///
/// Spikes bypass the normal merge pipeline: when approved from QA they are
/// merged directly to master and moved straight to done.
pub fn is_spike(story_id: &str) -> bool {
    let kind = item_type_from_id(story_id);
    kind == "spike"
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_spike_returns_true_for_spike_id() {
        let verdict = is_spike("42_spike_my_research");
        assert!(verdict);
    }

    #[test]
    fn is_spike_returns_false_for_story_id() {
        let verdict = is_spike("42_story_my_feature");
        assert!(!verdict);
    }

    #[test]
    fn is_spike_returns_false_for_bug_id() {
        let verdict = is_spike("42_bug_login_crash");
        assert!(!verdict);
    }

    #[test]
    fn is_spike_returns_false_for_refactor_id() {
        let verdict = is_spike("42_refactor_cleanup");
        assert!(!verdict);
    }
}
+97
View File
@@ -0,0 +1,97 @@
//! QA service — domain logic for requesting, approving, and rejecting QA reviews.
//!
//! Extracted from `http/mcp/qa_tools.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects (git, TCP, process)
//! - `lifecycle.rs` — pure QA routing decisions (spike vs. normal story)
pub mod io;
pub mod lifecycle;
pub use io::{find_free_port, merge_spike_branch_to_master};
pub use lifecycle::is_spike;
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::qa` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::NotFound`] → 404 Not Found
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Conflict`] → 409 Conflict (merge conflicts)
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::UpstreamFailure`] → 500 Internal Server Error
#[allow(dead_code)] // NOTE(review): presumably some variants are not yet constructed anywhere — confirm before removing this allow.
#[derive(Debug)]
pub enum Error {
    /// The requested story or worktree was not found.
    NotFound(String),
    /// A required argument is missing or has an invalid value.
    Validation(String),
    /// The QA approval cannot proceed due to a git conflict.
    Conflict(String),
    /// A filesystem or process I/O operation failed.
    Io(String),
    /// An upstream dependency (git, agents) returned an unexpected error.
    UpstreamFailure(String),
}
impl std::fmt::Display for Error {
    /// Render the error as `"<label>: <detail>"` for logs and HTTP bodies.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let (label, detail) = match self {
            Self::NotFound(msg) => ("Not found", msg),
            Self::Validation(msg) => ("Validation error", msg),
            Self::Conflict(msg) => ("Conflict", msg),
            Self::Io(msg) => ("I/O error", msg),
            Self::UpstreamFailure(msg) => ("Upstream failure", msg),
        };
        write!(f, "{label}: {detail}")
    }
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    /// Render `err` via `Display` and check the output mentions `needle`.
    fn assert_display_contains(err: Error, needle: &str) {
        let rendered = err.to_string();
        assert!(
            rendered.contains(needle),
            "{rendered:?} should contain {needle:?}"
        );
    }

    #[test]
    fn error_display_not_found() {
        assert_display_contains(Error::NotFound("story missing".to_string()), "Not found");
    }

    #[test]
    fn error_display_validation() {
        assert_display_contains(
            Error::Validation("notes required".to_string()),
            "Validation error",
        );
    }

    #[test]
    fn error_display_conflict() {
        assert_display_contains(Error::Conflict("merge conflict".to_string()), "Conflict");
    }

    #[test]
    fn error_display_io() {
        assert_display_contains(Error::Io("port bind failed".to_string()), "I/O error");
    }

    #[test]
    fn error_display_upstream_failure() {
        assert_display_contains(
            Error::UpstreamFailure("git not found".to_string()),
            "Upstream failure",
        );
    }

    #[test]
    fn find_free_port_returns_bindable_port() {
        let port = find_free_port(3100);
        // The returned port must be bindable.
        let probe = std::net::TcpListener::bind(("127.0.0.1", port));
        assert!(probe.is_ok(), "port {port} should be bindable");
    }
}
+52
View File
@@ -0,0 +1,52 @@
//! Settings I/O — the ONLY place in `service/settings/` that may perform side effects.
//!
//! Side effects here include: reading/writing `.huskies/project.toml` and
//! spawning the editor process via `std::process::Command`.
//! All business logic, branching, and type definitions belong in `mod.rs`,
//! `project.rs`, or `validate.rs`.
use super::Error;
use std::path::Path;
/// Read the raw TOML content from `config_path`.
///
/// Returns an empty string if the file does not exist yet, so callers can
/// treat a missing config the same as an empty one.
///
/// # Errors
/// - [`Error::Io`] if the file exists but cannot be read.
pub(super) fn read_config_toml(config_path: &Path) -> Result<String, Error> {
if config_path.exists() {
std::fs::read_to_string(config_path).map_err(|e| Error::Io(format!("Read config: {e}")))
} else {
Ok(String::new())
}
}
/// Write `content` to `config_path`, creating parent directories as needed.
///
/// # Errors
/// - [`Error::Io`] if the directory cannot be created or the file write fails.
pub(super) fn write_config_toml(config_path: &Path, content: &str) -> Result<(), Error> {
if let Some(parent) = config_path.parent() {
std::fs::create_dir_all(parent)
.map_err(|e| Error::Io(format!("Create .huskies dir: {e}")))?;
}
std::fs::write(config_path, content).map_err(|e| Error::Io(format!("Write config: {e}")))
}
/// Spawn the editor process with `file_ref` as the sole argument.
///
/// Does not wait for the editor to exit — fire-and-forget so the UI remains
/// responsive.
///
/// # Errors
/// - [`Error::Spawn`] if the operating system cannot start the process
///   (e.g. the editor binary is not on `$PATH`).
pub(super) fn spawn_editor(editor_command: &str, file_ref: &str) -> Result<(), Error> {
    // Discard the Child handle: we deliberately never wait() on the editor.
    std::process::Command::new(editor_command)
        .arg(file_ref)
        .spawn()
        .map(|_| ())
        .map_err(|e| Error::Spawn(format!("Failed to open editor: {e}")))
}
+262
View File
@@ -0,0 +1,262 @@
//! Settings service — domain logic for project settings and editor configuration.
//!
//! Extracts business logic from `http/settings.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects (filesystem I/O, process spawn)
//! - `project.rs` — pure types: [`ProjectSettings`], [`settings_from_config`],
//! [`merge_settings_into_toml`]
//! - `validate.rs` — pure validation: [`validate_project_settings`]
pub(super) mod io;
pub mod project;
pub mod validate;
pub use project::{ProjectSettings, merge_settings_into_toml, settings_from_config};
pub use validate::validate_project_settings;
use crate::config::ProjectConfig;
use crate::store::StoreOps;
use std::path::Path;
/// The store key for the configured editor command.
pub const EDITOR_COMMAND_KEY: &str = "editor_command";
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::settings` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::NotConfigured`] → 400 Bad Request
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::Spawn`] → 500 Internal Server Error
#[derive(Debug)]
pub enum Error {
    /// A field value failed validation (e.g. unknown QA mode).
    Validation(String),
    /// No editor is configured in the store (see `EDITOR_COMMAND_KEY`).
    NotConfigured,
    /// A filesystem read or write operation failed.
    Io(String),
    /// The editor process failed to spawn.
    Spawn(String),
}
impl std::fmt::Display for Error {
    /// Render the error as human-readable text for logs and HTTP bodies.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let rendered = match self {
            Self::Validation(msg) => format!("Validation error: {msg}"),
            Self::NotConfigured => "No editor configured".to_string(),
            Self::Io(msg) => format!("I/O error: {msg}"),
            Self::Spawn(msg) => format!("Spawn error: {msg}"),
        };
        f.write_str(&rendered)
    }
}
// ── Public API ────────────────────────────────────────────────────────────────
/// Load the current project settings from disk.
///
/// # Errors
/// - [`Error::Io`] if the config file cannot be read or parsed.
pub fn load_project_settings(project_root: &Path) -> Result<ProjectSettings, Error> {
    let config =
        ProjectConfig::load(project_root).map_err(|e| Error::Io(format!("Load config: {e}")))?;
    Ok(settings_from_config(&config))
}
/// Write the given settings to disk, preserving array sections.
///
/// Reads the existing project.toml, merges only the scalar fields from `s`,
/// and rewrites the file. Array sections (`[[component]]`, `[[agent]]`) are
/// untouched.
///
/// # Errors
/// - [`Error::Io`] if the config file cannot be read, parsed, or written.
pub fn write_project_settings(project_root: &Path, s: &ProjectSettings) -> Result<(), Error> {
    let config_path = project_root.join(".huskies/project.toml");
    let content = io::read_config_toml(&config_path)?;
    // A missing/blank file starts from an empty table; otherwise parse the
    // existing content so unknown keys and array sections survive the rewrite.
    let mut val: toml::Value = if content.trim().is_empty() {
        toml::Value::Table(toml::map::Map::new())
    } else {
        toml::from_str(&content).map_err(|e| Error::Io(format!("Parse config: {e}")))?
    };
    merge_settings_into_toml(&mut val, s)?;
    let new_content =
        toml::to_string_pretty(&val).map_err(|e| Error::Io(format!("Serialize config: {e}")))?;
    io::write_config_toml(&config_path, &new_content)
}
/// Return the configured editor command from the store, or `None` if not set.
///
/// Pure: reads from in-memory store only — no filesystem or network I/O.
pub fn get_editor_command(store: &dyn StoreOps) -> Option<String> {
    let value = store.get(EDITOR_COMMAND_KEY)?;
    value.as_str().map(str::to_owned)
}
/// Open a file in the configured editor at the optional line number.
///
/// # Errors
/// - [`Error::NotConfigured`] if no editor has been set in the store.
/// - [`Error::Spawn`] if the editor process fails to start.
pub fn open_file_in_editor(
    store: &dyn StoreOps,
    path: &str,
    line: Option<u32>,
) -> Result<(), Error> {
    let editor_command = get_editor_command(store).ok_or(Error::NotConfigured)?;
    // "path:line" is the convention most editors (vim, code -g, zed) accept.
    let file_ref = match line {
        Some(l) => format!("{path}:{l}"),
        None => path.to_string(),
    };
    io::spawn_editor(&editor_command, &file_ref)
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;
    // ── Error Display ─────────────────────────────────────────────────────────
    #[test]
    fn error_display_validation() {
        let e = Error::Validation("bad value".to_string());
        assert!(e.to_string().contains("Validation error"));
        assert!(e.to_string().contains("bad value"));
    }
    #[test]
    fn error_display_not_configured() {
        let e = Error::NotConfigured;
        assert!(e.to_string().contains("No editor configured"));
    }
    #[test]
    fn error_display_io() {
        let e = Error::Io("disk full".to_string());
        assert!(e.to_string().contains("I/O error"));
        assert!(e.to_string().contains("disk full"));
    }
    #[test]
    fn error_display_spawn() {
        let e = Error::Spawn("not found".to_string());
        assert!(e.to_string().contains("Spawn error"));
        assert!(e.to_string().contains("not found"));
    }
    // ── load_project_settings ─────────────────────────────────────────────────
    #[test]
    fn load_project_settings_returns_defaults_when_no_toml() {
        // An empty .huskies dir with no project.toml yields ProjectConfig defaults.
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
        let s = load_project_settings(dir.path()).unwrap();
        assert_eq!(s.default_qa, "server");
        assert_eq!(s.max_retries, 2);
        assert!(s.rate_limit_notifications);
    }
    // ── write_project_settings ────────────────────────────────────────────────
    #[test]
    fn write_project_settings_creates_file() {
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
        let s = load_project_settings(dir.path()).unwrap();
        write_project_settings(dir.path(), &s).unwrap();
        assert!(dir.path().join(".huskies/project.toml").exists());
    }
    #[test]
    fn write_project_settings_roundtrips() {
        // Mutated values survive a write → load cycle.
        let dir = TempDir::new().unwrap();
        std::fs::create_dir_all(dir.path().join(".huskies")).unwrap();
        let mut s = load_project_settings(dir.path()).unwrap();
        s.default_qa = "agent".to_string();
        s.max_retries = 7;
        write_project_settings(dir.path(), &s).unwrap();
        let loaded = load_project_settings(dir.path()).unwrap();
        assert_eq!(loaded.default_qa, "agent");
        assert_eq!(loaded.max_retries, 7);
    }
    // ── get_editor_command ────────────────────────────────────────────────────
    #[test]
    fn get_editor_command_returns_none_when_unset() {
        use crate::store::JsonFileStore;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        assert!(get_editor_command(&store).is_none());
    }
    #[test]
    fn get_editor_command_returns_value_when_set() {
        use crate::store::JsonFileStore;
        use serde_json::json;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        store.set(EDITOR_COMMAND_KEY, json!("zed"));
        assert_eq!(get_editor_command(&store), Some("zed".to_string()));
    }
    // ── open_file_in_editor ───────────────────────────────────────────────────
    #[test]
    fn open_file_in_editor_returns_not_configured_when_no_editor() {
        use crate::store::JsonFileStore;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        let result = open_file_in_editor(&store, "src/main.rs", Some(42));
        assert!(matches!(result, Err(Error::NotConfigured)));
    }
    #[test]
    fn open_file_in_editor_returns_spawn_error_for_nonexistent_editor() {
        use crate::store::JsonFileStore;
        use serde_json::json;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        store.set(EDITOR_COMMAND_KEY, json!("this_editor_xyz_does_not_exist"));
        let result = open_file_in_editor(&store, "src/main.rs", Some(1));
        assert!(matches!(result, Err(Error::Spawn(_))));
    }
    // `echo` is a safe stand-in editor in the two tests below: it exists
    // everywhere, accepts any argument, and exits immediately.
    #[test]
    fn open_file_in_editor_succeeds_with_echo() {
        use crate::store::JsonFileStore;
        use serde_json::json;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        store.set(EDITOR_COMMAND_KEY, json!("echo"));
        let result = open_file_in_editor(&store, "src/main.rs", Some(10));
        assert!(result.is_ok());
    }
    #[test]
    fn open_file_in_editor_formats_path_without_line() {
        use crate::store::JsonFileStore;
        use serde_json::json;
        let dir = TempDir::new().unwrap();
        let store = JsonFileStore::new(dir.path().join("store.json")).unwrap();
        store.set(EDITOR_COMMAND_KEY, json!("echo"));
        let result = open_file_in_editor(&store, "src/lib.rs", None);
        assert!(result.is_ok());
    }
}
+396
View File
@@ -0,0 +1,396 @@
//! Pure settings types and TOML merge logic — no side effects.
//!
//! Owns [`ProjectSettings`] (the API-facing settings payload),
//! [`settings_from_config`] (conversion from `ProjectConfig`), and
//! [`merge_settings_into_toml`] (the pure TOML key-updating logic used by the
//! write path in `mod.rs` + `io.rs`).
use crate::config::ProjectConfig;
use poem_openapi::Object;
use serde::{Deserialize, Serialize};
/// Project-level settings exposed via `GET /api/settings` and `PUT /api/settings`.
///
/// Only contains the scalar fields of `ProjectConfig` — array sections
/// (`[[component]]`, `[[agent]]`, `[watcher]`) are preserved in the TOML file
/// and are not editable through this API.
#[derive(Debug, Object, Serialize, Deserialize)] // `Object` makes this a poem-openapi schema type.
pub struct ProjectSettings {
    /// Project-wide default QA mode: "server", "agent", or "human". Default: "server".
    pub default_qa: String,
    /// Default model for coder-stage agents (e.g. "sonnet").
    pub default_coder_model: Option<String>,
    /// Maximum number of concurrent coder-stage agents.
    pub max_coders: Option<u32>,
    /// Maximum retries per story per pipeline stage before marking as blocked. Default: 2.
    pub max_retries: u32,
    /// Optional base branch name (e.g. "main", "master").
    pub base_branch: Option<String>,
    /// Whether to send RateLimitWarning chat notifications. Default: true.
    pub rate_limit_notifications: bool,
    /// IANA timezone name (e.g. "Europe/London").
    pub timezone: Option<String>,
    /// WebSocket URL of a remote huskies node to sync CRDT state with.
    pub rendezvous: Option<String>,
    /// How often (seconds) to check 5_done/ for items to archive. Default: 60.
    pub watcher_sweep_interval_secs: u64,
    /// How long (seconds) an item must remain in 5_done/ before archiving. Default: 14400.
    pub watcher_done_retention_secs: u64,
}
/// Convert a [`ProjectConfig`] into a [`ProjectSettings`] payload.
///
/// Pure: performs no I/O.
pub fn settings_from_config(cfg: &ProjectConfig) -> ProjectSettings {
    let watcher = &cfg.watcher;
    ProjectSettings {
        watcher_sweep_interval_secs: watcher.sweep_interval_secs,
        watcher_done_retention_secs: watcher.done_retention_secs,
        default_qa: cfg.default_qa.clone(),
        default_coder_model: cfg.default_coder_model.clone(),
        max_coders: cfg.max_coders.map(|v| v as u32),
        max_retries: cfg.max_retries,
        base_branch: cfg.base_branch.clone(),
        rate_limit_notifications: cfg.rate_limit_notifications,
        timezone: cfg.timezone.clone(),
        rendezvous: cfg.rendezvous.clone(),
    }
}
/// Insert `key` as a TOML string when `value` is `Some`, or remove it when `None`.
///
/// Pure private helper shared by the optional string fields below.
fn set_or_remove_string(
    table: &mut toml::map::Map<String, toml::Value>,
    key: &str,
    value: Option<&String>,
) {
    match value {
        Some(v) => {
            table.insert(key.to_string(), toml::Value::String(v.clone()));
        }
        None => {
            table.remove(key);
        }
    }
}

/// Merge the scalar settings from `s` into an existing TOML value in-place.
///
/// Array sections (`[[component]]`, `[[agent]]`) and unknown keys are preserved.
/// Pure: performs no I/O.
///
/// # Errors
/// - [`super::Error::Io`] if `val` is not a TOML table.
pub fn merge_settings_into_toml(
    val: &mut toml::Value,
    s: &ProjectSettings,
) -> Result<(), super::Error> {
    let table = val
        .as_table_mut()
        .ok_or_else(|| super::Error::Io("Config is not a TOML table".to_string()))?;
    // Scalar root fields — always written
    table.insert(
        "default_qa".to_string(),
        toml::Value::String(s.default_qa.clone()),
    );
    table.insert(
        "max_retries".to_string(),
        toml::Value::Integer(i64::from(s.max_retries)),
    );
    table.insert(
        "rate_limit_notifications".to_string(),
        toml::Value::Boolean(s.rate_limit_notifications),
    );
    // Optional scalar fields — insert when Some, remove when None
    set_or_remove_string(table, "default_coder_model", s.default_coder_model.as_ref());
    set_or_remove_string(table, "base_branch", s.base_branch.as_ref());
    set_or_remove_string(table, "timezone", s.timezone.as_ref());
    set_or_remove_string(table, "rendezvous", s.rendezvous.as_ref());
    match s.max_coders {
        Some(v) => {
            table.insert("max_coders".to_string(), toml::Value::Integer(i64::from(v)));
        }
        None => {
            table.remove("max_coders");
        }
    }
    // [watcher] sub-table
    let watcher_entry = table
        .entry("watcher".to_string())
        .or_insert_with(|| toml::Value::Table(toml::map::Map::new()));
    if let toml::Value::Table(wt) = watcher_entry {
        // NOTE(review): `u64 as i64` wraps for values above i64::MAX; in
        // practice these are small second counts, but confirm if needed.
        wt.insert(
            "sweep_interval_secs".to_string(),
            toml::Value::Integer(s.watcher_sweep_interval_secs as i64),
        );
        wt.insert(
            "done_retention_secs".to_string(),
            toml::Value::Integer(s.watcher_done_retention_secs as i64),
        );
    }
    Ok(())
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    // Shared fixture: settings derived from a default ProjectConfig.
    fn default_settings() -> ProjectSettings {
        settings_from_config(&ProjectConfig::default())
    }
    // Shared fixture: an empty TOML table to merge into.
    fn empty_toml() -> toml::Value {
        toml::Value::Table(toml::map::Map::new())
    }
    // ── settings_from_config ──────────────────────────────────────────────────
    #[test]
    fn settings_from_config_reflects_defaults() {
        let s = default_settings();
        assert_eq!(s.default_qa, "server");
        assert_eq!(s.max_retries, 2);
        assert!(s.rate_limit_notifications);
        assert!(s.default_coder_model.is_none());
        assert!(s.max_coders.is_none());
        assert!(s.base_branch.is_none());
        assert!(s.timezone.is_none());
        assert!(s.rendezvous.is_none());
    }
    #[test]
    fn settings_from_config_copies_all_scalar_fields() {
        let cfg = ProjectConfig {
            default_qa: "human".to_string(),
            default_coder_model: Some("opus".to_string()),
            max_coders: Some(4),
            max_retries: 5,
            base_branch: Some("main".to_string()),
            rate_limit_notifications: false,
            timezone: Some("UTC".to_string()),
            rendezvous: Some("ws://host:3001/crdt-sync".to_string()),
            watcher: crate::config::WatcherConfig {
                sweep_interval_secs: 30,
                done_retention_secs: 7200,
            },
            ..Default::default()
        };
        let s = settings_from_config(&cfg);
        assert_eq!(s.default_qa, "human");
        assert_eq!(s.default_coder_model, Some("opus".to_string()));
        assert_eq!(s.max_coders, Some(4));
        assert_eq!(s.max_retries, 5);
        assert_eq!(s.base_branch, Some("main".to_string()));
        assert!(!s.rate_limit_notifications);
        assert_eq!(s.timezone, Some("UTC".to_string()));
        assert_eq!(s.rendezvous, Some("ws://host:3001/crdt-sync".to_string()));
        assert_eq!(s.watcher_sweep_interval_secs, 30);
        assert_eq!(s.watcher_done_retention_secs, 7200);
    }
    #[test]
    fn settings_from_config_max_coders_usize_to_u32() {
        // max_coders is usize in ProjectConfig but u32 in the API payload.
        let cfg = ProjectConfig {
            max_coders: Some(3usize),
            ..Default::default()
        };
        let s = settings_from_config(&cfg);
        assert_eq!(s.max_coders, Some(3u32));
    }
    // ── merge_settings_into_toml ──────────────────────────────────────────────
    #[test]
    fn merge_writes_scalar_root_fields() {
        let mut val = empty_toml();
        let s = ProjectSettings {
            default_qa: "agent".to_string(),
            default_coder_model: None,
            max_coders: None,
            max_retries: 3,
            base_branch: None,
            rate_limit_notifications: false,
            timezone: None,
            rendezvous: None,
            watcher_sweep_interval_secs: 60,
            watcher_done_retention_secs: 14400,
        };
        merge_settings_into_toml(&mut val, &s).unwrap();
        let t = val.as_table().unwrap();
        assert_eq!(t["default_qa"].as_str(), Some("agent"));
        assert_eq!(t["max_retries"].as_integer(), Some(3));
        assert_eq!(t["rate_limit_notifications"].as_bool(), Some(false));
    }
    #[test]
    fn merge_inserts_optional_fields_when_some() {
        let mut val = empty_toml();
        let s = ProjectSettings {
            default_qa: "server".to_string(),
            default_coder_model: Some("sonnet".to_string()),
            max_coders: Some(2),
            max_retries: 2,
            base_branch: Some("main".to_string()),
            rate_limit_notifications: true,
            timezone: Some("America/New_York".to_string()),
            rendezvous: Some("ws://host/crdt-sync".to_string()),
            watcher_sweep_interval_secs: 60,
            watcher_done_retention_secs: 14400,
        };
        merge_settings_into_toml(&mut val, &s).unwrap();
        let t = val.as_table().unwrap();
        assert_eq!(t["default_coder_model"].as_str(), Some("sonnet"));
        assert_eq!(t["max_coders"].as_integer(), Some(2));
        assert_eq!(t["base_branch"].as_str(), Some("main"));
        assert_eq!(t["timezone"].as_str(), Some("America/New_York"));
        assert_eq!(t["rendezvous"].as_str(), Some("ws://host/crdt-sync"));
    }
    #[test]
    fn merge_removes_optional_fields_when_none() {
        let mut val = empty_toml();
        // First set them
        let s_with = ProjectSettings {
            default_qa: "server".to_string(),
            default_coder_model: Some("sonnet".to_string()),
            max_coders: Some(3),
            max_retries: 2,
            base_branch: Some("master".to_string()),
            rate_limit_notifications: true,
            timezone: Some("UTC".to_string()),
            rendezvous: None,
            watcher_sweep_interval_secs: 60,
            watcher_done_retention_secs: 14400,
        };
        merge_settings_into_toml(&mut val, &s_with).unwrap();
        // Then clear them
        let s_clear = ProjectSettings {
            default_qa: "server".to_string(),
            default_coder_model: None,
            max_coders: None,
            max_retries: 2,
            base_branch: None,
            rate_limit_notifications: true,
            timezone: None,
            rendezvous: None,
            watcher_sweep_interval_secs: 60,
            watcher_done_retention_secs: 14400,
        };
        merge_settings_into_toml(&mut val, &s_clear).unwrap();
        let t = val.as_table().unwrap();
        assert!(!t.contains_key("default_coder_model"));
        assert!(!t.contains_key("max_coders"));
        assert!(!t.contains_key("base_branch"));
        assert!(!t.contains_key("timezone"));
    }
    #[test]
    fn merge_writes_watcher_sub_table() {
        let mut val = empty_toml();
        let s = ProjectSettings {
            default_qa: "server".to_string(),
            default_coder_model: None,
            max_coders: None,
            max_retries: 2,
            base_branch: None,
            rate_limit_notifications: true,
            timezone: None,
            rendezvous: None,
            watcher_sweep_interval_secs: 45,
            watcher_done_retention_secs: 3600,
        };
        merge_settings_into_toml(&mut val, &s).unwrap();
        let t = val.as_table().unwrap();
        let wt = t["watcher"].as_table().unwrap();
        assert_eq!(wt["sweep_interval_secs"].as_integer(), Some(45));
        assert_eq!(wt["done_retention_secs"].as_integer(), Some(3600));
    }
    #[test]
    fn merge_preserves_unknown_toml_keys() {
        // Array sections are not managed by the merge and must survive it.
        let existing_toml = r#"
[[agent]]
name = "coder-1"
model = "sonnet"
stage = "coder"
[[component]]
name = "server"
path = "."
"#;
        let mut val: toml::Value = toml::from_str(existing_toml).unwrap();
        let s = default_settings();
        merge_settings_into_toml(&mut val, &s).unwrap();
        // Re-serialize and verify agent/component sections are preserved
        let output = toml::to_string_pretty(&val).unwrap();
        assert!(
            output.contains("coder-1"),
            "agent section should be preserved"
        );
        assert!(
            output.contains("component"),
            "component section should be preserved"
        );
    }
    #[test]
    fn merge_returns_error_for_non_table_toml() {
        let mut val = toml::Value::String("not a table".to_string());
        let s = default_settings();
        let result = merge_settings_into_toml(&mut val, &s);
        assert!(result.is_err());
        assert!(matches!(result.unwrap_err(), super::super::Error::Io(_)));
    }
    #[test]
    fn merge_formatting_produces_valid_toml() {
        let mut val = empty_toml();
        let s = ProjectSettings {
            default_qa: "human".to_string(),
            default_coder_model: Some("opus".to_string()),
            max_coders: Some(2),
            max_retries: 4,
            base_branch: Some("develop".to_string()),
            rate_limit_notifications: false,
            timezone: Some("Europe/London".to_string()),
            rendezvous: Some("ws://remote:3001/crdt-sync".to_string()),
            watcher_sweep_interval_secs: 120,
            watcher_done_retention_secs: 28800,
        };
        merge_settings_into_toml(&mut val, &s).unwrap();
        let output = toml::to_string_pretty(&val).unwrap();
        // Verify round-trip: the output must be valid TOML
        let reparsed: toml::Value = toml::from_str(&output).unwrap();
        let t = reparsed.as_table().unwrap();
        assert_eq!(t["default_qa"].as_str(), Some("human"));
        assert_eq!(t["default_coder_model"].as_str(), Some("opus"));
        assert_eq!(t["max_coders"].as_integer(), Some(2));
    }
}
+149
View File
@@ -0,0 +1,149 @@
//! Pure validation logic for project settings — no side effects.
//!
//! All functions in this module are pure: given the same input, they always
//! return the same output, and they never perform any I/O.
use super::{Error, project::ProjectSettings};
/// Validate the incoming [`ProjectSettings`] before writing to disk.
///
/// # Errors
/// - [`Error::Validation`] if any field value is invalid.
pub fn validate_project_settings(s: &ProjectSettings) -> Result<(), Error> {
match s.default_qa.as_str() {
"server" | "agent" | "human" => {}
other => {
return Err(Error::Validation(format!(
"Invalid default_qa value '{other}'. Must be one of: server, agent, human"
)));
}
}
Ok(())
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::ProjectConfig;
    use crate::service::settings::project::settings_from_config;
    // Fixture: settings with only `default_qa` overridden.
    fn make_settings(default_qa: &str) -> ProjectSettings {
        let cfg = ProjectConfig {
            default_qa: default_qa.to_string(),
            ..Default::default()
        };
        settings_from_config(&cfg)
    }
    // ── Valid cases ───────────────────────────────────────────────────────────
    #[test]
    fn accepts_server_qa_mode() {
        assert!(validate_project_settings(&make_settings("server")).is_ok());
    }
    #[test]
    fn accepts_agent_qa_mode() {
        assert!(validate_project_settings(&make_settings("agent")).is_ok());
    }
    #[test]
    fn accepts_human_qa_mode() {
        assert!(validate_project_settings(&make_settings("human")).is_ok());
    }
    #[test]
    fn accepts_all_qa_modes() {
        for mode in &["server", "agent", "human"] {
            let result = validate_project_settings(&make_settings(mode));
            assert!(result.is_ok(), "qa mode '{mode}' should be valid");
        }
    }
    // ── Invalid cases ─────────────────────────────────────────────────────────
    #[test]
    fn rejects_empty_qa_mode() {
        let err = validate_project_settings(&make_settings("")).unwrap_err();
        assert!(matches!(err, Error::Validation(_)));
    }
    #[test]
    fn rejects_unknown_qa_mode() {
        let err = validate_project_settings(&make_settings("robot")).unwrap_err();
        assert!(matches!(err, Error::Validation(ref msg) if msg.contains("robot")));
    }
    #[test]
    fn rejects_uppercase_qa_mode() {
        // Validation is case-sensitive by design.
        let err = validate_project_settings(&make_settings("Server")).unwrap_err();
        assert!(matches!(err, Error::Validation(_)));
    }
    #[test]
    fn rejects_partial_qa_mode() {
        let err = validate_project_settings(&make_settings("serv")).unwrap_err();
        assert!(matches!(err, Error::Validation(_)));
    }
    #[test]
    fn rejects_qa_mode_with_trailing_space() {
        // No trimming happens before validation.
        let err = validate_project_settings(&make_settings("server ")).unwrap_err();
        assert!(matches!(err, Error::Validation(_)));
    }
    #[test]
    fn error_message_contains_invalid_value() {
        let err = validate_project_settings(&make_settings("bad_mode")).unwrap_err();
        if let Error::Validation(msg) = err {
            assert!(
                msg.contains("bad_mode"),
                "error message should include the bad value"
            );
            assert!(
                msg.contains("server") && msg.contains("agent") && msg.contains("human"),
                "error message should list valid values"
            );
        } else {
            panic!("expected ValidationError");
        }
    }
    // ── Settings with other fields set ───────────────────────────────────────
    #[test]
    fn valid_settings_with_all_optional_fields_set() {
        let s = ProjectSettings {
            default_qa: "agent".to_string(),
            default_coder_model: Some("opus".to_string()),
            max_coders: Some(4),
            max_retries: 5,
            base_branch: Some("main".to_string()),
            rate_limit_notifications: false,
            timezone: Some("UTC".to_string()),
            rendezvous: Some("ws://host:3001/crdt-sync".to_string()),
            watcher_sweep_interval_secs: 30,
            watcher_done_retention_secs: 3600,
        };
        assert!(validate_project_settings(&s).is_ok());
    }
    #[test]
    fn valid_settings_with_no_optional_fields() {
        let s = ProjectSettings {
            default_qa: "human".to_string(),
            default_coder_model: None,
            max_coders: None,
            max_retries: 2,
            base_branch: None,
            rate_limit_notifications: true,
            timezone: None,
            rendezvous: None,
            watcher_sweep_interval_secs: 60,
            watcher_done_retention_secs: 14400,
        };
        assert!(validate_project_settings(&s).is_ok());
    }
}
+64
View File
@@ -0,0 +1,64 @@
//! Shell I/O — the ONLY place in `service::shell/` that may perform side effects.
//!
//! Side effects here include: filesystem existence and canonicalization checks,
//! process spawning via `std::process::Command`, and reading pipe output.
//! All pure logic (pattern matching, output truncation, count parsing) lives in
//! `path_guard.rs`.
use super::Error;
use std::path::{Path, PathBuf};
/// Validate that `working_dir` is an absolute path that exists on disk and
/// lies inside the project's `.huskies/worktrees/` or `.huskies/merge_workspace/`
/// directory. Returns the canonicalized path on success.
///
/// # Errors
/// - [`Error::Validation`] if the path is relative or does not exist.
/// - [`Error::PathNotAllowed`] if the path is outside the allowed roots.
/// - [`Error::Io`] if canonicalization fails.
pub fn validate_working_dir(working_dir: &str, project_root: &Path) -> Result<PathBuf, Error> {
let wd = PathBuf::from(working_dir);
if !wd.is_absolute() {
return Err(Error::Validation(
"working_dir must be an absolute path".to_string(),
));
}
if !wd.exists() {
return Err(Error::Validation(format!(
"working_dir does not exist: {working_dir}"
)));
}
let worktrees_root = project_root.join(".huskies").join("worktrees");
let canonical_wd = wd
.canonicalize()
.map_err(|e| Error::Io(format!("Cannot canonicalize working_dir: {e}")))?;
let canonical_wt = if worktrees_root.exists() {
worktrees_root
.canonicalize()
.map_err(|e| Error::Io(format!("Cannot canonicalize worktrees root: {e}")))?
} else {
return Err(Error::PathNotAllowed(
"No worktrees directory found in project".to_string(),
));
};
// Also allow the merge workspace so mergemaster can fix conflicts.
let merge_workspace = project_root.join(".huskies").join("merge_workspace");
let canonical_mw = merge_workspace.canonicalize().unwrap_or_default();
let in_worktrees = canonical_wd.starts_with(&canonical_wt);
let in_merge_ws =
!canonical_mw.as_os_str().is_empty() && canonical_wd.starts_with(&canonical_mw);
if !in_worktrees && !in_merge_ws {
return Err(Error::PathNotAllowed(format!(
"working_dir must be inside .huskies/worktrees/ or .huskies/merge_workspace/. Got: {working_dir}"
)));
}
Ok(canonical_wd)
}
+90
View File
@@ -0,0 +1,90 @@
//! Shell service — command safety, path sandboxing, and output helpers.
//!
//! Extracted from `http/mcp/shell_tools.rs` following the conventions in
//! `docs/architecture/service-modules.md`:
//! - `mod.rs` (this file) — public API, typed [`Error`], orchestration
//! - `io.rs` — the ONLY place that performs side effects (filesystem checks)
//! - `path_guard.rs` — pure command-safety checks and output utilities
pub mod io;
pub mod path_guard;
#[allow(unused_imports)]
pub use path_guard::{
BLOCKED_BINARIES, BLOCKED_PATTERNS, extract_count, is_dangerous, parse_test_counts,
truncate_output,
};
// ── Error type ────────────────────────────────────────────────────────────────
/// Typed errors returned by `service::shell` functions.
///
/// HTTP handlers map these to status codes:
/// - [`Error::DangerousCommand`] → 400 Bad Request
/// - [`Error::PathNotAllowed`] → 400 Bad Request (sandbox violation)
/// - [`Error::Validation`] → 400 Bad Request
/// - [`Error::Io`] → 500 Internal Server Error
/// - [`Error::UpstreamFailure`] → 500 Internal Server Error
#[allow(dead_code)]
#[derive(Debug)]
pub enum Error {
    /// The command matches a blocked pattern or binary.
    DangerousCommand(String),
    /// The working directory is outside the allowed sandbox.
    PathNotAllowed(String),
    /// A required argument is missing or has an invalid value.
    Validation(String),
    /// A filesystem or process I/O operation failed.
    Io(String),
    /// An upstream dependency (e.g. the shell) returned an unexpected error.
    UpstreamFailure(String),
}
impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::DangerousCommand(msg) => write!(f, "Dangerous command: {msg}"),
            Self::PathNotAllowed(msg) => write!(f, "Path not allowed: {msg}"),
            Self::Validation(msg) => write!(f, "Validation error: {msg}"),
            Self::Io(msg) => write!(f, "I/O error: {msg}"),
            Self::UpstreamFailure(msg) => write!(f, "Upstream failure: {msg}"),
        }
    }
}
// Implementing the standard Error trait lets callers box these errors
// (`Box<dyn std::error::Error>`), propagate them with `?` across error
// types, and hand them to generic error-reporting machinery.
impl std::error::Error for Error {}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    /// Shared helper: render `err` via `Display` and require `prefix` in it.
    fn assert_prefix(err: Error, prefix: &str) {
        let rendered = format!("{err}");
        assert!(
            rendered.contains(prefix),
            "expected '{prefix}' in '{rendered}'"
        );
    }

    #[test]
    fn error_display_dangerous_command() {
        assert_prefix(
            Error::DangerousCommand("rm -rf / detected".to_string()),
            "Dangerous command",
        );
    }
    #[test]
    fn error_display_path_not_allowed() {
        assert_prefix(
            Error::PathNotAllowed("outside worktrees".to_string()),
            "Path not allowed",
        );
    }
    #[test]
    fn error_display_validation() {
        assert_prefix(
            Error::Validation("must be absolute".to_string()),
            "Validation error",
        );
    }
    #[test]
    fn error_display_io() {
        assert_prefix(Error::Io("disk full".to_string()), "I/O error");
    }
    #[test]
    fn error_display_upstream_failure() {
        assert_prefix(
            Error::UpstreamFailure("bash not found".to_string()),
            "Upstream failure",
        );
    }
}
+223
View File
@@ -0,0 +1,223 @@
//! Pure command safety and output helpers for `service::shell`.
//!
//! All functions here are free of side effects: no filesystem access,
//! no process spawning, no I/O of any kind. They may be tested without
//! temporary directories or an async runtime.
/// Patterns that are unconditionally blocked regardless of context.
pub static BLOCKED_PATTERNS: &[&str] = &[
    "rm -rf /",
    "rm -fr /",
    "rm -rf /*",
    "rm -fr /*",
    "rm --no-preserve-root",
    // Classic bash fork bomb.
    ":(){ :|:& };:",
    // Raw writes to block devices.
    "> /dev/sda",
    "dd if=/dev",
];
/// Binaries that are unconditionally blocked.
pub static BLOCKED_BINARIES: &[&str] = &[
    "sudo", "su", "shutdown", "reboot", "halt", "poweroff", "mkfs",
];
/// Returns an error message if the command matches a blocked pattern or binary,
/// or `None` if the command is safe to run.
///
/// Checks are purely string-based — no I/O or process inspection.
pub fn is_dangerous(command: &str) -> Option<String> {
    let cmd = command.trim();

    // Substring match against every unconditionally blocked pattern.
    if let Some(pattern) = BLOCKED_PATTERNS.iter().find(|p| cmd.contains(**p)) {
        return Some(format!(
            "Command blocked: dangerous pattern '{pattern}' detected"
        ));
    }

    // Reduce the leading token to a bare binary name (strip any path prefix,
    // so "/usr/bin/sudo" is caught too) and compare against the block list.
    cmd.split_whitespace().next().and_then(|first| {
        let binary = std::path::Path::new(first)
            .file_name()
            .and_then(|n| n.to_str())
            .unwrap_or(first);
        BLOCKED_BINARIES
            .contains(&binary)
            .then(|| format!("Command blocked: '{binary}' is not permitted"))
    })
}
/// Truncate `output` to at most `max_lines` lines, keeping the tail.
///
/// If the output fits within `max_lines`, it is returned unchanged.
/// Otherwise a `"[... N lines omitted ...]"` header is prepended to the
/// last `max_lines` lines so callers still see the most recent output.
pub fn truncate_output(output: &str, max_lines: usize) -> String {
    let total = output.lines().count();
    if total <= max_lines {
        return output.to_string();
    }
    let omitted = total - max_lines;
    // Skip the omitted head and rejoin only the surviving tail lines.
    let tail: Vec<&str> = output.lines().skip(omitted).collect();
    format!("[... {omitted} lines omitted ...]\n{}", tail.join("\n"))
}
/// Parse cumulative passed/failed counts from `cargo test` output.
///
/// Scans each line for `"test result:"` summaries and accumulates the
/// `passed` and `failed` counts across all crates in the output.
pub fn parse_test_counts(output: &str) -> (u64, u64) {
    let mut total_passed = 0u64;
    let mut total_failed = 0u64;
    for line in output.lines().filter(|l| l.contains("test result:")) {
        total_passed += extract_count(line, "passed").unwrap_or(0);
        total_failed += extract_count(line, "failed").unwrap_or(0);
    }
    (total_passed, total_failed)
}
/// Extract the integer immediately before `label` in `line`.
///
/// For example, `extract_count("5 passed; 0 failed", "passed")` returns
/// `Some(5)`. Returns `None` if no digit sequence precedes `label`.
pub fn extract_count(line: &str, label: &str) -> Option<u64> {
    let pos = line.find(label)?;
    let before = line[..pos].trim_end();
    // Walk backwards over the trailing run of ASCII digits. Digits are
    // single-byte, so `start` always lands on a char boundary; this avoids
    // the previous double reverse and its two intermediate String
    // allocations.
    let bytes = before.as_bytes();
    let mut start = bytes.len();
    while start > 0 && bytes[start - 1].is_ascii_digit() {
        start -= 1;
    }
    if start == bytes.len() {
        return None; // no digit sequence directly before `label`
    }
    before[start..].parse().ok()
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    // ── is_dangerous ──────────────────────────────────────────────────────────
    #[test]
    fn is_dangerous_blocks_rm_rf_root() {
        for cmd in ["rm -rf /", " rm -rf / "] {
            assert!(is_dangerous(cmd).is_some(), "'{cmd}' should be blocked");
        }
    }
    #[test]
    fn is_dangerous_blocks_rm_fr_root() {
        assert!(is_dangerous("rm -fr /").is_some());
    }
    #[test]
    fn is_dangerous_blocks_rm_rf_star() {
        for cmd in ["rm -rf /*", "rm -fr /*"] {
            assert!(is_dangerous(cmd).is_some(), "'{cmd}' should be blocked");
        }
    }
    #[test]
    fn is_dangerous_blocks_sudo() {
        assert!(is_dangerous("sudo ls").is_some());
    }
    #[test]
    fn is_dangerous_blocks_shutdown() {
        assert!(is_dangerous("shutdown -h now").is_some());
    }
    #[test]
    fn is_dangerous_blocks_mkfs() {
        assert!(is_dangerous("mkfs /dev/sda1").is_some());
    }
    #[test]
    fn is_dangerous_blocks_fork_bomb() {
        assert!(is_dangerous(":(){ :|:& };:").is_some());
    }
    #[test]
    fn is_dangerous_allows_safe_commands() {
        let safe = ["cargo build", "npm test", "git status", "ls -la", "rm -rf target/"];
        for cmd in safe {
            assert!(is_dangerous(cmd).is_none(), "'{cmd}' should be allowed");
        }
    }
    // ── truncate_output ───────────────────────────────────────────────────────
    #[test]
    fn truncate_short_text_unchanged() {
        let text = "line1\nline2\nline3";
        assert_eq!(truncate_output(text, 10), text);
    }
    #[test]
    fn truncate_long_text_keeps_tail() {
        let text = (1..=200)
            .map(|i| format!("line {i}"))
            .collect::<Vec<_>>()
            .join("\n");
        let truncated = truncate_output(&text, 50);
        assert!(
            truncated.contains("line 200"),
            "should keep last line: {truncated}"
        );
        assert!(
            truncated.contains("omitted"),
            "should note omitted lines: {truncated}"
        );
        assert!(
            !truncated.contains("line 1\n"),
            "should not keep first line: {truncated}"
        );
    }
    #[test]
    fn truncate_exact_max_unchanged() {
        assert_eq!(truncate_output("a\nb\nc", 3), "a\nb\nc");
    }
    // ── parse_test_counts ─────────────────────────────────────────────────────
    #[test]
    fn parse_counts_extracts_passed_and_failed() {
        let output = "test result: ok. 5 passed; 0 failed; 0 ignored\ntest result: FAILED. 2 passed; 3 failed;";
        assert_eq!(parse_test_counts(output), (7, 3));
    }
    #[test]
    fn parse_counts_no_results_returns_zeros() {
        assert_eq!(parse_test_counts("no test output here"), (0, 0));
    }
    // ── extract_count ─────────────────────────────────────────────────────────
    #[test]
    fn extract_count_finds_number_before_label() {
        assert_eq!(extract_count("5 passed; 0 failed", "passed"), Some(5));
        assert_eq!(extract_count("0 failed", "failed"), Some(0));
        assert_eq!(extract_count("no number here passed", "passed"), None);
    }
    #[test]
    fn extract_count_returns_none_for_missing_label() {
        assert_eq!(extract_count("5 passed", "failed"), None);
    }
}
+129
View File
@@ -0,0 +1,129 @@
//! Pure criterion helpers for `service::story`.
//!
//! These functions parse, validate, and manipulate story acceptance criteria
//! without performing any I/O.
use crate::workflow::{TestCaseResult, TestStatus};
use serde_json::Value;
/// Parse an optional JSON array of test-case objects into a `Vec<TestCaseResult>`.
///
/// Each object must have `"name"` (string) and `"status"` (`"pass"` or `"fail"`)
/// fields. The optional `"details"` field is preserved when present.
///
/// Returns an empty vector for `None` or `Value::Null` inputs.
///
/// # Errors
/// Returns `Err(String)` if the value is not an array, or if any item is
/// missing a required field or has an unrecognised status string.
pub fn parse_test_cases(value: Option<&Value>) -> Result<Vec<TestCaseResult>, String> {
    let items = match value {
        None | Some(Value::Null) => return Ok(Vec::new()),
        Some(Value::Array(items)) => items,
        Some(_) => return Err("Expected array for test cases".to_string()),
    };
    let mut cases = Vec::with_capacity(items.len());
    for item in items {
        // "name" is required and must be a string.
        let name = match item.get("name").and_then(Value::as_str) {
            Some(n) => n.to_string(),
            None => return Err("Test case missing 'name'".to_string()),
        };
        // "status" is required and restricted to the two known values.
        let status = match item.get("status").and_then(Value::as_str) {
            Some("pass") => TestStatus::Pass,
            Some("fail") => TestStatus::Fail,
            Some(other) => {
                return Err(format!(
                    "Invalid test status '{other}'. Use 'pass' or 'fail'."
                ));
            }
            None => return Err("Test case missing 'status'".to_string()),
        };
        // "details" is optional free-form text.
        let details = item
            .get("details")
            .and_then(Value::as_str)
            .map(String::from);
        cases.push(TestCaseResult {
            name,
            status,
            details,
        });
    }
    Ok(cases)
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    #[test]
    fn parse_none_returns_empty() {
        assert!(parse_test_cases(None).unwrap().is_empty());
    }
    #[test]
    fn parse_null_value_returns_empty() {
        let value = json!(null);
        assert!(parse_test_cases(Some(&value)).unwrap().is_empty());
    }
    #[test]
    fn parse_valid_cases() {
        let payload = json!([
            {"name": "test1", "status": "pass"},
            {"name": "test2", "status": "fail", "details": "assertion failed"}
        ]);
        let cases = parse_test_cases(Some(&payload)).unwrap();
        assert_eq!(cases.len(), 2);
        assert_eq!(cases[0].name, "test1");
        assert_eq!(cases[0].status, TestStatus::Pass);
        assert_eq!(cases[1].status, TestStatus::Fail);
        assert_eq!(cases[1].details.as_deref(), Some("assertion failed"));
    }
    #[test]
    fn parse_invalid_status_returns_error() {
        let payload = json!([{"name": "t", "status": "maybe"}]);
        assert!(parse_test_cases(Some(&payload)).is_err());
    }
    #[test]
    fn parse_non_array_returns_error() {
        let payload = json!({"invalid": "input"});
        let err = parse_test_cases(Some(&payload)).unwrap_err();
        assert!(err.contains("Expected array"));
    }
    #[test]
    fn parse_missing_name_returns_error() {
        let payload = json!([{"status": "pass"}]);
        let err = parse_test_cases(Some(&payload)).unwrap_err();
        assert!(err.contains("name"));
    }
    #[test]
    fn parse_missing_status_returns_error() {
        let payload = json!([{"name": "test1"}]);
        let err = parse_test_cases(Some(&payload)).unwrap_err();
        assert!(err.contains("status"));
    }
    #[test]
    fn parse_details_is_optional() {
        let payload = json!([{"name": "no_details", "status": "pass"}]);
        let cases = parse_test_cases(Some(&payload)).unwrap();
        assert_eq!(cases[0].details, None);
    }
}
+77
View File
@@ -0,0 +1,77 @@
//! Pure front-matter helpers for `service::story`.
//!
//! These functions validate and inspect story front-matter field values
//! without performing any I/O. Parsing is delegated to `crate::io::story_metadata`.
/// Return `true` if `stage` is a recognised pipeline stage directory name.
///
/// Valid stage names match the `.huskies/work/N_name/` directory scheme.
#[allow(dead_code)]
pub fn is_valid_stage(stage: &str) -> bool {
    const STAGE_DIRS: [&str; 6] = [
        "1_backlog",
        "2_current",
        "3_qa",
        "4_merge",
        "5_done",
        "6_archived",
    ];
    STAGE_DIRS.contains(&stage)
}
/// Map a human-readable stage alias (e.g. `"backlog"`) to its directory name
/// (e.g. `"1_backlog"`). Returns `None` for unrecognised aliases.
#[allow(dead_code)]
pub fn stage_alias_to_dir(alias: &str) -> Option<&'static str> {
    // Each directory name is its numeric prefix plus the short alias
    // ("3_qa" → "qa"), so one table serves both spellings.
    const STAGE_DIRS: [&str; 6] = [
        "1_backlog",
        "2_current",
        "3_qa",
        "4_merge",
        "5_done",
        "6_archived",
    ];
    STAGE_DIRS.iter().copied().find(|&dir| {
        dir == alias
            || dir
                .split_once('_')
                .map_or(false, |(_, short)| short == alias)
    })
}
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn is_valid_stage_accepts_all_known_stages() {
        let stages = ["1_backlog", "2_current", "3_qa", "4_merge", "5_done", "6_archived"];
        for stage in stages {
            assert!(is_valid_stage(stage), "'{stage}' should be valid");
        }
    }
    #[test]
    fn is_valid_stage_rejects_unknown() {
        for stage in ["current", "backlog", "7_future", ""] {
            assert!(!is_valid_stage(stage), "'{stage}' should be invalid");
        }
    }
    #[test]
    fn stage_alias_maps_short_names() {
        let expected = [
            ("backlog", "1_backlog"),
            ("current", "2_current"),
            ("qa", "3_qa"),
            ("merge", "4_merge"),
            ("done", "5_done"),
            ("archived", "6_archived"),
        ];
        for (alias, dir) in expected {
            assert_eq!(stage_alias_to_dir(alias), Some(dir));
        }
    }
    #[test]
    fn stage_alias_maps_full_dir_names() {
        assert_eq!(stage_alias_to_dir("1_backlog"), Some("1_backlog"));
        assert_eq!(stage_alias_to_dir("6_archived"), Some("6_archived"));
    }
    #[test]
    fn stage_alias_returns_none_for_unknown() {
        for alias in ["unknown", ""] {
            assert_eq!(stage_alias_to_dir(alias), None);
        }
    }
}
+7
View File
@@ -0,0 +1,7 @@
//! Story I/O — the ONLY place in `service::story/` that may perform side effects.
//!
//! Currently, the bulk of story file I/O is handled by `crate::http::workflow`
//! (story file creation, criterion editing, stage moves) and
//! `crate::io::story_metadata` (front-matter parsing, merge-failure writes).
//! This file is the designated home for any future story-specific I/O helpers
//! that are extracted from those modules.
+41
View File
@@ -0,0 +1,41 @@
//! Pure story-lifecycle helpers for `service::story`.
//!
//! These functions reason about story IDs and dependencies without performing
//! any I/O. They inform routing decisions in `mod.rs` and the MCP adapter.
pub use crate::service::common::item_id::{
extract_item_number as story_number, has_valid_id_prefix,
};
// ── Tests ─────────────────────────────────────────────────────────────────────
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn story_number_extracts_prefix() {
        let cases = [
            ("42_story_foo", "42"),
            ("1_bug_bar", "1"),
            ("100_refactor_baz", "100"),
        ];
        for (id, number) in cases {
            assert_eq!(story_number(id), Some(number));
        }
    }
    #[test]
    fn story_number_returns_none_for_no_numeric_prefix() {
        for id in ["story_without_number", "abc_story", ""] {
            assert_eq!(story_number(id), None, "'{id}' has no numeric prefix");
        }
    }
    #[test]
    fn has_valid_id_prefix_returns_true_for_valid_ids() {
        assert!(has_valid_id_prefix("42_story_foo"));
        assert!(has_valid_id_prefix("1_bug_bar"));
    }
    #[test]
    fn has_valid_id_prefix_returns_false_for_invalid_ids() {
        assert!(!has_valid_id_prefix("story_no_number"));
        assert!(!has_valid_id_prefix(""));
    }
}

Some files were not shown because too many files have changed in this diff Show More