diff --git a/frontend/src/components/Chat.test.tsx b/frontend/src/components/Chat.test.tsx
index e98f590..29bbf5f 100644
--- a/frontend/src/components/Chat.test.tsx
+++ b/frontend/src/components/Chat.test.tsx
@@ -26,6 +26,8 @@ type WsHandlers = {
) => void;
};
let capturedWsHandlers: WsHandlers | null = null;
+// Captures the last sendChat call's arguments for assertion.
+let lastSendChatArgs: { messages: Message[]; config: unknown } | null = null;
vi.mock("../api/client", () => {
const api = {
@@ -42,7 +44,9 @@ vi.mock("../api/client", () => {
capturedWsHandlers = handlers;
}
close() {}
- sendChat() {}
+ sendChat(messages: Message[], config: unknown) {
+ lastSendChatArgs = { messages, config };
+ }
cancel() {}
}
return { api, ChatWebSocket };
@@ -580,6 +584,62 @@ describe("Chat localStorage persistence (Story 145)", () => {
expect(storedAfterRemount).toEqual(history);
});
+ it("Bug 245: after refresh, sendChat includes full prior history", async () => {
+ // Step 1: Render, populate messages via onUpdate, then unmount (simulate refresh)
+ const { unmount } = render(
+      <Chat />,
+ );
+ await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
+
+ const priorHistory: Message[] = [
+ { role: "user", content: "What is Rust?" },
+ { role: "assistant", content: "Rust is a systems programming language." },
+ ];
+ act(() => {
+ capturedWsHandlers?.onUpdate(priorHistory);
+ });
+
+ // Verify localStorage has the prior history
+ const stored = JSON.parse(localStorage.getItem(STORAGE_KEY) ?? "[]");
+ expect(stored).toEqual(priorHistory);
+
+ unmount();
+
+ // Step 2: Remount (simulates page reload) — messages load from localStorage
+ capturedWsHandlers = null;
+ lastSendChatArgs = null;
+    render(<Chat />);
+ await waitFor(() => expect(capturedWsHandlers).not.toBeNull());
+
+ // Verify prior messages are displayed
+ expect(await screen.findByText("What is Rust?")).toBeInTheDocument();
+
+ // Step 3: Send a new message — sendChat should include the full prior history
+ const input = screen.getByPlaceholderText("Send a message...");
+ await act(async () => {
+ fireEvent.change(input, { target: { value: "Tell me more" } });
+ });
+ await act(async () => {
+ fireEvent.keyDown(input, { key: "Enter", shiftKey: false });
+ });
+
+ // Verify sendChat was called with ALL prior messages + the new one
+ expect(lastSendChatArgs).not.toBeNull();
+ expect(lastSendChatArgs?.messages).toHaveLength(3);
+ expect(lastSendChatArgs?.messages[0]).toEqual({
+ role: "user",
+ content: "What is Rust?",
+ });
+ expect(lastSendChatArgs?.messages[1]).toEqual({
+ role: "assistant",
+ content: "Rust is a systems programming language.",
+ });
+ expect(lastSendChatArgs?.messages[2]).toEqual({
+ role: "user",
+ content: "Tell me more",
+ });
+ });
+
it("AC5: uses project-scoped storage key", async () => {
const otherKey = "storykit-chat-history:/other/project";
localStorage.setItem(
diff --git a/server/src/llm/chat.rs b/server/src/llm/chat.rs
index 6aa6a3f..edd8b6b 100644
--- a/server/src/llm/chat.rs
+++ b/server/src/llm/chat.rs
@@ -179,6 +179,44 @@ pub fn set_anthropic_api_key(store: &dyn StoreOps, api_key: String) -> Result<()
set_anthropic_api_key_impl(store, &api_key)
}
+/// Build a prompt for Claude Code that includes prior conversation history.
+///
+/// When a Claude Code session cannot be resumed (no session_id), we embed
+/// the prior messages as a structured preamble so the LLM retains context.
+/// If there is only one user message (the current one), the content is
+/// returned as-is with no preamble.
+fn build_claude_code_context_prompt(messages: &[Message], latest_user_content: &str) -> String {
+ // Collect prior messages (everything except the trailing user message).
+ let prior: Vec<&Message> = messages
+ .iter()
+ .rev()
+ .skip(1) // skip the latest user message
+        .collect::<Vec<_>>()
+ .into_iter()
+ .rev()
+ .collect();
+
+ if prior.is_empty() {
+ return latest_user_content.to_string();
+ }
+
+ let mut parts = Vec::new();
+    parts.push("<conversation_history>".to_string());
+ for msg in &prior {
+ let label = match msg.role {
+ Role::User => "User",
+ Role::Assistant => "Assistant",
+ Role::Tool => "Tool",
+ Role::System => continue,
+ };
+ parts.push(format!("[{}]: {}", label, msg.content));
+ }
+    parts.push("</conversation_history>".to_string());
+ parts.push(String::new());
+ parts.push(latest_user_content.to_string());
+ parts.join("\n")
+}
+
#[allow(clippy::too_many_arguments)]
pub async fn chat(
    messages: Vec<Message>,
@@ -224,13 +262,25 @@ where
if is_claude_code {
use crate::llm::providers::claude_code::ClaudeCodeProvider;
- let user_message = messages
+ let latest_user_content = messages
.iter()
.rev()
.find(|m| m.role == Role::User)
.map(|m| m.content.clone())
.ok_or_else(|| "No user message found".to_string())?;
+ // When resuming with a session_id, Claude Code loads its own transcript
+ // from disk — the latest user message is sufficient. Without a
+ // session_id (e.g. after a page refresh) the prior conversation context
+ // would be lost because Claude Code only receives a single prompt
+ // string. In that case, prepend the conversation history so the LLM
+ // retains full context even though the session cannot be resumed.
+ let user_message = if config.session_id.is_some() {
+ latest_user_content
+ } else {
+ build_claude_code_context_prompt(&messages, &latest_user_content)
+ };
+
let project_root = state
.get_project_root()
.unwrap_or_else(|_| std::path::PathBuf::from("."));
@@ -404,7 +454,7 @@ where
}
Ok(ChatResult {
- messages: new_messages,
+        messages: new_messages,
session_id: None,
})
}
@@ -1095,4 +1145,102 @@ mod tests {
let result = execute_tool(&call, &state).await;
assert!(result.starts_with("Error:"), "unexpected result: {result}");
}
+
+ // ---------------------------------------------------------------------------
+ // build_claude_code_context_prompt (Bug 245)
+ // ---------------------------------------------------------------------------
+
+ #[test]
+ fn context_prompt_single_message_returns_content_as_is() {
+ let messages = vec![Message {
+ role: Role::User,
+ content: "hello".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ }];
+ let result = build_claude_code_context_prompt(&messages, "hello");
+ assert_eq!(result, "hello");
+ }
+
+ #[test]
+ fn context_prompt_includes_prior_conversation() {
+ let messages = vec![
+ Message {
+ role: Role::User,
+ content: "What is Rust?".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ Message {
+ role: Role::Assistant,
+ content: "Rust is a systems language.".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ Message {
+ role: Role::User,
+ content: "Tell me more".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ ];
+ let result = build_claude_code_context_prompt(&messages, "Tell me more");
+ assert!(
+            result.contains("<conversation_history>"),
+ "should have history preamble"
+ );
+ assert!(
+ result.contains("[User]: What is Rust?"),
+ "should include prior user message"
+ );
+ assert!(
+ result.contains("[Assistant]: Rust is a systems language."),
+ "should include prior assistant message"
+ );
+ assert!(
+            result.contains("</conversation_history>"),
+ "should close history block"
+ );
+ assert!(
+ result.ends_with("Tell me more"),
+ "should end with latest user message"
+ );
+ }
+
+ #[test]
+ fn context_prompt_skips_system_messages() {
+ let messages = vec![
+ Message {
+ role: Role::System,
+ content: "You are a helpful assistant.".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ Message {
+ role: Role::User,
+ content: "hi".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ Message {
+ role: Role::Assistant,
+ content: "hello".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ Message {
+ role: Role::User,
+ content: "bye".to_string(),
+ tool_calls: None,
+ tool_call_id: None,
+ },
+ ];
+ let result = build_claude_code_context_prompt(&messages, "bye");
+ assert!(
+ !result.contains("helpful assistant"),
+ "should not include system messages"
+ );
+ assert!(result.contains("[User]: hi"));
+ assert!(result.contains("[Assistant]: hello"));
+ }
}