fix: auto-assign after merge, persistent server logs, remove duplicate pnpm install

- Call auto_assign_available_work at end of merge_agent_work so the next
  story gets picked up without waiting for the PTY exit handler
- Add persistent file logging to .story_kit/logs/server.log so server
  logs survive restarts
- Remove duplicate pnpm install block in run_squash_merge

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Dave
2026-02-26 18:24:27 +00:00
parent b5135ad957
commit 2148531a46
3 changed files with 38 additions and 42 deletions

View File

@@ -1283,6 +1283,12 @@ impl AgentPool {
         false
     };

+    // Mergemaster slot is now free — trigger auto-assign so remaining
+    // items in 4_merge/ (or other stages) get picked up. The normal
+    // server-owned completion handler won't run because we already
+    // removed the agent entry above.
+    self.auto_assign_available_work(project_root).await;
+
     Ok(MergeReport {
         story_id: story_id.to_string(),
         success: true,
@@ -2797,57 +2803,17 @@ fn run_squash_merge(
         });
     }

-    // ── Install frontend dependencies for quality gates ──────────────
-    let frontend_dir_for_install = merge_wt_path.join("frontend");
-    if frontend_dir_for_install.exists() {
-        // Ensure frontend/dist/ exists so cargo clippy (RustEmbed) can compile
-        // even before `pnpm build` has run.
-        let dist_dir = frontend_dir_for_install.join("dist");
-        std::fs::create_dir_all(&dist_dir)
-            .map_err(|e| format!("Failed to create frontend/dist: {e}"))?;
-
-        all_output.push_str("=== pnpm install (merge worktree) ===\n");
-        let pnpm_install = Command::new("pnpm")
-            .args(["install"])
-            .current_dir(&frontend_dir_for_install)
-            .output()
-            .map_err(|e| format!("Failed to run pnpm install: {e}"))?;
-        let install_out = format!(
-            "{}{}",
-            String::from_utf8_lossy(&pnpm_install.stdout),
-            String::from_utf8_lossy(&pnpm_install.stderr)
-        );
-        all_output.push_str(&install_out);
-        all_output.push('\n');
-        if !pnpm_install.status.success() {
-            all_output.push_str("=== pnpm install FAILED — aborting merge ===\n");
-            cleanup_merge_workspace(project_root, &merge_wt_path, &merge_branch);
-            return Ok(SquashMergeResult {
-                success: false,
-                had_conflicts,
-                conflicts_resolved,
-                conflict_details,
-                output: all_output,
-                gates_passed: false,
-            });
-        }
-    }
-
     // ── Install frontend dependencies for quality gates ──────────
     let frontend_dir = merge_wt_path.join("frontend");
     if frontend_dir.exists() {
         // Ensure frontend/dist exists so RustEmbed (cargo clippy) doesn't fail
         // even before pnpm build runs.
         let dist_dir = frontend_dir.join("dist");
-        if !dist_dir.exists() {
-            let _ = std::fs::create_dir_all(&dist_dir);
-        }
+        let _ = std::fs::create_dir_all(&dist_dir);

         all_output.push_str("=== pnpm install (merge worktree) ===\n");
         let pnpm_install = Command::new("pnpm")
-            .args(["install", "--frozen-lockfile"])
+            .args(["install"])
             .current_dir(&frontend_dir)
             .output()
             .map_err(|e| format!("Failed to run pnpm install: {e}"))?;

View File

@@ -7,6 +7,9 @@
 //! `get_server_logs` MCP tool.

 use std::collections::VecDeque;
+use std::fs::OpenOptions;
+use std::io::Write;
+use std::path::PathBuf;
 use std::sync::{Mutex, OnceLock};

 const CAPACITY: usize = 1000;
@@ -68,12 +71,22 @@ impl LogEntry {
 pub struct LogBuffer {
     entries: Mutex<VecDeque<LogEntry>>,
+    log_file: Mutex<Option<PathBuf>>,
 }

 impl LogBuffer {
     fn new() -> Self {
         Self {
             entries: Mutex::new(VecDeque::with_capacity(CAPACITY)),
+            log_file: Mutex::new(None),
+        }
+    }
+
+    /// Set the persistent log file path. Call once at startup after the
+    /// project root is known.
+    pub fn set_log_file(&self, path: PathBuf) {
+        if let Ok(mut f) = self.log_file.lock() {
+            *f = Some(path);
         }
     }
@@ -86,6 +99,15 @@ impl LogBuffer {
             message,
         };
         eprintln!("{}", entry.colored_formatted());

+        // Append to persistent log file (best-effort).
+        if let Ok(guard) = self.log_file.lock()
+            && let Some(ref path) = *guard
+            && let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path)
+        {
+            let _ = writeln!(file, "{}", entry.formatted());
+        }
+
         if let Ok(mut buf) = self.entries.lock() {
             if buf.len() >= CAPACITY {
                 buf.pop_front();
@@ -188,6 +210,7 @@ mod tests {
     fn evicts_oldest_at_capacity() {
         let buf = LogBuffer {
             entries: Mutex::new(VecDeque::with_capacity(CAPACITY)),
+            log_file: Mutex::new(None),
         };
         // Fill past capacity
         for i in 0..=CAPACITY {

View File

@@ -100,6 +100,13 @@ async fn main() -> Result<(), std::io::Error> {
         }
     }

+    // Enable persistent server log file now that the project root is known.
+    if let Some(ref root) = *app_state.project_root.lock().unwrap() {
+        let log_dir = root.join(".story_kit").join("logs");
+        let _ = std::fs::create_dir_all(&log_dir);
+        log_buffer::global().set_log_file(log_dir.join("server.log"));
+    }
+
     let workflow = Arc::new(std::sync::Mutex::new(WorkflowState::default()));

     // Filesystem watcher: broadcast channel for work/ pipeline changes.