fix: clean up clippy warnings + cargo fmt across post-refactor surface
The 13-file refactor pass (commitsdb00a5d4througheca15b4e) introduced ~89 clippy errors and 38 cargo fmt issues — every agent in every worktree hit them on script/test, burning their turn budget on cleanup before doing real story work. This is the silent kill behind 644, 652, 655, 664, 667 all hitting watchdog limits this round. Changes: - cargo fmt --all across 37 files (formatting normalisation only) - #![allow(unused_imports, dead_code)] on 24 split modules where the python-script splitter imported liberally to be safe; tighter cleanup per-import will happen as agents touch each module - Removed truly-dead re-exports (cleanup_merge_workspace, slog_warn from http/mcp/mod.rs, CliArgs/print_help from main.rs) - Prefixed _auth_msg in crdt_sync/server.rs (handshake helper return is bound but not consumed) - Converted dangling /// doc block in crdt_sync/mod.rs to //! so it attaches to the module - Removed empty lines after doc comments in 4 spots (clippy lint) All 2636 tests pass; clippy --all-targets -- -D warnings clean.
This commit is contained in:
@@ -5,7 +5,7 @@ use serde::Serialize;
|
||||
mod conflicts;
|
||||
mod squash;
|
||||
|
||||
pub(crate) use squash::{cleanup_merge_workspace, run_squash_merge};
|
||||
pub(crate) use squash::run_squash_merge;
|
||||
|
||||
/// Status of an async merge job.
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
//! Squash-merge orchestration: rebase agent work onto master and run post-merge gates.
|
||||
|
||||
#![allow(unused_imports, dead_code)]
|
||||
use std::path::Path;
|
||||
use std::process::Command;
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::config::ProjectConfig;
|
||||
use super::conflicts::try_resolve_conflicts;
|
||||
use super::super::gates::run_project_tests;
|
||||
use super::conflicts::try_resolve_conflicts;
|
||||
use super::{MergeReport, SquashMergeResult};
|
||||
use crate::config::ProjectConfig;
|
||||
|
||||
/// Global lock ensuring only one squash-merge runs at a time.
|
||||
///
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -127,4 +127,3 @@ pub(crate) fn should_block_story(
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
//! Pipeline advance — moves stories forward through pipeline stages after agent completion.
|
||||
#![allow(unused_imports, dead_code)]
|
||||
use crate::config::ProjectConfig;
|
||||
use crate::io::watcher::WatcherEvent;
|
||||
use crate::slog;
|
||||
@@ -483,11 +484,10 @@ impl AgentPool {
|
||||
///
|
||||
/// This is a **non-async** function so it does not participate in the opaque
|
||||
/// type cycle between `start_agent` and `run_server_owned_completion`.
|
||||
|
||||
mod helpers;
|
||||
|
||||
pub(crate) use helpers::{should_block_story, spawn_pipeline_advance};
|
||||
use helpers::{resolve_qa_mode_from_store, write_review_hold_to_store};
|
||||
pub(crate) use helpers::{should_block_story, spawn_pipeline_advance};
|
||||
|
||||
mod tests {
|
||||
use super::super::super::AgentPool;
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
//! Agent start — spawns a new agent process in a worktree for a given story.
|
||||
#![allow(unused_imports, dead_code)]
|
||||
use crate::agent_log::AgentLogWriter;
|
||||
use crate::config::ProjectConfig;
|
||||
use crate::slog_error;
|
||||
@@ -21,7 +22,6 @@ mod validation;
|
||||
|
||||
use validation::{read_front_matter_agent, validate_agent_stage};
|
||||
|
||||
|
||||
impl AgentPool {
|
||||
/// Start an agent for a story: load config, create worktree, spawn agent.
|
||||
///
|
||||
@@ -315,7 +315,6 @@ impl AgentPool {
|
||||
inactivity_timeout_secs,
|
||||
));
|
||||
|
||||
|
||||
// Store the task handle while the agent is still Pending.
|
||||
{
|
||||
let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
|
||||
|
||||
@@ -16,14 +16,14 @@ use crate::config::ProjectConfig;
|
||||
use crate::io::watcher::WatcherEvent;
|
||||
use crate::slog_error;
|
||||
|
||||
use super::super::super::{
|
||||
AgentEvent, AgentStatus, PipelineStage, agent_config_stage, pipeline_stage,
|
||||
};
|
||||
use super::super::super::merge::MergeJob;
|
||||
use super::super::AgentPool;
|
||||
use super::super::super::runtime::{
|
||||
AgentRuntime, ClaudeCodeRuntime, GeminiRuntime, OpenAiRuntime, RuntimeContext,
|
||||
};
|
||||
use super::super::super::{
|
||||
AgentEvent, AgentStatus, PipelineStage, agent_config_stage, pipeline_stage,
|
||||
};
|
||||
use super::super::AgentPool;
|
||||
use super::super::types::StoryAgent;
|
||||
|
||||
/// Run the background worktree-creation + agent-launch flow.
|
||||
@@ -66,296 +66,289 @@ pub(super) async fn run_agent_spawn(
|
||||
let child_killers_clone = child_killers;
|
||||
let watcher_tx_clone = watcher_tx;
|
||||
let merge_jobs_clone = merge_jobs;
|
||||
let _ = inactivity_timeout_secs; // currently unused inside the closure body
|
||||
let _ = inactivity_timeout_secs; // currently unused inside the closure body
|
||||
|
||||
// Step 1: create the worktree (slow — git checkout, pnpm install, etc.)
|
||||
let wt_info = match crate::worktree::create_worktree(
|
||||
&project_root_clone,
|
||||
&sid,
|
||||
&config_clone,
|
||||
port_for_task,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(wt) => wt,
|
||||
Err(e) => {
|
||||
let error_msg = format!("Failed to create worktree: {e}");
|
||||
slog_error!("[agents] {error_msg}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
message: error_msg,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Step 2: store worktree info and render agent command/args/prompt.
|
||||
let wt_path_str = wt_info.path.to_string_lossy().to_string();
|
||||
{
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.worktree_info = Some(wt_info.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let (command, args, mut prompt) = match config_clone.render_agent_args(
|
||||
&wt_path_str,
|
||||
&sid,
|
||||
Some(&aname),
|
||||
Some(&wt_info.base_branch),
|
||||
) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
let error_msg = format!("Failed to render agent args: {e}");
|
||||
slog_error!("[agents] {error_msg}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
message: error_msg,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Append project-local prompt content (.huskies/AGENT.md) to the
|
||||
// baked-in prompt so every agent role sees project-specific guidance
|
||||
// without any config changes. The file is read fresh each spawn;
|
||||
// if absent or empty, the prompt is unchanged and no warning is logged.
|
||||
if let Some(local) =
|
||||
crate::agents::local_prompt::read_project_local_prompt(&project_root_clone)
|
||||
{
|
||||
prompt.push_str("\n\n");
|
||||
prompt.push_str(&local);
|
||||
}
|
||||
|
||||
// Build the effective prompt and determine resume session.
|
||||
//
|
||||
// When resuming a previous session, discard the full rendered prompt
|
||||
// (which would re-read CLAUDE.md and README) and send only the gate
|
||||
// failure context as a new message. On a fresh start, append the
|
||||
// failure context to the original prompt as before.
|
||||
let effective_prompt = match &session_id_to_resume_owned {
|
||||
Some(_) => resume_context_owned.unwrap_or_default(),
|
||||
None => {
|
||||
if let Some(ctx) = resume_context_owned {
|
||||
prompt.push_str(&ctx);
|
||||
}
|
||||
prompt
|
||||
}
|
||||
};
|
||||
|
||||
// Step 3: transition to Running now that the worktree is ready.
|
||||
{
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Running;
|
||||
}
|
||||
}
|
||||
let _ = tx_clone.send(AgentEvent::Status {
|
||||
// Step 1: create the worktree (slow — git checkout, pnpm install, etc.)
|
||||
let wt_info = match crate::worktree::create_worktree(
|
||||
&project_root_clone,
|
||||
&sid,
|
||||
&config_clone,
|
||||
port_for_task,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(wt) => wt,
|
||||
Err(e) => {
|
||||
let error_msg = format!("Failed to create worktree: {e}");
|
||||
slog_error!("[agents] {error_msg}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
status: "running".to_string(),
|
||||
});
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
|
||||
// Step 4: launch the agent process via the configured runtime.
|
||||
let runtime_name = config_clone
|
||||
.find_agent(&aname)
|
||||
.and_then(|a| a.runtime.as_deref())
|
||||
.unwrap_or("claude-code");
|
||||
|
||||
let run_result = match runtime_name {
|
||||
"claude-code" => {
|
||||
let runtime = ClaudeCodeRuntime::new(
|
||||
child_killers_clone.clone(),
|
||||
watcher_tx_clone.clone(),
|
||||
);
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned.clone(),
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
"gemini" => {
|
||||
let runtime = GeminiRuntime::new();
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned.clone(),
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
"openai" => {
|
||||
let runtime = OpenAiRuntime::new();
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned,
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
other => Err(format!(
|
||||
"Unknown agent runtime '{other}'; check the 'runtime' field in project.toml. \
|
||||
Supported: 'claude-code', 'gemini', 'openai'"
|
||||
)),
|
||||
message: error_msg,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
match run_result {
|
||||
Ok(result) => {
|
||||
// Persist token usage if the agent reported it.
|
||||
if let Some(ref usage) = result.token_usage
|
||||
&& let Ok(agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get(&key_clone)
|
||||
&& let Some(ref pr) = agent.project_root
|
||||
{
|
||||
let model = config_clone
|
||||
.find_agent(&aname)
|
||||
.and_then(|a| a.model.clone());
|
||||
let record = crate::agents::token_usage::build_record(
|
||||
&sid,
|
||||
&aname,
|
||||
model,
|
||||
usage.clone(),
|
||||
);
|
||||
if let Err(e) = crate::agents::token_usage::append_record(pr, &record) {
|
||||
slog_error!(
|
||||
"[agents] Failed to persist token usage for \
|
||||
// Step 2: store worktree info and render agent command/args/prompt.
|
||||
let wt_path_str = wt_info.path.to_string_lossy().to_string();
|
||||
{
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.worktree_info = Some(wt_info.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let (command, args, mut prompt) = match config_clone.render_agent_args(
|
||||
&wt_path_str,
|
||||
&sid,
|
||||
Some(&aname),
|
||||
Some(&wt_info.base_branch),
|
||||
) {
|
||||
Ok(result) => result,
|
||||
Err(e) => {
|
||||
let error_msg = format!("Failed to render agent args: {e}");
|
||||
slog_error!("[agents] {error_msg}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
message: error_msg,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Append project-local prompt content (.huskies/AGENT.md) to the
|
||||
// baked-in prompt so every agent role sees project-specific guidance
|
||||
// without any config changes. The file is read fresh each spawn;
|
||||
// if absent or empty, the prompt is unchanged and no warning is logged.
|
||||
if let Some(local) = crate::agents::local_prompt::read_project_local_prompt(&project_root_clone)
|
||||
{
|
||||
prompt.push_str("\n\n");
|
||||
prompt.push_str(&local);
|
||||
}
|
||||
|
||||
// Build the effective prompt and determine resume session.
|
||||
//
|
||||
// When resuming a previous session, discard the full rendered prompt
|
||||
// (which would re-read CLAUDE.md and README) and send only the gate
|
||||
// failure context as a new message. On a fresh start, append the
|
||||
// failure context to the original prompt as before.
|
||||
let effective_prompt = match &session_id_to_resume_owned {
|
||||
Some(_) => resume_context_owned.unwrap_or_default(),
|
||||
None => {
|
||||
if let Some(ctx) = resume_context_owned {
|
||||
prompt.push_str(&ctx);
|
||||
}
|
||||
prompt
|
||||
}
|
||||
};
|
||||
|
||||
// Step 3: transition to Running now that the worktree is ready.
|
||||
{
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Running;
|
||||
}
|
||||
}
|
||||
let _ = tx_clone.send(AgentEvent::Status {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
status: "running".to_string(),
|
||||
});
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
|
||||
// Step 4: launch the agent process via the configured runtime.
|
||||
let runtime_name = config_clone
|
||||
.find_agent(&aname)
|
||||
.and_then(|a| a.runtime.as_deref())
|
||||
.unwrap_or("claude-code");
|
||||
|
||||
let run_result = match runtime_name {
|
||||
"claude-code" => {
|
||||
let runtime =
|
||||
ClaudeCodeRuntime::new(child_killers_clone.clone(), watcher_tx_clone.clone());
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned.clone(),
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
"gemini" => {
|
||||
let runtime = GeminiRuntime::new();
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned.clone(),
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
"openai" => {
|
||||
let runtime = OpenAiRuntime::new();
|
||||
let ctx = RuntimeContext {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
command,
|
||||
args,
|
||||
prompt: effective_prompt,
|
||||
cwd: wt_path_str,
|
||||
inactivity_timeout_secs,
|
||||
mcp_port: port_for_task,
|
||||
session_id_to_resume: session_id_to_resume_owned,
|
||||
};
|
||||
runtime
|
||||
.start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
|
||||
.await
|
||||
}
|
||||
other => Err(format!(
|
||||
"Unknown agent runtime '{other}'; check the 'runtime' field in project.toml. \
|
||||
Supported: 'claude-code', 'gemini', 'openai'"
|
||||
)),
|
||||
};
|
||||
|
||||
match run_result {
|
||||
Ok(result) => {
|
||||
// Persist token usage if the agent reported it.
|
||||
if let Some(ref usage) = result.token_usage
|
||||
&& let Ok(agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get(&key_clone)
|
||||
&& let Some(ref pr) = agent.project_root
|
||||
{
|
||||
let model = config_clone
|
||||
.find_agent(&aname)
|
||||
.and_then(|a| a.model.clone());
|
||||
let record =
|
||||
crate::agents::token_usage::build_record(&sid, &aname, model, usage.clone());
|
||||
if let Err(e) = crate::agents::token_usage::append_record(pr, &record) {
|
||||
slog_error!(
|
||||
"[agents] Failed to persist token usage for \
|
||||
{sid}:{aname}: {e}"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Mergemaster agents have their own completion path via
|
||||
// start_merge_agent_work / run_merge_pipeline and must NOT go
|
||||
// through server-owned gates. When a mergemaster exits early
|
||||
// (e.g. rate-limited before calling start_merge_agent_work) the
|
||||
// feature-branch worktree compiles fine and post-merge tests on
|
||||
// master pass (nothing changed), which would wrongly advance the
|
||||
// story to 5_done/ without any squash merge having occurred.
|
||||
// Instead: just remove the agent from the pool and let
|
||||
// auto-assign restart a new mergemaster for the story.
|
||||
let stage = config_clone
|
||||
.find_agent(&aname)
|
||||
.map(agent_config_stage)
|
||||
.unwrap_or_else(|| pipeline_stage(&aname));
|
||||
if stage == PipelineStage::Mergemaster {
|
||||
let (tx_done, done_session_id) = {
|
||||
let mut lock = match agents_ref.lock() {
|
||||
Ok(a) => a,
|
||||
Err(_) => return,
|
||||
};
|
||||
if let Some(agent) = lock.remove(&key_clone) {
|
||||
(agent.tx, agent.session_id.or(result.session_id))
|
||||
} else {
|
||||
(tx_clone.clone(), result.session_id)
|
||||
}
|
||||
};
|
||||
// Clear any stale Running merge job so the next mergemaster
|
||||
// can call start_merge_agent_work without hitting "Merge
|
||||
// already in progress" (bug 498).
|
||||
if let Ok(mut jobs) = merge_jobs_clone.lock()
|
||||
&& let Some(job) = jobs.get(&sid)
|
||||
&& matches!(job.status, crate::agents::merge::MergeJobStatus::Running)
|
||||
{
|
||||
jobs.remove(&sid);
|
||||
}
|
||||
let _ = tx_done.send(AgentEvent::Done {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
session_id: done_session_id,
|
||||
});
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
// Send a WorkItem event so the auto-assign watcher loop
|
||||
// re-dispatches a new mergemaster if the story still needs
|
||||
// merging. This avoids an async call to start_agent inside
|
||||
// a tokio::spawn (which would require Send).
|
||||
let _ = watcher_tx_clone.send(crate::io::watcher::WatcherEvent::WorkItem {
|
||||
stage: "4_merge".to_string(),
|
||||
item_id: sid.clone(),
|
||||
action: "reassign".to_string(),
|
||||
commit_msg: String::new(),
|
||||
from_stage: None,
|
||||
});
|
||||
} else {
|
||||
// Server-owned completion: run acceptance gates automatically
|
||||
// when the agent process exits normally.
|
||||
super::super::pipeline::run_server_owned_completion(
|
||||
&agents_ref,
|
||||
port_for_task,
|
||||
&sid,
|
||||
&aname,
|
||||
result.session_id,
|
||||
watcher_tx_clone.clone(),
|
||||
)
|
||||
.await;
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
slog_error!("[agents] Agent process error for {aname} on {sid}: {e}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
message: e,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Mergemaster agents have their own completion path via
|
||||
// start_merge_agent_work / run_merge_pipeline and must NOT go
|
||||
// through server-owned gates. When a mergemaster exits early
|
||||
// (e.g. rate-limited before calling start_merge_agent_work) the
|
||||
// feature-branch worktree compiles fine and post-merge tests on
|
||||
// master pass (nothing changed), which would wrongly advance the
|
||||
// story to 5_done/ without any squash merge having occurred.
|
||||
// Instead: just remove the agent from the pool and let
|
||||
// auto-assign restart a new mergemaster for the story.
|
||||
let stage = config_clone
|
||||
.find_agent(&aname)
|
||||
.map(agent_config_stage)
|
||||
.unwrap_or_else(|| pipeline_stage(&aname));
|
||||
if stage == PipelineStage::Mergemaster {
|
||||
let (tx_done, done_session_id) = {
|
||||
let mut lock = match agents_ref.lock() {
|
||||
Ok(a) => a,
|
||||
Err(_) => return,
|
||||
};
|
||||
if let Some(agent) = lock.remove(&key_clone) {
|
||||
(agent.tx, agent.session_id.or(result.session_id))
|
||||
} else {
|
||||
(tx_clone.clone(), result.session_id)
|
||||
}
|
||||
};
|
||||
// Clear any stale Running merge job so the next mergemaster
|
||||
// can call start_merge_agent_work without hitting "Merge
|
||||
// already in progress" (bug 498).
|
||||
if let Ok(mut jobs) = merge_jobs_clone.lock()
|
||||
&& let Some(job) = jobs.get(&sid)
|
||||
&& matches!(job.status, crate::agents::merge::MergeJobStatus::Running)
|
||||
{
|
||||
jobs.remove(&sid);
|
||||
}
|
||||
let _ = tx_done.send(AgentEvent::Done {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
session_id: done_session_id,
|
||||
});
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
// Send a WorkItem event so the auto-assign watcher loop
|
||||
// re-dispatches a new mergemaster if the story still needs
|
||||
// merging. This avoids an async call to start_agent inside
|
||||
// a tokio::spawn (which would require Send).
|
||||
let _ = watcher_tx_clone.send(crate::io::watcher::WatcherEvent::WorkItem {
|
||||
stage: "4_merge".to_string(),
|
||||
item_id: sid.clone(),
|
||||
action: "reassign".to_string(),
|
||||
commit_msg: String::new(),
|
||||
from_stage: None,
|
||||
});
|
||||
} else {
|
||||
// Server-owned completion: run acceptance gates automatically
|
||||
// when the agent process exits normally.
|
||||
super::super::pipeline::run_server_owned_completion(
|
||||
&agents_ref,
|
||||
port_for_task,
|
||||
&sid,
|
||||
&aname,
|
||||
result.session_id,
|
||||
watcher_tx_clone.clone(),
|
||||
)
|
||||
.await;
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
slog_error!("[agents] Agent process error for {aname} on {sid}: {e}");
|
||||
let event = AgentEvent::Error {
|
||||
story_id: sid.clone(),
|
||||
agent_name: aname.clone(),
|
||||
message: e,
|
||||
};
|
||||
if let Ok(mut log) = log_clone.lock() {
|
||||
log.push(event.clone());
|
||||
}
|
||||
let _ = tx_clone.send(event);
|
||||
if let Ok(mut agents) = agents_ref.lock()
|
||||
&& let Some(agent) = agents.get_mut(&key_clone)
|
||||
{
|
||||
agent.status = AgentStatus::Failed;
|
||||
}
|
||||
AgentPool::notify_agent_state_changed(&watcher_tx_clone);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -54,10 +54,7 @@ pub(super) fn validate_agent_stage(
|
||||
/// `start_agent` honour an explicit `agent: coder-opus` written by the
|
||||
/// `assign` command (bug 379). Returns `None` when an explicit agent_name
|
||||
/// was already supplied or when the story has no front-matter preference.
|
||||
pub(super) fn read_front_matter_agent(
|
||||
story_id: &str,
|
||||
agent_name: Option<&str>,
|
||||
) -> Option<String> {
|
||||
pub(super) fn read_front_matter_agent(story_id: &str, agent_name: Option<&str>) -> Option<String> {
|
||||
if agent_name.is_some() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user