//! Background async work spawned by `AgentPool::start_agent`.
//!
//! `start_agent` returns immediately after registering the agent as `Pending`;
//! this module runs the slow worktree creation, agent process launch, and
//! event streaming in the background (story 157).

use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};

use portable_pty::ChildKiller;
use tokio::sync::broadcast;

use crate::agent_log::AgentLogWriter;
use crate::config::ProjectConfig;
use crate::io::watcher::WatcherEvent;
use crate::slog_error;

use super::super::super::runtime::{
    AgentRuntime, ClaudeCodeRuntime, GeminiRuntime, OpenAiRuntime, RuntimeContext,
};
use super::super::super::{
    AgentEvent, AgentStatus, PipelineStage, agent_config_stage, pipeline_stage,
};
use super::super::AgentPool;
use super::super::types::StoryAgent;

/// Run the background worktree-creation + agent-launch flow.
///
/// Caller (`AgentPool::start_agent`) wraps this in `tokio::spawn` and stores
/// the resulting handle on the Pending entry so cancellation works.
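///
/// A rough sketch of that call site, for orientation only (the `pending_agent`
/// binding and its `spawn_handle` field are assumptions, not the real
/// `start_agent` body):
///
/// ```ignore
/// let handle = tokio::spawn(run_agent_spawn(
///     project_root, config, resume_context, session_id_to_resume,
///     story_id, agent_name, tx, agents, key, event_log, port,
///     log_writer, child_killers, watcher_tx, inactivity_timeout_secs,
///     buffered_events_block,
/// ));
/// pending_agent.spawn_handle = Some(handle); // aborting the handle cancels the spawn
/// ```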
#[allow(clippy::too_many_arguments)]
pub(super) async fn run_agent_spawn(
    project_root: PathBuf,
    config: ProjectConfig,
    resume_context: Option<String>,
    session_id_to_resume: Option<String>,
    story_id: String,
    agent_name: String,
    tx: broadcast::Sender<AgentEvent>,
    agents: Arc<Mutex<HashMap<String, StoryAgent>>>,
    key: String,
    event_log: Arc<Mutex<Vec<AgentEvent>>>,
    port: u16,
    log_writer: Option<Arc<Mutex<AgentLogWriter>>>,
    child_killers: Arc<Mutex<HashMap<String, Box<dyn ChildKiller + Send + Sync>>>>,
    watcher_tx: broadcast::Sender<WatcherEvent>,
    inactivity_timeout_secs: u64,
    // Formatted `<recent-events>` block drained from the previous session's
    // buffer. Prepended to the first agent turn so the agent sees what
    // happened while it was idle (story 736). `None` when there were no
    // buffered events.
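    // An assumed shape, for orientation only (the exact formatting is owned by
    // whatever drains the session buffer, not by this module):
    //   <recent-events>
    //   ...one summary line per pipeline event buffered while idle...
    //   </recent-events>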
    buffered_events_block: Option<String>,
) {
    // Re-bind to the legacy `_clone` / `_owned` names so the body below remains
    // a verbatim copy of the original closure (story 157).
    let project_root_clone = project_root;
    let config_clone = config;
    let resume_context_owned = resume_context;
    let session_id_to_resume_owned = session_id_to_resume;
    let sid = story_id;
    let aname = agent_name;
    let tx_clone = tx;
    let agents_ref = agents;
    let key_clone = key;
    let log_clone = event_log;
    let port_for_task = port;
    let log_writer_clone = log_writer;
    let child_killers_clone = child_killers;
    let watcher_tx_clone = watcher_tx;
    let _ = inactivity_timeout_secs; // no `_clone` alias; passed straight to the runtime contexts below

    // Step 1: create the worktree (slow — git checkout, pnpm install, etc.)
    let wt_info = match crate::worktree::create_worktree(
        &project_root_clone,
        &sid,
        &config_clone,
        port_for_task,
    )
    .await
    {
        Ok(wt) => wt,
        Err(e) => {
            let error_msg = format!("Failed to create worktree: {e}");
            slog_error!("[agents] {error_msg}");
            let event = AgentEvent::Error {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                message: error_msg,
            };
            if let Ok(mut log) = log_clone.lock() {
                log.push(event.clone());
            }
            let _ = tx_clone.send(event);
            if let Ok(mut agents) = agents_ref.lock()
                && let Some(agent) = agents.get_mut(&key_clone)
            {
                agent.status = AgentStatus::Failed;
            }
            AgentPool::notify_agent_state_changed(&watcher_tx_clone);
            return;
        }
    };

    // Step 1.5: Update the source map for changed files since master.
    // Non-blocking — failures are logged but do not gate the spawn.
    {
        let wt_path_for_map = wt_info.path.clone();
        let base_for_map = wt_info.base_branch.clone();
        let map_path = project_root_clone.join(".huskies").join("source-map.json");
        match tokio::task::spawn_blocking(move || {
            source_map_gen::update_for_worktree(&wt_path_for_map, &base_for_map, &map_path)
        })
        .await
        .unwrap_or_else(|e| Err(e.to_string()))
        {
            Ok(()) => {}
            Err(e) => slog_error!("[agents] source map update for {sid}: {e}"),
        }
    }

    // Step 2: store worktree info and render agent command/args/prompt.
    let wt_path_str = wt_info.path.to_string_lossy().to_string();
    {
        if let Ok(mut agents) = agents_ref.lock()
            && let Some(agent) = agents.get_mut(&key_clone)
        {
            agent.worktree_info = Some(wt_info.clone());
        }
    }

    let (command, args, mut prompt) = match config_clone.render_agent_args(
        &wt_path_str,
        &sid,
        Some(&aname),
        Some(&wt_info.base_branch),
    ) {
        Ok(result) => result,
        Err(e) => {
            let error_msg = format!("Failed to render agent args: {e}");
            slog_error!("[agents] {error_msg}");
            let event = AgentEvent::Error {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                message: error_msg,
            };
            if let Ok(mut log) = log_clone.lock() {
                log.push(event.clone());
            }
            let _ = tx_clone.send(event);
            if let Ok(mut agents) = agents_ref.lock()
                && let Some(agent) = agents.get_mut(&key_clone)
            {
                agent.status = AgentStatus::Failed;
            }
            AgentPool::notify_agent_state_changed(&watcher_tx_clone);
            return;
        }
    };

    // Append project-local prompt content (.huskies/AGENT.md) to the
    // baked-in prompt so every agent role sees project-specific guidance
    // without any config changes. The file is read fresh each spawn;
    // if absent or empty, the prompt is unchanged and no warning is logged.
    if let Some(local) = crate::agents::local_prompt::read_project_local_prompt(&project_root_clone)
    {
        prompt.push_str("\n\n");
        prompt.push_str(&local);
    }

    // Append a reference to the source map if the file was written.
    let source_map_path = project_root_clone.join(".huskies").join("source-map.json");
    if source_map_path.exists() {
        prompt.push_str(
            "\n\nA source map of well-documented changed files is at `.huskies/source-map.json`.",
        );
    }

    // Build the effective prompt and determine resume session.
    //
    // When resuming a previous session, discard the full rendered prompt
    // (which would re-read CLAUDE.md and README) and send only the gate
    // failure context as a new message. On a fresh start, append the
    // failure context to the original prompt as before.
    let (effective_prompt, fresh_prompt) = match &session_id_to_resume_owned {
        Some(_) => {
            // Keep the full rendered prompt as fallback if resume fails.
            let fallback = prompt;
            (resume_context_owned.unwrap_or_default(), Some(fallback))
        }
        None => {
            if let Some(ctx) = resume_context_owned {
                prompt.push_str(&ctx);
            }
            (prompt, None)
        }
    };

    // Prepend buffered pipeline events from the previous idle period so the
    // agent sees what happened while it was not running (story 736).
    let effective_prompt = match buffered_events_block {
        Some(block) if !block.is_empty() => format!("{block}\n\n{effective_prompt}"),
        _ => effective_prompt,
    };

    // Step 3: transition to Running now that the worktree is ready.
    {
        if let Ok(mut agents) = agents_ref.lock()
            && let Some(agent) = agents.get_mut(&key_clone)
        {
            agent.status = AgentStatus::Running;
        }
    }
    let _ = tx_clone.send(AgentEvent::Status {
        story_id: sid.clone(),
        agent_name: aname.clone(),
        status: "running".to_string(),
    });
    AgentPool::notify_agent_state_changed(&watcher_tx_clone);

    // Step 4: launch the agent process via the configured runtime.
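    //
    // For orientation, an assumed project.toml fragment; only the `runtime`
    // key and its supported values are taken from this module, the rest of
    // the shape is a guess:
    //
    //   [[agents]]
    //   name = "implementer"
    //   runtime = "gemini"   # default when omitted: "claude-code"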
    let runtime_name = config_clone
        .find_agent(&aname)
        .and_then(|a| a.runtime.as_deref())
        .unwrap_or("claude-code");

    let run_result = match runtime_name {
        "claude-code" => {
            let runtime =
                ClaudeCodeRuntime::new(child_killers_clone.clone(), watcher_tx_clone.clone());
            let ctx = RuntimeContext {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                command,
                args,
                prompt: effective_prompt,
                cwd: wt_path_str,
                inactivity_timeout_secs,
                mcp_port: port_for_task,
                session_id_to_resume: session_id_to_resume_owned.clone(),
                fresh_prompt: fresh_prompt.clone(),
            };
            runtime
                .start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
                .await
        }
        "gemini" => {
            let runtime = GeminiRuntime::new();
            let ctx = RuntimeContext {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                command,
                args,
                prompt: effective_prompt,
                cwd: wt_path_str,
                inactivity_timeout_secs,
                mcp_port: port_for_task,
                session_id_to_resume: session_id_to_resume_owned.clone(),
                fresh_prompt: fresh_prompt.clone(),
            };
            runtime
                .start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
                .await
        }
        "openai" => {
            let runtime = OpenAiRuntime::new();
            let ctx = RuntimeContext {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                command,
                args,
                prompt: effective_prompt,
                cwd: wt_path_str,
                inactivity_timeout_secs,
                mcp_port: port_for_task,
                session_id_to_resume: session_id_to_resume_owned,
                fresh_prompt,
            };
            runtime
                .start(ctx, tx_clone.clone(), log_clone.clone(), log_writer_clone)
                .await
        }
        other => Err(format!(
            "Unknown agent runtime '{other}'; check the 'runtime' field in project.toml. \
             Supported: 'claude-code', 'gemini', 'openai'"
        )),
    };

    match run_result {
        Ok(result) => {
            // Persist token usage if the agent reported it.
            if let Some(ref usage) = result.token_usage
                && let Ok(agents) = agents_ref.lock()
                && let Some(agent) = agents.get(&key_clone)
                && let Some(ref pr) = agent.project_root
            {
                let model = config_clone
                    .find_agent(&aname)
                    .and_then(|a| a.model.clone());
                let record =
                    crate::agents::token_usage::build_record(&sid, &aname, model, usage.clone());
                if let Err(e) = crate::agents::token_usage::append_record(pr, &record) {
                    slog_error!(
                        "[agents] Failed to persist token usage for \
                         {sid}:{aname}: {e}"
                    );
                }
            }

            // Persist session_id so respawns can resume prior reasoning.
            if let Some(ref sess_id) = result.session_id {
                let model = config_clone
                    .find_agent(&aname)
                    .and_then(|a| a.model.clone())
                    .unwrap_or_default();
                crate::agents::session_store::record_session(
                    &project_root_clone,
                    &sid,
                    &aname,
                    &model,
                    sess_id,
                );
            }

            // Mergemaster agents have their own completion path via
            // start_merge_agent_work / run_merge_pipeline and must NOT go
            // through server-owned gates. When a mergemaster exits early
            // (e.g. rate-limited before calling start_merge_agent_work) the
            // feature-branch worktree compiles fine and post-merge tests on
            // master pass (nothing changed), which would wrongly advance the
            // story to 5_done/ without any squash merge having occurred.
            // Instead: just remove the agent from the pool and let
            // auto-assign restart a new mergemaster for the story.
            let stage = config_clone
                .find_agent(&aname)
                .map(agent_config_stage)
                .unwrap_or_else(|| pipeline_stage(&aname));
            if stage == PipelineStage::Mergemaster {
                let (tx_done, done_session_id) = {
                    let mut lock = match agents_ref.lock() {
                        Ok(a) => a,
                        Err(_) => return,
                    };
                    if let Some(agent) = lock.remove(&key_clone) {
                        (agent.tx, agent.session_id.or(result.session_id))
                    } else {
                        (tx_clone.clone(), result.session_id)
                    }
                };
                // Clear any stale Running merge job so the next mergemaster
                // can call start_merge_agent_work without hitting "Merge
                // already in progress" (bug 498).
                if crate::crdt_state::read_merge_job(&sid)
                    .is_some_and(|job| job.status == "running")
                {
                    crate::crdt_state::delete_merge_job(&sid);
                }
                let _ = tx_done.send(AgentEvent::Done {
                    story_id: sid.clone(),
                    agent_name: aname.clone(),
                    session_id: done_session_id,
                });
                AgentPool::notify_agent_state_changed(&watcher_tx_clone);
                // Send a WorkItem event so the auto-assign watcher loop
                // re-dispatches a new mergemaster if the story still needs
                // merging. This avoids an async call to start_agent inside
                // a tokio::spawn (which would require Send).
                let _ = watcher_tx_clone.send(crate::io::watcher::WatcherEvent::WorkItem {
                    stage: "4_merge".to_string(),
                    item_id: sid.clone(),
                    action: "reassign".to_string(),
                    commit_msg: String::new(),
                    from_stage: None,
                });
            } else {
                // Server-owned completion: run acceptance gates automatically
                // when the agent process exits normally.
                super::super::pipeline::run_server_owned_completion(
                    &agents_ref,
                    port_for_task,
                    &sid,
                    &aname,
                    result.session_id,
                    watcher_tx_clone.clone(),
                )
                .await;
                AgentPool::notify_agent_state_changed(&watcher_tx_clone);
            }
        }
        Err(e) => {
            slog_error!("[agents] Agent process error for {aname} on {sid}: {e}");
            let event = AgentEvent::Error {
                story_id: sid.clone(),
                agent_name: aname.clone(),
                message: e,
            };
            if let Ok(mut log) = log_clone.lock() {
                log.push(event.clone());
            }
            let _ = tx_clone.send(event);
            if let Ok(mut agents) = agents_ref.lock()
                && let Some(agent) = agents.get_mut(&key_clone)
            {
                agent.status = AgentStatus::Failed;
            }
            AgentPool::notify_agent_state_changed(&watcher_tx_clone);
        }
    }
}