// huskies/server/src/agents/pool/stop.rs
//! Agent stop — terminates a running agent while preserving its worktree.
use crate::slog;
use crate::slog_error;
use std::path::Path;
use super::super::{AgentEvent, AgentStatus};
use super::AgentPool;
use super::types::composite_key;
impl AgentPool {
    /// Stop a running agent. Worktree is preserved for inspection.
    ///
    /// Kills the agent's PTY child process, aborts its task, emits a
    /// `"stopped"` status event, removes the pool entry, and notifies
    /// WebSocket clients.
    ///
    /// # Errors
    /// Returns `Err` if the pool mutex is poisoned or if no agent with the
    /// given name exists for the story.
    pub async fn stop_agent(
        &self,
        _project_root: &Path,
        story_id: &str,
        agent_name: &str,
    ) -> Result<(), String> {
        let key = composite_key(story_id, agent_name);
        // Extract what we need under the lock, then drop the guard: we must
        // not hold a std::sync::Mutex guard across the .await below.
        let (worktree_info, task_handle, tx) = {
            let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
            let agent = agents
                .get_mut(&key)
                .ok_or_else(|| format!("No agent '{agent_name}' for story '{story_id}'"))?;
            let wt = agent.worktree_info.clone();
            let handle = agent.task_handle.take();
            let tx = agent.tx.clone();
            agent.status = AgentStatus::Failed;
            (wt, handle, tx)
        };
        // Kill the PTY child process FIRST, then abort/await the task.
        // Aborting a spawn_blocking task handle does not interrupt the blocking
        // thread, so awaiting the handle while the child is still alive could
        // stall indefinitely on a thread blocked reading the PTY. Killing the
        // child first unblocks that thread so the await below can complete.
        self.kill_child_for_key(&key);
        if let Some(handle) = task_handle {
            handle.abort();
            let _ = handle.await;
        }
        // Preserve worktree for inspection — don't destroy agent's work on stop.
        if let Some(ref wt) = worktree_info {
            slog!(
                "[agents] Worktree preserved for {story_id}:{agent_name}: {}",
                wt.path.display()
            );
        }
        // Best-effort: receivers may already be gone, so ignore send errors.
        let _ = tx.send(AgentEvent::Status {
            story_id: story_id.to_string(),
            agent_name: agent_name.to_string(),
            status: "stopped".to_string(),
        });
        // Remove from map (re-acquire the lock; it was released above).
        {
            let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
            agents.remove(&key);
        }
        // Notify WebSocket clients so pipeline board and agent panel update.
        Self::notify_agent_state_changed(&self.watcher_tx);
        Ok(())
    }

    /// Remove all agent entries for a given story_id from the pool.
    ///
    /// Called when a story is archived so that stale entries don't accumulate.
    /// Returns the number of entries removed (0 if the pool lock is poisoned;
    /// that failure is logged, not propagated).
    pub fn remove_agents_for_story(&self, story_id: &str) -> usize {
        let mut agents = match self.agents.lock() {
            Ok(a) => a,
            Err(e) => {
                slog_error!("[agents] Failed to lock pool for cleanup of '{story_id}': {e}");
                return 0;
            }
        };
        // Keys are "story_id:agent_name" (see composite_key), so a prefix
        // match selects every agent belonging to this story.
        let prefix = format!("{story_id}:");
        // Single pass with retain — no intermediate Vec of keys needed.
        let before = agents.len();
        agents.retain(|k, _| !k.starts_with(&prefix));
        let count = before - agents.len();
        if count > 0 {
            slog!("[agents] Removed {count} agent entries for archived story '{story_id}'");
        }
        count
    }
}
#[cfg(test)]
mod tests {
    use super::super::AgentPool;
    use crate::agents::AgentStatus;

    // ── remove_agents_for_story tests ────────────────────────────────────────

    /// Every entry keyed under the target story goes away; other stories'
    /// entries are untouched.
    #[test]
    fn remove_agents_for_story_removes_all_entries() {
        let pool = AgentPool::new_test(3001);
        pool.inject_test_agent("story_a", "coder-1", AgentStatus::Completed);
        pool.inject_test_agent("story_a", "qa", AgentStatus::Failed);
        pool.inject_test_agent("story_b", "coder-1", AgentStatus::Running);

        let removed_count = pool.remove_agents_for_story("story_a");
        assert_eq!(removed_count, 2, "should remove both agents for story_a");

        let survivors = pool.list_agents().unwrap();
        assert_eq!(survivors.len(), 1, "only story_b agent should remain");
        assert_eq!(survivors[0].story_id, "story_b");
    }

    /// Removing a story with no agents is a no-op that reports zero.
    #[test]
    fn remove_agents_for_story_returns_zero_when_no_match() {
        let pool = AgentPool::new_test(3001);
        pool.inject_test_agent("story_a", "coder-1", AgentStatus::Running);

        assert_eq!(pool.remove_agents_for_story("nonexistent"), 0);

        let survivors = pool.list_agents().unwrap();
        assert_eq!(survivors.len(), 1, "existing agents should not be affected");
    }

    // ── archive + cleanup integration test ───────────────────────────────────

    /// End-to-end: archiving a story and then cleaning the pool leaves only
    /// the other story's agent, and the archived content survives in the DB.
    #[tokio::test]
    async fn archiving_story_removes_agent_entries_from_pool() {
        use crate::agents::lifecycle::move_story_to_done;
        use std::fs;

        let dir = tempfile::tempdir().unwrap();
        let project_root = dir.path();
        let current_dir = project_root.join(".huskies/work/2_current");
        fs::create_dir_all(&current_dir).unwrap();

        let story_content = "test";
        fs::write(current_dir.join("60_story_cleanup.md"), story_content).unwrap();
        crate::db::ensure_content_store();
        crate::db::write_content("60_story_cleanup", story_content);

        let pool = AgentPool::new_test(3001);
        pool.inject_test_agent("60_story_cleanup", "coder-1", AgentStatus::Completed);
        pool.inject_test_agent("60_story_cleanup", "qa", AgentStatus::Completed);
        pool.inject_test_agent("61_story_other", "coder-1", AgentStatus::Running);
        assert_eq!(pool.list_agents().unwrap().len(), 3);

        move_story_to_done(project_root, "60_story_cleanup").unwrap();
        pool.remove_agents_for_story("60_story_cleanup");

        let remaining = pool.list_agents().unwrap();
        assert_eq!(
            remaining.len(),
            1,
            "only the other story's agent should remain"
        );
        assert_eq!(remaining[0].story_id, "61_story_other");

        // The lifecycle function updates the content store (not the filesystem),
        // so verify the move via the DB.
        let content = crate::db::read_content("60_story_cleanup")
            .expect("60_story_cleanup should be in content store after move to done");
        assert_eq!(content, "test", "content should be preserved after move");
    }
}