huskies: merge 736_story_drain_and_prepend_buffered_status_events_on_the_user_s_next_agent_message

This commit is contained in:
dave
2026-04-27 19:31:34 +00:00
parent aa7b26a24a
commit 63a30a9319
4 changed files with 182 additions and 2 deletions
+68
View File
@@ -280,6 +280,74 @@ mod tests {
}
}
/// Story 736 — Criterion 3 end-to-end check: publish N status events at an
/// idle agent session and confirm the agent is never invoked; then drain and
/// format the buffer exactly as `start_agent` would on the next user message.
/// Every one of the N events must appear in the `<recent-events>` block, and
/// the buffer must be left empty afterwards.
#[tokio::test]
async fn drain_and_format_prepends_all_buffered_events_and_empties_buffer() {
    use crate::service::status::buffer::{format_buffered_items, BufferedItem};

    let pool = AgentPool::new_test(3207);

    // An idle agent with a live buffer — stands in for a prior session that
    // has completed but kept accumulating pipeline events.
    pool.inject_test_agent_with_live_buffer("story-736", "coder", AgentStatus::Completed);

    // Publish the events while the agent is idle. They must only pile up in
    // the buffer; no agent invocation may be triggered.
    let broadcaster = pool.status_broadcaster();
    let total = 4_usize;
    for idx in 0..total {
        broadcaster.publish(StatusEvent::MergeFailure {
            story_id: format!("other-{idx}"),
            story_name: None,
            reason: format!("reason-{idx}"),
        });
    }

    // Give the background receive tasks a chance to consume everything.
    sleep(Duration::from_millis(30)).await;

    // Drain — the same step `start_agent` performs before spawning the next
    // agent turn (story 736, criterion 1).
    let drained = pool
        .drain_agent_status_buffer("story-736", "coder")
        .expect("agent must have a buffer");
    assert_eq!(drained.len(), total, "all {total} events must be buffered");
    let only_events = drained
        .iter()
        .all(|item| matches!(item, BufferedItem::Event(_)));
    assert!(only_events, "all items must be events (no truncation)");

    // Criterion 1: formatting yields a well-formed <recent-events> block.
    let block = format_buffered_items(&drained).expect("non-empty slice must yield a block");
    assert!(
        block.starts_with("<recent-events>\n"),
        "block must open with <recent-events>: {block}"
    );
    assert!(
        block.ends_with("\n</recent-events>"),
        "block must close with </recent-events>: {block}"
    );
    for idx in 0..total {
        assert!(
            block.contains(&format!("reason-{idx}")),
            "block must include content from event {idx}: {block}"
        );
    }

    // Criterion 2: a second drain must come back empty.
    let leftover = pool
        .drain_agent_status_buffer("story-736", "coder")
        .expect("agent must still have a buffer");
    assert!(
        leftover.is_empty(),
        "buffer must be empty after drain (got {} items)",
        leftover.len()
    );
}
/// Criterion 2: on session restart a fresh buffer is created, clearing any
/// events accumulated in the previous session.
///
+17
View File
@@ -100,6 +100,11 @@ impl AgentPool {
// (bug 118).
let resolved_name: String;
let key: String;
// Buffered status events accumulated while the agent was idle. Drained
// inside the lock (before the new entry replaces the old one) and
// formatted as a `<recent-events>` block for prepending to the first
// agent turn (story 736).
let prior_events: Option<String>;
{
let mut agents = self.agents.lock().map_err(|e| e.to_string())?;
@@ -249,6 +254,17 @@ impl AgentPool {
story '{story_id}' will be picked up when the agent becomes available"
));
}
// Drain accumulated status events from the previous session before
// replacing the entry with the new one. The drained items are
// formatted and prepended to the first agent turn (story 736).
prior_events = {
let items = agents
.get(&key)
.and_then(|a| a.status_buffer.as_ref().map(|b| b.drain()))
.unwrap_or_default();
crate::service::status::buffer::format_buffered_items(&items)
};
agents.insert(
key.clone(),
StoryAgent {
@@ -336,6 +352,7 @@ impl AgentPool {
self.watcher_tx.clone(),
Arc::clone(&self.merge_jobs),
inactivity_timeout_secs,
prior_events,
));
// Store the task handle while the agent is still Pending.
+12
View File
@@ -48,6 +48,11 @@ pub(super) async fn run_agent_spawn(
watcher_tx: broadcast::Sender<WatcherEvent>,
merge_jobs: Arc<Mutex<HashMap<String, MergeJob>>>,
inactivity_timeout_secs: u64,
// Formatted `<recent-events>` block drained from the previous session's
// buffer. Prepended to the first agent turn so the agent sees what
// happened while it was idle (story 736). `None` when there were no
// buffered events.
buffered_events_block: Option<String>,
) {
// Re-bind to the legacy `_clone` / `_owned` names so the body below remains
// a verbatim copy of the original closure (story 157).
@@ -169,6 +174,13 @@ pub(super) async fn run_agent_spawn(
}
};
// Prepend buffered pipeline events from the previous idle period so the
// agent sees what happened while it was not running (story 736).
let effective_prompt = match buffered_events_block {
Some(block) if !block.is_empty() => format!("{block}\n\n{effective_prompt}"),
_ => effective_prompt,
};
// Step 3: transition to Running now that the worktree is ready.
{
if let Ok(mut agents) = agents_ref.lock()