huskies: merge 862
This commit is contained in:
@@ -54,6 +54,17 @@ pub fn write_merge_job(
|
||||
.into();
|
||||
apply_and_persist(&mut state, |s| s.crdt.doc.merge_jobs.insert(ROOT_ID, entry));
|
||||
state.merge_job_index = rebuild_merge_job_index(&state.crdt);
|
||||
// After inserting a new entry via JSON, explicitly set the error field via
|
||||
// .set() so that entry.error.view() returns the correct value immediately.
|
||||
// The JSON insert path initialises LWW fields, but an explicit .set() is
|
||||
// required for the stale-merge reaper to read the field reliably via .view().
|
||||
if let Some(e) = error
|
||||
&& let Some(&idx) = state.merge_job_index.get(story_id)
|
||||
{
|
||||
apply_and_persist(&mut state, |s| {
|
||||
s.crdt.doc.merge_jobs[idx].error.set(e.to_string())
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -183,6 +183,31 @@ fn merge_job_insert_update_delete() {
|
||||
assert!(read_merge_job("100").is_none());
|
||||
}
|
||||
|
||||
#[test]
fn merge_job_error_field_readable_after_insert() {
    // Regression guard: a freshly inserted merge job must expose its `error`
    // field through read_all_merge_jobs() right away, so the stale-merge
    // reaper can decode server_start_time from it (None → legacy/stale).
    init_for_test();

    let payload = "{\"server_start\":9999999.0}";
    write_merge_job("test_error_field", "running", 1.0, None, Some(payload));

    let jobs = read_all_merge_jobs().unwrap_or_default();
    let job = jobs
        .iter()
        .find(|v| v.story_id == "test_error_field")
        .expect("entry must exist in read_all");

    assert_eq!(
        job.error.as_deref(),
        Some(payload),
        "error field must be readable via view() after a fresh insert"
    );
}
|
||||
|
||||
#[test]
|
||||
fn active_agent_insert_update_delete() {
|
||||
init_for_test();
|
||||
|
||||
@@ -101,6 +101,7 @@ pub(super) async fn handle_tools_call(
|
||||
"get_test_result" => shell_tools::tool_get_test_result(&args, ctx).await,
|
||||
"run_build" => shell_tools::tool_run_build(&args, ctx).await,
|
||||
"run_lint" => shell_tools::tool_run_lint(&args, ctx).await,
|
||||
"run_check" => shell_tools::tool_run_check(&args, ctx).await,
|
||||
// Git operations
|
||||
"git_status" => git_tools::tool_git_status(&args, ctx).await,
|
||||
"git_diff" => git_tools::tool_git_diff(&args, ctx).await,
|
||||
|
||||
@@ -7,4 +7,6 @@ mod exec;
|
||||
mod script;
|
||||
|
||||
pub(crate) use exec::{handle_run_command_sse, tool_run_command};
|
||||
pub(crate) use script::{tool_get_test_result, tool_run_build, tool_run_lint, tool_run_tests};
|
||||
pub(crate) use script::{
|
||||
tool_get_test_result, tool_run_build, tool_run_check, tool_run_lint, tool_run_tests,
|
||||
};
|
||||
|
||||
@@ -377,6 +377,61 @@ pub(crate) async fn tool_run_lint(args: &Value, ctx: &AppContext) -> Result<Stri
|
||||
run_script_tool("lint", args, ctx).await
|
||||
}
|
||||
|
||||
// ── run_check ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Fast compile-only check (`script/check`).
|
||||
///
|
||||
/// Runs `script/check` (expected to be `cargo check --tests --workspace`) in
|
||||
/// the agent's worktree and returns the **full, untruncated** stdout + stderr
|
||||
/// so that every compiler diagnostic is visible to the caller. Unlike
|
||||
/// `run_build` and `run_lint`, output is never truncated — compile errors must
|
||||
/// be readable in their entirety for fast iteration feedback.
|
||||
pub(crate) async fn tool_run_check(args: &Value, ctx: &AppContext) -> Result<String, String> {
|
||||
let project_root = ctx.services.agents.get_project_root(&ctx.state)?;
|
||||
|
||||
let working_dir = match args.get("worktree_path").and_then(|v| v.as_str()) {
|
||||
Some(wt) => validate_working_dir(wt, ctx)?,
|
||||
None => project_root
|
||||
.canonicalize()
|
||||
.map_err(|e| format!("Cannot canonicalize project root: {e}"))?,
|
||||
};
|
||||
|
||||
let script_path = working_dir.join("script").join("check");
|
||||
if !script_path.exists() {
|
||||
return Err(format!(
|
||||
"script/check not found: {}. Create script/check (e.g. `cargo check --tests --workspace`) to enable fast compile feedback.",
|
||||
script_path.display()
|
||||
));
|
||||
}
|
||||
|
||||
let result = tokio::task::spawn_blocking({
|
||||
let script = script_path.clone();
|
||||
let dir = working_dir.clone();
|
||||
move || {
|
||||
std::process::Command::new("bash")
|
||||
.arg(&script)
|
||||
.current_dir(&dir)
|
||||
.output()
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|e| format!("Task join error: {e}"))?
|
||||
.map_err(|e| format!("Failed to spawn script/check: {e}"))?;
|
||||
|
||||
let stdout = String::from_utf8_lossy(&result.stdout);
|
||||
let stderr = String::from_utf8_lossy(&result.stderr);
|
||||
// No truncation: agents need the full compiler output to diagnose errors.
|
||||
let output = format!("{stdout}{stderr}");
|
||||
let exit_code = result.status.code().unwrap_or(-1);
|
||||
|
||||
serde_json::to_string_pretty(&json!({
|
||||
"passed": result.status.success(),
|
||||
"exit_code": exit_code,
|
||||
"output": output,
|
||||
}))
|
||||
.map_err(|e| format!("Serialization error: {e}"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -551,6 +606,76 @@ mod tests {
|
||||
assert_eq!(parsed["exit_code"], 1);
|
||||
}
|
||||
|
||||
// ── tool_run_check ────────────────────────────────────────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn tool_run_check_missing_script_returns_error() {
|
||||
let tmp = tempfile::tempdir().unwrap();
|
||||
let ctx = test_ctx(tmp.path());
|
||||
let result = tool_run_check(&json!({}), &ctx).await;
|
||||
assert!(result.is_err());
|
||||
let err = result.unwrap_err();
|
||||
assert!(
|
||||
err.contains("script/check"),
|
||||
"error should name script/check: {err}"
|
||||
);
|
||||
assert!(
|
||||
err.contains("not found"),
|
||||
"error should say not found: {err}"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tool_run_check_returns_full_output_on_nonzero_exit() {
|
||||
let tmp = tempfile::tempdir().unwrap();
|
||||
let script_dir = tmp.path().join("script");
|
||||
std::fs::create_dir_all(&script_dir).unwrap();
|
||||
let script_path = script_dir.join("check");
|
||||
// Script that emits 150 lines (exceeds the 100-line truncation limit used
|
||||
// by run_build/run_lint) and exits non-zero to verify no truncation occurs.
|
||||
std::fs::write(
|
||||
&script_path,
|
||||
"#!/usr/bin/env bash\nfor i in $(seq 1 150); do echo \"error[$i]: compile error on line $i\"; done\nexit 1\n",
|
||||
)
|
||||
.unwrap();
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
std::fs::set_permissions(&script_path, std::fs::Permissions::from_mode(0o755)).unwrap();
|
||||
}
|
||||
|
||||
let ctx = test_ctx(tmp.path());
|
||||
let result = tool_run_check(&json!({}), &ctx).await.unwrap();
|
||||
let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
|
||||
|
||||
assert_eq!(parsed["passed"], false);
|
||||
assert_eq!(parsed["exit_code"], 1);
|
||||
let output = parsed["output"].as_str().unwrap();
|
||||
// All 150 lines must be present — no truncation.
|
||||
assert!(output.contains("error[1]"), "should contain first line");
|
||||
assert!(output.contains("error[150]"), "should contain last line");
|
||||
assert!(!output.contains("omitted"), "must not truncate output");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tool_run_check_passes_when_script_exits_zero() {
|
||||
let tmp = tempfile::tempdir().unwrap();
|
||||
let script_dir = tmp.path().join("script");
|
||||
std::fs::create_dir_all(&script_dir).unwrap();
|
||||
let script_path = script_dir.join("check");
|
||||
std::fs::write(&script_path, "#!/usr/bin/env bash\nexit 0\n").unwrap();
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
std::fs::set_permissions(&script_path, std::fs::Permissions::from_mode(0o755)).unwrap();
|
||||
}
|
||||
let ctx = test_ctx(tmp.path());
|
||||
let result = tool_run_check(&json!({}), &ctx).await.unwrap();
|
||||
let parsed: serde_json::Value = serde_json::from_str(&result).unwrap();
|
||||
assert_eq!(parsed["passed"], true);
|
||||
assert_eq!(parsed["exit_code"], 0);
|
||||
}
|
||||
|
||||
// ── truncate_output ───────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -87,7 +87,8 @@ mod tests {
|
||||
assert!(names.contains(&"get_version"));
|
||||
assert!(names.contains(&"remove_criterion"));
|
||||
assert!(names.contains(&"mesh_status"));
|
||||
assert_eq!(tools.len(), 67);
|
||||
assert!(names.contains(&"run_check"));
|
||||
assert_eq!(tools.len(), 68);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -151,6 +151,20 @@ pub(super) fn system_tools() -> Vec<Value> {
|
||||
"required": []
|
||||
}
|
||||
}),
|
||||
json!({
|
||||
"name": "run_check",
|
||||
"description": "Run script/check (cargo check --tests --workspace) in the agent's worktree for fast compile-only feedback. Returns the full, untruncated output so every compiler diagnostic is visible. Much faster than run_tests — use this while iterating to catch compile errors; call run_tests only to validate the full pipeline before committing.",
|
||||
"inputSchema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"worktree_path": {
|
||||
"type": "string",
|
||||
"description": "Optional absolute path to a worktree to run the check in. Must be inside .huskies/worktrees/. Defaults to the project root."
|
||||
}
|
||||
},
|
||||
"required": []
|
||||
}
|
||||
}),
|
||||
json!({
|
||||
"name": "git_status",
|
||||
"description": "Return the working tree status of an agent's worktree (staged, unstaged, and untracked files). The worktree_path must be inside .huskies/worktrees/. Push and remote operations are not available.",
|
||||
|
||||
Reference in New Issue
Block a user