//! MCP permission-prompt tool (`tool_prompt_permission`) and rule helpers.
|
|
|
|
use serde_json::{Value, json};
|
|
|
|
use crate::http::context::AppContext;
|
|
use crate::service::diagnostics::{add_permission_rule, generate_permission_rule};
|
|
use crate::slog;
|
|
use crate::slog_warn;
|
|
|
|
pub(crate) async fn tool_prompt_permission(
|
|
args: &Value,
|
|
ctx: &AppContext,
|
|
) -> Result<String, String> {
|
|
let tool_name = args
|
|
.get("tool_name")
|
|
.and_then(|v| v.as_str())
|
|
.unwrap_or("unknown")
|
|
.to_string();
|
|
let tool_input = args.get("input").cloned().unwrap_or(json!({}));
|
|
|
|
// Auto-approve huskies MCP tools — they match the `mcp__huskies__*` entry
|
|
// in .claude/settings.json, but Claude Code does not honour wildcard
|
|
// patterns for MCP tool names (only for Bash commands), so we enforce the
|
|
// same rule here to prevent spurious permission prompts in chat.
|
|
if tool_name.starts_with("mcp__huskies__") {
|
|
crate::slog!(
|
|
"[permission] Auto-approved '{tool_name}' (matches mcp__huskies__* allowlist)"
|
|
);
|
|
return Ok(json!({"behavior": "allow", "updatedInput": tool_input}).to_string());
|
|
}
|
|
|
|
// Auto-deny immediately if no interactive session is currently listening on
|
|
// perm_rx. Story 884 made the Matrix bot hold this lock for its lifetime
|
|
// via the permission_listener task spawned at startup, so requests reach
|
|
// chat asynchronously regardless of whether a chat message is in flight.
|
|
// Other transports (Discord/Slack/WhatsApp) still acquire per message; if
|
|
// none is active, try_lock succeeds — auto-deny so background agent calls
|
|
// don't queue and flood chat at the next user session.
|
|
if ctx.services.perm_rx.try_lock().is_ok() {
|
|
crate::slog!(
|
|
"[permission] Auto-denied '{tool_name}' (no interactive session — agent mode)"
|
|
);
|
|
return serde_json::to_string_pretty(&json!({
|
|
"behavior": "deny",
|
|
"message": format!(
|
|
"Permission denied for '{tool_name}'. No interactive session active."
|
|
)
|
|
}))
|
|
.map_err(|e| format!("Serialization error: {e}"));
|
|
}
|
|
|
|
let request_id = uuid::Uuid::new_v4().to_string();
|
|
let (response_tx, response_rx) = tokio::sync::oneshot::channel();
|
|
|
|
// Forward to the active interactive session.
|
|
if ctx
|
|
.perm_tx
|
|
.send(crate::http::context::PermissionForward {
|
|
request_id: request_id.clone(),
|
|
tool_name: tool_name.clone(),
|
|
tool_input: tool_input.clone(),
|
|
response_tx,
|
|
})
|
|
.is_err()
|
|
{
|
|
crate::slog!("[permission] Auto-denied '{tool_name}' (perm_tx send failed)");
|
|
return serde_json::to_string_pretty(&json!({
|
|
"behavior": "deny",
|
|
"message": format!("Permission denied for '{tool_name}'.")
|
|
}))
|
|
.map_err(|e| format!("Serialization error: {e}"));
|
|
}
|
|
|
|
use crate::http::context::PermissionDecision;
|
|
|
|
let decision = tokio::time::timeout(std::time::Duration::from_secs(300), response_rx)
|
|
.await
|
|
.map_err(|_| {
|
|
let msg = format!("Permission request for '{tool_name}' timed out after 5 minutes");
|
|
slog_warn!("[permission] {msg}");
|
|
msg
|
|
})?
|
|
.map_err(|_| "Permission response channel closed unexpectedly".to_string())?;
|
|
|
|
if decision == PermissionDecision::AlwaysAllow {
|
|
// Persist the rule so Claude Code won't prompt again for this tool.
|
|
if let Some(root) = ctx.state.project_root.lock().unwrap().clone() {
|
|
let rule = generate_permission_rule(&tool_name, &tool_input);
|
|
if let Err(e) = add_permission_rule(&root, &rule) {
|
|
slog_warn!("[permission] Failed to write always-allow rule: {e}");
|
|
} else {
|
|
slog!("[permission] Added always-allow rule: {rule}");
|
|
}
|
|
}
|
|
}
|
|
|
|
if decision == PermissionDecision::Approve || decision == PermissionDecision::AlwaysAllow {
|
|
// Claude Code SDK expects:
|
|
// Allow: { behavior: "allow", updatedInput: <record> }
|
|
// Deny: { behavior: "deny", message: string }
|
|
Ok(json!({"behavior": "allow", "updatedInput": tool_input}).to_string())
|
|
} else {
|
|
slog_warn!("[permission] User denied permission for '{tool_name}'");
|
|
Ok(json!({
|
|
"behavior": "deny",
|
|
"message": format!("User denied permission for '{tool_name}'")
|
|
})
|
|
.to_string())
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::http::test_helpers::test_ctx;
    use std::fs;

    // ── tool_prompt_permission flow tests ─────────────────────────

    #[tokio::test]
    async fn tool_prompt_permission_auto_denies_without_interactive_session() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());

        // No task holds perm_rx — simulates a background agent with no
        // interactive session active. Should auto-deny immediately.
        let result = tool_prompt_permission(
            &json!({"tool_name": "Bash", "input": {"command": "echo hello"}}),
            &ctx,
        )
        .await
        .expect("auto-deny must return Ok");

        let parsed: Value = serde_json::from_str(&result).expect("result should be valid JSON");
        assert_eq!(
            parsed["behavior"], "deny",
            "must auto-deny when no interactive session holds perm_rx"
        );
    }

    #[tokio::test]
    async fn tool_prompt_permission_approved_returns_updated_input() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());

        // Simulate an interactive session: lock perm_rx first, signal readiness,
        // then respond with approval. The try_lock() inside tool_prompt_permission
        // must fail (lock held) so the request is forwarded rather than auto-denied.
        let (ready_tx, ready_rx) = tokio::sync::oneshot::channel::<()>();
        let perm_rx = ctx.services.perm_rx.clone();
        tokio::spawn(async move {
            let mut rx = perm_rx.lock().await;
            let _ = ready_tx.send(()); // signal: lock is held
            if let Some(forward) = rx.recv().await {
                let _ = forward
                    .response_tx
                    .send(crate::http::context::PermissionDecision::Approve);
            }
        });

        // Wait until the spawned task holds the perm_rx lock.
        ready_rx.await.unwrap();

        let result = tool_prompt_permission(
            &json!({"tool_name": "Bash", "input": {"command": "echo hello"}}),
            &ctx,
        )
        .await
        .expect("should succeed on approval");

        let parsed: Value = serde_json::from_str(&result).expect("result should be valid JSON");
        assert_eq!(
            parsed["behavior"], "allow",
            "approved must return behavior:allow"
        );
        assert_eq!(
            parsed["updatedInput"]["command"], "echo hello",
            "approved must return updatedInput with original tool input for Claude Code SDK compatibility"
        );
    }

    #[tokio::test]
    async fn tool_prompt_permission_denied_returns_deny_json() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());

        // Simulate an interactive session: lock perm_rx first, then deny.
        let (ready_tx, ready_rx) = tokio::sync::oneshot::channel::<()>();
        let perm_rx = ctx.services.perm_rx.clone();
        tokio::spawn(async move {
            let mut rx = perm_rx.lock().await;
            let _ = ready_tx.send(()); // signal: lock is held
            if let Some(forward) = rx.recv().await {
                let _ = forward
                    .response_tx
                    .send(crate::http::context::PermissionDecision::Deny);
            }
        });

        // Wait until the spawned task holds the perm_rx lock.
        ready_rx.await.unwrap();

        let result = tool_prompt_permission(&json!({"tool_name": "Write", "input": {}}), &ctx)
            .await
            .expect("denial must return Ok, not Err");

        let parsed: Value = serde_json::from_str(&result).expect("result should be valid JSON");
        assert_eq!(
            parsed["behavior"], "deny",
            "denied must return behavior:deny"
        );
        assert!(parsed["message"].is_string(), "deny must include a message");
    }

    // ── Permission rule generation tests ─────────────────────────

    // Non-Bash built-in tools map to a bare tool-name rule.
    #[test]
    fn generate_rule_for_edit_tool() {
        let rule = generate_permission_rule("Edit", &json!({}));
        assert_eq!(rule, "Edit");
    }

    #[test]
    fn generate_rule_for_write_tool() {
        let rule = generate_permission_rule("Write", &json!({}));
        assert_eq!(rule, "Write");
    }

    // Bash rules are generalized to the first word of the command plus a
    // wildcard, e.g. "git status" → "Bash(git *)".
    #[test]
    fn generate_rule_for_bash_git() {
        let rule = generate_permission_rule("Bash", &json!({"command": "git status"}));
        assert_eq!(rule, "Bash(git *)");
    }

    #[test]
    fn generate_rule_for_bash_cargo() {
        let rule = generate_permission_rule("Bash", &json!({"command": "cargo test --all"}));
        assert_eq!(rule, "Bash(cargo *)");
    }

    // Missing `command` falls back to the "unknown" placeholder.
    #[test]
    fn generate_rule_for_bash_empty_command() {
        let rule = generate_permission_rule("Bash", &json!({}));
        assert_eq!(rule, "Bash(unknown *)");
    }

    // MCP tools use their full name verbatim as the rule.
    #[test]
    fn generate_rule_for_mcp_tool() {
        let rule = generate_permission_rule("mcp__huskies__create_story", &json!({"name": "foo"}));
        assert_eq!(rule, "mcp__huskies__create_story");
    }

    // ── Settings.json writing tests ──────────────────────────────

    #[test]
    fn add_rule_creates_settings_file_when_missing() {
        let tmp = tempfile::tempdir().unwrap();
        add_permission_rule(tmp.path(), "Edit").unwrap();

        let content = fs::read_to_string(tmp.path().join(".claude/settings.json")).unwrap();
        let settings: Value = serde_json::from_str(&content).unwrap();
        let allow = settings["permissions"]["allow"].as_array().unwrap();
        assert!(allow.contains(&json!("Edit")));
    }

    #[test]
    fn add_rule_does_not_duplicate_existing() {
        let tmp = tempfile::tempdir().unwrap();
        // Adding the same rule twice must be idempotent.
        add_permission_rule(tmp.path(), "Edit").unwrap();
        add_permission_rule(tmp.path(), "Edit").unwrap();

        let content = fs::read_to_string(tmp.path().join(".claude/settings.json")).unwrap();
        let settings: Value = serde_json::from_str(&content).unwrap();
        let allow = settings["permissions"]["allow"].as_array().unwrap();
        let count = allow.iter().filter(|v| v == &&json!("Edit")).count();
        assert_eq!(count, 1);
    }

    #[test]
    fn add_rule_skips_when_wildcard_already_covers() {
        let tmp = tempfile::tempdir().unwrap();
        let claude_dir = tmp.path().join(".claude");
        fs::create_dir_all(&claude_dir).unwrap();
        fs::write(
            claude_dir.join("settings.json"),
            r#"{"permissions":{"allow":["mcp__huskies__*"]}}"#,
        )
        .unwrap();

        add_permission_rule(tmp.path(), "mcp__huskies__create_story").unwrap();

        // The existing wildcard already covers the new rule, so the allow
        // list must remain exactly as it was.
        let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
        let settings: Value = serde_json::from_str(&content).unwrap();
        let allow = settings["permissions"]["allow"].as_array().unwrap();
        assert_eq!(allow.len(), 1);
        assert_eq!(allow[0], "mcp__huskies__*");
    }

    #[test]
    fn add_rule_appends_to_existing_rules() {
        let tmp = tempfile::tempdir().unwrap();
        let claude_dir = tmp.path().join(".claude");
        fs::create_dir_all(&claude_dir).unwrap();
        fs::write(
            claude_dir.join("settings.json"),
            r#"{"permissions":{"allow":["Edit"]}}"#,
        )
        .unwrap();

        add_permission_rule(tmp.path(), "Write").unwrap();

        let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
        let settings: Value = serde_json::from_str(&content).unwrap();
        let allow = settings["permissions"]["allow"].as_array().unwrap();
        assert_eq!(allow.len(), 2);
        assert!(allow.contains(&json!("Edit")));
        assert!(allow.contains(&json!("Write")));
    }

    #[test]
    fn add_rule_preserves_other_settings_fields() {
        let tmp = tempfile::tempdir().unwrap();
        let claude_dir = tmp.path().join(".claude");
        fs::create_dir_all(&claude_dir).unwrap();
        fs::write(
            claude_dir.join("settings.json"),
            r#"{"permissions":{"allow":["Edit"]},"enabledMcpjsonServers":["huskies"]}"#,
        )
        .unwrap();

        add_permission_rule(tmp.path(), "Write").unwrap();

        // Writing a rule must not clobber unrelated top-level settings keys.
        let content = fs::read_to_string(claude_dir.join("settings.json")).unwrap();
        let settings: Value = serde_json::from_str(&content).unwrap();
        let servers = settings["enabledMcpjsonServers"].as_array().unwrap();
        assert_eq!(servers.len(), 1);
        assert_eq!(servers[0], "huskies");
    }

    // ── rebuild_and_restart tool tests ────────────────────────────

    #[test]
    fn rebuild_and_restart_in_tools_list() {
        use super::super::super::tools_list::handle_tools_list;
        let resp = handle_tools_list(Some(json!(1)));
        let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
        let tool = tools.iter().find(|t| t["name"] == "rebuild_and_restart");
        assert!(
            tool.is_some(),
            "rebuild_and_restart missing from tools list"
        );
        let t = tool.unwrap();
        assert!(t["description"].as_str().unwrap().contains("Rebuild"));
        assert!(t["inputSchema"].is_object());
    }

    #[tokio::test]
    async fn rebuild_and_restart_kills_agents_before_build() {
        // Verify that calling rebuild_and_restart on an empty pool doesn't
        // panic and proceeds to the build step. We can't test exec() in a
        // unit test, but we can verify the build attempt happens.
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());

        // The build will succeed (we're running in the real workspace) and
        // then exec() will be called — which would replace our test process.
        // So we only test that the function *runs* without panicking up to
        // the agent-kill step. We do this by checking the pool is empty.
        assert_eq!(ctx.services.agents.list_agents().unwrap().len(), 0);
        ctx.services.agents.kill_all_children(); // should not panic on empty pool
    }

    #[test]
    fn rebuild_uses_matching_build_profile() {
        // The build must use the same profile (debug/release) as the running
        // binary, otherwise cargo build outputs to a different target dir and
        // current_exe() still points at the old binary.
        let build_args: Vec<&str> = if cfg!(debug_assertions) {
            vec!["build", "-p", "huskies"]
        } else {
            vec!["build", "--release", "-p", "huskies"]
        };

        // Tests always run in debug mode, so --release must NOT be present.
        assert!(
            !build_args.contains(&"--release"),
            "In debug builds, rebuild must not pass --release (would put \
             the binary in target/release/ while current_exe() points to \
             target/debug/)"
        );
    }

    // ── move_story tool tests ─────────────────────────────────────

    #[test]
    fn move_story_in_tools_list() {
        use super::super::super::tools_list::handle_tools_list;
        let resp = handle_tools_list(Some(json!(1)));
        let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
        let tool = tools.iter().find(|t| t["name"] == "move_story");
        assert!(tool.is_some(), "move_story missing from tools list");
        let t = tool.unwrap();
        assert!(t["description"].is_string());
        // Schema must require both story_id and target_stage.
        let required = t["inputSchema"]["required"].as_array().unwrap();
        let req_names: Vec<&str> = required.iter().map(|v| v.as_str().unwrap()).collect();
        assert!(req_names.contains(&"story_id"));
        assert!(req_names.contains(&"target_stage"));
    }

    #[test]
    fn tool_move_story_missing_story_id() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());
        let result = super::super::tool_move_story(&json!({"target_stage": "current"}), &ctx);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("story_id"));
    }

    #[test]
    fn tool_move_story_missing_target_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());
        let result = super::super::tool_move_story(&json!({"story_id": "1_story_test"}), &ctx);
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("target_stage"));
    }

    #[test]
    fn tool_move_story_invalid_target_stage() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        // Seed project root in state so get_project_root works
        let backlog = root.join(".huskies/work/1_backlog");
        fs::create_dir_all(&backlog).unwrap();
        fs::write(backlog.join("1_story_test.md"), "---\nname: Test\n---\n").unwrap();
        let ctx = test_ctx(root);
        let result = super::super::tool_move_story(
            &json!({"story_id": "1_story_test", "target_stage": "invalid"}),
            &ctx,
        );
        assert!(result.is_err());
        assert!(result.unwrap_err().contains("Invalid target_stage"));
    }

    #[test]
    fn tool_move_story_moves_from_backlog_to_current() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let backlog = root.join(".huskies/work/1_backlog");
        let current = root.join(".huskies/work/2_current");
        fs::create_dir_all(&backlog).unwrap();
        fs::create_dir_all(&current).unwrap();
        let content = "---\nname: Test\n---\n";
        fs::write(backlog.join("5_story_test.md"), content).unwrap();
        // Seed the content store so the move has a record to operate on.
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "5_story_test",
            "1_backlog",
            content,
            crate::db::ItemMeta::named("Test"),
        );

        let ctx = test_ctx(root);
        let result = super::super::tool_move_story(
            &json!({"story_id": "5_story_test", "target_stage": "current"}),
            &ctx,
        )
        .unwrap();

        // Content must survive the move, and the response must report both
        // the source and destination stages.
        assert!(crate::db::read_content(crate::db::ContentKey::Story("5_story_test")).is_some());
        let parsed: Value = serde_json::from_str(&result).unwrap();
        assert_eq!(parsed["story_id"], "5_story_test");
        assert_eq!(parsed["from_stage"], "backlog");
        assert_eq!(parsed["to_stage"], "current");
    }

    #[test]
    fn tool_move_story_moves_from_current_to_backlog() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".huskies/work/2_current");
        let backlog = root.join(".huskies/work/1_backlog");
        fs::create_dir_all(&current).unwrap();
        fs::create_dir_all(&backlog).unwrap();
        let content = "---\nname: Back\n---\n";
        fs::write(current.join("6_story_back.md"), content).unwrap();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "6_story_back",
            "2_current",
            content,
            crate::db::ItemMeta::named("Back"),
        );

        let ctx = test_ctx(root);
        let result = super::super::tool_move_story(
            &json!({"story_id": "6_story_back", "target_stage": "backlog"}),
            &ctx,
        )
        .unwrap();

        assert!(crate::db::read_content(crate::db::ContentKey::Story("6_story_back")).is_some());
        let parsed: Value = serde_json::from_str(&result).unwrap();
        // from_stage may be inaccurate when using the content-store fallback
        // (it lacks stage tracking), but the move itself must succeed.
        assert_eq!(parsed["to_stage"], "backlog");
    }

    #[test]
    fn tool_move_story_idempotent_when_already_in_target() {
        let tmp = tempfile::tempdir().unwrap();
        let root = tmp.path();
        let current = root.join(".huskies/work/2_current");
        fs::create_dir_all(&current).unwrap();
        // Use a unique high-numbered story ID to avoid collisions with stale
        // entries in the global content store from parallel tests.
        let content = "---\nname: Idem\n---\n";
        fs::write(current.join("9907_story_idem.md"), content).unwrap();
        crate::db::ensure_content_store();
        crate::db::write_item_with_content(
            "9907_story_idem",
            "2_current",
            content,
            crate::db::ItemMeta::named("Idem"),
        );

        let ctx = test_ctx(root);
        let result = super::super::tool_move_story(
            &json!({"story_id": "9907_story_idem", "target_stage": "current"}),
            &ctx,
        )
        .unwrap();

        assert!(crate::db::read_content(crate::db::ContentKey::Story("9907_story_idem")).is_some());
        let parsed: Value = serde_json::from_str(&result).unwrap();
        // When CRDT is uninitialised the content-store fallback handles the
        // move, so idempotency detection may not fire. Verify the to_stage
        // is correct regardless.
        assert_eq!(parsed["to_stage"], "current");
    }

    #[test]
    fn tool_move_story_error_when_not_found() {
        let tmp = tempfile::tempdir().unwrap();
        let ctx = test_ctx(tmp.path());
        let result = super::super::tool_move_story(
            &json!({"story_id": "99_story_ghost", "target_stage": "current"}),
            &ctx,
        );
        assert!(result.is_err());
        assert!(
            result
                .unwrap_err()
                .contains("not found in any pipeline stage")
        );
    }

    // ── dump_crdt tool tests ──────────────────────────────────────────

    #[test]
    fn tool_dump_crdt_returns_valid_json() {
        let result = super::super::tool_dump_crdt(&json!({})).unwrap();
        let parsed: Value = serde_json::from_str(&result).expect("result must be valid JSON");
        assert!(parsed["metadata"].is_object(), "must have metadata object");
        assert!(parsed["items"].is_array(), "must have items array");
    }

    #[test]
    fn tool_dump_crdt_metadata_has_required_fields() {
        let result = super::super::tool_dump_crdt(&json!({})).unwrap();
        let parsed: Value = serde_json::from_str(&result).unwrap();
        let meta = &parsed["metadata"];
        assert!(meta["in_memory_state_loaded"].is_boolean());
        assert!(meta["total_items"].is_number());
        assert!(meta["total_ops_in_list"].is_number());
        assert!(meta["max_seq_in_list"].is_number());
        assert!(meta["persisted_ops_count"].is_number());
    }

    #[test]
    fn tool_dump_crdt_with_story_id_filter_returns_valid_json() {
        // Filtering on a story that doesn't exist must yield an empty list,
        // not an error.
        let result =
            super::super::tool_dump_crdt(&json!({"story_id": "9999_story_nonexistent"})).unwrap();
        let parsed: Value = serde_json::from_str(&result).unwrap();
        assert!(parsed["items"].as_array().unwrap().is_empty());
    }

    #[test]
    fn dump_crdt_in_tools_list() {
        use super::super::super::tools_list::handle_tools_list;
        let resp = handle_tools_list(Some(json!(1)));
        let tools = resp.result.unwrap()["tools"].as_array().unwrap().clone();
        let tool = tools.iter().find(|t| t["name"] == "dump_crdt");
        assert!(tool.is_some(), "dump_crdt missing from tools list");
        let t = tool.unwrap();
        assert!(
            t["description"]
                .as_str()
                .unwrap()
                .to_lowercase()
                .contains("debug"),
            "description must mention this is a debug tool"
        );
        assert!(t["inputSchema"].is_object());
    }
}
|