huskies: merge 1023

This commit is contained in:
dave
2026-05-14 11:19:15 +00:00
parent 8e996e2bd3
commit c64deca7c2
4 changed files with 263 additions and 17 deletions
+12 -1
View File
@@ -16,9 +16,20 @@ pub(crate) fn tool_get_server_logs(args: &Value) -> Result<String, String> {
let lines_count = args
.get("lines")
.and_then(|v| v.as_u64())
.map(|n| n.min(1000) as usize)
.map(|n| n.min(10_000) as usize)
.unwrap_or(100);
let filter = args.get("filter").and_then(|v| v.as_str());
let from_file = args
.get("from_file")
.and_then(|v| v.as_bool())
.unwrap_or(false);
if from_file {
let offset = args.get("offset").and_then(|v| v.as_u64()).unwrap_or(0) as usize;
let lines = log_buffer::global().read_from_disk(filter, offset, lines_count);
return Ok(lines.join("\n"));
}
let severity = args
.get("severity")
.and_then(|v| v.as_str())
+12 -4
View File
@@ -7,21 +7,29 @@ pub(super) fn system_tools() -> Vec<Value> {
vec![
json!({
"name": "get_server_logs",
"description": "Return recent server log lines captured in the in-process ring buffer. Useful for diagnosing runtime behaviour such as WebSocket events, MCP call flow, and filesystem watcher activity.",
"description": "Return server log lines. By default reads from the in-process ring buffer (last 1000 entries). Pass from_file: true to read from the on-disk daily log files, which retain history beyond the ring buffer. The file path supports offset for pagination.",
"inputSchema": {
"type": "object",
"properties": {
"lines": {
"type": "integer",
"description": "Number of recent lines to return (default 100, max 1000)"
"description": "Number of lines to return (default 100; ring-buffer max 1000, file max 10000)"
},
"filter": {
"type": "string",
"description": "Optional substring filter (e.g. 'watcher', 'mcp', 'permission')"
"description": "Optional substring filter applied to each log line (e.g. 'watcher', 'mcp', 'ERROR')"
},
"severity": {
"type": "string",
"description": "Filter by severity level: ERROR, WARN, or INFO. Returns only entries at that level."
"description": "Ring-buffer only: filter by severity level ERROR, WARN, or INFO"
},
"from_file": {
"type": "boolean",
"description": "If true, read from on-disk daily log files instead of the in-memory ring. Supports older history and the offset parameter."
},
"offset": {
"type": "integer",
"description": "File mode only: skip the first N matching lines before returning results (for pagination)"
}
}
}
+238 -11
View File
@@ -9,11 +9,13 @@
use std::collections::VecDeque;
use std::fs::OpenOptions;
use std::io::Write;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::{Mutex, OnceLock};
use tokio::sync::broadcast;
const CAPACITY: usize = 1000;
/// Number of daily log files to keep on disk before pruning older ones.
const DEFAULT_KEEP_DAYS: u64 = 7;
/// Severity level for a log entry.
#[derive(Debug, Clone, PartialEq, Eq)]
@@ -76,10 +78,17 @@ impl LogEntry {
}
}
/// Internal state for the on-disk log: directory and last-written date.
struct LogDiskState {
    // Target directory for the daily `server-YYYY-MM-DD.log` files.
    // `None` until `set_log_dir` is called; while `None`, pushes skip the
    // disk write entirely.
    dir: Option<PathBuf>,
    /// `YYYY-MM-DD` of the last written entry — used to detect day rollover.
    // Starts empty, so the very first push after startup is treated as a
    // rollover and triggers a prune pass.
    last_date: String,
}
/// Bounded in-memory ring buffer holding recent log entries and a broadcast channel for live streaming.
pub struct LogBuffer {
    // Most recent entries; allocated with CAPACITY up front, oldest evicted first.
    entries: Mutex<VecDeque<LogEntry>>,
    // NOTE(review): this diff shows both `log_file` (the old single-file path)
    // and `disk` (the new daily-rotation state) — `log_file` looks like the
    // removed side of the hunk; confirm only `disk` survives in the final file.
    log_file: Mutex<Option<PathBuf>>,
    // On-disk daily-rotation state: target directory plus last-written date.
    disk: Mutex<LogDiskState>,
    /// Broadcast channel for live log streaming to WebSocket subscribers.
    broadcast_tx: broadcast::Sender<LogEntry>,
}
@@ -89,7 +98,10 @@ impl LogBuffer {
let (broadcast_tx, _) = broadcast::channel(512);
Self {
entries: Mutex::new(VecDeque::with_capacity(CAPACITY)),
log_file: Mutex::new(None),
disk: Mutex::new(LogDiskState {
dir: None,
last_date: String::new(),
}),
broadcast_tx,
}
}
@@ -99,11 +111,15 @@ impl LogBuffer {
self.broadcast_tx.subscribe()
}
/// Set the persistent log file path. Call once at startup after the
/// project root is known.
pub fn set_log_file(&self, path: PathBuf) {
if let Ok(mut f) = self.log_file.lock() {
*f = Some(path);
/// Point the logger at a directory for daily-rotated persistent files.
///
/// Entries are appended to `server-YYYY-MM-DD.log` under `dir`; files older
/// than [`DEFAULT_KEEP_DAYS`] are pruned immediately and again whenever the
/// date rolls over. Intended to be called once at startup, after the project
/// root is known.
pub fn set_log_dir(&self, dir: PathBuf) {
    // Prune eagerly so stale files disappear even if nothing is logged today.
    prune_old_logs(dir.as_path(), DEFAULT_KEEP_DAYS);
    match self.disk.lock() {
        // Publish the directory; subsequent pushes write daily files here.
        Ok(mut state) => state.dir = Some(dir),
        // Poisoned lock: disk logging is best-effort, silently skip.
        Err(_) => {}
    }
}
@@ -117,9 +133,35 @@ impl LogBuffer {
};
eprintln!("{}", entry.colored_formatted());
// Append to persistent log file (best-effort).
if let Ok(guard) = self.log_file.lock()
&& let Some(ref path) = *guard
// Compute today's log-file path and detect day rollover under a single lock.
let (log_path, prune_dir) = {
match self.disk.lock() {
Ok(mut state) => {
if let Some(dir) = state.dir.clone() {
let today = chrono::Utc::now().format("%Y-%m-%d").to_string();
let path = dir.join(format!("server-{today}.log"));
let maybe_prune = if state.last_date != today {
state.last_date = today;
Some(dir)
} else {
None
};
(Some(path), maybe_prune)
} else {
(None, None)
}
}
Err(_) => (None, None),
}
};
// Prune old files after releasing the lock (filesystem I/O outside the lock).
if let Some(ref dir) = prune_dir {
prune_old_logs(dir, DEFAULT_KEEP_DAYS);
}
// Append to the current day's log file (best-effort).
if let Some(ref path) = log_path
&& let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path)
{
let _ = writeln!(file, "{}", entry.formatted());
@@ -136,6 +178,26 @@ impl LogBuffer {
let _ = self.broadcast_tx.send(entry);
}
/// Fetch historical log lines from the on-disk daily files.
///
/// Walks every `server-YYYY-MM-DD.log` under the configured directory in
/// date order, skips the first `offset` lines matching `filter`, and returns
/// at most `limit` of the remainder. Complements the in-memory ring by
/// exposing history the ring has already evicted.
pub fn read_from_disk(&self, filter: Option<&str>, offset: usize, limit: usize) -> Vec<String> {
    if limit == 0 {
        return Vec::new();
    }
    // Copy the directory out inside a short block so the mutex is released
    // before any filesystem work happens below.
    let dir = {
        let Ok(state) = self.disk.lock() else {
            // Poisoned lock: treat as "no disk log configured".
            return Vec::new();
        };
        state.dir.clone()
    };
    match dir {
        Some(d) => read_log_dir(&d, filter, offset, limit),
        None => Vec::new(),
    }
}
/// Return up to `count` recent log lines as formatted strings,
/// optionally filtered by substring and/or severity level.
/// Lines are returned in chronological order (oldest first).
@@ -196,6 +258,85 @@ pub fn global() -> &'static LogBuffer {
GLOBAL.get_or_init(LogBuffer::new)
}
/// Delete daily log files older than `keep_days` from `dir`.
fn prune_old_logs(dir: &Path, keep_days: u64) {
let cutoff = chrono::Utc::now()
.checked_sub_signed(chrono::Duration::days(keep_days as i64))
.map(|t| t.format("%Y-%m-%d").to_string())
.unwrap_or_default();
let Ok(entries) = std::fs::read_dir(dir) else {
return;
};
for entry in entries.filter_map(|e| e.ok()) {
let path = entry.path();
let Some(name) = path.file_name().and_then(|n| n.to_str()) else {
continue;
};
// Match "server-YYYY-MM-DD.log"
if name.starts_with("server-") && name.ends_with(".log") && name.len() == 21 {
// SAFETY: "server-" is 7 ASCII bytes, ".log" is 4, total 21 chars means
// the middle 10 bytes are the date "YYYY-MM-DD" — all ASCII, safe to slice.
if let Some(date_part) = name.get(7..17)
&& date_part < cutoff.as_str()
{
let _ = std::fs::remove_file(&path);
}
}
}
}
/// Collect log lines from every daily file in `dir`, honouring filter, offset, and limit.
fn read_log_dir(dir: &Path, filter: Option<&str>, offset: usize, limit: usize) -> Vec<String> {
    use std::io::BufRead;

    // Gather the daily files; sorting by name is chronological order for the
    // `server-YYYY-MM-DD.log` naming scheme.
    let entries = match std::fs::read_dir(dir) {
        Ok(e) => e,
        Err(_) => return Vec::new(),
    };
    let mut files: Vec<PathBuf> = entries
        .filter_map(|e| e.ok())
        .map(|e| e.path())
        .filter(|p| {
            matches!(
                p.file_name().and_then(|n| n.to_str()),
                Some(n) if n.starts_with("server-") && n.ends_with(".log")
            )
        })
        .collect();
    files.sort();

    let mut remaining_skip = offset;
    let mut out: Vec<String> = Vec::new();
    for path in files {
        // Unreadable file: best-effort, move on to the next day.
        let Ok(file) = std::fs::File::open(&path) else {
            continue;
        };
        // `.flatten()` drops unreadable lines, matching the original's
        // skip-on-error behaviour.
        for line in std::io::BufReader::new(file).lines().flatten() {
            // Filter before offset accounting so `offset` counts *matching*
            // lines only.
            if filter.is_some_and(|f| !line.contains(f)) {
                continue;
            }
            if remaining_skip > 0 {
                remaining_skip -= 1;
                continue;
            }
            out.push(line);
            if out.len() >= limit {
                return out;
            }
        }
    }
    out
}
/// Write an INFO log to stderr **and** capture it in the ring buffer.
///
/// Usage is identical to `eprintln!`:
@@ -432,6 +573,92 @@ mod tests {
assert!(colored.contains("test info"));
}
#[test]
fn read_from_disk_returns_history_beyond_ring() {
    // Create a unique temp dir so parallel test runs do not interfere.
    let tmp = std::env::temp_dir().join(format!(
        "huskies_log_test_{}",
        std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_nanos()
    ));
    std::fs::create_dir_all(&tmp).unwrap();
    let buf = LogBuffer::new();
    buf.set_log_dir(tmp.clone());
    // Write CAPACITY + 1 entries so entry 0 is evicted from the ring buffer.
    // NOTE(review): relies on push_entry appending to both the ring and the
    // on-disk daily file — confirm against the push implementation.
    for i in 0..=CAPACITY {
        buf.push_entry(LogLevel::Info, format!("history-line-{i}"));
    }
    // The ring has evicted "history-line-0" (replaced by "history-line-1000").
    let ring = buf.get_recent(CAPACITY + 1, None, None);
    assert_eq!(
        ring.len(),
        CAPACITY,
        "ring must hold exactly CAPACITY entries"
    );
    assert!(
        !ring.iter().any(|l| l.ends_with("history-line-0")),
        "history-line-0 must have been evicted from the ring"
    );
    // The on-disk path still has it.
    let disk = buf.read_from_disk(Some("history-line-0"), 0, 10);
    // Clean up before asserting so a failing assert does not leak the dir.
    let _ = std::fs::remove_dir_all(&tmp);
    // "history-line-0" as a substring only matches the entry whose message
    // is exactly "history-line-0", not "history-line-100" etc.
    let matching: Vec<_> = disk
        .iter()
        .filter(|l| l.ends_with("history-line-0"))
        .collect();
    assert!(
        !matching.is_empty(),
        "disk log must surface evicted line 0 (disk results: {:?})",
        &disk[..disk.len().min(5)]
    );
}
#[test]
fn read_from_disk_offset_skips_lines() {
    // Per-run unique directory keeps parallel test invocations isolated.
    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos();
    let dir = std::env::temp_dir().join(format!("huskies_log_test_offset_{}", nanos));
    std::fs::create_dir_all(&dir).unwrap();

    let buffer = LogBuffer::new();
    buffer.set_log_dir(dir.clone());
    for i in 0..5 {
        buffer.push_entry(LogLevel::Info, format!("offset-test-{i}"));
    }

    // One read without an offset, one skipping the first two matches.
    let full = buffer.read_from_disk(Some("offset-test-"), 0, 10);
    let tail = buffer.read_from_disk(Some("offset-test-"), 2, 10);

    // Remove the temp dir before asserting so failures don't leak it.
    let _ = std::fs::remove_dir_all(&dir);

    assert_eq!(full.len(), 5);
    assert_eq!(tail.len(), 3, "offset=2 should skip first 2 matches");
    assert!(tail[0].contains("offset-test-2"));
}
#[test]
fn read_from_disk_no_dir_returns_empty() {
    // A buffer that never had set_log_dir called has no disk directory,
    // so the file-backed read path must yield nothing.
    let buffer = LogBuffer::new();
    let lines = buffer.read_from_disk(None, 0, 100);
    assert!(lines.is_empty());
}
#[test]
fn ring_buffer_entries_have_no_ansi_codes() {
let buf = fresh_buffer();
+1 -1
View File
@@ -113,7 +113,7 @@ pub(crate) async fn init_subsystems(app_state: &Arc<SessionState>, cwd: &Path) {
if let Some(ref root) = *app_state.project_root.lock().unwrap() {
let log_dir = root.join(".huskies").join("logs");
let _ = std::fs::create_dir_all(&log_dir);
log_buffer::global().set_log_file(log_dir.join("server.log"));
log_buffer::global().set_log_dir(log_dir);
}
// Initialise the node's Ed25519 identity keypair (file-based, mode 0600).