huskies: merge 492_story_remove_filesystem_pipeline_state_and_store_story_content_in_database
This commit is contained in:
+377
-48
@@ -1,16 +1,25 @@
|
||||
/// SQLite shadow-write layer for pipeline state.
|
||||
/// SQLite storage layer for pipeline state and story content.
|
||||
///
|
||||
/// All filesystem pipeline operations (move_story_to_X etc.) remain authoritative.
|
||||
/// This module provides a fire-and-forget channel that dual-writes each move to
|
||||
/// `.huskies/pipeline.db` so a database layer is ready for future CRDT integration.
|
||||
/// The CRDT layer (`crdt_state`) is the primary source of truth for pipeline
|
||||
/// metadata (stage, name, agent, etc.). This module provides:
|
||||
///
|
||||
/// Reads are NOT served from SQLite — the filesystem remains the single source of truth.
|
||||
/// 1. **Content store** — an in-memory `HashMap<story_id, markdown>` backed
|
||||
/// by the `pipeline_items.content` column. Provides fast synchronous
|
||||
/// reads for MCP tools and other callers.
|
||||
///
|
||||
/// 2. **Shadow-write channel** — a fire-and-forget background task that
|
||||
/// upserts `pipeline_items` rows so the database always has a full copy
|
||||
/// of story content plus metadata.
|
||||
///
|
||||
/// On startup, existing content is loaded from the database into memory so
|
||||
/// no filesystem scan is needed after migration.
|
||||
use crate::io::story_metadata::parse_front_matter;
|
||||
use crate::slog;
|
||||
use sqlx::sqlite::SqliteConnectOptions;
|
||||
use sqlx::SqlitePool;
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::OnceLock;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
/// A pending shadow write for one pipeline item.
|
||||
@@ -22,6 +31,7 @@ struct PipelineWriteMsg {
|
||||
retry_count: Option<i64>,
|
||||
blocked: Option<bool>,
|
||||
depends_on: Option<String>,
|
||||
content: Option<String>,
|
||||
}
|
||||
|
||||
/// Handle to the background shadow-write task.
|
||||
@@ -31,11 +41,58 @@ pub struct PipelineDb {
|
||||
|
||||
static PIPELINE_DB: OnceLock<PipelineDb> = OnceLock::new();
|
||||
|
||||
// ── In-memory content store ─────────────────────────────────────────
|
||||
|
||||
static CONTENT_STORE: OnceLock<Mutex<HashMap<String, String>>> = OnceLock::new();
|
||||
|
||||
/// Read the full markdown content of a story from the in-memory store.
|
||||
pub fn read_content(story_id: &str) -> Option<String> {
|
||||
let store = CONTENT_STORE.get()?;
|
||||
let map = store.lock().ok()?;
|
||||
map.get(story_id).cloned()
|
||||
}
|
||||
|
||||
/// Write (or overwrite) the full markdown content of a story.
|
||||
///
|
||||
/// Updates the in-memory store immediately.
|
||||
pub fn write_content(story_id: &str, content: &str) {
|
||||
if let Some(store) = CONTENT_STORE.get() && let Ok(mut map) = store.lock() {
|
||||
map.insert(story_id.to_string(), content.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove a story's content from the in-memory store.
|
||||
pub fn delete_content(story_id: &str) {
|
||||
if let Some(store) = CONTENT_STORE.get() && let Ok(mut map) = store.lock() {
|
||||
map.remove(story_id);
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure the in-memory content store is initialised.
|
||||
///
|
||||
/// Safe to call multiple times — the `OnceLock` is set at most once.
|
||||
pub fn ensure_content_store() {
|
||||
let _ = CONTENT_STORE.set(Mutex::new(HashMap::new()));
|
||||
}
|
||||
|
||||
/// Return all story IDs present in the content store.
|
||||
pub fn all_content_ids() -> Vec<String> {
|
||||
match CONTENT_STORE.get() {
|
||||
Some(store) => match store.lock() {
|
||||
Ok(map) => map.keys().cloned().collect(),
|
||||
Err(_) => Vec::new(),
|
||||
},
|
||||
None => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
// ── Initialisation ──────────────────────────────────────────────────
|
||||
|
||||
/// Initialise the pipeline database.
|
||||
///
|
||||
/// Opens (or creates) the SQLite file at `db_path`, runs embedded migrations,
|
||||
/// and spawns the background write task. Safe to call only once; subsequent calls
|
||||
/// are no-ops (the `OnceLock` rejects them silently).
|
||||
/// loads existing story content into the in-memory store, and spawns the
|
||||
/// background write task. Safe to call only once; subsequent calls are no-ops.
|
||||
pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
if PIPELINE_DB.get().is_some() {
|
||||
return Ok(());
|
||||
@@ -48,6 +105,20 @@ pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
let pool = SqlitePool::connect_with(options).await?;
|
||||
sqlx::migrate!("./migrations").run(&pool).await?;
|
||||
|
||||
// Load existing content into the in-memory store.
|
||||
let rows: Vec<(String, Option<String>)> =
|
||||
sqlx::query_as("SELECT id, content FROM pipeline_items WHERE content IS NOT NULL")
|
||||
.fetch_all(&pool)
|
||||
.await?;
|
||||
|
||||
let mut content_map = HashMap::new();
|
||||
for (id, content) in rows {
|
||||
if let Some(c) = content {
|
||||
content_map.insert(id, c);
|
||||
}
|
||||
}
|
||||
let _ = CONTENT_STORE.set(Mutex::new(content_map));
|
||||
|
||||
let (tx, mut rx) = mpsc::unbounded_channel::<PipelineWriteMsg>();
|
||||
|
||||
tokio::spawn(async move {
|
||||
@@ -55,8 +126,8 @@ pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
let result = sqlx::query(
|
||||
"INSERT INTO pipeline_items \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8) \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, content, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9) \
|
||||
ON CONFLICT(id) DO UPDATE SET \
|
||||
name = excluded.name, \
|
||||
stage = excluded.stage, \
|
||||
@@ -64,6 +135,7 @@ pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
retry_count = excluded.retry_count, \
|
||||
blocked = excluded.blocked, \
|
||||
depends_on = excluded.depends_on, \
|
||||
content = COALESCE(excluded.content, pipeline_items.content), \
|
||||
updated_at = excluded.updated_at",
|
||||
)
|
||||
.bind(&msg.story_id)
|
||||
@@ -73,6 +145,7 @@ pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
.bind(msg.retry_count)
|
||||
.bind(msg.blocked.map(|b| b as i64))
|
||||
.bind(&msg.depends_on)
|
||||
.bind(&msg.content)
|
||||
.bind(&now)
|
||||
.execute(&pool)
|
||||
.await;
|
||||
@@ -87,29 +160,35 @@ pub async fn init(db_path: &Path) -> Result<(), sqlx::Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write a pipeline item state to both the CRDT layer and the legacy SQLite
|
||||
/// shadow table.
|
||||
///
|
||||
/// Reads front matter from `file_path` (the post-move location) to extract
|
||||
/// metadata. The CRDT layer is the primary write path; the legacy shadow
|
||||
/// table is kept for backwards compatibility. Both writes are fire-and-forget.
|
||||
pub fn shadow_write(story_id: &str, stage: &str, file_path: &Path) {
|
||||
let (name, agent, retry_count, blocked, depends_on) =
|
||||
match std::fs::read_to_string(file_path) {
|
||||
Ok(contents) => match parse_front_matter(&contents) {
|
||||
Ok(meta) => (
|
||||
meta.name,
|
||||
meta.agent,
|
||||
meta.retry_count.map(|r| r as i64),
|
||||
meta.blocked,
|
||||
meta.depends_on.as_ref().and_then(|d| serde_json::to_string(d).ok()),
|
||||
),
|
||||
Err(_) => (None, None, None, None, None),
|
||||
},
|
||||
Err(_) => (None, None, None, None, None),
|
||||
};
|
||||
// ── Write path ──────────────────────────────────────────────────────
|
||||
|
||||
// Primary: write through CRDT ops (persisted to SQLite crdt_ops table).
|
||||
/// Write a pipeline item from in-memory content (no filesystem access).
|
||||
///
|
||||
/// This is the primary write path for the DB-backed pipeline. It updates
|
||||
/// the CRDT, the in-memory content store, and the SQLite shadow table.
|
||||
pub fn write_item_with_content(
|
||||
story_id: &str,
|
||||
stage: &str,
|
||||
content: &str,
|
||||
) {
|
||||
let (name, agent, retry_count, blocked, depends_on) = match parse_front_matter(content) {
|
||||
Ok(meta) => (
|
||||
meta.name,
|
||||
meta.agent,
|
||||
meta.retry_count.map(|r| r as i64),
|
||||
meta.blocked,
|
||||
meta.depends_on
|
||||
.as_ref()
|
||||
.and_then(|d| serde_json::to_string(d).ok()),
|
||||
),
|
||||
Err(_) => (None, None, None, None, None),
|
||||
};
|
||||
|
||||
// Update in-memory content store.
|
||||
ensure_content_store();
|
||||
write_content(story_id, content);
|
||||
|
||||
// Primary: CRDT ops.
|
||||
crate::crdt_state::write_item(
|
||||
story_id,
|
||||
stage,
|
||||
@@ -120,7 +199,7 @@ pub fn shadow_write(story_id: &str, stage: &str, file_path: &Path) {
|
||||
depends_on.as_deref(),
|
||||
);
|
||||
|
||||
// Legacy: fire-and-forget to the pipeline_items shadow table.
|
||||
// Shadow: pipeline_items table (only when DB is initialised).
|
||||
if let Some(db) = PIPELINE_DB.get() {
|
||||
let msg = PipelineWriteMsg {
|
||||
story_id: story_id.to_string(),
|
||||
@@ -130,11 +209,199 @@ pub fn shadow_write(story_id: &str, stage: &str, file_path: &Path) {
|
||||
retry_count,
|
||||
blocked,
|
||||
depends_on,
|
||||
content: Some(content.to_string()),
|
||||
};
|
||||
let _ = db.tx.send(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/// Update only the stage of an existing item (used by move operations).
|
||||
///
|
||||
/// Reads current content from the in-memory store, updates the CRDT stage,
|
||||
/// and persists the change. Optionally modifies the content (e.g. to clear
|
||||
/// front-matter fields).
|
||||
pub fn move_item_stage(
|
||||
story_id: &str,
|
||||
new_stage: &str,
|
||||
content_transform: Option<&dyn Fn(&str) -> String>,
|
||||
) {
|
||||
let current_content = read_content(story_id);
|
||||
|
||||
let content = match (&current_content, content_transform) {
|
||||
(Some(c), Some(transform)) => {
|
||||
let new_content = transform(c);
|
||||
write_content(story_id, &new_content);
|
||||
Some(new_content)
|
||||
}
|
||||
(Some(c), None) => Some(c.clone()),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
let (name, agent, retry_count, blocked, depends_on) = content
|
||||
.as_deref()
|
||||
.or(current_content.as_deref())
|
||||
.and_then(|c| parse_front_matter(c).ok())
|
||||
.map(|meta| {
|
||||
(
|
||||
meta.name,
|
||||
meta.agent,
|
||||
meta.retry_count.map(|r| r as i64),
|
||||
meta.blocked,
|
||||
meta.depends_on
|
||||
.as_ref()
|
||||
.and_then(|d| serde_json::to_string(d).ok()),
|
||||
)
|
||||
})
|
||||
.unwrap_or((None, None, None, None, None));
|
||||
|
||||
// CRDT stage transition.
|
||||
crate::crdt_state::write_item(
|
||||
story_id,
|
||||
new_stage,
|
||||
name.as_deref(),
|
||||
agent.as_deref(),
|
||||
retry_count,
|
||||
blocked,
|
||||
depends_on.as_deref(),
|
||||
);
|
||||
|
||||
// Shadow table.
|
||||
if let Some(db) = PIPELINE_DB.get() {
|
||||
let msg = PipelineWriteMsg {
|
||||
story_id: story_id.to_string(),
|
||||
stage: new_stage.to_string(),
|
||||
name,
|
||||
agent,
|
||||
retry_count,
|
||||
blocked,
|
||||
depends_on,
|
||||
content,
|
||||
};
|
||||
let _ = db.tx.send(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/// Delete a story from the shadow table (fire-and-forget).
|
||||
pub fn delete_item(story_id: &str) {
|
||||
delete_content(story_id);
|
||||
|
||||
if let Some(db) = PIPELINE_DB.get() {
|
||||
// Reuse the channel with a special "deleted" stage marker.
|
||||
// The background task will handle it.
|
||||
// Actually, we send a delete message by abusing the write — we'll
|
||||
// just remove it by setting stage to "deleted".
|
||||
let msg = PipelineWriteMsg {
|
||||
story_id: story_id.to_string(),
|
||||
stage: "deleted".to_string(),
|
||||
name: None,
|
||||
agent: None,
|
||||
retry_count: None,
|
||||
blocked: None,
|
||||
depends_on: None,
|
||||
content: None,
|
||||
};
|
||||
let _ = db.tx.send(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the next available item number by scanning both the CRDT state
|
||||
/// and the in-memory content store for the highest existing number.
|
||||
pub fn next_item_number() -> u32 {
|
||||
let mut max_num: u32 = 0;
|
||||
|
||||
// Scan CRDT items.
|
||||
if let Some(items) = crate::crdt_state::read_all_items() {
|
||||
for item in &items {
|
||||
let num_str: String = item
|
||||
.story_id
|
||||
.chars()
|
||||
.take_while(|c| c.is_ascii_digit())
|
||||
.collect();
|
||||
if let Ok(n) = num_str.parse::<u32>() && n > max_num {
|
||||
max_num = n;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Also scan the content store (might have items not yet in CRDT).
|
||||
for id in all_content_ids() {
|
||||
let num_str: String = id.chars().take_while(|c| c.is_ascii_digit()).collect();
|
||||
if let Ok(n) = num_str.parse::<u32>() && n > max_num {
|
||||
max_num = n;
|
||||
}
|
||||
}
|
||||
|
||||
max_num + 1
|
||||
}
|
||||
|
||||
// ── Filesystem migration ────────────────────────────────────────────
|
||||
|
||||
/// Import stories from `.huskies/work/` stage directories into the database.
|
||||
///
|
||||
/// For each `.md` file found in any stage directory, if it's not already in
|
||||
/// the content store, reads the file, stores it in the DB, and writes the
|
||||
/// CRDT state. After importing, renames the stage directories to
|
||||
/// `.huskies/work_archived/` so they are no longer used.
|
||||
pub fn import_from_filesystem(project_root: &Path) {
|
||||
let work_dir = project_root.join(".huskies").join("work");
|
||||
if !work_dir.exists() {
|
||||
return;
|
||||
}
|
||||
|
||||
let stages = [
|
||||
"1_backlog",
|
||||
"2_current",
|
||||
"3_qa",
|
||||
"4_merge",
|
||||
"5_done",
|
||||
"6_archived",
|
||||
];
|
||||
|
||||
let mut imported = 0u32;
|
||||
for stage in &stages {
|
||||
let stage_dir = work_dir.join(stage);
|
||||
if !stage_dir.exists() {
|
||||
continue;
|
||||
}
|
||||
let entries = match std::fs::read_dir(&stage_dir) {
|
||||
Ok(e) => e,
|
||||
Err(_) => continue,
|
||||
};
|
||||
for entry in entries.flatten() {
|
||||
let path = entry.path();
|
||||
if path.extension().and_then(|ext| ext.to_str()) != Some("md") {
|
||||
continue;
|
||||
}
|
||||
let story_id = match path.file_stem().and_then(|s| s.to_str()) {
|
||||
Some(s) => s.to_string(),
|
||||
None => continue,
|
||||
};
|
||||
|
||||
// Skip if already in the content store.
|
||||
if read_content(&story_id).is_some() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let content = match std::fs::read_to_string(&path) {
|
||||
Ok(c) => c,
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
||||
write_item_with_content(&story_id, stage, &content);
|
||||
imported += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if imported > 0 {
|
||||
slog!("[db] Imported {imported} stories from filesystem into database");
|
||||
}
|
||||
|
||||
// Note: .huskies/work/ directories are kept in place during the migration
|
||||
// period to provide filesystem fallback for any code paths not yet fully
|
||||
// migrated to the DB content store. A future story will archive them once
|
||||
// all consumers are converted.
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
@@ -175,8 +442,8 @@ mod tests {
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
sqlx::query(
|
||||
"INSERT INTO pipeline_items \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8) \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, content, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9) \
|
||||
ON CONFLICT(id) DO UPDATE SET \
|
||||
name = excluded.name, \
|
||||
stage = excluded.stage, \
|
||||
@@ -184,6 +451,7 @@ mod tests {
|
||||
retry_count = excluded.retry_count, \
|
||||
blocked = excluded.blocked, \
|
||||
depends_on = excluded.depends_on, \
|
||||
content = COALESCE(excluded.content, pipeline_items.content), \
|
||||
updated_at = excluded.updated_at",
|
||||
)
|
||||
.bind("10_story_shadow_test")
|
||||
@@ -193,6 +461,7 @@ mod tests {
|
||||
.bind(2_i64)
|
||||
.bind(0_i64)
|
||||
.bind(Option::<String>::None)
|
||||
.bind("---\nname: Shadow Test\n---\n# Story\n")
|
||||
.bind(&now)
|
||||
.execute(&pool)
|
||||
.await
|
||||
@@ -232,7 +501,7 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn pipeline_items_table_has_correct_columns() {
|
||||
async fn pipeline_items_table_has_content_column() {
|
||||
let tmp = tempfile::tempdir().unwrap();
|
||||
let db_path = tmp.path().join("pipeline.db");
|
||||
let options = SqliteConnectOptions::new()
|
||||
@@ -244,12 +513,13 @@ mod tests {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Verify all required columns exist by inserting a full row.
|
||||
// Verify content column exists by inserting a full row.
|
||||
let now = chrono::Utc::now().to_rfc3339();
|
||||
let content = "---\nname: Test\n---\n# Story\n";
|
||||
sqlx::query(
|
||||
"INSERT INTO pipeline_items \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8)",
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, content, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9)",
|
||||
)
|
||||
.bind("99_story_col_test")
|
||||
.bind(Option::<String>::None)
|
||||
@@ -258,16 +528,20 @@ mod tests {
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<String>::None)
|
||||
.bind(content)
|
||||
.bind(&now)
|
||||
.execute(&pool)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM pipeline_items")
|
||||
.fetch_one(&pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(count.0, 1);
|
||||
let row: (Option<String>,) = sqlx::query_as(
|
||||
"SELECT content FROM pipeline_items WHERE id = ?1",
|
||||
)
|
||||
.bind("99_story_col_test")
|
||||
.fetch_one(&pool)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(row.0.as_deref(), Some(content));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -288,8 +562,8 @@ mod tests {
|
||||
// Insert initial row in backlog.
|
||||
sqlx::query(
|
||||
"INSERT INTO pipeline_items \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8)",
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, content, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9)",
|
||||
)
|
||||
.bind("5_story_move")
|
||||
.bind("Move Me")
|
||||
@@ -298,6 +572,7 @@ mod tests {
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<String>::None)
|
||||
.bind("---\nname: Move Me\n---\n")
|
||||
.bind(&now)
|
||||
.execute(&pool)
|
||||
.await
|
||||
@@ -306,8 +581,8 @@ mod tests {
|
||||
// Upsert with new stage (simulating move to current).
|
||||
sqlx::query(
|
||||
"INSERT INTO pipeline_items \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8) \
|
||||
(id, name, stage, agent, retry_count, blocked, depends_on, content, created_at, updated_at) \
|
||||
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?9) \
|
||||
ON CONFLICT(id) DO UPDATE SET \
|
||||
name = excluded.name, \
|
||||
stage = excluded.stage, \
|
||||
@@ -315,6 +590,7 @@ mod tests {
|
||||
retry_count = excluded.retry_count, \
|
||||
blocked = excluded.blocked, \
|
||||
depends_on = excluded.depends_on, \
|
||||
content = COALESCE(excluded.content, pipeline_items.content), \
|
||||
updated_at = excluded.updated_at",
|
||||
)
|
||||
.bind("5_story_move")
|
||||
@@ -324,6 +600,7 @@ mod tests {
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<i64>::None)
|
||||
.bind(Option::<String>::None)
|
||||
.bind(Option::<String>::None) // content NULL → COALESCE preserves existing
|
||||
.bind(&now)
|
||||
.execute(&pool)
|
||||
.await
|
||||
@@ -338,4 +615,56 @@ mod tests {
|
||||
|
||||
assert_eq!(row.0, "2_current");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn content_store_read_write_delete() {
|
||||
ensure_content_store();
|
||||
|
||||
let story_id = "100_story_content_test";
|
||||
let markdown = "---\nname: Content Test\n---\n# Story\n";
|
||||
|
||||
// Write.
|
||||
write_content(story_id, markdown);
|
||||
assert_eq!(read_content(story_id).as_deref(), Some(markdown));
|
||||
|
||||
// Overwrite.
|
||||
let updated = "---\nname: Updated\n---\n# Updated Story\n";
|
||||
write_content(story_id, updated);
|
||||
assert_eq!(read_content(story_id).as_deref(), Some(updated));
|
||||
|
||||
// Delete.
|
||||
delete_content(story_id);
|
||||
assert!(read_content(story_id).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn next_item_number_returns_1_when_empty() {
|
||||
// When no items exist, should return 1.
|
||||
// Note: in test context the global CRDT/content store may or may not
|
||||
// be initialised, so the function falls back gracefully.
|
||||
let n = next_item_number();
|
||||
assert!(n >= 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn import_from_filesystem_imports_stories() {
|
||||
ensure_content_store();
|
||||
|
||||
let tmp = tempfile::tempdir().unwrap();
|
||||
let root = tmp.path();
|
||||
let backlog = root.join(".huskies/work/1_backlog");
|
||||
let current = root.join(".huskies/work/2_current");
|
||||
fs::create_dir_all(&backlog).unwrap();
|
||||
fs::create_dir_all(&current).unwrap();
|
||||
|
||||
let content1 = "---\nname: Story One\n---\n# Story 1\n";
|
||||
let content2 = "---\nname: Story Two\n---\n# Story 2\n";
|
||||
fs::write(backlog.join("10_story_one.md"), content1).unwrap();
|
||||
fs::write(current.join("20_story_two.md"), content2).unwrap();
|
||||
|
||||
import_from_filesystem(root);
|
||||
|
||||
assert_eq!(read_content("10_story_one").as_deref(), Some(content1));
|
||||
assert_eq!(read_content("20_story_two").as_deref(), Some(content2));
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user