use anyhow::{bail, Context, Result};
use chrono::Utc;
use std::path::Path;
use crate::knowledge::edit::{append_to_section_content, extract_body, replace_section_content};
use crate::knowledge::{
parse_frontmatter, serialize_frontmatter, KnowledgeManager, PageFrontmatter, Source,
SyncOutcome,
};
use crate::utils::truncate;
use std::fmt::Write as _;
/// Resolve the acting agent's id from the on-disk agent config.
///
/// Falls back to `"unknown"` when the config cannot be loaded or is absent.
fn current_agent_id(crosslink_dir: &Path) -> String {
    match crate::identity::AgentConfig::load(crosslink_dir) {
        Ok(Some(config)) => config.agent_id,
        _ => "unknown".to_string(),
    }
}
/// Lazily create the knowledge cache the first time any command touches it.
fn ensure_initialized(km: &KnowledgeManager) -> Result<()> {
    if km.is_initialized() {
        return Ok(());
    }
    km.init_cache()?;
    Ok(())
}
/// Print a warning for every page whose merge conflict was auto-resolved
/// during a sync/push (both conflicting versions are kept in the page).
fn warn_resolved_conflicts(outcome: &SyncOutcome) {
    outcome.resolved_conflicts.iter().for_each(|slug| {
        eprintln!(
            "Warning: Merge conflict in {slug}.md — both versions kept. A cleanup issue should be created."
        );
    });
}
/// Create a new knowledge page under `slug`.
///
/// Body precedence: explicit `content` > design doc parsed from `from_doc`
/// > a bare `# <title>` heading. Title precedence: explicit `title` flag >
/// design-doc title > the slug itself. When created from a design doc the
/// page is auto-tagged `design-doc`. Fails if the slug already exists.
pub fn add(
    crosslink_dir: &Path,
    slug: &str,
    title: Option<&str>,
    tags: &[String],
    sources: &[String],
    content: Option<&str>,
    from_doc: Option<&Path>,
) -> Result<()> {
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    // Pull remote changes first so the existence check sees the latest state.
    let sync_outcome = km.sync()?;
    warn_resolved_conflicts(&sync_outcome);
    if km.page_exists(slug) {
        bail!("Page '{slug}' already exists. Use 'crosslink knowledge edit' instead.");
    }
    let design_doc = if let Some(path) = from_doc {
        let doc_content = std::fs::read_to_string(path)
            .with_context(|| format!("Failed to read design doc: {}", path.display()))?;
        Some(crate::commands::design_doc::parse_design_doc(&doc_content))
    } else {
        None
    };
    let now = Utc::now().format("%Y-%m-%d").to_string();
    // Title: explicit flag > non-empty design-doc title > slug.
    let display_title = title.map_or_else(
        || {
            design_doc.as_ref().map_or_else(
                || slug.to_string(),
                |doc| {
                    if doc.title.is_empty() {
                        slug.to_string()
                    } else {
                        doc.title.clone()
                    }
                },
            )
        },
        std::string::ToString::to_string,
    );
    let agent_id = current_agent_id(crosslink_dir);
    let parsed_sources: Vec<Source> = sources
        .iter()
        .map(|url| Source {
            url: url.clone(),
            title: String::new(),
            accessed_at: Some(now.clone()),
        })
        .collect();
    let mut all_tags = tags.to_vec();
    if design_doc.is_some() && !all_tags.iter().any(|t| t == "design-doc") {
        all_tags.push("design-doc".to_string());
    }
    let fm = PageFrontmatter {
        title: display_title.clone(),
        tags: all_tags,
        sources: parsed_sources,
        contributors: vec![agent_id],
        created: now.clone(),
        updated: now,
    };
    let mut page_content = serialize_frontmatter(&fm);
    page_content.push('\n');
    if let Some(body) = content {
        page_content.push_str(body);
        if !body.ends_with('\n') {
            page_content.push('\n');
        }
    } else if let Some(ref doc) = design_doc {
        let section = crate::commands::design_doc::build_design_doc_section(doc);
        // BUG FIX: this line previously read `push_str(§ion)` — a garbled
        // `&section` that did not compile.
        page_content.push_str(&section);
    } else {
        writeln!(page_content, "# {display_title}")?;
    }
    km.write_page(slug, &page_content)?;
    km.commit(&format!("knowledge: add {slug}"))?;
    let push_outcome = km.push()?;
    warn_resolved_conflicts(&push_outcome);
    println!("Created knowledge page: {slug}");
    Ok(())
}
/// Print a single knowledge page to stdout.
///
/// With `json` set, only the page's frontmatter is emitted as a JSON
/// object (the body is not included); otherwise the raw page file —
/// frontmatter plus body — is printed verbatim.
pub fn show(crosslink_dir: &Path, slug: &str, json: bool) -> Result<()> {
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    // Sync first so the freshest remote state is shown.
    let sync_outcome = km.sync()?;
    warn_resolved_conflicts(&sync_outcome);
    let content = km.read_page(slug)?;
    if json {
        if let Some(fm) = parse_frontmatter(&content) {
            let json_obj = serde_json::json!({
                "slug": slug,
                "title": fm.title,
                "tags": fm.tags,
                // Sources are mapped by hand so `accessed_at` is omitted
                // (rather than serialized as null) when absent.
                "sources": fm.sources.iter().map(|s| {
                    let mut m = serde_json::Map::new();
                    m.insert("url".to_string(), serde_json::Value::String(s.url.clone()));
                    m.insert("title".to_string(), serde_json::Value::String(s.title.clone()));
                    if let Some(ref a) = s.accessed_at {
                        m.insert("accessed_at".to_string(), serde_json::Value::String(a.clone()));
                    }
                    serde_json::Value::Object(m)
                }).collect::<Vec<_>>(),
                "contributors": fm.contributors,
                "created": fm.created,
                "updated": fm.updated,
            });
            println!("{}", serde_json::to_string_pretty(&json_obj)?);
        } else {
            bail!("Page '{slug}' has no valid frontmatter");
        }
    } else {
        // print! (not println!) — the page content keeps its own trailing
        // newline, if any.
        print!("{content}");
    }
    Ok(())
}
/// List knowledge pages as a fixed-width table (or JSON), optionally
/// filtered by tag, contributor, and minimum `updated` date.
///
/// `since` is compared lexicographically against the frontmatter's
/// `updated` field; dates in this file are formatted `%Y-%m-%d`, so
/// string order matches date order.
pub fn list(
    crosslink_dir: &Path,
    tag_filter: Option<&str>,
    contributor_filter: Option<&str>,
    since: Option<&str>,
    json: bool,
) -> Result<()> {
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    let sync_outcome = km.sync()?;
    warn_resolved_conflicts(&sync_outcome);
    let pages = km.list_pages()?;
    // All active filters must match (AND semantics).
    let filtered: Vec<_> = pages
        .iter()
        .filter(|p| {
            if let Some(tag) = tag_filter {
                if !p.frontmatter.tags.iter().any(|t| t == tag) {
                    return false;
                }
            }
            if let Some(contributor) = contributor_filter {
                if !p.frontmatter.contributors.iter().any(|c| c == contributor) {
                    return false;
                }
            }
            if let Some(since) = since {
                if p.frontmatter.updated.as_str() < since {
                    return false;
                }
            }
            true
        })
        .collect();
    if json {
        print_list_json(&filtered);
        return Ok(());
    }
    if filtered.is_empty() {
        println!("No knowledge pages found.");
        return Ok(());
    }
    // Column widths (30/30/20) leave room for the truncated values below
    // (28/28/18) plus a visual gap between columns.
    println!("{:<30} {:<30} {:<20} UPDATED", "SLUG", "TITLE", "TAGS");
    println!("{}", "-".repeat(90));
    for page in &filtered {
        let tags_str = if page.frontmatter.tags.is_empty() {
            String::new()
        } else {
            page.frontmatter.tags.join(", ")
        };
        let updated = &page.frontmatter.updated;
        println!(
            "{:<30} {:<30} {:<20} {updated}",
            truncate(&page.slug, 28),
            truncate(&page.frontmatter.title, 28),
            truncate(&tags_str, 18),
        );
    }
    println!("\n{} page(s)", filtered.len());
    Ok(())
}
/// Update an existing knowledge page's body and/or metadata.
///
/// Body edit modes (first match wins):
/// 1. `replace_section` + `content` — replace one section's content
/// 2. `append_to_section` + `content` — append under an existing heading
/// 3. `content` alone — replace the entire body
/// 4. `append` — add text at the end of the current body
/// 5. none of the above — body untouched (metadata-only edit)
///
/// The frontmatter's `updated` date is always refreshed, and the current
/// agent, new `tags`, and new `sources` are merged in without duplicates.
#[allow(clippy::too_many_arguments)]
pub fn edit(
    crosslink_dir: &Path,
    slug: &str,
    append: Option<&str>,
    content: Option<&str>,
    replace_section: Option<&str>,
    append_to_section: Option<&str>,
    tags: &[String],
    sources: &[String],
) -> Result<()> {
    // Validate flag combinations before touching the cache.
    if (replace_section.is_some() || append_to_section.is_some()) && content.is_none() {
        bail!("--replace-section and --append-to-section require --content to be specified");
    }
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    let sync_outcome = km.sync()?;
    warn_resolved_conflicts(&sync_outcome);
    if !km.page_exists(slug) {
        bail!("Page '{slug}' not found. Use 'crosslink knowledge add' to create it.");
    }
    let existing = km.read_page(slug)?;
    let now = Utc::now().format("%Y-%m-%d").to_string();
    let agent_id = current_agent_id(crosslink_dir);
    // Tolerate a page with broken/missing frontmatter by synthesizing one.
    let mut fm = parse_frontmatter(&existing).unwrap_or_else(|| PageFrontmatter {
        title: slug.to_string(),
        tags: Vec::new(),
        sources: Vec::new(),
        contributors: Vec::new(),
        created: now.clone(),
        updated: now.clone(),
    });
    fm.updated.clone_from(&now);
    if !fm.contributors.iter().any(|c| c == &agent_id) {
        fm.contributors.push(agent_id);
    }
    for tag in tags {
        if !fm.tags.iter().any(|t| t == tag) {
            fm.tags.push(tag.clone());
        }
    }
    // Sources are deduplicated by URL only; title is left empty here.
    for url in sources {
        if !fm.sources.iter().any(|s| s.url == *url) {
            fm.sources.push(Source {
                url: url.clone(),
                title: String::new(),
                accessed_at: Some(now.clone()),
            });
        }
    }
    let existing_body = extract_body(&existing);
    let new_body = if let Some(heading) = replace_section {
        // Already guarded above; ok_or_else keeps the invariant local.
        let new_content =
            content.ok_or_else(|| anyhow::anyhow!("--replace-section requires --content"))?;
        replace_section_content(existing_body, heading, new_content)?
    } else if let Some(heading) = append_to_section {
        let new_content =
            content.ok_or_else(|| anyhow::anyhow!("--append-to-section requires --content"))?;
        append_to_section_content(existing_body, heading, new_content)?
    } else if let Some(full_content) = content {
        // Whole-body replacement; normalize the trailing newline.
        let mut body = full_content.to_string();
        if !body.ends_with('\n') {
            body.push('\n');
        }
        body
    } else if let Some(append_text) = append {
        // Append with a blank line separating old and new content.
        let mut body = existing_body.to_string();
        if !body.ends_with('\n') {
            body.push('\n');
        }
        body.push('\n');
        body.push_str(append_text);
        if !body.ends_with('\n') {
            body.push('\n');
        }
        body
    } else {
        existing_body.to_string()
    };
    let mut page_content = serialize_frontmatter(&fm);
    page_content.push('\n');
    page_content.push_str(&new_body);
    km.write_page(slug, &page_content)?;
    km.commit(&format!("knowledge: edit {slug}"))?;
    let push_outcome = km.push()?;
    warn_resolved_conflicts(&push_outcome);
    println!("Updated knowledge page: {slug}");
    Ok(())
}
/// Delete a knowledge page, warning about (but not blocking on) any other
/// pages whose content still mentions the removed slug.
pub fn remove(crosslink_dir: &Path, slug: &str) -> Result<()> {
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    warn_resolved_conflicts(&km.sync()?);
    if !km.page_exists(slug) {
        bail!("Page '{slug}' not found");
    }
    // Collect the slugs of every other page that still mentions this one.
    let mut referencing_slugs: Vec<String> = Vec::new();
    for page in km.list_pages()? {
        if page.slug == slug {
            continue;
        }
        if km
            .read_page(&page.slug)
            .is_ok_and(|content| content.contains(slug))
        {
            referencing_slugs.push(page.slug.clone());
        }
    }
    if !referencing_slugs.is_empty() {
        eprintln!(
            "Warning: the following pages reference '{}': {}",
            slug,
            referencing_slugs.join(", ")
        );
    }
    km.delete_page(slug)?;
    km.commit(&format!("knowledge: remove {slug}"))?;
    warn_resolved_conflicts(&km.push()?);
    println!("Removed knowledge page: {slug}");
    Ok(())
}
/// Explicitly synchronize the local knowledge cache with its remote.
pub fn sync(crosslink_dir: &Path) -> Result<()> {
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    warn_resolved_conflicts(&km.sync()?);
    println!("Knowledge cache synced.");
    Ok(())
}
/// Bulk-import every `.md` file under `directory` as knowledge pages.
///
/// Slugs and tags are inferred from each file's path relative to
/// `directory` (see `infer_slug` / `infer_tags_from_path`). Existing pages
/// are skipped unless `overwrite` is set. With `dry_run`, prints what
/// would happen without writing, committing, or pushing anything.
pub fn import(
    crosslink_dir: &Path,
    directory: &Path,
    extra_tags: &[String],
    overwrite: bool,
    dry_run: bool,
) -> Result<()> {
    if !directory.is_dir() {
        bail!("'{}' is not a directory", directory.display());
    }
    let km = KnowledgeManager::new(crosslink_dir)?;
    ensure_initialized(&km)?;
    let sync_outcome = km.sync()?;
    warn_resolved_conflicts(&sync_outcome);
    let files = collect_md_files(directory)?;
    if files.is_empty() {
        println!("No .md files found in '{}'.", directory.display());
        return Ok(());
    }
    let agent_id = current_agent_id(crosslink_dir);
    let now = Utc::now().format("%Y-%m-%d").to_string();
    let mut imported = 0u32;
    let mut skipped = 0u32;
    let mut errors = 0u32;
    for file_path in &files {
        // Paths are reported and slugged relative to the import root.
        let rel = file_path
            .strip_prefix(directory)
            .unwrap_or(file_path.as_path());
        let slug = infer_slug(rel);
        let path_tags = infer_tags_from_path(rel);
        if km.page_exists(&slug) && !overwrite {
            if dry_run {
                println!("[skip] {slug} (exists)");
            }
            skipped += 1;
            continue;
        }
        if dry_run {
            // Dry run: report the would-be action and count it as imported.
            let action = if km.page_exists(&slug) {
                "overwrite"
            } else {
                "import"
            };
            println!("[{}] {} <- {}", action, slug, rel.display());
            imported += 1;
            continue;
        }
        // A single bad file logs an error but does not abort the batch.
        match import_single_file(
            &km, file_path, &slug, &path_tags, extra_tags, &agent_id, &now,
        ) {
            Ok(()) => imported += 1,
            Err(e) => {
                eprintln!("Error importing {}: {}", rel.display(), e);
                errors += 1;
            }
        }
    }
    // Commit/push once for the whole batch, not per file.
    if !dry_run && imported > 0 {
        km.commit(&format!("knowledge: import {imported} page(s)"))?;
        let push_outcome = km.push()?;
        warn_resolved_conflicts(&push_outcome);
    }
    println!("Imported: {imported} | Skipped: {skipped} | Errors: {errors}");
    Ok(())
}
/// Recursively gather every `.md` file under `dir`, sorted for a
/// deterministic import order.
fn collect_md_files(dir: &Path) -> Result<Vec<std::path::PathBuf>> {
    let mut found = Vec::new();
    collect_md_files_recursive(dir, &mut found)?;
    found.sort();
    Ok(found)
}
/// Depth-first walk of `dir`, pushing every file with an `md` extension
/// into `files`. Directories are descended into; other files are ignored.
fn collect_md_files_recursive(dir: &Path, files: &mut Vec<std::path::PathBuf>) -> Result<()> {
    let entries =
        std::fs::read_dir(dir).with_context(|| format!("reading {}", dir.display()))?;
    for entry in entries {
        let path = entry?.path();
        if path.is_dir() {
            collect_md_files_recursive(&path, files)?;
        } else if path.extension().is_some_and(|ext| ext == "md") {
            files.push(path);
        }
    }
    Ok(())
}
/// Derive a page slug from a path relative to the import root: directory
/// components and the file stem are joined with `-`, then sanitized.
fn infer_slug(rel_path: &Path) -> String {
    let stem = rel_path
        .file_stem()
        .unwrap_or_default()
        .to_string_lossy()
        .into_owned();
    let parent = rel_path.parent().unwrap_or_else(|| Path::new(""));
    // Root-level files slug to just their stem.
    if parent == Path::new("") || parent == Path::new(".") {
        return slug_sanitize(&stem);
    }
    let mut parts: Vec<String> = parent
        .components()
        .map(|c| c.as_os_str().to_string_lossy().into_owned())
        .collect();
    parts.push(stem);
    slug_sanitize(&parts.join("-"))
}
/// Treat each directory component of the relative path as a tag.
/// `"."` components are ignored, and a root-level file yields no tags.
fn infer_tags_from_path(rel_path: &Path) -> Vec<String> {
    let mut tags = Vec::new();
    if let Some(parent) = rel_path.parent() {
        for component in parent.components() {
            let name = component.as_os_str().to_string_lossy().into_owned();
            if name != "." {
                tags.push(name);
            }
        }
    }
    tags
}
/// Normalize a string into a slug: lowercase, every character that is not
/// alphanumeric (and not already `-`) becomes `-`, then leading/trailing
/// dashes are trimmed. Runs of dashes are deliberately not collapsed.
fn slug_sanitize(s: &str) -> String {
    let mut slug = String::with_capacity(s.len());
    for c in s.to_lowercase().chars() {
        if c.is_alphanumeric() || c == '-' {
            slug.push(c);
        } else {
            slug.push('-');
        }
    }
    slug.trim_matches('-').to_string()
}
/// Import one markdown file as page `slug` (no commit — the caller batches).
///
/// If the file already carries valid frontmatter it is preserved, with
/// path-derived tags, extra tags, and the importing agent merged in
/// without duplicates. Otherwise fresh frontmatter is generated: the
/// title is the slug with dashes turned into spaces, and `created` /
/// `updated` are both set to `now`.
fn import_single_file(
    km: &KnowledgeManager,
    file_path: &Path,
    slug: &str,
    path_tags: &[String],
    extra_tags: &[String],
    agent_id: &str,
    now: &str,
) -> Result<()> {
    let raw = std::fs::read_to_string(file_path)
        .with_context(|| format!("reading {}", file_path.display()))?;
    let page_content = if let Some(mut fm) = parse_frontmatter(&raw) {
        // Merge tags without duplicating existing ones.
        for tag in path_tags.iter().chain(extra_tags.iter()) {
            if !fm.tags.iter().any(|t| t == tag) {
                fm.tags.push(tag.clone());
            }
        }
        if !fm.contributors.iter().any(|c| c == agent_id) {
            fm.contributors.push(agent_id.to_string());
        }
        // Re-serialize the updated frontmatter over the original body.
        let body = extract_body(&raw);
        let mut content = serialize_frontmatter(&fm);
        content.push('\n');
        content.push_str(body);
        content
    } else {
        let title = slug.replace('-', " ");
        let mut all_tags: Vec<String> = path_tags.to_vec();
        for tag in extra_tags {
            if !all_tags.iter().any(|t| t == tag) {
                all_tags.push(tag.clone());
            }
        }
        let fm = PageFrontmatter {
            title,
            tags: all_tags,
            sources: Vec::new(),
            contributors: vec![agent_id.to_string()],
            created: now.to_string(),
            updated: now.to_string(),
        };
        let mut content = serialize_frontmatter(&fm);
        content.push('\n');
        content.push_str(&raw);
        // Keep the page newline-terminated.
        if !raw.ends_with('\n') {
            content.push('\n');
        }
        content
    };
    km.write_page(slug, &page_content)?;
    Ok(())
}
/// Full-text (or source-domain) search over the knowledge cache.
///
/// Exactly one of `query` or `source` is required. `source` searches
/// cited source URLs by domain substring; otherwise `query` is matched
/// against page content with `context` surrounding lines per hit.
/// Results can be narrowed by `tag`, `since` (updated date), and
/// `contributor`. Unlike the other commands, this one does NOT sync
/// first and exits quietly when the cache is uninitialized.
#[allow(clippy::too_many_arguments)]
pub fn search(
    crosslink_dir: &Path,
    query: Option<&str>,
    context: usize,
    source: Option<&str>,
    json: bool,
    tag: Option<&str>,
    since: Option<&str>,
    contributor: Option<&str>,
) -> Result<()> {
    if query.is_none() && source.is_none() {
        bail!("Provide a search query or --source domain");
    }
    let manager = KnowledgeManager::new(crosslink_dir)?;
    // An uninitialized cache is not an error for search: emit an empty
    // JSON array or a human hint, then succeed.
    if !manager.is_initialized() {
        if json {
            println!("[]");
        } else {
            println!("Knowledge cache not initialized. Run 'crosslink knowledge init' or add a page first.");
        }
        return Ok(());
    }
    if let Some(domain) = source {
        return search_sources(&manager, domain, json);
    }
    // Guaranteed Some by the check at the top; keep the error identical.
    let Some(query) = query else {
        bail!("Provide a search query or --source domain");
    };
    let matches = manager.search_content(query, context)?;
    let matches = filter_by_metadata(&manager, matches, tag, since, contributor);
    if json {
        print_content_json(&matches);
        return Ok(());
    }
    if matches.is_empty() {
        println!("No knowledge pages match \"{query}\". Consider adding one.");
        return Ok(());
    }
    for (i, m) in matches.iter().enumerate() {
        // Blank line between result groups.
        if i > 0 {
            println!();
        }
        println!("{}.md (line {}):", m.slug, m.line_number);
        for (line_num, line) in &m.context_lines {
            println!(" {line_num:>4} | {line}");
        }
    }
    Ok(())
}
/// Narrow full-text search matches by page-level metadata (tag, minimum
/// updated date, contributor).
///
/// When no filter is active the matches pass through untouched. When any
/// filter is active, matches whose page cannot be read or has no valid
/// frontmatter are dropped.
fn filter_by_metadata(
    manager: &KnowledgeManager,
    matches: Vec<crate::knowledge::SearchMatch>,
    tag: Option<&str>,
    since: Option<&str>,
    contributor: Option<&str>,
) -> Vec<crate::knowledge::SearchMatch> {
    if tag.is_none() && since.is_none() && contributor.is_none() {
        return matches;
    }
    matches
        .into_iter()
        .filter(|m| {
            let Ok(content) = manager.read_page(&m.slug) else {
                return false;
            };
            let Some(fm) = parse_frontmatter(&content) else {
                return false;
            };
            // An inactive filter (None) always passes.
            let tag_ok = tag.map_or(true, |t| fm.tags.iter().any(|x| x == t));
            let since_ok = since.map_or(true, |s| fm.updated.as_str() >= s);
            let contributor_ok =
                contributor.map_or(true, |c| fm.contributors.iter().any(|x| x == c));
            tag_ok && since_ok && contributor_ok
        })
        .collect()
}
/// List pages whose sources cite `domain` (case-insensitive substring
/// match on the source URL), printing only the matching sources per page.
fn search_sources(manager: &KnowledgeManager, domain: &str, json: bool) -> Result<()> {
    let pages = manager.search_sources(domain)?;
    if json {
        print_sources_json(&pages);
        return Ok(());
    }
    if pages.is_empty() {
        println!("No knowledge pages cite \"{domain}\". Consider adding one.");
        return Ok(());
    }
    // Lowercase the needle once instead of per source.
    let needle = domain.to_lowercase();
    for page in &pages {
        println!("{}.md — {}", page.slug, page.frontmatter.title);
        for src in &page.frontmatter.sources {
            if !src.url.to_lowercase().contains(&needle) {
                continue;
            }
            print!(" {} ({})", src.url, src.title);
            if let Some(ref accessed) = src.accessed_at {
                print!(" [accessed: {accessed}]");
            }
            println!();
        }
    }
    Ok(())
}
/// Emit search matches as a JSON array on stdout, hand-serialized with
/// `serde_json_string` for string escaping.
fn print_content_json(matches: &[crate::knowledge::SearchMatch]) {
    let mut entries = Vec::with_capacity(matches.len());
    for m in matches {
        let context: Vec<String> = m
            .context_lines
            .iter()
            .map(|(num, text)| {
                format!("{{\"line\":{},\"text\":{}}}", num, serde_json_string(text))
            })
            .collect();
        entries.push(format!(
            "{{\"slug\":{},\"line_number\":{},\"context\":[{}]}}",
            serde_json_string(&m.slug),
            m.line_number,
            context.join(",")
        ));
    }
    println!("[{}]", entries.join(","));
}
/// Emit pages and their cited sources as a JSON array on stdout.
/// A missing `accessed_at` is serialized as JSON `null`.
fn print_sources_json(pages: &[crate::knowledge::PageInfo]) {
    let mut entries = Vec::with_capacity(pages.len());
    for page in pages {
        let mut sources = Vec::with_capacity(page.frontmatter.sources.len());
        for src in &page.frontmatter.sources {
            let accessed = match src.accessed_at {
                Some(ref a) => serde_json_string(a),
                None => "null".to_string(),
            };
            sources.push(format!(
                "{{\"url\":{},\"title\":{},\"accessed_at\":{}}}",
                serde_json_string(&src.url),
                serde_json_string(&src.title),
                accessed
            ));
        }
        entries.push(format!(
            "{{\"slug\":{},\"title\":{},\"sources\":[{}]}}",
            serde_json_string(&page.slug),
            serde_json_string(&page.frontmatter.title),
            sources.join(",")
        ));
    }
    println!("[{}]", entries.join(","));
}
/// Emit the filtered page list as a JSON array of metadata objects
/// (no page bodies) on stdout.
fn print_list_json(pages: &[&crate::knowledge::PageInfo]) {
    let mut entries = Vec::with_capacity(pages.len());
    for page in pages {
        let tags = page
            .frontmatter
            .tags
            .iter()
            .map(|t| serde_json_string(t))
            .collect::<Vec<_>>()
            .join(",");
        let contributors = page
            .frontmatter
            .contributors
            .iter()
            .map(|c| serde_json_string(c))
            .collect::<Vec<_>>()
            .join(",");
        entries.push(format!(
            "{{\"slug\":{},\"title\":{},\"tags\":[{}],\"contributors\":[{}],\"created\":{},\"updated\":{}}}",
            serde_json_string(&page.slug),
            serde_json_string(&page.frontmatter.title),
            tags,
            contributors,
            serde_json_string(&page.frontmatter.created),
            serde_json_string(&page.frontmatter.updated),
        ));
    }
    println!("[{}]", entries.join(","));
}
/// Minimal JSON string encoder: wraps `s` in double quotes and escapes
/// quote, backslash, and control characters (U+0000..U+001F) per the JSON
/// grammar. Non-ASCII characters pass through unescaped.
fn serde_json_string(s: &str) -> String {
    let mut encoded = String::with_capacity(s.len() + 2);
    encoded.push('"');
    for ch in s.chars() {
        match ch {
            '\\' => encoded.push_str("\\\\"),
            '"' => encoded.push_str("\\\""),
            '\n' => encoded.push_str("\\n"),
            '\r' => encoded.push_str("\\r"),
            '\t' => encoded.push_str("\\t"),
            // Remaining control chars get the generic \uXXXX form;
            // writing to a String cannot fail, so the result is ignored.
            control if (control as u32) < 0x20 => {
                let _ = write!(encoded, "\\u{:04x}", control as u32);
            }
            other => encoded.push(other),
        }
    }
    encoded.push('"');
    encoded
}
#[cfg(test)]
mod tests {
use super::*;
use crate::knowledge::edit::{find_section_range, parse_heading};
use crate::knowledge::{PageFrontmatter, Source, KNOWLEDGE_CACHE_DIR};
use tempfile::tempdir;
fn setup_km() -> (KnowledgeManager, tempfile::TempDir) {
let dir = tempdir().unwrap();
let crosslink_dir = dir.path().join(".crosslink");
let cache_dir = crosslink_dir.join(KNOWLEDGE_CACHE_DIR);
std::fs::create_dir_all(&cache_dir).unwrap();
let km = KnowledgeManager::new(&crosslink_dir).unwrap();
(km, dir)
}
#[test]
fn test_extract_body_with_frontmatter() {
let content = "---\ntitle: Test\ntags: []\n---\n\n# Test\n\nBody text.\n";
let body = extract_body(content);
assert_eq!(body, "\n# Test\n\nBody text.\n");
}
#[test]
fn test_extract_body_no_frontmatter() {
let content = "# Just a heading\n\nNo frontmatter.\n";
let body = extract_body(content);
assert_eq!(body, content);
}
#[test]
fn test_extract_body_crlf() {
let content = "---\r\ntitle: Test\r\ntags: []\r\n---\r\n\r\n# Test\r\n\r\nBody text.\r\n";
let body = extract_body(content);
assert!(
body.starts_with("\r\n# Test") || body.starts_with("\n# Test"),
"got: {body:?}"
);
assert!(!body.contains("title: Test"));
}
#[test]
fn test_truncate_short() {
assert_eq!(truncate("hello", 10), "hello");
}
#[test]
fn test_truncate_long() {
assert_eq!(truncate("hello world foo bar", 10), "hello w...");
}
#[test]
fn test_add_creates_file_with_correct_frontmatter() {
let (km, dir) = setup_km();
let crosslink_dir = dir.path().join(".crosslink");
let tags = vec!["rust".to_string(), "testing".to_string()];
let sources = ["https://example.com".to_string()];
let now = Utc::now().format("%Y-%m-%d").to_string();
let fm = PageFrontmatter {
title: "Rust Testing Patterns".to_string(),
tags,
sources: sources
.iter()
.map(|url| Source {
url: url.clone(),
title: String::new(),
accessed_at: Some(now.clone()),
})
.collect(),
contributors: vec![current_agent_id(&crosslink_dir)],
created: now.clone(),
updated: now,
};
let mut page_content = serialize_frontmatter(&fm);
page_content.push_str("\n# Rust Testing Patterns\n");
km.write_page("rust-testing-patterns", &page_content)
.unwrap();
let read_back = km.read_page("rust-testing-patterns").unwrap();
let parsed = parse_frontmatter(&read_back).unwrap();
assert_eq!(parsed.title, "Rust Testing Patterns");
assert_eq!(parsed.tags, vec!["rust", "testing"]);
assert_eq!(parsed.sources.len(), 1);
assert_eq!(parsed.sources[0].url, "https://example.com");
}
#[test]
fn test_add_with_content() {
let (km, _dir) = setup_km();
let now = Utc::now().format("%Y-%m-%d").to_string();
let fm = PageFrontmatter {
title: "Test".to_string(),
tags: Vec::new(),
sources: Vec::new(),
contributors: vec!["test-agent".to_string()],
created: now.clone(),
updated: now,
};
let mut page_content = serialize_frontmatter(&fm);
page_content.push_str("\nCustom body content\n");
km.write_page("test-page", &page_content).unwrap();
let read_back = km.read_page("test-page").unwrap();
assert!(read_back.contains("Custom body content"));
}
#[test]
fn test_show_displays_content() {
let (km, _dir) = setup_km();
let content =
"---\ntitle: Demo\ntags: [demo]\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\n# Demo\n\nSome text.\n";
km.write_page("demo", content).unwrap();
let read = km.read_page("demo").unwrap();
assert_eq!(read, content);
let fm = parse_frontmatter(&read).unwrap();
assert_eq!(fm.title, "Demo");
}
#[test]
fn test_list_filters_by_tag() {
let (km, _dir) = setup_km();
let page_a = "---\ntitle: Alpha\ntags: [rust]\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nA\n";
let page_b = "---\ntitle: Beta\ntags: [python]\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nB\n";
km.write_page("alpha", page_a).unwrap();
km.write_page("beta", page_b).unwrap();
let pages = km.list_pages().unwrap();
let rust_pages: Vec<_> = pages
.iter()
.filter(|p| p.frontmatter.tags.iter().any(|t| t == "rust"))
.collect();
assert_eq!(rust_pages.len(), 1);
assert_eq!(rust_pages[0].slug, "alpha");
let python_pages: Vec<_> = pages
.iter()
.filter(|p| p.frontmatter.tags.iter().any(|t| t == "python"))
.collect();
assert_eq!(python_pages.len(), 1);
assert_eq!(python_pages[0].slug, "beta");
}
#[test]
fn test_list_filters_by_contributor() {
let (km, _dir) = setup_km();
let page_a = "---\ntitle: Alpha\ntags: []\nsources: []\ncontributors: [alice]\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nA\n";
let page_b = "---\ntitle: Beta\ntags: []\nsources: []\ncontributors: [bob]\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nB\n";
km.write_page("alpha", page_a).unwrap();
km.write_page("beta", page_b).unwrap();
let pages = km.list_pages().unwrap();
let alice_pages: Vec<_> = pages
.iter()
.filter(|p| p.frontmatter.contributors.iter().any(|c| c == "alice"))
.collect();
assert_eq!(alice_pages.len(), 1);
assert_eq!(alice_pages[0].slug, "alpha");
}
#[test]
fn test_edit_appends_content_and_updates_metadata() {
let (km, _dir) = setup_km();
let original = "---\ntitle: Test\ntags: [rust]\nsources: []\ncontributors: [alice]\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\n# Test\n\nOriginal content.\n";
km.write_page("test-page", original).unwrap();
let existing = km.read_page("test-page").unwrap();
let mut fm = parse_frontmatter(&existing).unwrap();
let now = Utc::now().format("%Y-%m-%d").to_string();
fm.updated = now;
if !fm.contributors.iter().any(|c| c == "bob") {
fm.contributors.push("bob".to_string());
}
let existing_body = extract_body(&existing);
let mut body = existing_body.to_string();
body.push_str("\n## Appended Section\n\nNew content.\n");
let mut page_content = serialize_frontmatter(&fm);
page_content.push('\n');
page_content.push_str(&body);
km.write_page("test-page", &page_content).unwrap();
let updated = km.read_page("test-page").unwrap();
assert!(updated.contains("Original content."));
assert!(updated.contains("Appended Section"));
assert!(updated.contains("New content."));
let updated_fm = parse_frontmatter(&updated).unwrap();
assert!(updated_fm.contributors.contains(&"alice".to_string()));
assert!(updated_fm.contributors.contains(&"bob".to_string()));
}
#[test]
fn test_edit_adds_source_without_duplicating() {
let (km, _dir) = setup_km();
let original = "---\ntitle: Test\ntags: []\nsources:\n - url: https://existing.com\n title: Existing\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nBody.\n";
km.write_page("test-page", original).unwrap();
let existing = km.read_page("test-page").unwrap();
let mut fm = parse_frontmatter(&existing).unwrap();
let existing_url = "https://existing.com";
if !fm.sources.iter().any(|s| s.url == existing_url) {
fm.sources.push(Source {
url: existing_url.to_string(),
title: String::new(),
accessed_at: None,
});
}
assert_eq!(fm.sources.len(), 1);
let new_url = "https://new.com";
if !fm.sources.iter().any(|s| s.url == new_url) {
fm.sources.push(Source {
url: new_url.to_string(),
title: String::new(),
accessed_at: None,
});
}
assert_eq!(fm.sources.len(), 2);
assert_eq!(fm.sources[0].url, "https://existing.com");
assert_eq!(fm.sources[1].url, "https://new.com");
}
#[test]
fn test_remove_deletes_page() {
let (km, _dir) = setup_km();
let content = "---\ntitle: Temp\ntags: []\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nTemp page.\n";
km.write_page("temp-page", content).unwrap();
assert!(km.page_exists("temp-page"));
km.delete_page("temp-page").unwrap();
assert!(!km.page_exists("temp-page"));
}
#[test]
fn test_remove_nonexistent_fails() {
let (km, _dir) = setup_km();
let result = km.delete_page("nonexistent");
assert!(result.is_err());
}
#[test]
fn test_remove_warns_about_broken_links() {
let (km, _dir) = setup_km();
let target = "---\ntitle: Target\ntags: []\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nTarget page.\n";
let referencing = "---\ntitle: Referencing\ntags: []\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nSee target-page for details.\n";
km.write_page("target-page", target).unwrap();
km.write_page("referencing-page", referencing).unwrap();
let pages = km.list_pages().unwrap();
let referencing_pages: Vec<_> = pages
.iter()
.filter(|p| p.slug != "target-page")
.filter(|p| {
km.read_page(&p.slug)
.is_ok_and(|content| content.contains("target-page"))
})
.collect();
assert_eq!(referencing_pages.len(), 1);
assert_eq!(referencing_pages[0].slug, "referencing-page");
}
#[test]
fn test_page_exists() {
let (km, _dir) = setup_km();
assert!(!km.page_exists("nope"));
km.write_page("exists", "content").unwrap();
assert!(km.page_exists("exists"));
}
#[test]
fn test_delete_page() {
let (km, _dir) = setup_km();
km.write_page("to-delete", "content").unwrap();
assert!(km.page_exists("to-delete"));
km.delete_page("to-delete").unwrap();
assert!(!km.page_exists("to-delete"));
}
#[test]
fn test_add_from_doc_creates_page() {
let (km, dir) = setup_km();
let doc_path = dir.path().join("design.md");
std::fs::write(
&doc_path,
"# Feature: Batch Retry\n\n## Summary\n\nRetry logic.\n\n## Requirements\n- REQ-1: Retry\n",
)
.unwrap();
let doc = crate::commands::design_doc::parse_design_doc(
&std::fs::read_to_string(&doc_path).unwrap(),
);
let now = chrono::Utc::now().format("%Y-%m-%d").to_string();
let tags = vec!["design-doc".to_string()];
let fm = PageFrontmatter {
title: doc.title.clone(),
tags,
sources: Vec::new(),
contributors: vec!["test-agent".to_string()],
created: now.clone(),
updated: now,
};
let mut page_content = serialize_frontmatter(&fm);
page_content.push('\n');
page_content.push_str(&crate::commands::design_doc::build_design_doc_section(&doc));
km.write_page("batch-retry", &page_content).unwrap();
let read_back = km.read_page("batch-retry").unwrap();
assert!(read_back.contains("Batch Retry"));
assert!(read_back.contains("Design Specification"));
assert!(read_back.contains("REQ-1: Retry"));
}
#[test]
fn test_add_from_doc_auto_tags() {
let tags: Vec<String> = vec!["existing-tag".to_string()];
let mut all_tags = tags;
if !all_tags.iter().any(|t| t == "design-doc") {
all_tags.push("design-doc".to_string());
}
assert!(all_tags.contains(&"design-doc".to_string()));
assert!(all_tags.contains(&"existing-tag".to_string()));
}
#[test]
fn test_add_from_doc_derives_title() {
let doc = crate::commands::design_doc::parse_design_doc("# Feature: My Great Feature\n");
let title: Option<&str> = None;
let display_title = if let Some(t) = title {
t.to_string()
} else if doc.title.is_empty() {
"fallback-slug".to_string()
} else {
doc.title
};
assert_eq!(display_title, "My Great Feature");
}
#[test]
fn test_add_from_doc_explicit_title_overrides() {
let doc = crate::commands::design_doc::parse_design_doc("# Feature: Doc Title\n");
let title: Option<&str> = Some("Explicit Title");
let display_title = if let Some(t) = title {
t.to_string()
} else if doc.title.is_empty() {
"fallback".to_string()
} else {
doc.title
};
assert_eq!(display_title, "Explicit Title");
}
#[test]
fn test_search_filter_by_tag() {
let (km, _dir) = setup_km();
let page_a = "---\ntitle: Alpha\ntags: [rust]\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nshared keyword here\n";
let page_b = "---\ntitle: Beta\ntags: [python]\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nshared keyword here\n";
km.write_page("alpha", page_a).unwrap();
km.write_page("beta", page_b).unwrap();
let matches = km.search_content("shared keyword", 0).unwrap();
assert_eq!(matches.len(), 2);
let filtered = filter_by_metadata(&km, matches, Some("rust"), None, None);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].slug, "alpha");
}
#[test]
fn test_search_filter_by_since() {
let (km, _dir) = setup_km();
let page_old = "---\ntitle: Old\ntags: []\nsources: []\ncontributors: []\ncreated: 2025-01-01\nupdated: 2025-06-01\n---\n\ncommon text\n";
let page_new = "---\ntitle: New\ntags: []\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-02-01\n---\n\ncommon text\n";
km.write_page("old-page", page_old).unwrap();
km.write_page("new-page", page_new).unwrap();
let matches = km.search_content("common text", 0).unwrap();
assert_eq!(matches.len(), 2);
let filtered = filter_by_metadata(&km, matches, None, Some("2026-01-01"), None);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].slug, "new-page");
}
#[test]
fn test_search_filter_by_contributor() {
let (km, _dir) = setup_km();
let page_a = "---\ntitle: A\ntags: []\nsources: []\ncontributors: [alice]\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nfindme\n";
let page_b = "---\ntitle: B\ntags: []\nsources: []\ncontributors: [bob]\ncreated: 2026-01-01\nupdated: 2026-01-01\n---\n\nfindme\n";
km.write_page("a-page", page_a).unwrap();
km.write_page("b-page", page_b).unwrap();
let matches = km.search_content("findme", 0).unwrap();
assert_eq!(matches.len(), 2);
let filtered = filter_by_metadata(&km, matches, None, None, Some("bob"));
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].slug, "b-page");
}
#[test]
fn test_list_filter_by_since() {
let (km, _dir) = setup_km();
let page_old = "---\ntitle: Old\ntags: []\nsources: []\ncontributors: []\ncreated: 2025-01-01\nupdated: 2025-06-01\n---\n\nold\n";
let page_new = "---\ntitle: New\ntags: []\nsources: []\ncontributors: []\ncreated: 2026-01-01\nupdated: 2026-03-01\n---\n\nnew\n";
km.write_page("old", page_old).unwrap();
km.write_page("new", page_new).unwrap();
let pages = km.list_pages().unwrap();
let filtered: Vec<_> = pages
.iter()
.filter(|p| p.frontmatter.updated.as_str() >= "2026-01-01")
.collect();
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].slug, "new");
}
#[test]
fn test_infer_slug_simple() {
assert_eq!(infer_slug(Path::new("readme.md")), "readme");
assert_eq!(infer_slug(Path::new("my-design.md")), "my-design");
}
#[test]
fn test_infer_slug_with_parent() {
    // Directory components are joined into the slug with hyphens.
    let cases = [
        ("api/design.md", "api-design"),
        ("arch/api/overview.md", "arch-api-overview"),
    ];
    for (path, want) in cases {
        assert_eq!(infer_slug(Path::new(path)), want);
    }
}
#[test]
fn test_infer_tags_from_path() {
    // Each ancestor directory becomes a tag, in order; the filename does not.
    let inferred = infer_tags_from_path(Path::new("arch/api/design.md"));
    assert_eq!(inferred, vec!["arch", "api"]);
}
#[test]
fn test_infer_tags_root_file() {
    // A file at the root has no parent directories, hence no tags.
    let inferred = infer_tags_from_path(Path::new("readme.md"));
    assert!(inferred.is_empty());
}
#[test]
fn test_import_preserves_existing_frontmatter() {
    let (manager, tmp) = setup_km();
    let raw = "---\ntitle: Existing Title\ntags: [original]\nsources: []\ncontributors: [alice]\ncreated: 2026-01-01\nupdated: 2026-01-15\n---\n\nBody content.\n";
    // Write the source document to disk first, then import it.
    let src = tmp.path().join("test.md");
    std::fs::write(&src, raw).unwrap();
    import_single_file(
        &manager,
        &src,
        "test-import",
        &["docs".to_string()],
        &["extra".to_string()],
        "bot",
        "2026-03-01",
    )
    .unwrap();
    let stored = manager.read_page("test-import").unwrap();
    let fm = parse_frontmatter(&stored).unwrap();
    // Pre-existing frontmatter fields survive the import...
    assert_eq!(fm.title, "Existing Title");
    assert!(fm.tags.contains(&"original".to_string()));
    assert!(fm.contributors.contains(&"alice".to_string()));
    // ...and import-supplied tags and contributor are merged in.
    assert!(fm.tags.contains(&"docs".to_string()));
    assert!(fm.tags.contains(&"extra".to_string()));
    assert!(fm.contributors.contains(&"bot".to_string()));
    assert!(stored.contains("Body content."));
}
#[test]
fn test_import_generates_frontmatter() {
    let (manager, tmp) = setup_km();
    let raw = "# Just a heading\n\nSome body text.\n";
    // Stage a plain markdown file with no frontmatter of its own.
    let src = tmp.path().join("my-doc.md");
    std::fs::write(&src, raw).unwrap();
    import_single_file(
        &manager,
        &src,
        "my-doc",
        &[],
        &["imported".to_string()],
        "bot",
        "2026-03-01",
    )
    .unwrap();
    let stored = manager.read_page("my-doc").unwrap();
    let fm = parse_frontmatter(&stored).unwrap();
    // Frontmatter is synthesized from the slug and import arguments.
    assert_eq!(fm.title, "my doc");
    assert!(fm.tags.contains(&"imported".to_string()));
    assert_eq!(fm.contributors, vec!["bot"]);
    assert_eq!(fm.created, "2026-03-01");
    // The original body is carried over untouched.
    assert!(stored.contains("# Just a heading"));
}
#[test]
fn test_parse_heading_valid() {
    // Levels 1 through 6 with a space after the hashes parse as headings.
    let cases = [
        ("# Title", 1, "Title"),
        ("## Section", 2, "Section"),
        ("### Sub", 3, "Sub"),
        ("###### Deep", 6, "Deep"),
    ];
    for (line, level, text) in cases {
        assert_eq!(parse_heading(line), Some((level, text)));
    }
}
#[test]
fn test_parse_heading_invalid() {
    // No hashes, missing space after hashes, depth > 6, and the empty
    // string are all rejected.
    for line in ["not a heading", "#no space", "####### too deep", ""] {
        assert_eq!(parse_heading(line), None);
    }
}
#[test]
fn test_find_section_range_basic() {
    // "## Architecture" starts at line index 4; its range ends at index 8,
    // where "## Notes" begins.
    let body = "# Title\n\nIntro text.\n\n## Architecture\n\nArch content.\n\n## Notes\n\nNote content.\n";
    let lines: Vec<&str> = body.lines().collect();
    let (start, end) = find_section_range(&lines, "## Architecture").unwrap();
    assert_eq!(start, 4);
    assert_eq!(end, 8);
}
#[test]
fn test_find_section_range_last_section() {
    // A final section with no following sibling extends to end-of-document.
    let body = "# Title\n\nIntro.\n\n## Last Section\n\nLast content.\n";
    let lines: Vec<&str> = body.lines().collect();
    let (start, end) = find_section_range(&lines, "## Last Section").unwrap();
    assert_eq!(start, 4);
    assert_eq!(end, lines.len());
}
#[test]
fn test_find_section_range_without_hashes_in_query() {
    // The query may omit the leading hashes and still match the heading.
    let body = "# Title\n\n## Architecture\n\nContent.\n";
    let lines: Vec<&str> = body.lines().collect();
    let range = find_section_range(&lines, "Architecture").unwrap();
    assert_eq!(range.0, 2);
}
#[test]
fn test_find_section_range_not_found() {
    let body = "# Title\n\n## Existing\n\nContent.\n";
    let lines: Vec<&str> = body.lines().collect();
    // A missing section yields an error whose message mentions "not found".
    let err = find_section_range(&lines, "## Missing").unwrap_err();
    assert!(err.to_string().contains("not found"));
}
#[test]
fn test_find_section_subsections_included() {
    // A section's range swallows its deeper subsections and stops only at
    // a same-level sibling.
    let body = "## Parent\n\nParent content.\n\n### Child\n\nChild content.\n\n## Sibling\n\nSibling.\n";
    let lines: Vec<&str> = body.lines().collect();
    let (first, boundary) = find_section_range(&lines, "## Parent").unwrap();
    assert_eq!(first, 0);
    assert_eq!(lines[boundary], "## Sibling");
}
#[test]
fn test_replace_section_content() {
    let body = "# Title\n\nIntro.\n\n## Architecture\n\nOld arch content.\nMore old content.\n\n## Notes\n\nNote text.\n";
    let updated = replace_section_content(body, "## Architecture", "New arch content.").unwrap();
    // Text outside the target section — and the heading itself — survive.
    for kept in ["# Title", "Intro.", "## Architecture", "## Notes", "Note text."] {
        assert!(updated.contains(kept));
    }
    // The replacement fully supplants the old section body.
    assert!(updated.contains("New arch content."));
    assert!(!updated.contains("Old arch content."));
    assert!(!updated.contains("More old content."));
}
#[test]
fn test_replace_section_last_section() {
    // Replacing the final section works even with no trailing sibling.
    let body = "# Title\n\n## Only Section\n\nOld content.\n";
    let replaced = replace_section_content(body, "## Only Section", "Replaced.").unwrap();
    assert!(replaced.contains("## Only Section"));
    assert!(replaced.contains("Replaced."));
    assert!(!replaced.contains("Old content."));
}
#[test]
fn test_replace_section_not_found() {
    // Targeting a nonexistent section is an error, not a no-op.
    let body = "# Title\n\n## Existing\n\nContent.\n";
    let outcome = replace_section_content(body, "## Missing", "new");
    assert!(outcome.is_err());
}
#[test]
fn test_append_to_section_content() {
    let body = "# Title\n\nIntro.\n\n## Notes\n\nExisting note.\n\n## Other\n\nOther text.\n";
    let updated = append_to_section_content(body, "## Notes", "Appended note.").unwrap();
    // Existing text in and around the section is preserved.
    assert!(updated.contains("Existing note."));
    assert!(updated.contains("## Other"));
    assert!(updated.contains("Other text."));
    // The appended text lands inside "## Notes", before the next section
    // (find().unwrap() also proves the text is present at all).
    let appended_at = updated.find("Appended note.").unwrap();
    let next_section_at = updated.find("## Other").unwrap();
    assert!(appended_at < next_section_at);
}
#[test]
fn test_append_to_section_last_section() {
    // Appending to the final section works with no trailing sibling.
    let body = "# Title\n\n## Notes\n\nExisting.\n";
    let appended = append_to_section_content(body, "## Notes", "More text.").unwrap();
    assert!(appended.contains("Existing."));
    assert!(appended.contains("More text."));
}
#[test]
fn test_append_to_section_not_found() {
    // Appending to a nonexistent section is an error, not a silent insert.
    let body = "# Title\n\n## Existing\n\nContent.\n";
    let outcome = append_to_section_content(body, "## Missing", "new");
    assert!(outcome.is_err());
}
#[test]
fn test_replace_section_preserves_subsections_of_siblings() {
    // Replacing "## A" drops its own subsection "### A1", but leaves the
    // sibling "## B" and B's subsection "### B1" intact.
    let body = "## A\n\nA content.\n\n### A1\n\nA1 content.\n\n## B\n\n### B1\n\nB1 content.\n";
    let result = replace_section_content(body, "## A", "New A content.").unwrap();
    assert!(result.contains("## A"));
    assert!(result.contains("New A content."));
    assert!(!result.contains("A1 content."));
    assert!(result.contains("## B"));
    assert!(result.contains("### B1"));
    assert!(result.contains("B1 content."));
}
#[test]
fn test_section_edit_query_without_hash_prefix() {
    // Section editing also accepts a bare heading name without hashes.
    let body = "# Title\n\n## Architecture\n\nArch content.\n\n## Notes\n\nNote.\n";
    let edited = replace_section_content(body, "Architecture", "New arch.").unwrap();
    assert!(edited.contains("New arch."));
    assert!(!edited.contains("Arch content."));
}
#[test]
fn test_list_json_output() {
// Builds the list command's JSON output by hand — the same manual
// serialization scheme the CLI uses — and verifies the result parses
// into the expected structure via serde_json.
let (km, _dir) = setup_km();
let page = "---\ntitle: Test Page\ntags: [rust, cli]\nsources: []\ncontributors: [alice]\ncreated: 2026-01-15\nupdated: 2026-02-20\n---\n\nbody\n";
km.write_page("test-page", page).unwrap();
let pages = km.list_pages().unwrap();
let refs: Vec<&crate::knowledge::PageInfo> = pages.iter().collect();
// One hand-assembled JSON object per page: serde_json_string handles
// value escaping; the object/array punctuation is literal format! text.
let entries: Vec<String> = refs
.iter()
.map(|p| {
format!(
"{{\"slug\":{},\"title\":{},\"tags\":[{}],\"contributors\":[{}],\"created\":{},\"updated\":{}}}",
serde_json_string(&p.slug),
serde_json_string(&p.frontmatter.title),
p.frontmatter.tags.iter().map(|t| serde_json_string(t)).collect::<Vec<_>>().join(","),
p.frontmatter.contributors.iter().map(|c| serde_json_string(c)).collect::<Vec<_>>().join(","),
serde_json_string(&p.frontmatter.created),
serde_json_string(&p.frontmatter.updated),
)
})
.collect();
let json_str = format!("[{}]", entries.join(","));
// Round-tripping through serde_json proves the hand-built string is
// valid JSON and carries the exact field values written above.
let parsed: serde_json::Value = serde_json::from_str(&json_str).unwrap();
let arr = parsed.as_array().unwrap();
assert_eq!(arr.len(), 1);
assert_eq!(arr[0]["slug"], "test-page");
assert_eq!(arr[0]["title"], "Test Page");
assert_eq!(arr[0]["tags"], serde_json::json!(["rust", "cli"]));
assert_eq!(arr[0]["contributors"], serde_json::json!(["alice"]));
assert_eq!(arr[0]["created"], "2026-01-15");
assert_eq!(arr[0]["updated"], "2026-02-20");
}
}