use crate::issue_file::{
read_counters, read_issue_file, write_counters, write_issue_file, IssueFile,
};
use crate::models::{IssueStatus, Priority};
use crate::shared_writer::core::{
PushOutcome, SharedWriter, LOCK_CONFIRM_TIMEOUT_SECS, MAX_RETRIES,
};
use crate::shared_writer::locks::LockClaimResult;
use crate::shared_writer::mutations::DescriptionUpdate;
use crate::shared_writer::offline::{replace_local_refs, RewriteStats};
use anyhow::{bail, Result};
use chrono::Utc;
use std::path::Path;
use tempfile::tempdir;
use uuid::Uuid;
/// Build a minimal open `IssueFile` fixture with the given display id and title.
///
/// All optional/collection fields are left empty; timestamps are taken at call
/// time, and the author is fixed to "test-agent".
fn make_issue(display_id: i64, title: &str) -> IssueFile {
    IssueFile {
        uuid: Uuid::new_v4(),
        display_id: Some(display_id),
        title: title.to_owned(),
        description: None,
        status: IssueStatus::Open,
        priority: Priority::Medium,
        parent_uuid: None,
        milestone_uuid: None,
        created_by: String::from("test-agent"),
        created_at: Utc::now(),
        updated_at: Utc::now(),
        closed_at: None,
        labels: Vec::new(),
        comments: Vec::new(),
        blockers: Vec::new(),
        related: Vec::new(),
        time_entries: Vec::new(),
    }
}
#[test]
fn test_new_returns_none_without_agent_config() {
    // Without an agent.json under .crosslink, SharedWriter::new yields None.
    let tmp = tempdir().unwrap();
    let crosslink = tmp.path().join(".crosslink");
    std::fs::create_dir_all(&crosslink).unwrap();
    assert!(SharedWriter::new(&crosslink).unwrap().is_none());
}
#[test]
fn test_claim_display_id() {
    // Counters start at 1; claiming an id bumps next_display_id and persists.
    let tmp = tempdir().unwrap();
    let meta = tmp.path().join("meta");
    std::fs::create_dir_all(&meta).unwrap();
    let path = meta.join("counters.json");
    let mut counters = read_counters(&path).unwrap();
    assert_eq!(counters.next_display_id, 1);
    let claimed = counters.next_display_id;
    counters.next_display_id += 1;
    write_counters(&path, &counters).unwrap();
    assert_eq!(claimed, 1);
    // Reload from disk: the increment must have been persisted.
    assert_eq!(read_counters(&path).unwrap().next_display_id, 2);
}
#[test]
fn test_load_issue_by_display_id() {
    // Scanning the issues dir by display id returns the matching file.
    let tmp = tempdir().unwrap();
    let issues = tmp.path().join("issues");
    std::fs::create_dir_all(&issues).unwrap();
    let first = make_issue(1, "First");
    let second = make_issue(2, "Second");
    write_issue_file(&issues.join(format!("{}.json", first.uuid)), &first).unwrap();
    write_issue_file(&issues.join(format!("{}.json", second.uuid)), &second).unwrap();
    let hit = scan_for_display_id(&issues, 2).unwrap();
    assert_eq!(hit.uuid, second.uuid);
    assert_eq!(hit.title, "Second");
}
#[test]
fn test_load_issue_by_display_id_not_found() {
    // An empty issues dir must yield an error for any display id.
    let tmp = tempdir().unwrap();
    let issues = tmp.path().join("issues");
    std::fs::create_dir_all(&issues).unwrap();
    assert!(scan_for_display_id(&issues, 99).is_err());
}
#[test]
fn test_resolve_uuid_from_files() {
    // The scan resolves a display id back to the issue's UUID.
    let tmp = tempdir().unwrap();
    let issues = tmp.path().join("issues");
    std::fs::create_dir_all(&issues).unwrap();
    let target = make_issue(42, "Target");
    write_issue_file(&issues.join(format!("{}.json", target.uuid)), &target).unwrap();
    assert_eq!(scan_for_display_id(&issues, 42).unwrap().uuid, target.uuid);
}
#[test]
fn test_counters_sequential_claim() {
    // Claiming three ids in a row yields 1, 2, 3 and persists next_display_id = 4.
    let tmp = tempdir().unwrap();
    let meta = tmp.path().join("meta");
    std::fs::create_dir_all(&meta).unwrap();
    let path = meta.join("counters.json");
    let mut counters = read_counters(&path).unwrap();
    let mut ids: Vec<i64> = Vec::with_capacity(3);
    for _ in 0..3 {
        ids.push(counters.next_display_id);
        counters.next_display_id += 1;
    }
    write_counters(&path, &counters).unwrap();
    assert_eq!(ids, vec![1, 2, 3]);
    assert_eq!(read_counters(&path).unwrap().next_display_id, 4);
}
#[test]
fn test_replace_local_refs_basic() {
    // Both local refs are substituted in a single pass.
    let map = [
        ("L1".to_string(), "#5".to_string()),
        ("L2".to_string(), "#6".to_string()),
    ];
    assert_eq!(
        replace_local_refs("See L1 and L2 for details", &map),
        Some("See #5 and #6 for details".to_string())
    );
}
#[test]
fn test_replace_local_refs_no_match() {
    // Text without any local refs yields None (no rewrite needed).
    let map = [("L1".to_string(), "#5".to_string())];
    assert!(replace_local_refs("No local refs here", &map).is_none());
}
#[test]
fn test_replace_local_refs_non_matching_id() {
    // A local ref that is absent from the replacement map is left alone.
    let map = [("L1".to_string(), "#5".to_string())];
    assert!(replace_local_refs("See L99 for info", &map).is_none());
}
#[test]
fn test_replace_local_refs_word_boundary() {
    // "FILE1" must not match the L1 ref; trailing punctuation is fine.
    let single = [("L1".to_string(), "#5".to_string())];
    assert!(replace_local_refs("Check FILE1 now", &single).is_none());
    assert_eq!(
        replace_local_refs("Fixed L1.", &single),
        Some("Fixed #5.".to_string())
    );
    // Multiple refs separated by punctuation are all rewritten.
    let pair = [
        ("L1".to_string(), "#5".to_string()),
        ("L2".to_string(), "#6".to_string()),
    ];
    assert_eq!(
        replace_local_refs("L1, L2 are done", &pair),
        Some("#5, #6 are done".to_string())
    );
}
#[test]
fn test_replace_local_refs_start_end() {
    // Refs are replaced at the start, at the end, and as the entire string.
    let map = [("L1".to_string(), "#5".to_string())];
    for (input, expected) in [
        ("L1 is done", "#5 is done"),
        ("Working on L1", "Working on #5"),
        ("L1", "#5"),
    ] {
        assert_eq!(replace_local_refs(input, &map), Some(expected.to_string()));
    }
}
/// Linearly scan `issues_dir` for a `*.json` issue file whose display id matches.
///
/// Files that fail to parse are skipped; bails with an error if nothing matches.
fn scan_for_display_id(issues_dir: &Path, display_id: i64) -> Result<IssueFile> {
    for entry in std::fs::read_dir(issues_dir)? {
        let path = entry?.path();
        let is_json = matches!(path.extension().and_then(|e| e.to_str()), Some("json"));
        if !is_json {
            continue;
        }
        match read_issue_file(&path) {
            Ok(issue) if issue.display_id == Some(display_id) => return Ok(issue),
            _ => {}
        }
    }
    bail!("Issue #{display_id} not found")
}
#[test]
fn test_v1_issue_path_format() {
    // v1 layout stores each issue flat as issues/<uuid>.json.
    let uuid = Uuid::parse_str("a1b2c3d4-e5f6-7890-abcd-ef1234567890").unwrap();
    let rendered = format!("issues/{uuid}.json");
    assert_eq!(rendered, "issues/a1b2c3d4-e5f6-7890-abcd-ef1234567890.json");
}
#[test]
fn test_v2_issue_path_format() {
    // v2 layout nests each issue under its own directory.
    let uuid = Uuid::parse_str("a1b2c3d4-e5f6-7890-abcd-ef1234567890").unwrap();
    let expected = "issues/a1b2c3d4-e5f6-7890-abcd-ef1234567890/issue.json";
    assert_eq!(format!("issues/{uuid}/issue.json"), expected);
}
#[test]
fn test_v2_comment_path_format() {
    // v2 comments live under issues/<issue>/comments/<comment>.json.
    let issue_uuid = Uuid::parse_str("a1b2c3d4-e5f6-7890-abcd-ef1234567890").unwrap();
    let comment_uuid = Uuid::parse_str("11111111-2222-3333-4444-555555555555").unwrap();
    let rendered = format!("issues/{issue_uuid}/comments/{comment_uuid}.json");
    assert_eq!(
        rendered,
        "issues/a1b2c3d4-e5f6-7890-abcd-ef1234567890/comments/11111111-2222-3333-4444-555555555555.json"
    );
}
#[test]
fn test_v2_scan_finds_issue_in_subdirectory() {
    // v2 layout: issues/<uuid>/issue.json — a directory scan must locate it.
    let tmp = tempdir().unwrap();
    let issues = tmp.path().join("issues");
    let issue = make_issue(7, "V2 Issue");
    let subdir = issues.join(issue.uuid.to_string());
    std::fs::create_dir_all(subdir.join("comments")).unwrap();
    write_issue_file(&subdir.join("issue.json"), &issue).unwrap();
    let mut found = false;
    for entry in std::fs::read_dir(&issues).unwrap() {
        let path = entry.unwrap().path();
        // Guard clauses: only directories containing an issue.json qualify.
        if !path.is_dir() {
            continue;
        }
        let issue_file = path.join("issue.json");
        if !issue_file.exists() {
            continue;
        }
        if let Ok(loaded) = read_issue_file(&issue_file) {
            if loaded.display_id == Some(7) {
                assert_eq!(loaded.title, "V2 Issue");
                found = true;
            }
        }
    }
    assert!(found, "v2 issue not found in subdirectory scan");
}
#[test]
fn test_v2_comment_file_construction() {
    // A plain "note" comment survives a JSON round trip intact.
    use crate::issue_file::CommentFile;
    let issue_uuid = Uuid::parse_str("a1b2c3d4-e5f6-7890-abcd-ef1234567890").unwrap();
    let comment_uuid = Uuid::new_v4();
    let original = CommentFile {
        uuid: comment_uuid,
        issue_uuid,
        author: "test-agent".to_string(),
        content: "A standalone comment".to_string(),
        created_at: Utc::now(),
        kind: "note".to_string(),
        trigger_type: None,
        intervention_context: None,
        driver_key_fingerprint: None,
        signed_by: None,
        signature: None,
    };
    let parsed: CommentFile =
        serde_json::from_str(&serde_json::to_string_pretty(&original).unwrap()).unwrap();
    assert_eq!(parsed.uuid, comment_uuid);
    assert_eq!(parsed.issue_uuid, issue_uuid);
    assert_eq!(parsed.content, "A standalone comment");
    assert_eq!(parsed.kind, "note");
}
#[test]
fn test_v2_intervention_comment_file_construction() {
    // Intervention comments carry trigger/context/fingerprint metadata through JSON.
    use crate::issue_file::CommentFile;
    let issue_uuid = Uuid::parse_str("a1b2c3d4-e5f6-7890-abcd-ef1234567890").unwrap();
    let original = CommentFile {
        uuid: Uuid::new_v4(),
        issue_uuid,
        author: "test-agent".to_string(),
        content: "Driver intervention".to_string(),
        created_at: Utc::now(),
        kind: "intervention".to_string(),
        trigger_type: Some("redirect".to_string()),
        intervention_context: Some("User redirected task".to_string()),
        driver_key_fingerprint: Some("SHA256:abc123".to_string()),
        signed_by: None,
        signature: None,
    };
    let parsed: CommentFile =
        serde_json::from_str(&serde_json::to_string_pretty(&original).unwrap()).unwrap();
    assert_eq!(parsed.kind, "intervention");
    assert_eq!(parsed.trigger_type, Some("redirect".to_string()));
    assert_eq!(
        parsed.intervention_context,
        Some("User redirected task".to_string())
    );
    assert_eq!(
        parsed.driver_key_fingerprint,
        Some("SHA256:abc123".to_string())
    );
}
#[test]
fn test_lock_confirm_timeout_constant() {
    // Pin the lock-confirmation timeout so any change is deliberate.
    let expected_secs = 30;
    assert_eq!(LOCK_CONFIRM_TIMEOUT_SECS, expected_secs);
}
mod lock_v2_tests {
use super::*;
use crate::issue_file::LockFileV2;
use tempfile::tempdir;
#[test]
fn test_lock_claim_result_variants() {
    // All three variants support equality comparison and Debug formatting.
    let contended_with = |agent: &str| LockClaimResult::Contended {
        winner_agent_id: agent.to_string(),
    };
    let claimed = LockClaimResult::Claimed;
    let already = LockClaimResult::AlreadyHeld;
    let contended = contended_with("agent-2");
    assert_eq!(claimed, LockClaimResult::Claimed);
    assert_eq!(already, LockClaimResult::AlreadyHeld);
    assert_ne!(claimed, already);
    assert_ne!(claimed, contended);
    assert_eq!(contended, contended_with("agent-2"));
    // Variants must be Debug-formattable.
    let _ = format!("{claimed:?}");
    let _ = format!("{contended:?}");
}
#[test]
fn test_read_lock_v2_file() {
    // A serialized LockFileV2 written to locks/<id>.json parses back correctly.
    let tmp = tempdir().unwrap();
    let locks = tmp.path().join("locks");
    std::fs::create_dir_all(&locks).unwrap();
    let lock = LockFileV2 {
        issue_id: 42,
        agent_id: "agent-1".to_string(),
        branch: Some("feature/x".to_string()),
        claimed_at: chrono::Utc::now(),
        signed_by: Some("SHA256:abc".to_string()),
    };
    let path = locks.join("42.json");
    std::fs::write(&path, serde_json::to_string_pretty(&lock).unwrap()).unwrap();
    let parsed: LockFileV2 =
        serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap();
    assert_eq!(parsed.issue_id, 42);
    assert_eq!(parsed.agent_id, "agent-1");
    assert_eq!(parsed.branch, Some("feature/x".to_string()));
}
#[test]
fn test_read_lock_v2_missing() {
    // A lock path under a fresh tempdir must not exist.
    let tmp = tempdir().unwrap();
    assert!(!tmp.path().join("locks").join("99.json").exists());
}
#[test]
fn test_lock_v2_file_roundtrip() {
    // A lock with no branch and no signature survives write + read.
    let tmp = tempdir().unwrap();
    let locks = tmp.path().join("locks");
    std::fs::create_dir_all(&locks).unwrap();
    let original = LockFileV2 {
        issue_id: 5,
        agent_id: "worker-1".to_string(),
        branch: None,
        claimed_at: chrono::Utc::now(),
        signed_by: None,
    };
    let path = locks.join("5.json");
    std::fs::write(&path, serde_json::to_string_pretty(&original).unwrap()).unwrap();
    let parsed: LockFileV2 =
        serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap();
    assert_eq!(parsed.issue_id, original.issue_id);
    assert_eq!(parsed.agent_id, original.agent_id);
    assert!(parsed.branch.is_none());
    assert!(parsed.signed_by.is_none());
}
#[test]
fn test_lock_contention_deterministic_winner() {
// Two agents claim the same issue; the earlier-timestamped claim (agent-a)
// must win when compaction materializes the lock.
use crate::checkpoint::{read_checkpoint, write_checkpoint, CheckpointState};
use crate::events::{append_event, Event, EventEnvelope};
use chrono::Utc;
// Lay out a minimal hub cache: checkpoint/, per-agent event logs, locks/, issues/.
let dir = tempdir().unwrap();
let cache = dir.path();
std::fs::create_dir_all(cache.join("checkpoint")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-a")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-b")).unwrap();
std::fs::create_dir_all(cache.join("locks")).unwrap();
std::fs::create_dir_all(cache.join("issues")).unwrap();
let state = CheckpointState::default();
write_checkpoint(cache, &state).unwrap();
let now = Utc::now();
// agent-a claims issue 1 one second BEFORE agent-b's competing claim.
let e1 = EventEnvelope {
agent_id: "agent-a".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(1),
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-a/events.log"), &e1).unwrap();
// agent-b files a competing claim for the same issue at `now`.
let e2 = EventEnvelope {
agent_id: "agent-b".to_string(),
agent_seq: 1,
timestamp: now,
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-b/events.log"), &e2).unwrap();
// Compaction materializes exactly one lock for the contested issue...
let result = crate::compaction::compact(cache, "agent-a", true)
.unwrap()
.unwrap();
assert_eq!(result.locks_materialized, 1);
// ...and the checkpoint records the earlier claimant as the holder.
let state = read_checkpoint(cache).unwrap();
let lock = state.locks.get(&1).unwrap();
assert_eq!(lock.agent_id, "agent-a");
}
#[test]
fn test_prune_then_checkpoint_clear() {
// Scenario: a stale agent's events are already covered by the checkpoint
// watermark, so its log can be pruned; afterwards its lock is manually
// released and the cache must end up with no locks at all.
use crate::checkpoint::{write_checkpoint, CheckpointState, LockEntry};
use crate::events::{append_event, Event, EventEnvelope, OrderingKey};
use chrono::Utc;
let dir = tempdir().unwrap();
let cache = dir.path();
std::fs::create_dir_all(cache.join("checkpoint")).unwrap();
std::fs::create_dir_all(cache.join("agents/stale-agent")).unwrap();
std::fs::create_dir_all(cache.join("locks")).unwrap();
std::fs::create_dir_all(cache.join("issues")).unwrap();
let now = Utc::now();
// One LockClaimed event for issue 5 from the stale agent.
let e = EventEnvelope {
agent_id: "stale-agent".to_string(),
agent_seq: 1,
timestamp: now,
event: Event::LockClaimed {
issue_display_id: 5,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/stale-agent/events.log"), &e).unwrap();
// Watermark strictly after the event, so it counts as already compacted.
let watermark = OrderingKey {
timestamp: now + chrono::Duration::seconds(1),
agent_id: "stale-agent".to_string(),
agent_seq: 1,
};
let mut state = CheckpointState {
watermark: Some(watermark),
..CheckpointState::default()
};
// The checkpoint already reflects the materialized lock on issue 5.
state.locks.insert(
5,
LockEntry {
agent_id: "stale-agent".to_string(),
branch: None,
claimed_at: now,
},
);
write_checkpoint(cache, &state).unwrap();
// Matching on-disk lock file mirrors the checkpoint entry.
let lock = crate::issue_file::LockFileV2 {
issue_id: 5,
agent_id: "stale-agent".to_string(),
branch: None,
claimed_at: now,
signed_by: None,
};
std::fs::write(
cache.join("locks/5.json"),
serde_json::to_string_pretty(&lock).unwrap(),
)
.unwrap();
// Pruning should drop the already-compacted event(s).
let pruned = crate::compaction::prune_events(cache, "stale-agent").unwrap();
assert!(pruned > 0);
// Simulate a release: remove the checkpoint entry and the lock file.
let mut state = crate::checkpoint::read_checkpoint(cache).unwrap();
state.locks.remove(&5);
write_checkpoint(cache, &state).unwrap();
let lock_path = cache.join("locks/5.json");
if lock_path.exists() {
std::fs::remove_file(&lock_path).unwrap();
}
// Final state: no locks in the checkpoint and no lock file on disk.
let state = crate::checkpoint::read_checkpoint(cache).unwrap();
assert!(state.locks.is_empty());
assert!(!cache.join("locks/5.json").exists());
}
#[test]
fn test_lock_file_v2_with_all_fields() {
    // Every optional field populated: all must survive the JSON round trip.
    let tmp = tempdir().unwrap();
    let locks = tmp.path().join("locks");
    std::fs::create_dir_all(&locks).unwrap();
    let now = chrono::Utc::now();
    let original = LockFileV2 {
        issue_id: 100,
        agent_id: "agent-special".to_string(),
        branch: Some("feature/special-branch".to_string()),
        claimed_at: now,
        signed_by: Some("SHA256:xyz789".to_string()),
    };
    let path = locks.join("100.json");
    std::fs::write(&path, serde_json::to_string_pretty(&original).unwrap()).unwrap();
    let parsed: LockFileV2 =
        serde_json::from_str(&std::fs::read_to_string(&path).unwrap()).unwrap();
    assert_eq!(parsed.issue_id, 100);
    assert_eq!(parsed.agent_id, "agent-special");
    assert_eq!(parsed.branch, Some("feature/special-branch".to_string()));
    assert_eq!(parsed.claimed_at, now);
    assert_eq!(parsed.signed_by, Some("SHA256:xyz789".to_string()));
}
#[test]
fn test_lock_claim_result_display_and_equality() {
    // Contended values compare by winner id; clone preserves equality.
    let contended = |agent: &str| LockClaimResult::Contended {
        winner_agent_id: agent.to_string(),
    };
    let c1 = contended("agent-1");
    assert_ne!(c1, contended("agent-2"));
    assert_eq!(c1, contended("agent-1"));
    let cloned = c1.clone();
    assert_eq!(c1, cloned);
}
#[test]
fn test_lock_contention_with_three_agents() {
// Three agents race for issue 1; the earliest claim (agent-c at now-3s) must
// win regardless of log ordering, and its branch must be preserved.
use crate::checkpoint::{read_checkpoint, write_checkpoint, CheckpointState};
use crate::events::{append_event, Event, EventEnvelope};
use chrono::Utc;
let dir = tempdir().unwrap();
let cache = dir.path();
std::fs::create_dir_all(cache.join("checkpoint")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-a")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-b")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-c")).unwrap();
std::fs::create_dir_all(cache.join("locks")).unwrap();
std::fs::create_dir_all(cache.join("issues")).unwrap();
let state = CheckpointState::default();
write_checkpoint(cache, &state).unwrap();
let now = Utc::now();
// Earliest claim: agent-c at now-3s.
let e1 = EventEnvelope {
agent_id: "agent-c".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(3),
event: Event::LockClaimed {
issue_display_id: 1,
branch: Some("feature/c".to_string()),
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-c/events.log"), &e1).unwrap();
// Later claim: agent-a at now-2s.
let e2 = EventEnvelope {
agent_id: "agent-a".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(2),
event: Event::LockClaimed {
issue_display_id: 1,
branch: Some("feature/a".to_string()),
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-a/events.log"), &e2).unwrap();
// Latest claim: agent-b at now-1s.
let e3 = EventEnvelope {
agent_id: "agent-b".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(1),
event: Event::LockClaimed {
issue_display_id: 1,
branch: Some("feature/b".to_string()),
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-b/events.log"), &e3).unwrap();
// Compaction (run by agent-a, one of the losers) still awards agent-c.
let result = crate::compaction::compact(cache, "agent-a", true)
.unwrap()
.unwrap();
assert_eq!(result.locks_materialized, 1);
let state = read_checkpoint(cache).unwrap();
let lock = state.locks.get(&1).unwrap();
assert_eq!(lock.agent_id, "agent-c");
assert_eq!(lock.branch, Some("feature/c".to_string()));
}
#[test]
fn test_lock_contention_then_winner_releases() {
// agent-a wins the contested lock (earlier claim), then releases it; after
// compaction no lock should remain, in the checkpoint or on disk.
use crate::checkpoint::{read_checkpoint, write_checkpoint, CheckpointState};
use crate::events::{append_event, Event, EventEnvelope};
use chrono::Utc;
let dir = tempdir().unwrap();
let cache = dir.path();
std::fs::create_dir_all(cache.join("checkpoint")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-a")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-b")).unwrap();
std::fs::create_dir_all(cache.join("locks")).unwrap();
std::fs::create_dir_all(cache.join("issues")).unwrap();
let state = CheckpointState::default();
write_checkpoint(cache, &state).unwrap();
let now = Utc::now();
// agent-a claims first (now-3s) and therefore wins the contention.
let e1 = EventEnvelope {
agent_id: "agent-a".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(3),
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-a/events.log"), &e1).unwrap();
// agent-b's later competing claim (now-2s) loses.
let e2 = EventEnvelope {
agent_id: "agent-b".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(2),
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-b/events.log"), &e2).unwrap();
// The winner releases the lock afterwards (now-1s, seq 2).
let e3 = EventEnvelope {
agent_id: "agent-a".to_string(),
agent_seq: 2,
timestamp: now - chrono::Duration::seconds(1),
event: Event::LockReleased {
issue_display_id: 1,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-a/events.log"), &e3).unwrap();
crate::compaction::compact(cache, "agent-a", true).unwrap();
// After the release is folded in, no locks remain anywhere.
let state = read_checkpoint(cache).unwrap();
assert!(state.locks.is_empty());
assert!(!cache.join("locks/1.json").exists());
}
#[test]
fn test_lock_file_v2_missing_optional_fields() {
    // Explicit nulls for branch/signed_by must deserialize to None.
    let raw = r#"{
"issue_id": 7,
"agent_id": "agent-minimal",
"branch": null,
"claimed_at": "2026-01-01T00:00:00Z",
"signed_by": null
}"#;
    let parsed: LockFileV2 = serde_json::from_str(raw).unwrap();
    assert_eq!(parsed.issue_id, 7);
    assert_eq!(parsed.agent_id, "agent-minimal");
    assert!(parsed.branch.is_none());
    assert!(parsed.signed_by.is_none());
}
#[test]
fn test_lock_contention_deterministic_across_compaction_agents() {
// The contention winner must not depend on WHICH agent runs compaction:
// both iterations build identical event logs and must crown agent-a,
// the earlier claimant.
use crate::checkpoint::{read_checkpoint, write_checkpoint, CheckpointState};
use crate::events::{append_event, Event, EventEnvelope};
use chrono::Utc;
let now = Utc::now();
for compactor in &["agent-a", "agent-b"] {
// Fresh cache per iteration so the two compaction runs are independent.
let dir = tempdir().unwrap();
let cache = dir.path();
std::fs::create_dir_all(cache.join("checkpoint")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-a")).unwrap();
std::fs::create_dir_all(cache.join("agents/agent-b")).unwrap();
std::fs::create_dir_all(cache.join("locks")).unwrap();
std::fs::create_dir_all(cache.join("issues")).unwrap();
let state = CheckpointState::default();
write_checkpoint(cache, &state).unwrap();
// agent-a claims earlier (now-2s) than agent-b (now-1s).
let e1 = EventEnvelope {
agent_id: "agent-a".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(2),
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-a/events.log"), &e1).unwrap();
let e2 = EventEnvelope {
agent_id: "agent-b".to_string(),
agent_seq: 1,
timestamp: now - chrono::Duration::seconds(1),
event: Event::LockClaimed {
issue_display_id: 1,
branch: None,
},
signed_by: None,
signature: None,
};
append_event(&cache.join("agents/agent-b/events.log"), &e2).unwrap();
crate::compaction::compact(cache, compactor, true).unwrap();
let state = read_checkpoint(cache).unwrap();
assert_eq!(
state.locks[&1].agent_id, "agent-a",
"Winner should be agent-a regardless of who runs compaction (compactor={compactor})"
);
}
}
}
mod integration {
use super::*;
use crate::db::Database;
use crate::identity::AgentConfig;
use std::process::Command;
use tempfile::TempDir;
/// Run a git command in `dir`, panicking with stderr if it exits non-zero.
///
/// The previous version ignored exit codes (`.output().unwrap()` only checks
/// that git could be spawned), which turned a failed `git init`/`push` into a
/// confusing downstream failure; asserting success pinpoints the failing step.
fn run_git(dir: &std::path::Path, args: &[&str]) {
    let out = Command::new("git")
        .current_dir(dir)
        .args(args)
        .output()
        .unwrap();
    assert!(
        out.status.success(),
        "git {:?} failed: {}",
        args,
        String::from_utf8_lossy(&out.stderr)
    );
}

/// Build a full SharedWriter test environment: a bare "origin" repo, a work
/// repo with one pushed commit, a `.crosslink` dir with hook + agent config,
/// and an initialized hub cache.
///
/// Returns (work repo tempdir, bare remote tempdir, path to `.crosslink`).
/// Callers must keep the tempdirs alive for the duration of the test.
fn setup_shared_writer_env() -> (TempDir, TempDir, std::path::PathBuf) {
    let remote_dir = tempfile::tempdir().unwrap();
    let work_dir = tempfile::tempdir().unwrap();
    // Bare remote plus a work clone, both on `main` (-b needs git >= 2.28).
    run_git(remote_dir.path(), &["init", "--bare", "-b", "main"]);
    run_git(work_dir.path(), &["init", "-b", "main"]);
    run_git(work_dir.path(), &["config", "user.email", "test@test.local"]);
    run_git(work_dir.path(), &["config", "user.name", "Test"]);
    run_git(
        work_dir.path(),
        &["remote", "add", "origin", remote_dir.path().to_str().unwrap()],
    );
    // Seed an initial commit and publish it so the remote has a `main` branch.
    std::fs::write(work_dir.path().join("README.md"), "# test\n").unwrap();
    run_git(work_dir.path(), &["add", "."]);
    run_git(work_dir.path(), &["commit", "-m", "init", "--no-gpg-sign"]);
    run_git(work_dir.path(), &["push", "-u", "origin", "main"]);
    // Crosslink hook config: v2 layout against the `origin` remote.
    let crosslink_dir = work_dir.path().join(".crosslink");
    std::fs::create_dir_all(&crosslink_dir).unwrap();
    std::fs::write(
        crosslink_dir.join("hook-config.json"),
        r#"{"remote":"origin","layout":"v2"}"#,
    )
    .unwrap();
    // Agent identity consumed by SharedWriter::new.
    let agent_config = AgentConfig {
        agent_id: "test-agent".to_string(),
        machine_id: "test-machine".to_string(),
        description: Some("Integration test agent".to_string()),
        ssh_key_path: None,
        ssh_fingerprint: None,
        ssh_public_key: None,
    };
    let agent_json = serde_json::to_string_pretty(&agent_config).unwrap();
    std::fs::write(crosslink_dir.join("agent.json"), agent_json).unwrap();
    // Prime the local hub cache so the writer can operate against it.
    let sync = crate::sync::SyncManager::new(&crosslink_dir).unwrap();
    sync.init_cache().unwrap();
    (work_dir, remote_dir, crosslink_dir)
}
/// Open the SQLite-backed issue database stored as `issues.db` inside `dir`.
fn make_db(dir: &std::path::Path) -> Database {
    let db_path = dir.join("issues.db");
    Database::open(&db_path).unwrap()
}
#[test]
fn test_new_returns_some_with_agent_and_hub() {
    // A fully provisioned env (agent.json + hub branch) yields Some(writer).
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let maybe_writer = SharedWriter::new(&crosslink).unwrap();
    assert!(
        maybe_writer.is_some(),
        "SharedWriter::new() should return Some when agent.json and hub branch exist"
    );
    drop(work);
}
#[test]
fn test_new_agent_id_matches_config() {
    // The writer must report the agent id configured in agent.json.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    assert_eq!(sw.agent_id(), "test-agent");
    drop(work);
}
#[test]
fn test_new_creates_issues_and_meta_dirs() {
    // Constructing the writer must lay out the hub-cache skeleton dirs.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    SharedWriter::new(&crosslink).unwrap().unwrap();
    let cache = crosslink.join(".hub-cache");
    assert!(cache.join("issues").exists(), "issues/ dir should exist");
    assert!(
        cache.join("meta").join("milestones").exists(),
        "meta/milestones/ dir should exist"
    );
    drop(work);
}
#[test]
fn test_create_issue_returns_display_id() {
    // Creating an issue yields a positive, user-facing display id.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Test issue", None, "medium").unwrap();
    assert!(id > 0, "create_issue should return a positive display ID");
    drop(work);
}
#[test]
fn test_create_issue_increments_id() {
    // Back-to-back creates must hand out consecutive display ids.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let first = sw.create_issue(&db, "First issue", None, "low").unwrap();
    let second = sw.create_issue(&db, "Second issue", None, "low").unwrap();
    assert_eq!(second, first + 1, "IDs should be sequential");
    drop(work);
}
#[test]
fn test_create_issue_with_description() {
    // Title and description both round-trip into the local database.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw
        .create_issue(&db, "With description", Some("A detailed description"), "high")
        .unwrap();
    assert!(id > 0);
    let maybe_issue = db.get_issue(id).unwrap();
    assert!(
        maybe_issue.is_some(),
        "Issue should exist in database after create"
    );
    let issue = maybe_issue.unwrap();
    assert_eq!(issue.title, "With description");
    assert_eq!(issue.description.as_deref(), Some("A detailed description"));
    drop(work);
}
#[test]
fn test_create_issue_high_priority() {
    // The "critical" priority string maps to Priority::Critical.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Critical bug", None, "critical").unwrap();
    let stored = db.get_issue(id).unwrap().unwrap();
    assert_eq!(stored.priority, Priority::Critical);
    drop(work);
}
#[test]
fn test_create_issue_writes_json_to_cache() {
    // The hub cache must contain at least one issue entry after a create.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    sw.create_issue(&db, "Cache test", None, "medium").unwrap();
    let cache_issues = crosslink.join(".hub-cache").join("issues");
    let entry_count = std::fs::read_dir(&cache_issues)
        .unwrap()
        .filter_map(std::result::Result::ok)
        .count();
    assert!(
        entry_count > 0,
        "At least one issue entry should exist in cache"
    );
    drop(work);
}
#[test]
fn test_create_subissue() {
    // A subissue gets its own id and records its parent linkage.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let parent = sw.create_issue(&db, "Parent issue", None, "medium").unwrap();
    let child = sw
        .create_subissue(&db, parent, "Child issue", None, "low")
        .unwrap();
    assert!(child > 0);
    assert_ne!(parent, child);
    let loaded = db.get_issue(child).unwrap().unwrap();
    assert_eq!(loaded.parent_id, Some(parent));
    drop(work);
}
#[test]
fn test_update_issue_title() {
    // Passing Some(title) with everything else unchanged renames the issue.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Old title", None, "medium").unwrap();
    sw.update_issue(
        &db,
        id,
        Some("New title"),
        DescriptionUpdate::Unchanged,
        None,
        None,
    )
    .unwrap();
    assert_eq!(db.get_issue(id).unwrap().unwrap().title, "New title");
    drop(work);
}
#[test]
fn test_update_issue_priority() {
    // Only the priority argument is supplied; everything else stays untouched.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Priority test", None, "low").unwrap();
    sw.update_issue(
        &db,
        id,
        None,
        DescriptionUpdate::Unchanged,
        None,
        Some("high"),
    )
    .unwrap();
    assert_eq!(db.get_issue(id).unwrap().unwrap().priority, Priority::High);
    drop(work);
}
#[test]
fn test_update_issue_description() {
    // DescriptionUpdate::Set replaces the stored description text.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Desc test", None, "low").unwrap();
    sw.update_issue(
        &db,
        id,
        None,
        DescriptionUpdate::Set("Updated desc"),
        None,
        None,
    )
    .unwrap();
    let loaded = db.get_issue(id).unwrap().unwrap();
    assert_eq!(loaded.description.as_deref(), Some("Updated desc"));
    drop(work);
}
#[test]
fn test_update_issue_clear_description() {
    // DescriptionUpdate::Clear wipes a previously set description.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw
        .create_issue(&db, "Has desc", Some("initial desc"), "low")
        .unwrap();
    sw.update_issue(&db, id, None, DescriptionUpdate::Clear, None, None)
        .unwrap();
    let loaded = db.get_issue(id).unwrap().unwrap();
    assert!(loaded.description.is_none(), "Description should be cleared");
    drop(work);
}
#[test]
fn test_close_issue() {
    // close_issue flips the stored status to Closed.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw.create_issue(&db, "Close me", None, "medium").unwrap();
    sw.close_issue(&db, id).unwrap();
    let loaded = db.get_issue(id).unwrap().unwrap();
    assert_eq!(loaded.status, IssueStatus::Closed);
    drop(work);
}
#[test]
fn test_reopen_issue() {
    // A close followed by a reopen lands back on Open.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw
        .create_issue(&db, "Open/close cycle", None, "medium")
        .unwrap();
    sw.close_issue(&db, id).unwrap();
    sw.reopen_issue(&db, id).unwrap();
    let loaded = db.get_issue(id).unwrap().unwrap();
    assert_eq!(loaded.status, IssueStatus::Open);
    drop(work);
}
#[test]
fn test_closed_issue_has_closed_at() {
    // closed_at must be unset on a fresh issue and populated by close_issue.
    // Removed the dead `cache_dir` binding and its no-op `drop(cache_dir)`
    // (dropping a PathBuf has no side effect) from the original version.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let id = writer
        .create_issue(&db, "Closed at test", None, "medium")
        .unwrap();
    let issue_before = writer.load_issue_by_id(id, &db).unwrap();
    assert!(
        issue_before.closed_at.is_none(),
        "closed_at should be None before closing"
    );
    writer.close_issue(&db, id).unwrap();
    let issue_after = writer.load_issue_by_id(id, &db).unwrap();
    assert!(
        issue_after.closed_at.is_some(),
        "closed_at should be set after closing"
    );
    assert_eq!(issue_after.status, IssueStatus::Closed);
    drop(work_dir);
}
#[test]
fn test_reopen_clears_closed_at() {
    // Reopening must null out the closed_at timestamp.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let id = sw
        .create_issue(&db, "Reopen cleared", None, "medium")
        .unwrap();
    sw.close_issue(&db, id).unwrap();
    sw.reopen_issue(&db, id).unwrap();
    let reloaded = sw.load_issue_by_id(id, &db).unwrap();
    assert!(
        reloaded.closed_at.is_none(),
        "closed_at should be cleared after reopen"
    );
    drop(work);
}
#[test]
fn test_delete_issue() {
    // Deleting one issue must not disturb its sibling; when delete succeeds
    // the deleted row must be gone from the database.
    let (work, _remote, crosslink) = setup_shared_writer_env();
    let sw = SharedWriter::new(&crosslink).unwrap().unwrap();
    let db = make_db(work.path());
    let doomed = sw.create_issue(&db, "Delete me", None, "medium").unwrap();
    let kept_id = sw.create_issue(&db, "Keep me", None, "medium").unwrap();
    // Delete is best-effort here: only assert removal when it reports success.
    if sw.delete_issue(&db, doomed).is_ok() {
        assert!(
            db.get_issue(doomed).unwrap().is_none(),
            "Deleted issue should be gone from DB"
        );
    }
    assert!(
        db.get_issue(kept_id).unwrap().is_some(),
        "Kept issue should still be in DB"
    );
    drop(work);
}
// delete_issue must also remove the v2 issue.json from the on-disk cache.
#[test]
fn test_delete_issue_removes_file_from_disk() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "File remove test", None, "medium").unwrap();
    let uuid: Uuid = store.get_issue_uuid_by_id(id).unwrap().parse().unwrap();
    let v2_issue_path = crosslink_dir
        .join(".hub-cache")
        .join("issues")
        .join(uuid.to_string())
        .join("issue.json");
    assert!(
        v2_issue_path.exists(),
        "Issue file should exist before delete"
    );
    let _ = w.delete_issue(&store, id);
    assert!(
        !v2_issue_path.exists(),
        "Issue file should be removed from disk by delete_issue's prepare closure"
    );
    drop(work_dir);
}
// add_comment returns the new comment's numeric ID, which must be positive.
#[test]
fn test_add_comment_returns_id() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let host = w.create_issue(&store, "Comment host", None, "medium").unwrap();
    let comment_id = w.add_comment(&store, host, "A test comment", "note").unwrap();
    assert!(comment_id > 0, "comment ID should be positive");
    drop(work_dir);
}
// A comment added through the writer must be readable back from the DB.
#[test]
fn test_add_comment_persists_to_db() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let host = w.create_issue(&store, "Comment persist", None, "medium").unwrap();
    w.add_comment(&store, host, "Persisted comment content", "plan").unwrap();
    let comments = store.get_comments(host).unwrap();
    assert!(!comments.is_empty(), "Comment should be in DB");
    assert_eq!(comments[0].content, "Persisted comment content");
    drop(work_dir);
}
// Every supported comment kind must be accepted and persisted.
#[test]
fn test_add_comment_multiple_kinds() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let host = w.create_issue(&store, "Typed comments", None, "medium").unwrap();
    let kinds = ["plan", "decision", "observation", "blocker", "resolution"];
    for kind in &kinds {
        w.add_comment(&store, host, &format!("Comment: {kind}"), kind).unwrap();
    }
    assert_eq!(store.get_comments(host).unwrap().len(), kinds.len());
    drop(work_dir);
}
// Consecutive comments on one issue receive consecutive IDs.
#[test]
fn test_add_comment_sequential_ids() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let host = w.create_issue(&store, "Sequential comments", None, "medium").unwrap();
    let first = w.add_comment(&store, host, "First comment", "note").unwrap();
    let second = w.add_comment(&store, host, "Second comment", "note").unwrap();
    assert_eq!(second, first + 1, "Comment IDs should be sequential");
    drop(work_dir);
}
// Intervention comments carry extra metadata but persist like any comment.
#[test]
fn test_add_intervention_comment() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let host = w.create_issue(&store, "Intervention host", None, "medium").unwrap();
    let comment_id = w
        .add_intervention_comment(
            &store,
            host,
            "Intervention content",
            "manual_redirect",
            Some("context string"),
            None,
        )
        .unwrap();
    assert!(comment_id > 0);
    let comments = store.get_comments(host).unwrap();
    assert!(!comments.is_empty());
    assert_eq!(comments[0].content, "Intervention content");
    drop(work_dir);
}
// add_label attaches a label that is then visible via get_labels.
#[test]
fn test_add_label() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Label test", None, "medium").unwrap();
    w.add_label(&store, id, "bug").unwrap();
    assert!(store.get_labels(id).unwrap().contains(&"bug".to_string()));
    drop(work_dir);
}
// Several distinct labels can coexist on a single issue.
#[test]
fn test_add_multiple_labels() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Multi-label", None, "medium").unwrap();
    for label in ["bug", "urgent", "frontend"] {
        w.add_label(&store, id, label).unwrap();
    }
    let labels = store.get_labels(id).unwrap();
    for label in ["bug", "urgent", "frontend"] {
        assert!(labels.contains(&label.to_string()));
    }
    drop(work_dir);
}
// remove_label deletes exactly the named label, leaving others intact.
#[test]
fn test_remove_label() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Remove label", None, "medium").unwrap();
    w.add_label(&store, id, "bug").unwrap();
    w.add_label(&store, id, "keep").unwrap();
    w.remove_label(&store, id, "bug").unwrap();
    let labels = store.get_labels(id).unwrap();
    assert!(
        !labels.contains(&"bug".to_string()),
        "bug label should be gone"
    );
    assert!(
        labels.contains(&"keep".to_string()),
        "keep label should remain"
    );
    drop(work_dir);
}
// Adding the same label twice must not create a duplicate entry.
#[test]
fn test_add_label_idempotent() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Idempotent label", None, "medium").unwrap();
    w.add_label(&store, id, "tag").unwrap();
    let _ = w.add_label(&store, id, "tag");
    let tag_count = store
        .get_labels(id)
        .unwrap()
        .iter()
        .filter(|l| l.as_str() == "tag")
        .count();
    assert_eq!(tag_count, 1, "Duplicate label should not be double-added");
    drop(work_dir);
}
// add_blocker records the dependency on the blocked issue's file.
#[test]
fn test_add_blocker() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let blocked = w.create_issue(&store, "Blocked issue", None, "medium").unwrap();
    let blocker = w.create_issue(&store, "Blocker issue", None, "high").unwrap();
    w.add_blocker(&store, blocked, blocker).unwrap();
    let issue_file = w.load_issue_by_id(blocked, &store).unwrap();
    assert!(
        !issue_file.blockers.is_empty(),
        "Blocker should be recorded"
    );
    drop(work_dir);
}
// remove_blocker undoes a previously recorded dependency.
#[test]
fn test_remove_blocker() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let blocked = w.create_issue(&store, "Was blocked", None, "medium").unwrap();
    let blocker = w.create_issue(&store, "Was blocker", None, "high").unwrap();
    w.add_blocker(&store, blocked, blocker).unwrap();
    w.remove_blocker(&store, blocked, blocker).unwrap();
    let issue_file = w.load_issue_by_id(blocked, &store).unwrap();
    assert!(issue_file.blockers.is_empty(), "Blocker should be removed");
    drop(work_dir);
}
// add_relation records a link between two issues.
#[test]
fn test_add_relation() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let left = w.create_issue(&store, "Related A", None, "medium").unwrap();
    let right = w.create_issue(&store, "Related B", None, "medium").unwrap();
    w.add_relation(&store, left, right).unwrap();
    let issue = w.load_issue_by_id(left, &store).unwrap();
    assert!(!issue.related.is_empty(), "Relation should be recorded");
    drop(work_dir);
}
// remove_relation undoes a previously recorded link.
#[test]
fn test_remove_relation() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let left = w.create_issue(&store, "Related C", None, "medium").unwrap();
    let right = w.create_issue(&store, "Related D", None, "medium").unwrap();
    w.add_relation(&store, left, right).unwrap();
    w.remove_relation(&store, left, right).unwrap();
    let issue = w.load_issue_by_id(left, &store).unwrap();
    assert!(issue.related.is_empty(), "Relation should be removed");
    drop(work_dir);
}
// create_milestone returns a positive milestone ID.
#[test]
fn test_create_milestone() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let ms_id = w.create_milestone(&store, "v1.0", Some("First release")).unwrap();
    assert!(ms_id > 0, "Milestone ID should be positive");
    drop(work_dir);
}
// Milestone IDs are handed out sequentially.
#[test]
fn test_create_multiple_milestones() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let first = w.create_milestone(&store, "v1.0", None).unwrap();
    let second = w.create_milestone(&store, "v2.0", None).unwrap();
    assert_eq!(second, first + 1, "Milestone IDs should be sequential");
    drop(work_dir);
}
// close_milestone flips the status to Closed and stamps closed_at.
#[test]
fn test_close_milestone() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let ms_id = w.create_milestone(&store, "v1.0", None).unwrap();
    w.close_milestone(&store, ms_id).unwrap();
    let entry = w.load_milestone_by_id(ms_id).unwrap();
    assert_eq!(entry.status, IssueStatus::Closed);
    assert!(entry.closed_at.is_some());
    drop(work_dir);
}
// A deleted milestone can no longer be loaded.
#[test]
fn test_delete_milestone() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let ms_id = w.create_milestone(&store, "v1.0-del", None).unwrap();
    w.delete_milestone(&store, ms_id).unwrap();
    assert!(
        w.load_milestone_by_id(ms_id).is_err(),
        "Deleted milestone should not be loadable"
    );
    drop(work_dir);
}
// Assigning a milestone to issues sets milestone_uuid on each issue file.
#[test]
fn test_set_milestone_on_issues() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let ms_id = w.create_milestone(&store, "Sprint 1", None).unwrap();
    let issue_id = w.create_issue(&store, "Sprint task", None, "medium").unwrap();
    w.set_milestone_on_issues(&store, ms_id, &[issue_id]).unwrap();
    let issue = w.load_issue_by_id(issue_id, &store).unwrap();
    assert!(
        issue.milestone_uuid.is_some(),
        "Issue should have milestone_uuid set"
    );
    drop(work_dir);
}
// clear_milestone_on_issue removes a previously assigned milestone.
#[test]
fn test_clear_milestone_on_issue() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let ms_id = w.create_milestone(&store, "Sprint 2", None).unwrap();
    let issue_id = w.create_issue(&store, "Sprint 2 task", None, "medium").unwrap();
    w.set_milestone_on_issues(&store, ms_id, &[issue_id]).unwrap();
    w.clear_milestone_on_issue(&store, issue_id).unwrap();
    let issue = w.load_issue_by_id(issue_id, &store).unwrap();
    assert!(
        issue.milestone_uuid.is_none(),
        "Issue should have milestone_uuid cleared"
    );
    drop(work_dir);
}
// read_lock_v2 yields None when no lock file exists for the issue.
#[test]
fn test_read_lock_v2_returns_none_when_no_lock() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        w.read_lock_v2(999).unwrap().is_none(),
        "No lock should exist for non-existent issue"
    );
    drop(work_dir);
}
// read_lock_v2 parses a lock file written under .hub-cache/locks/<id>.json.
#[test]
fn test_read_lock_v2_reads_existing_lock_file() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let locks_dir = crosslink_dir.join(".hub-cache").join("locks");
    std::fs::create_dir_all(&locks_dir).unwrap();
    let lock = crate::issue_file::LockFileV2 {
        issue_id: 42,
        agent_id: "test-agent".to_string(),
        branch: Some("feature/x".to_string()),
        claimed_at: chrono::Utc::now(),
        signed_by: None,
    };
    let payload = serde_json::to_string_pretty(&lock).unwrap();
    std::fs::write(locks_dir.join("42.json"), payload).unwrap();
    let maybe = w.read_lock_v2(42).unwrap();
    assert!(maybe.is_some());
    let read_lock = maybe.unwrap();
    assert_eq!(read_lock.issue_id, 42);
    assert_eq!(read_lock.agent_id, "test-agent");
    assert_eq!(read_lock.branch, Some("feature/x".to_string()));
    drop(work_dir);
}
// hydrate_to_sqlite rebuilds the DB row from the on-disk issue file.
#[test]
fn test_hydration_roundtrip_issue() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Hydration test", Some("desc"), "high").unwrap();
    crate::hydration::hydrate_to_sqlite(&crosslink_dir.join(".hub-cache"), &store).unwrap();
    let issue = store.get_issue(id).unwrap();
    assert!(issue.is_some());
    let issue = issue.unwrap();
    assert_eq!(issue.title, "Hydration test");
    assert_eq!(issue.priority, Priority::High);
    drop(work_dir);
}
// Hydration after close_issue must carry the Closed status into the DB.
#[test]
fn test_hydration_after_close() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Close hydration", None, "medium").unwrap();
    w.close_issue(&store, id).unwrap();
    crate::hydration::hydrate_to_sqlite(&crosslink_dir.join(".hub-cache"), &store).unwrap();
    assert_eq!(
        store.get_issue(id).unwrap().unwrap().status,
        IssueStatus::Closed
    );
    drop(work_dir);
}
// Hydration after add_comment must surface the comment in the DB.
#[test]
fn test_hydration_after_comment() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let issue_id = w.create_issue(&store, "Comment hydration", None, "medium").unwrap();
    w.add_comment(&store, issue_id, "Hydrated comment", "note").unwrap();
    crate::hydration::hydrate_to_sqlite(&crosslink_dir.join(".hub-cache"), &store).unwrap();
    let comments = store.get_comments(issue_id).unwrap();
    assert!(!comments.is_empty());
    assert_eq!(comments[0].content, "Hydrated comment");
    drop(work_dir);
}
// RewriteStats::total sums all three per-category counters.
#[test]
fn test_rewrite_stats_total() {
    let stats = RewriteStats {
        comments_updated: 3,
        descriptions_updated: 2,
        sessions_updated: 1,
    };
    assert_eq!(stats.total(), 3 + 2 + 1);
}
// A default (all-zero) RewriteStats totals zero.
#[test]
fn test_rewrite_stats_default_total() {
    assert_eq!(RewriteStats::default().total(), 0);
}
// An empty ID mapping must leave everything untouched.
#[test]
fn test_rewrite_local_references_empty_mapping() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let stats = w.rewrite_local_references(&store, &[]).unwrap();
    assert_eq!(
        stats.total(),
        0,
        "Empty mapping should produce zero rewrites"
    );
    drop(work_dir);
}
// A mapping whose IDs never appear in any text must rewrite nothing.
#[test]
fn test_rewrite_local_references_no_matches() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let _id = w
        .create_issue(&store, "No local refs here", Some("Clean description"), "low")
        .unwrap();
    let mapping = vec![(1i64, 5i64, "Some title".to_string())];
    let stats = w.rewrite_local_references(&store, &mapping).unwrap();
    assert_eq!(stats.comments_updated, 0);
    assert_eq!(stats.descriptions_updated, 0);
    drop(work_dir);
}
// With no offline issues on disk, promotion yields an empty mapping.
#[test]
fn test_promote_offline_issues_empty() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let mapping = w.promote_offline_issues(&store).unwrap();
    assert!(mapping.is_empty(), "No offline issues to promote");
    drop(work_dir);
}
// UUIDs recorded as promoted must be read back verbatim.
#[test]
fn test_promoted_uuids_roundtrip() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(w.read_promoted_uuids().is_empty());
    let recorded = [Uuid::new_v4(), Uuid::new_v4()];
    w.record_promoted_uuids(&recorded).unwrap();
    let after = w.read_promoted_uuids();
    for uuid in &recorded {
        assert!(after.contains(uuid));
    }
    drop(work_dir);
}
// A UUID already present in the promoted set must be skipped even if a
// matching offline issue file exists on disk.
#[test]
fn test_promoted_uuids_are_not_re_promoted() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let uuid = Uuid::new_v4();
    w.record_promoted_uuids(&[uuid]).unwrap();
    let issues_dir = crosslink_dir.join(".hub-cache").join("issues");
    std::fs::create_dir_all(&issues_dir).unwrap();
    // Build an offline-style issue (no display ID) carrying the promoted UUID,
    // starting from the shared `make_issue` fixture.
    let mut issue = make_issue(0, "Already promoted");
    issue.uuid = uuid;
    issue.display_id = None;
    issue.priority = Priority::Low;
    crate::issue_file::write_issue_file(&issues_dir.join(format!("{uuid}.json")), &issue)
        .unwrap();
    let promoted = w.promote_offline_issues(&store).unwrap();
    assert!(
        promoted.is_empty(),
        "Already-promoted UUID should not be re-promoted"
    );
    drop(work_dir);
}
// A freshly initialized hub reports layout version 2.
#[test]
fn test_layout_version_is_v2_for_new_hub() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert_eq!(w.layout_version(), 2, "New hub should be v2 layout");
    drop(work_dir);
}
// V2 layout stores each issue under issues/<uuid>/ with an issue.json file
// and a comments/ subdirectory.
#[test]
fn test_v2_issue_path_uses_subdir() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "V2 path check", None, "low").unwrap();
    let uuid: Uuid = store.get_issue_uuid_by_id(id).unwrap().parse().unwrap();
    let issue_root = crosslink_dir
        .join(".hub-cache")
        .join("issues")
        .join(uuid.to_string());
    let v2_path = issue_root.join("issue.json");
    assert!(
        v2_path.exists(),
        "V2 issue.json should exist at {}",
        v2_path.display()
    );
    let comments_dir = issue_root.join("comments");
    assert!(
        comments_dir.exists(),
        "V2 comments dir should exist at {}",
        comments_dir.display()
    );
    drop(work_dir);
}
// End-to-end: create, comment, label, update, close — all effects visible.
#[test]
fn test_full_issue_lifecycle() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w
        .create_issue(&store, "Lifecycle issue", Some("Initial desc"), "medium")
        .unwrap();
    w.add_comment(&store, id, "Planning note", "plan").unwrap();
    w.add_label(&store, id, "in-progress").unwrap();
    w.update_issue(
        &store,
        id,
        Some("Updated lifecycle"),
        DescriptionUpdate::Unchanged,
        None,
        Some("high"),
    )
    .unwrap();
    w.close_issue(&store, id).unwrap();
    let issue = store.get_issue(id).unwrap().unwrap();
    assert_eq!(issue.title, "Updated lifecycle");
    assert_eq!(issue.priority, Priority::High);
    assert_eq!(issue.status, IssueStatus::Closed);
    assert!(store.get_labels(id).unwrap().contains(&"in-progress".to_string()));
    assert_eq!(store.get_comments(id).unwrap().len(), 1);
    drop(work_dir);
}
// Mutating one issue must not leak into the state of its siblings.
#[test]
fn test_multiple_issues_independent() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let alpha = w.create_issue(&store, "Issue Alpha", None, "high").unwrap();
    let beta = w.create_issue(&store, "Issue Beta", None, "low").unwrap();
    let gamma = w.create_issue(&store, "Issue Gamma", None, "medium").unwrap();
    w.close_issue(&store, beta).unwrap();
    w.add_label(&store, alpha, "critical").unwrap();
    assert_eq!(store.get_issue(alpha).unwrap().unwrap().status, IssueStatus::Open);
    assert_eq!(store.get_issue(beta).unwrap().unwrap().status, IssueStatus::Closed);
    assert_eq!(store.get_issue(gamma).unwrap().unwrap().status, IssueStatus::Open);
    assert!(store.get_labels(alpha).unwrap().contains(&"critical".to_string()));
    drop(work_dir);
}
// crosslink_dir() must point at an existing directory.
#[test]
fn test_crosslink_dir_accessor() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        w.crosslink_dir().exists(),
        "crosslink_dir() should point to an existing dir"
    );
    drop(work_dir);
}
// Smoke test: two back-to-back creates must succeed without the event
// machinery erroring out.
//
// NOTE(review): this test previously computed the per-agent events.log path
// and immediately dropped it without asserting anything; that dead code is
// removed. It still makes no assertion about sequence numbers — the
// increment behavior itself is covered by test_next_event_seq_increments.
#[test]
fn test_event_seq_increments() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    writer.create_issue(&db, "Seq 1", None, "low").unwrap();
    writer.create_issue(&db, "Seq 2", None, "low").unwrap();
    drop(work_dir);
}
// Display-ID counters live on disk, so a fresh writer must continue where
// the previous one left off.
#[test]
fn test_counters_persist_across_writer_instances() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    {
        let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
        let store = make_db(work_dir.path());
        w.create_issue(&store, "Issue 1", None, "low").unwrap();
        w.create_issue(&store, "Issue 2", None, "low").unwrap();
    }
    {
        let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
        let store = make_db(work_dir.path());
        let id = w.create_issue(&store, "Issue 3", None, "low").unwrap();
        assert_eq!(id, 3, "Counter should persist: 3rd issue should get ID 3");
    }
    drop(work_dir);
}
// promoted_uuids_path must reference the .promoted-uuids file.
#[test]
fn test_promoted_uuids_path() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        w.promoted_uuids_path().to_string_lossy().contains(".promoted-uuids"),
        "promoted_uuids_path should contain .promoted-uuids"
    );
    drop(work_dir);
}
// event_log_path is namespaced by agent ID and ends in events.log.
#[test]
fn test_event_log_path() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let rendered = w.event_log_path().to_string_lossy().into_owned();
    assert!(
        rendered.contains("test-agent"),
        "event_log_path should contain agent_id"
    );
    assert!(
        rendered.contains("events.log"),
        "event_log_path should end in events.log"
    );
    drop(work_dir);
}
// Without a configured signing key, no SSH key path is resolved.
#[test]
fn test_resolve_ssh_key_path_returns_none_without_key() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        w.resolve_ssh_key_path().is_none(),
        "resolve_ssh_key_path should return None when no key is configured"
    );
    drop(work_dir);
}
// A brand-new hub starts its display-ID counter at 1.
#[test]
fn test_read_counters_defaults_to_one() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert_eq!(w.read_counters().unwrap().next_display_id, 1);
    drop(work_dir);
}
// Creating one issue advances next_display_id from 1 to 2.
#[test]
fn test_write_then_read_counters() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    w.create_issue(&store, "Counter check", None, "low").unwrap();
    assert_eq!(
        w.read_counters().unwrap().next_display_id,
        2,
        "After one create, next_display_id should be 2"
    );
    drop(work_dir);
}
// load_issue_by_id returns the freshly created issue with its fields intact.
#[test]
fn test_load_issue_by_id_positive() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "Load by ID", Some("description"), "medium").unwrap();
    let loaded = w.load_issue_by_id(id, &store).unwrap();
    assert_eq!(loaded.title, "Load by ID");
    assert_eq!(loaded.status, IssueStatus::Open);
    drop(work_dir);
}
// Looking up a display ID that was never issued must error.
#[test]
fn test_load_issue_by_display_id_not_found() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        w.load_issue_by_display_id(9999).is_err(),
        "Non-existent issue should return error"
    );
    drop(work_dir);
}
// resolve_uuid for a positive ID must agree with the issue file's UUID.
#[test]
fn test_resolve_uuid_for_positive_id() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    let id = w.create_issue(&store, "UUID resolve", None, "low").unwrap();
    let resolved = w.resolve_uuid(id, &store).unwrap();
    let issue = w.load_issue_by_display_id(id).unwrap();
    assert_eq!(resolved, issue.uuid, "Resolved UUID should match issue UUID");
    drop(work_dir);
}
// Without a signing key, sign_comment yields neither signer nor signature.
#[test]
fn test_sign_comment_without_key_returns_none() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let (signed_by, signature) = w.sign_comment("content", "author", 1);
    assert!(signed_by.is_none());
    assert!(signature.is_none());
    drop(work_dir);
}
// create_envelope on an unsigned hub stamps agent metadata but no signature,
// and the very first envelope carries agent_seq 1.
#[test]
fn test_create_envelope_without_signing() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let event = crate::events::Event::IssueCreated {
        uuid: Uuid::new_v4(),
        title: "test".to_string(),
        description: None,
        priority: "low".to_string(),
        labels: vec![],
        parent_uuid: None,
        created_by: "test-agent".to_string(),
    };
    let envelope = w.create_envelope(event);
    assert_eq!(envelope.agent_id, "test-agent");
    assert!(envelope.signature.is_none(), "No signature without key");
    assert!(envelope.signed_by.is_none(), "No signed_by without key");
    assert_eq!(envelope.agent_seq, 1, "First event should have seq 1");
    drop(work_dir);
}
// Each call to next_event_seq yields the previous value plus one.
#[test]
fn test_next_event_seq_increments() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let seqs = [w.next_event_seq(), w.next_event_seq(), w.next_event_seq()];
    assert_eq!(seqs[0] + 1, seqs[1]);
    assert_eq!(seqs[1] + 1, seqs[2]);
    drop(work_dir);
}
// Issues created online all carry display IDs, so none count as offline.
#[test]
fn test_find_offline_issues_empty_when_all_have_ids() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let store = make_db(work_dir.path());
    w.create_issue(&store, "Normal 1", None, "low").unwrap();
    w.create_issue(&store, "Normal 2", None, "low").unwrap();
    assert!(
        w.find_offline_issues().unwrap().is_empty(),
        "No offline issues expected when all have display IDs"
    );
    drop(work_dir);
}
// The first claim on a fresh hub returns ID 1 and bumps the counter to 2.
#[test]
fn test_claim_display_id_uses_correct_starting_value() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let (first, counters) = w.claim_display_id(1).unwrap();
    assert_eq!(first, 1, "First claimed ID should be 1");
    assert_eq!(
        counters.next_display_id, 2,
        "After claiming 1, next should be 2"
    );
    drop(work_dir);
}
// A bulk claim of N IDs advances the counter by N.
#[test]
fn test_claim_display_id_bulk() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let (first, counters) = w.claim_display_id(5).unwrap();
    assert_eq!(first, 1);
    assert_eq!(
        counters.next_display_id, 6,
        "After claiming 5, next should be 6"
    );
    drop(work_dir);
}
// The first milestone claim returns 1 and bumps the milestone counter to 2.
#[test]
fn test_claim_milestone_id() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let (id, counters) = w.claim_milestone_id().unwrap();
    assert_eq!(id, 1, "First milestone ID should be 1");
    assert_eq!(counters.next_milestone_id, 2);
    drop(work_dir);
}
// If the counter file lags behind issues already on disk, claiming must
// reconcile and jump past the highest existing display ID.
#[test]
fn test_claim_display_id_reconciles_against_stale_counter() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let issues_dir = w.sync.cache_path().join("issues");
    std::fs::create_dir_all(&issues_dir).unwrap();
    let stale = make_issue(100, "closed-from-hub");
    write_issue_file(&issues_dir.join(format!("{}.json", stale.uuid)), &stale).unwrap();
    assert_eq!(
        w.read_counters().unwrap().next_display_id,
        1,
        "precondition: counter is stale"
    );
    let (first, updated) = w.claim_display_id(1).unwrap();
    assert_eq!(first, 101, "first claim must be past the max existing ID");
    assert_eq!(updated.next_display_id, 102);
    drop(work_dir);
}
// Reconciliation must consider both the flat v1 file layout and the v2
// per-issue subdirectory layout when searching for the max display ID.
#[test]
fn test_claim_display_id_reconciles_across_v1_and_v2_layouts() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let issues_dir = w.sync.cache_path().join("issues");
    std::fs::create_dir_all(&issues_dir).unwrap();
    let v1 = make_issue(50, "v1-issue");
    write_issue_file(&issues_dir.join(format!("{}.json", v1.uuid)), &v1).unwrap();
    let v2 = make_issue(200, "v2-issue");
    let v2_dir = issues_dir.join(v2.uuid.to_string());
    std::fs::create_dir_all(&v2_dir).unwrap();
    write_issue_file(&v2_dir.join("issue.json"), &v2).unwrap();
    let (first, _) = w.claim_display_id(1).unwrap();
    assert_eq!(
        first, 201,
        "reconciler must see the highest ID across both layouts"
    );
    drop(work_dir);
}
// A counter already ahead of every file on disk must be honored as-is.
#[test]
fn test_claim_display_id_does_not_regress_advanced_counter() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let mut counters = w.read_counters().unwrap();
    counters.next_display_id = 500;
    w.write_counters_to_cache(&counters).unwrap();
    let issues_dir = w.sync.cache_path().join("issues");
    std::fs::create_dir_all(&issues_dir).unwrap();
    let low = make_issue(5, "low");
    write_issue_file(&issues_dir.join(format!("{}.json", low.uuid)), &low).unwrap();
    let (first, updated) = w.claim_display_id(1).unwrap();
    assert_eq!(first, 500, "counter must not be regressed by older files");
    assert_eq!(updated.next_display_id, 501);
    drop(work_dir);
}
// Milestone claims also reconcile against milestone files already on disk.
#[test]
fn test_claim_milestone_id_reconciles_against_stale_counter() {
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let w = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let milestones_dir = w.sync.cache_path().join("meta").join("milestones");
    std::fs::create_dir_all(&milestones_dir).unwrap();
    let ms_uuid = Uuid::new_v4();
    let milestone = crate::issue_file::MilestoneEntry {
        uuid: ms_uuid,
        display_id: 42,
        name: "Q1".to_string(),
        description: None,
        status: IssueStatus::Open,
        created_at: Utc::now(),
        closed_at: None,
    };
    let payload = serde_json::to_string_pretty(&milestone).unwrap();
    std::fs::write(milestones_dir.join(format!("{ms_uuid}.json")), payload).unwrap();
    assert_eq!(
        w.read_counters().unwrap().next_milestone_id,
        1,
        "precondition: counter stale"
    );
    let (id, updated) = w.claim_milestone_id().unwrap();
    assert_eq!(id, 43, "milestone id must skip past the existing max");
    assert_eq!(updated.next_milestone_id, 44);
    drop(work_dir);
}
// With no events.log on disk, the max event sequence is reported as zero.
#[test]
fn test_read_max_event_seq_returns_zero_when_no_log() {
    let dir = tempfile::tempdir().unwrap();
    assert_eq!(
        SharedWriter::read_max_event_seq(dir.path(), "nonexistent-agent"),
        0,
        "Max event seq should be 0 when no log exists"
    );
}
#[test]
fn test_layout_version_one_for_v1_hub() {
    // A meta dir with no version file falls back to layout version 1.
    let dir = tempfile::tempdir().unwrap();
    let meta_dir = dir.path().join("meta");
    std::fs::create_dir_all(&meta_dir).unwrap();
    let version = crate::issue_file::read_layout_version(&meta_dir).unwrap_or(1);
    assert_eq!(version, 1);
}
#[test]
fn test_write_counters_to_cache() {
    // A counter value written to the cache must survive a read round trip.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();

    let mut snapshot = writer.read_counters().unwrap();
    snapshot.next_display_id = 42;
    writer.write_counters_to_cache(&snapshot).unwrap();

    assert_eq!(writer.read_counters().unwrap().next_display_id, 42);
    drop(work_dir);
}
#[test]
fn test_push_outcome_eq() {
    // PartialEq: identical variants compare equal, distinct variants do not.
    assert_eq!(PushOutcome::Pushed, PushOutcome::Pushed);
    assert_eq!(PushOutcome::LocalOnly, PushOutcome::LocalOnly);
    assert_ne!(PushOutcome::LocalOnly, PushOutcome::Pushed);
}
#[test]
fn test_push_outcome_copy() {
    // PushOutcome is Copy: plain assignment leaves the source usable.
    let original = PushOutcome::Pushed;
    let duplicate = original;
    assert_eq!(original, duplicate);
}
#[test]
fn test_max_retries_constant() {
    // Guard the retry budget against accidental changes.
    assert_eq!(MAX_RETRIES, 3);
}
/// Builds the standard shared-writer environment, then removes the hub
/// cache's `meta/version.json` so the hub is detected as the legacy V1
/// layout. Returns the same (work dir, remote dir, crosslink dir) triple
/// as `setup_shared_writer_env`.
fn setup_shared_writer_env_v1() -> (TempDir, TempDir, std::path::PathBuf) {
    let (work_dir, remote_dir, crosslink_dir) = setup_shared_writer_env();
    let version_file = [".hub-cache", "meta", "version.json"]
        .iter()
        .fold(crosslink_dir.clone(), |p, part| p.join(part));
    // Only delete when present; a missing file already means V1.
    if version_file.exists() {
        std::fs::remove_file(&version_file).unwrap();
    }
    (work_dir, remote_dir, crosslink_dir)
}
#[test]
fn test_add_comment_v1_layout() {
    // Plain comments must round-trip through the legacy V1 on-disk layout.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env_v1();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    assert_eq!(
        writer.layout_version(),
        1,
        "Should be V1 layout after version.json removal"
    );

    let issue_id = writer
        .create_issue(&db, "V1 comment host", None, "medium")
        .unwrap();
    let comment_id = writer
        .add_comment(&db, issue_id, "V1 inline comment", "note")
        .unwrap();
    assert!(comment_id > 0, "Comment ID should be positive");

    let stored = db.get_comments(issue_id).unwrap();
    assert!(
        !stored.is_empty(),
        "V1 comment should appear in DB after hydration"
    );
    assert_eq!(stored[0].content, "V1 inline comment");
    drop(work_dir);
}
#[test]
fn test_add_intervention_comment_v1_layout() {
    // Intervention comments must also work against the legacy V1 layout.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env_v1();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    assert_eq!(writer.layout_version(), 1);

    let issue_id = writer
        .create_issue(&db, "V1 intervention host", None, "medium")
        .unwrap();
    let comment_id = writer
        .add_intervention_comment(
            &db,
            issue_id,
            "V1 intervention content",
            "manual_redirect",
            Some("V1 context"),
            None,
        )
        .unwrap();
    assert!(comment_id > 0, "Intervention comment ID should be positive");

    let stored = db.get_comments(issue_id).unwrap();
    assert!(
        !stored.is_empty(),
        "V1 intervention comment should appear in DB"
    );
    assert_eq!(stored[0].content, "V1 intervention content");
    drop(work_dir);
}
#[test]
fn test_new_without_agent_config_but_hub_already_initialized() {
    // With agent.json gone but the hub cache present, the writer should come
    // up in anonymous mode rather than refusing to start.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    std::fs::remove_file(crosslink_dir.join("agent.json")).unwrap();

    let maybe_writer = SharedWriter::new(&crosslink_dir).unwrap();
    assert!(
        maybe_writer.is_some(),
        "SharedWriter::new() should return Some when hub cache already exists (anonymous mode)"
    );
    let writer = maybe_writer.unwrap();
    assert!(
        writer.agent_id().starts_with("anon-"),
        "Anonymous writer should have agent_id starting with 'anon-', got: {}",
        writer.agent_id()
    );
    drop(work_dir);
}
#[test]
fn test_new_without_agent_config_hub_init_fails_returns_none() {
    // No agent.json and a remote that cannot be reached: construction must
    // succeed (Ok) but yield no writer (None).
    let work_dir = tempfile::tempdir().unwrap();
    Command::new("git")
        .current_dir(work_dir.path())
        .args(["init", "-b", "main"])
        .output()
        .unwrap();
    // Minimal git identity plus an origin pointing at a nonexistent path, so
    // any attempt to clone/fetch the hub is guaranteed to fail.
    for args in [
        vec!["config", "user.email", "test@test.local"],
        vec!["config", "user.name", "Test"],
        vec!["remote", "add", "origin", "/nonexistent/path/to/remote"],
    ] {
        Command::new("git")
            .current_dir(work_dir.path())
            .args(&args)
            .output()
            .unwrap();
    }
    let crosslink_dir = work_dir.path().join(".crosslink");
    std::fs::create_dir_all(&crosslink_dir).unwrap();
    std::fs::write(
        crosslink_dir.join("hook-config.json"),
        r#"{"remote":"origin","layout":"v2"}"#,
    )
    .unwrap();
    let result = SharedWriter::new(&crosslink_dir);
    let writer =
        result.expect("SharedWriter::new() should not error even when hub unavailable");
    // Previously this test only checked is_ok(), which also passes if an
    // anonymous writer is wrongly returned; pin the behavior the test name
    // actually promises.
    assert!(
        writer.is_none(),
        "SharedWriter::new() should return None when hub init fails without agent config"
    );
    drop(work_dir);
}
#[test]
fn test_resolve_ssh_key_path_nonexistent_file() {
    // A configured key path that points at a missing file resolves to None.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let config = AgentConfig {
        agent_id: "test-agent".to_string(),
        machine_id: "test-machine".to_string(),
        description: None,
        ssh_key_path: Some("nonexistent_key_file.pem".to_string()),
        ssh_fingerprint: Some("SHA256:fakefingerprint".to_string()),
        ssh_public_key: None,
    };
    std::fs::write(
        crosslink_dir.join("agent.json"),
        serde_json::to_string_pretty(&config).unwrap(),
    )
    .unwrap();

    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        writer.resolve_ssh_key_path().is_none(),
        "resolve_ssh_key_path should return None when file doesn't exist"
    );
    drop(work_dir);
}
#[test]
fn test_resolve_ssh_key_path_existing_file() {
    // A configured key path pointing at a real file resolves to Some(path).
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let key_name = "test_agent_key.pem";
    std::fs::write(crosslink_dir.join(key_name), "fake key content").unwrap();

    let config = AgentConfig {
        agent_id: "test-agent".to_string(),
        machine_id: "test-machine".to_string(),
        description: None,
        ssh_key_path: Some(key_name.to_string()),
        ssh_fingerprint: Some("SHA256:fakefingerprint".to_string()),
        ssh_public_key: None,
    };
    std::fs::write(
        crosslink_dir.join("agent.json"),
        serde_json::to_string_pretty(&config).unwrap(),
    )
    .unwrap();

    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let resolved = writer
        .resolve_ssh_key_path()
        .expect("resolve_ssh_key_path should return Some when key file exists");
    assert!(
        resolved.ends_with(key_name),
        "Resolved path should end with the key filename"
    );
    drop(work_dir);
}
#[test]
fn test_replace_local_refs_after_boundary_rejection() {
    // "L1" must only match as a standalone token: an alphanumeric character
    // immediately after it (as in "L10") blocks the replacement.
    let replacements = vec![("L1".to_string(), "#5".to_string())];

    assert!(
        replace_local_refs("L10 is a thing", &replacements).is_none(),
        "L1 in L10 should NOT be replaced (after-boundary alphanumeric char)"
    );
    assert_eq!(
        replace_local_refs("L10 and L1 done", &replacements),
        Some("L10 and #5 done".to_string()),
        "Only standalone L1 should be replaced, not L1 inside L10"
    );
    assert!(
        replace_local_refs("L10", &replacements).is_none(),
        "L1 at start of L10 (entire string) should NOT be replaced"
    );
}
#[test]
fn test_claim_lock_v2_succeeds() {
    // First claim on an unlocked issue should succeed outright.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "Lock target", None, "medium")
        .unwrap();
    let outcome = writer.claim_lock_v2(issue_id, Some("feature/test")).unwrap();
    assert_eq!(outcome, LockClaimResult::Claimed);
    drop(work_dir);
}
#[test]
fn test_claim_lock_v2_already_held() {
    // Re-claiming a lock this agent already holds reports AlreadyHeld.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "Lock target 2", None, "medium")
        .unwrap();
    writer.claim_lock_v2(issue_id, None).unwrap();
    let second_claim = writer.claim_lock_v2(issue_id, None).unwrap();
    assert_eq!(second_claim, LockClaimResult::AlreadyHeld);
    drop(work_dir);
}
#[test]
fn test_release_lock_v2_held() {
    // Releasing a lock we claimed ourselves returns true.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "Lock release", None, "medium")
        .unwrap();
    writer.claim_lock_v2(issue_id, None).unwrap();
    assert!(
        writer.release_lock_v2(issue_id).unwrap(),
        "Should release own lock"
    );
    drop(work_dir);
}
#[test]
fn test_release_lock_v2_not_locked() {
    // Releasing an id that was never locked is a no-op reported as false.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    assert!(
        !writer.release_lock_v2(999).unwrap(),
        "Releasing non-existent lock returns false"
    );
    drop(work_dir);
}
#[test]
fn test_steal_lock_v2() {
    // Stealing a held lock hands it to the caller as a fresh claim.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "Steal target", None, "medium")
        .unwrap();
    writer.claim_lock_v2(issue_id, None).unwrap();
    let stolen = writer
        .steal_lock_v2(issue_id, "test-agent", Some("feature/steal"))
        .unwrap();
    assert_eq!(stolen, LockClaimResult::Claimed);
    drop(work_dir);
}
#[test]
fn test_rewrite_local_references_rewrites_description() {
    // A local ref "L1" inside a description should be rewritten to the
    // promoted "#<id>" form once the mapping is applied.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let id = writer
        .create_issue(&db, "Rewrite test", Some("See L1 for details"), "medium")
        .unwrap();

    let mapping = vec![(-1i64, id, "Rewrite test".to_string())];
    let stats = writer.rewrite_local_references(&db, &mapping).unwrap();
    assert_eq!(stats.descriptions_updated, 1);

    let description = db
        .get_issue(id)
        .unwrap()
        .unwrap()
        .description
        .expect("issue should still carry a description");
    assert!(
        description.contains(&format!("#{id}")),
        "L1 should be rewritten to #{id}"
    );
    drop(work_dir);
}
#[test]
fn test_rewrite_local_references_rewrites_comments() {
    // Local refs inside comments are rewritten too, counted separately from
    // description rewrites.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "Comment rewrite", None, "medium")
        .unwrap();
    writer
        .add_comment(&db, issue_id, "Related to L2", "observation")
        .unwrap();

    let mapping = vec![(-2i64, issue_id, "Comment rewrite".to_string())];
    let stats = writer.rewrite_local_references(&db, &mapping).unwrap();
    assert_eq!(stats.comments_updated, 1);
    drop(work_dir);
}
#[test]
fn test_rewrite_local_references_no_refs_no_changes() {
    // Text containing no local refs must produce zero updates of either kind.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());
    let issue_id = writer
        .create_issue(&db, "No refs", Some("Plain description"), "medium")
        .unwrap();

    let mapping = vec![(-1i64, issue_id, "No refs".to_string())];
    let stats = writer.rewrite_local_references(&db, &mapping).unwrap();
    assert_eq!(stats.descriptions_updated, 0);
    assert_eq!(stats.comments_updated, 0);
    drop(work_dir);
}
#[test]
fn test_new_without_agent_json_and_no_hub() {
    // Only a hook-config exists — no agent.json, no hub cache — so no writer
    // should be constructed at all.
    let dir = tempfile::tempdir().unwrap();
    let crosslink_dir = dir.path().join(".crosslink");
    std::fs::create_dir_all(&crosslink_dir).unwrap();
    std::fs::write(
        crosslink_dir.join("hook-config.json"),
        r#"{"remote":"origin"}"#,
    )
    .unwrap();
    assert!(SharedWriter::new(&crosslink_dir).unwrap().is_none());
}
#[test]
fn test_promote_offline_issues_with_offline_issue() {
    // An issue committed to the hub cache with display_id = None (i.e.
    // created offline) must be promoted: assigned a positive display id and
    // reported in the returned mapping.
    let (work_dir, _remote, crosslink_dir) = setup_shared_writer_env();
    let writer = SharedWriter::new(&crosslink_dir).unwrap().unwrap();
    let db = make_db(work_dir.path());

    // Reuse the shared `make_issue` fixture instead of duplicating the
    // 18-field struct literal; only display_id differs (None = offline).
    let mut issue = make_issue(0, "Offline issue");
    issue.display_id = None;
    let uuid = issue.uuid;

    // Write the issue in the V2 per-issue directory layout and commit it.
    let cache_dir = crosslink_dir.join(".hub-cache");
    let issue_dir = cache_dir.join("issues").join(uuid.to_string());
    std::fs::create_dir_all(&issue_dir).unwrap();
    std::fs::write(
        issue_dir.join("issue.json"),
        serde_json::to_string_pretty(&issue).unwrap(),
    )
    .unwrap();
    writer
        .git_in_cache(&["add", &format!("issues/{uuid}/issue.json")])
        .unwrap();
    // Commit is best-effort; its result is intentionally ignored.
    let _ = writer.git_in_cache(&["commit", "-m", "add offline issue", "--no-gpg-sign"]);

    let mapping = writer.promote_offline_issues(&db).unwrap();
    assert_eq!(mapping.len(), 1, "Should promote exactly 1 issue");
    let (_neg_id, new_id, title) = &mapping[0];
    assert_eq!(title, "Offline issue");
    assert!(*new_id > 0, "New display ID should be positive");

    // If promotion also rewrote a V1-style flat file, its display_id must
    // now be populated.
    let v1_file = cache_dir.join("issues").join(format!("{uuid}.json"));
    if v1_file.exists() {
        let content = std::fs::read_to_string(&v1_file).unwrap();
        let updated: crate::issue_file::IssueFile = serde_json::from_str(&content).unwrap();
        assert!(
            updated.display_id.is_some(),
            "display_id should be set after promotion"
        );
    }
    drop(work_dir);
}
}