use std::path::Path;
use std::sync::Arc;
use sqry_core::graph::Language;
use sqry_core::graph::unified::concurrent::{CodeGraph, GraphSnapshot};
use sqry_core::graph::unified::edge::kind::EdgeKind;
use sqry_core::graph::unified::node::kind::NodeKind;
use sqry_core::graph::unified::storage::arena::NodeEntry;
use sqry_core::query::{CircularType, UnusedScope};
use sqry_db::persistence::{
DERIVED_FORMAT_VERSION, DERIVED_MAGIC, DerivedHeader, LoadError, LoadOutcome, PersistedEntry,
QueryDeps, deserialize_derived_header, load_derived, save_derived, serialize_derived_stream,
};
use sqry_db::queries::dispatch::load_derived_opportunistic;
use sqry_db::queries::type_ids;
use sqry_db::queries::{
CalleesQuery, CallersQuery, CondensationQuery, CyclesKey, CyclesQuery, EntryPointsQuery,
ExportsQuery, ImplementsQuery, ImportsQuery, IsInCycleQuery, IsNodeUnusedQuery,
ReachabilityQuery, ReachableFromEntryPointsQuery, ReferencesQuery, RelationKey, SccQuery,
UnusedKey, UnusedQuery,
};
use sqry_db::query::DerivedQuery;
use sqry_db::{QueryDb, QueryDbConfig};
use tempfile::TempDir;
/// Snapshot of a brand-new, completely empty `CodeGraph`.
fn empty_snapshot() -> Arc<GraphSnapshot> {
    let graph = CodeGraph::new();
    Arc::new(graph.snapshot())
}
/// Allocates `entry` in the node arena and registers it in the lookup
/// indices, returning the freshly assigned node id.
///
/// The index fields are read out of `entry` *before* it is moved into
/// `alloc`, so no clone of the entry is needed (the previous version cloned
/// the whole entry solely to keep the fields readable after the move).
fn add_node(
    graph: &mut CodeGraph,
    entry: NodeEntry,
) -> sqry_core::graph::unified::node::id::NodeId {
    // These fields are small Copy ids (kind, interned names, file id) —
    // copying them out is free and lets `entry` move into the arena intact.
    let (kind, name, qualified_name, file) =
        (entry.kind, entry.name, entry.qualified_name, entry.file);
    let id = graph.nodes_mut().alloc(entry).expect("alloc node");
    graph.indices_mut().add(id, kind, name, qualified_name, file);
    id
}
/// Builds a two-function graph in a single file — `main` calls `helper` —
/// and returns its snapshot together with the registered file id.
fn build_call_graph() -> (
    Arc<GraphSnapshot>,
    sqry_core::graph::unified::file::id::FileId,
) {
    let mut g = CodeGraph::new();
    let file_id = g
        .files_mut()
        .register_with_language(Path::new("src/lib.rs"), Some(Language::Rust))
        .expect("register file");

    // Intern both symbol names ("main" first, preserving intern order).
    let main_sym = g.strings_mut().intern("main").expect("intern main");
    let main_node = add_node(
        &mut g,
        NodeEntry::new(NodeKind::Function, main_sym, file_id)
            .with_qualified_name(main_sym)
            .with_byte_range(0, 100),
    );
    let helper_sym = g.strings_mut().intern("helper").expect("intern helper");
    let helper_node = add_node(
        &mut g,
        NodeEntry::new(NodeKind::Function, helper_sym, file_id)
            .with_qualified_name(helper_sym)
            .with_byte_range(110, 200),
    );

    // `main` calls `helper` with no arguments, synchronously.
    let call_edge = EdgeKind::Calls {
        argument_count: 0,
        is_async: false,
    };
    g.edges().add_edge(main_node, helper_node, call_edge, file_id);
    (Arc::new(g.snapshot()), file_id)
}
fn make_valid_stream_with_revs(
sha: [u8; 32],
edge_rev: u64,
metadata_rev: u64,
file_revisions: Vec<(sqry_core::graph::unified::file::id::FileId, u64)>,
n_entries: usize,
) -> Vec<u8> {
let entries: Vec<PersistedEntry> = (0..n_entries)
.map(|i| PersistedEntry {
query_type_id: type_ids::CALLERS,
raw_key_bytes: postcard::to_allocvec(&RelationKey::exact(format!("sym_{i}")))
.unwrap_or_default(),
raw_result_bytes: vec![0xAA, i as u8],
deps: QueryDeps {
file_deps: vec![],
edge_revision: Some(edge_rev),
metadata_revision: None,
},
})
.collect();
let header = DerivedHeader::new(
sha,
edge_rev,
metadata_rev,
file_revisions,
n_entries as u64,
);
serialize_derived_stream(&header, entries).unwrap()
}
/// Shorthand: a valid stream with all revisions zeroed and no per-file revs.
fn make_valid_stream(sha: [u8; 32], n_entries: usize) -> Vec<u8> {
    make_valid_stream_with_revs(sha, 0, 0, Vec::new(), n_entries)
}
#[test]
fn mixed_query_roundtrip() {
    // End-to-end roundtrip: warm one DB with three heterogeneous query
    // types, persist the derived cache, reload it into a fresh DB, and
    // verify (a) identical results and (b) pure cache hits on re-query.
    let (snapshot, _file) = build_call_graph();
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x55; 32];

    // Warm-up: each first-time query on db1 is a cache miss.
    let db1 = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let cycles_key = CyclesKey {
        circular_type: CircularType::Calls,
        bounds: Default::default(),
    };
    let cycles_result = db1.get::<CyclesQuery>(&cycles_key);
    let callers_key = RelationKey::exact("main");
    let callers_result = db1.get::<CallersQuery>(&callers_key);
    let unused_key = UnusedKey {
        scope: UnusedScope::All,
        max_results: 100,
    };
    let unused_result = db1.get::<UnusedQuery>(&unused_key);
    let after_warm = db1.metrics();
    // >= rather than == because queries may compute nested sub-queries.
    assert!(
        after_warm.cache_misses >= 3,
        "at least 3 misses during warm-up"
    );

    // Persist the warmed cache, then cold-load it into a fresh DB.
    save_derived(&db1, sha, &derived_path, workspace_root).expect("save_derived must succeed");
    let mut db2 = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let outcome = load_derived(&mut db2, sha, &derived_path, workspace_root)
        .expect("load_derived must succeed");
    match outcome {
        LoadOutcome::Applied { entries } => {
            assert!(entries > 0, "at least one entry must be applied");
        }
        LoadOutcome::Skipped(_) => panic!("unexpected Skipped outcome"),
    }

    // First re-query pass: every query must be served from rehydrated entries.
    let base = db2.metrics();
    let cycles_result2 = db2.get::<CyclesQuery>(&cycles_key);
    let callers_result2 = db2.get::<CallersQuery>(&callers_key);
    let unused_result2 = db2.get::<UnusedQuery>(&unused_key);
    let after_first_requery = db2.metrics();
    assert_eq!(
        *cycles_result, *cycles_result2,
        "CyclesQuery result must survive roundtrip"
    );
    assert_eq!(
        *callers_result, *callers_result2,
        "CallersQuery result must survive roundtrip"
    );
    assert_eq!(
        *unused_result, *unused_result2,
        "UnusedQuery result must survive roundtrip"
    );
    let first_pass_misses = after_first_requery.cache_misses - base.cache_misses;
    assert_eq!(
        first_pass_misses, 0,
        "ZERO cache misses expected on first typed re-query after cold-load \
        (spec §2: first query after a cold start is free); got {first_pass_misses}"
    );
    let first_pass_hits = after_first_requery.cache_hits - base.cache_hits;
    assert_eq!(
        first_pass_hits, 3,
        "exactly 3 cache hits expected on first typed re-query (all three \
        top-level rehydrated entries); got {first_pass_hits}"
    );

    // Second pass: the first pass promotes rehydrated entries to typed
    // entries, so this pass must again be exactly 3 hits / 0 misses.
    let base2 = db2.metrics();
    let _ = db2.get::<CyclesQuery>(&cycles_key);
    let _ = db2.get::<CallersQuery>(&callers_key);
    let _ = db2.get::<UnusedQuery>(&unused_key);
    let after_second_requery = db2.metrics();
    let second_pass_misses = after_second_requery.cache_misses - base2.cache_misses;
    assert_eq!(
        second_pass_misses, 0,
        "zero additional misses expected on second typed re-query (entries \
        now typed after first-pass promotion); got {second_pass_misses}"
    );
    let second_pass_hits = after_second_requery.cache_hits - base2.cache_hits;
    assert_eq!(
        second_pass_hits, 3,
        "exactly 3 cache hits expected on second typed re-query; got {second_pass_hits}"
    );
}
#[test]
fn header_restoration() {
    use sqry_core::graph::unified::file::id::FileId;
    // A successful load must restore all three revision sources recorded in
    // the header: global edge revision, metadata revision, and the per-file
    // revision table — even with zero persisted entries.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0xBE; 32];
    let saved_edge_rev: u64 = 42;
    let saved_metadata_rev: u64 = 17;
    let fid_a = FileId::new(1);
    let fid_b = FileId::new(2);
    let saved_per_file: Vec<(FileId, u64)> = vec![(fid_a, 5), (fid_b, 8)];
    // n_entries = 0: only the header matters for this test.
    let bytes = make_valid_stream_with_revs(
        sha,
        saved_edge_rev,
        saved_metadata_rev,
        saved_per_file.clone(),
        0,
    );
    std::fs::write(&derived_path, &bytes).unwrap();
    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let outcome = load_derived(&mut db, sha, &derived_path, workspace_root).unwrap();
    assert!(matches!(outcome, LoadOutcome::Applied { .. }));
    assert_eq!(
        db.edge_revision(),
        saved_edge_rev,
        "edge_revision must be restored from the header"
    );
    assert_eq!(
        db.metadata_revision(),
        saved_metadata_rev,
        "metadata_revision must be restored from the header"
    );
    // Per-file revisions land in the input store.
    let store = db.inputs();
    assert_eq!(
        store.revision(fid_a),
        Some(5),
        "per-file revision for fid_a must be restored"
    );
    assert_eq!(
        store.revision(fid_b),
        Some(8),
        "per-file revision for fid_b must be restored"
    );
}
#[test]
fn builtin_query_type_ids_are_unique() {
    use std::collections::HashSet;
    // Every built-in query type must carry a distinct, non-zero wire id:
    // collisions or a zero id would corrupt the persisted-entry dispatch.
    let ids: Vec<u32> = vec![
        CallersQuery::QUERY_TYPE_ID,
        CalleesQuery::QUERY_TYPE_ID,
        ImportsQuery::QUERY_TYPE_ID,
        ExportsQuery::QUERY_TYPE_ID,
        ReferencesQuery::QUERY_TYPE_ID,
        ImplementsQuery::QUERY_TYPE_ID,
        CyclesQuery::QUERY_TYPE_ID,
        IsInCycleQuery::QUERY_TYPE_ID,
        UnusedQuery::QUERY_TYPE_ID,
        IsNodeUnusedQuery::QUERY_TYPE_ID,
        ReachabilityQuery::QUERY_TYPE_ID,
        EntryPointsQuery::QUERY_TYPE_ID,
        ReachableFromEntryPointsQuery::QUERY_TYPE_ID,
        SccQuery::QUERY_TYPE_ID,
        CondensationQuery::QUERY_TYPE_ID,
    ];
    // HashSet collapses duplicates; equal lengths <=> all ids unique.
    // (Replaces the previous clone + sort_unstable + dedup.)
    let unique: HashSet<u32> = ids.iter().copied().collect();
    assert_eq!(
        unique.len(),
        ids.len(),
        "QUERY_TYPE_ID collision detected among built-ins: {ids:?}"
    );
    assert!(
        !ids.contains(&0),
        "0x0000 is reserved — must never appear as a QUERY_TYPE_ID"
    );
    // Guard against silently dropping an entry from the list above.
    assert_eq!(
        ids.len(),
        15,
        "expected exactly 15 built-in QUERY_TYPE_IDs; got {}",
        ids.len()
    );
}
#[test]
fn unknown_query_type_id_skip() {
    // An entry with an unrecognized query_type_id must be skipped on its
    // own; the surrounding valid entries still apply.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x33; 32];

    // Small constructor for a synthetic persisted entry.
    let entry = |type_id: u32, key: &[u8], val: &[u8]| PersistedEntry {
        query_type_id: type_id,
        raw_key_bytes: key.to_vec(),
        raw_result_bytes: val.to_vec(),
        deps: QueryDeps::default(),
    };
    let entries = vec![
        entry(type_ids::CALLERS, b"key0", b"val0"),
        entry(type_ids::CALLERS, b"key1", b"val1"),
        // 0x9999 is not a registered query type id.
        entry(0x9999_u32, b"unknownkey", b"unknownval"),
        entry(type_ids::CALLEES, b"key3", b"val3"),
        entry(type_ids::CALLEES, b"key4", b"val4"),
    ];
    let header = DerivedHeader::new(sha, 0, 0, vec![], 5);
    let bytes = serialize_derived_stream(&header, entries).unwrap();
    std::fs::write(&derived_path, &bytes).unwrap();

    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let outcome = load_derived(&mut db, sha, &derived_path, workspace_root)
        .expect("load_derived must not error for unknown IDs");
    match outcome {
        LoadOutcome::Applied { entries } => {
            assert_eq!(
                entries, 4,
                "unknown id 0x9999 must be skipped; expected 4 entries applied, got {entries}"
            );
        }
        LoadOutcome::Skipped(_) => panic!("unexpected Skipped"),
    }
}
#[test]
fn fatal_framing_reject() {
    // A truncated stream is a framing-level corruption: the load must fail
    // with Corrupt and leave the DB completely untouched.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x44; 32];
    let mut bytes = make_valid_stream(sha, 2);
    assert!(
        bytes.len() > 8,
        "test precondition: stream must be > 8 bytes"
    );
    // Chop the last 8 bytes so the stream ends mid-frame.
    let truncated_len = bytes.len() - 8;
    bytes.truncate(truncated_len);
    std::fs::write(&derived_path, &bytes).unwrap();
    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    // Pristine-state preconditions on the fresh DB.
    assert_eq!(db.edge_revision(), 0);
    assert!(db.cold_load_allowed());
    let err = load_derived(&mut db, sha, &derived_path, workspace_root)
        .expect_err("truncated stream must return Err");
    assert!(
        matches!(err, LoadError::Corrupt { .. }),
        "expected Corrupt error for truncated stream; got: {err}"
    );
    // The failed load must not have mutated the DB...
    assert_eq!(
        db.edge_revision(),
        0,
        "DB edge_revision must be 0 after failed framing rejection"
    );
    // ...and must not consume the one-shot cold-load permission.
    assert!(
        db.cold_load_allowed(),
        "cold_load_allowed must remain true after a failed load"
    );
}
#[test]
fn idempotent_load() {
    // A cold load is a one-shot operation: a second load on the same DB
    // must fail with AlreadyLoaded and must not touch the cache at all.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x77; 32];
    let bytes = make_valid_stream(sha, 3);
    std::fs::write(&derived_path, &bytes).unwrap();
    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let first =
        load_derived(&mut db, sha, &derived_path, workspace_root).expect("first load must succeed");
    assert!(matches!(first, LoadOutcome::Applied { .. }));
    // A successful load consumes the cold-load permission.
    assert!(
        !db.cold_load_allowed(),
        "cold_load_allowed must be false after first load"
    );
    let metrics_after_first = db.metrics();
    // Delete the file to prove the second attempt is rejected before any
    // file I/O is consulted (AlreadyLoaded, not NotFound).
    std::fs::remove_file(&derived_path).unwrap();
    let second_err = load_derived(&mut db, sha, &derived_path, workspace_root)
        .expect_err("second load must return Err");
    assert!(
        matches!(second_err, LoadError::AlreadyLoaded),
        "second load must return AlreadyLoaded, got: {second_err}"
    );
    // The rejected second load must leave cache metrics untouched.
    let metrics_after_second = db.metrics();
    assert_eq!(
        metrics_after_first.cache_hits, metrics_after_second.cache_hits,
        "no new hits after AlreadyLoaded"
    );
    assert_eq!(
        metrics_after_first.cache_misses, metrics_after_second.cache_misses,
        "no new misses after AlreadyLoaded"
    );
}
#[test]
fn atomic_replace_under_reader() {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;
    // save_derived must replace the file atomically: a concurrent reader
    // re-reading the file in a tight loop must never observe a torn or
    // partially-written header.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path().to_path_buf();
    let sha: [u8; 32] = [0xCC; 32];
    let initial_bytes = make_valid_stream(sha, 1);
    std::fs::write(&derived_path, &initial_bytes).unwrap();
    let stop = Arc::new(AtomicBool::new(false));
    let stop_reader = Arc::clone(&stop);
    let reader_path = derived_path.clone();
    // Reader thread: repeatedly read and header-check the file until told
    // to stop, counting every malformed observation as "torn".
    let reader = thread::spawn(move || {
        let mut iterations = 0usize;
        let mut torn_count = 0usize;
        while !stop_reader.load(Ordering::Relaxed) {
            let bytes = match std::fs::read(&reader_path) {
                Ok(b) => b,
                Err(_) => {
                    // A transient read failure (e.g. mid-rename) is not a
                    // torn write; just retry.
                    continue;
                }
            };
            if bytes.is_empty() {
                continue;
            }
            match deserialize_derived_header(&bytes) {
                Ok((header, _rest)) => {
                    // A parseable header with wrong magic/version would mean
                    // we saw a partially-written file.
                    if header.magic != DERIVED_MAGIC
                        || header.format_version != DERIVED_FORMAT_VERSION
                    {
                        torn_count += 1;
                    }
                }
                Err(_) => {
                    torn_count += 1;
                }
            }
            iterations = iterations.saturating_add(1);
        }
        (iterations, torn_count)
    });
    // Writer: rewrite the derived file 50 times while the reader spins.
    let snapshot = empty_snapshot();
    for _ in 0..50 {
        let db = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
        save_derived(&db, sha, &derived_path, &workspace_root).expect("save_derived must not fail");
    }
    stop.store(true, Ordering::Relaxed);
    let (iterations, torn_count) = reader.join().expect("reader thread panicked");
    assert_eq!(
        torn_count, 0,
        "reader observed {torn_count} torn headers across {iterations} iterations; \
        atomic writes must never produce partially-written files"
    );
}
#[cfg(unix)]
#[test]
fn symlink_rejection_parent() {
    use std::os::unix::fs::symlink;
    // Both save and load must refuse a target whose parent directory is a
    // symlink (path safety: no ancestor of the target may be a link).
    let dir = TempDir::new().unwrap();
    let real_dir = dir.path().join("real");
    std::fs::create_dir_all(&real_dir).unwrap();
    let link_dir = dir.path().join("linked");
    symlink(&real_dir, &link_dir).unwrap();
    // The target itself is a regular path, but it sits under the symlink.
    let target = link_dir.join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x10; 32];
    let snapshot = empty_snapshot();
    let db = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let save_err = save_derived(&db, sha, &target, workspace_root)
        .expect_err("save_derived must fail with a symlinked parent directory");
    let err_display = save_err.to_string();
    // Accept any of the path-safety signals. (The previous version tested
    // `contains("symlink")` twice; the dead duplicate is removed.)
    assert!(
        err_display.contains("symlink")
            || save_err
                .downcast_ref::<sqry_core::persistence::PathSafetyError>()
                .is_some()
            || err_display.contains("ancestor")
            || err_display.contains("outside"),
        "error message must mention symlink or path safety; got: {err_display}"
    );
    let mut db2 = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let load_err = load_derived(&mut db2, sha, &target, workspace_root)
        .expect_err("load_derived must fail with a symlinked parent directory");
    assert!(
        matches!(load_err, LoadError::PathSafety(_)),
        "expected PathSafety error from load_derived; got: {load_err}"
    );
}
#[cfg(unix)]
#[test]
fn symlink_rejection_target() {
    use std::os::unix::fs::symlink;
    // Both save and load must refuse to follow a symlinked target file.
    let dir = TempDir::new().unwrap();
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x20; 32];

    // derived.sqry is a symlink pointing at a real file in the workspace.
    let real_target = dir.path().join("real_derived.sqry");
    std::fs::write(&real_target, b"placeholder").unwrap();
    let sym_path = dir.path().join("derived.sqry");
    symlink(&real_target, &sym_path).unwrap();

    let snapshot = empty_snapshot();
    let db = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let save_err = save_derived(&db, sha, &sym_path, workspace_root)
        .expect_err("save_derived must fail when the target file is a symlink");
    let err_display = save_err.to_string();
    let mentions_symlink = err_display.contains("symlink");
    let is_path_safety = save_err
        .downcast_ref::<sqry_core::persistence::PathSafetyError>()
        .is_some();
    assert!(
        mentions_symlink || is_path_safety,
        "error message must mention symlink; got: {err_display}"
    );

    let mut db2 = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let load_err = load_derived(&mut db2, sha, &sym_path, workspace_root)
        .expect_err("load_derived must fail when the target file is a symlink");
    assert!(
        matches!(load_err, LoadError::PathSafety(_)),
        "expected PathSafety error from load_derived; got: {load_err}"
    );
}
#[test]
fn oversize_entry_skip() {
    // With a 1-byte per-entry size cap every cached result is oversize, so
    // the save must persist zero entries (but still succeed) and the reload
    // must apply zero entries.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let tiny_cap: usize = 1;
    let config = QueryDbConfig::builder()
        .max_entry_size_bytes(tiny_cap)
        .build();
    let (snapshot, _file) = build_call_graph();
    let db = QueryDb::new(Arc::clone(&snapshot), config);
    let callers_key = RelationKey::exact("main");
    let result = db.get::<CallersQuery>(&callers_key);
    // Sanity-check the fixture: the query really produces data that would
    // have been persisted were it not oversize.
    assert!(
        !result.is_empty(),
        "test precondition: CallersQuery for 'main' must return non-empty result"
    );
    let sha: [u8; 32] = [0xEE; 32];
    save_derived(&db, sha, &derived_path, workspace_root)
        .expect("save_derived must not fail even with 0 persistent entries");
    let mut db2 = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let outcome = load_derived(&mut db2, sha, &derived_path, workspace_root)
        .expect("load_derived must succeed even with 0 entries");
    let entries_applied = match outcome {
        LoadOutcome::Applied { entries } => entries,
        LoadOutcome::Skipped(_) => panic!("unexpected Skipped"),
    };
    assert_eq!(
        entries_applied, 0,
        "oversize entries must not appear after reload; expected 0, got {entries_applied}"
    );
}
#[test]
fn staged_validation_purity() {
    // Staged loading must validate the whole stream before applying any
    // state: a corrupt tail must leave revisions and the cold-load flag
    // exactly as they were.
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x66; 32];
    let mut bytes = make_valid_stream(sha, 4);
    assert!(bytes.len() > 16, "precondition: stream must be > 16 bytes");
    // Corrupt the stream by dropping the final 16 bytes.
    let truncated_len = bytes.len() - 16;
    bytes.truncate(truncated_len);
    std::fs::write(&derived_path, &bytes).unwrap();
    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    // Capture the full pre-load state to compare against afterwards.
    let edge_rev_before = db.edge_revision();
    let metadata_rev_before = db.metadata_revision();
    let cold_load_allowed_before = db.cold_load_allowed();
    assert_eq!(edge_rev_before, 0);
    assert_eq!(metadata_rev_before, 0);
    assert!(cold_load_allowed_before);
    let err = load_derived(&mut db, sha, &derived_path, workspace_root)
        .expect_err("corrupt file must return Err");
    assert!(
        matches!(err, LoadError::Corrupt { .. }),
        "expected Corrupt error; got: {err}"
    );
    // Purity checks: nothing observable may have changed.
    assert_eq!(
        db.edge_revision(),
        edge_rev_before,
        "edge_revision must not change after a failed staged load"
    );
    assert_eq!(
        db.metadata_revision(),
        metadata_rev_before,
        "metadata_revision must not change after a failed staged load"
    );
    assert_eq!(
        db.cold_load_allowed(),
        cold_load_allowed_before,
        "cold_load_allowed must not change after a failed staged load"
    );
}
#[test]
fn sha_mismatch_whole_file_reject() {
    // A derived file whose header SHA differs from the caller's snapshot
    // SHA must be rejected wholesale as StaleSnapshot, leaving the DB
    // pristine.
    let dir = TempDir::new().unwrap();
    let workspace_root = dir.path();
    let saved_sha: [u8; 32] = [0xAA; 32];
    let caller_sha: [u8; 32] = [0xBB; 32];
    // Lay out the conventional .sqry/graph directory with a fake snapshot
    // so the opportunistic path below has something to look at.
    let sqry_dir = workspace_root.join(".sqry").join("graph");
    std::fs::create_dir_all(&sqry_dir).unwrap();
    let snapshot_path = sqry_dir.join("snapshot.sqry");
    std::fs::write(&snapshot_path, b"fake-snapshot").unwrap();
    let derived_path = sqry_dir.join("derived.sqry");
    let bytes = make_valid_stream(saved_sha, 2);
    std::fs::write(&derived_path, &bytes).unwrap();
    let mut db = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let err = load_derived(&mut db, caller_sha, &derived_path, workspace_root)
        .expect_err("SHA mismatch must return Err");
    assert!(
        matches!(err, LoadError::StaleSnapshot),
        "expected StaleSnapshot error; got: {err}"
    );
    assert_eq!(
        db.edge_revision(),
        0,
        "DB must be pristine after StaleSnapshot rejection"
    );
    assert!(
        db.cold_load_allowed(),
        "cold_load_allowed must remain true after StaleSnapshot rejection"
    );
    // Rewrite the derived file (it may have been cleaned up by the rejected
    // load) and exercise the opportunistic loader over the same workspace.
    std::fs::write(&derived_path, &bytes).unwrap();
    assert!(
        derived_path.exists(),
        "derived file must exist before opportunistic load"
    );
    let mut db3 = QueryDb::new(empty_snapshot(), QueryDbConfig::default());
    let opp_result = load_derived_opportunistic(&mut db3, workspace_root);
    // NOTE(review): this match deliberately tolerates Ok as well — the
    // opportunistic loader hashes the on-disk snapshot itself, so its
    // outcome depends on that hash; only *unexpected* errors fail the test.
    match opp_result {
        Err(LoadError::StaleSnapshot) | Err(LoadError::NotFound { .. }) => {
            // Expected: mismatch detected (or the file was not considered).
        }
        Err(other) => {
            panic!(
                "opportunistic load returned unexpected error: {other}; \
                expected StaleSnapshot or NotFound"
            );
        }
        Ok(outcome) => {
            let _ = outcome;
        }
    }
}
#[test]
fn revision_mismatch_per_entry_skip() {
    // Rehydrated entries are pinned to the edge_revision they were computed
    // at: they serve hits while the revision matches and invalidate (miss,
    // recompute) the moment the revision is bumped.
    let (snapshot, _file) = build_call_graph();
    let dir = TempDir::new().unwrap();
    let derived_path = dir.path().join("derived.sqry");
    let workspace_root = dir.path();
    let sha: [u8; 32] = [0x55; 32];
    let db1 = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    // Advance the writer DB to edge_revision 5 before caching anything.
    for _ in 0..5 {
        db1.bump_edge_revision();
    }
    assert_eq!(db1.edge_revision(), 5);
    let callers_key = RelationKey::exact("main");
    let callers_val = db1.get::<CallersQuery>(&callers_key);
    save_derived(&db1, sha, &derived_path, workspace_root).expect("save_derived must succeed");
    let mut db2 = QueryDb::new(Arc::clone(&snapshot), QueryDbConfig::default());
    let outcome = load_derived(&mut db2, sha, &derived_path, workspace_root)
        .expect("load_derived must succeed");
    let entries_applied = match outcome {
        LoadOutcome::Applied { entries } => entries,
        LoadOutcome::Skipped(_) => panic!("unexpected Skipped"),
    };
    assert!(
        entries_applied > 0,
        "at least one entry must be applied; got 0"
    );
    assert_eq!(
        db2.edge_revision(),
        5,
        "edge_revision restored to 5 after load"
    );
    // First re-query at the matching revision: a pure hit.
    let base = db2.metrics();
    let _ = db2.get::<CallersQuery>(&callers_key);
    let after_first = db2.metrics();
    assert_eq!(
        after_first.cache_misses - base.cache_misses,
        0,
        "first typed query after cold-load must be a HIT (spec §2)"
    );
    assert_eq!(
        after_first.cache_hits - base.cache_hits,
        1,
        "first typed query after cold-load must be exactly one cache hit"
    );
    // Second re-query at the same revision: still a hit, no misses.
    let base2 = db2.metrics();
    let _ = db2.get::<CallersQuery>(&callers_key);
    let after_second = db2.metrics();
    assert_eq!(
        after_second.cache_hits - base2.cache_hits,
        1,
        "second typed query at same edge_revision must also be a cache hit"
    );
    assert_eq!(
        after_second.cache_misses, base2.cache_misses,
        "second typed query must produce zero additional misses"
    );
    // Bump the revision: the rehydrated entry is now stale and must miss.
    let new_rev = db2.bump_edge_revision();
    assert_eq!(new_rev, 6, "edge_revision must now be 6");
    let base3 = db2.metrics();
    let callers_val2 = db2.get::<CallersQuery>(&callers_key);
    let after_bump = db2.metrics();
    let new_misses = after_bump.cache_misses - base3.cache_misses;
    assert_eq!(
        new_misses, 1,
        "after edge_revision bump the rehydrated entry must invalidate \
        and produce a cache miss (Tier 2), not a silent stale hit; got {new_misses} misses"
    );
    // The graph itself did not change, so recomputation yields the same value.
    assert_eq!(
        *callers_val, *callers_val2,
        "recomputed result must match the original warm value"
    );
}
#[test]
#[allow(clippy::similar_names)]
fn pf04_make_query_db_cold_never_writes_derived_sqry() {
    use sqry_core::graph::unified::persistence::save_to_path;
    use sqry_db::queries::dispatch::make_query_db_cold;
    use std::time::SystemTime;

    // Shared fixture: the same two-function graph (`main` -> `helper`) that
    // scenarios A and C previously each built inline with ~25 duplicated
    // lines; extracted here so the fixture cannot drift between scenarios.
    fn sample_graph() -> CodeGraph {
        let mut graph = CodeGraph::new();
        let file = graph
            .files_mut()
            .register_with_language(Path::new("src/lib.rs"), Some(Language::Rust))
            .expect("register file");
        let caller_name = graph.strings_mut().intern("main").expect("intern main");
        let caller = add_node(
            &mut graph,
            NodeEntry::new(NodeKind::Function, caller_name, file)
                .with_qualified_name(caller_name)
                .with_byte_range(0, 100),
        );
        let callee_name = graph.strings_mut().intern("helper").expect("intern helper");
        let callee = add_node(
            &mut graph,
            NodeEntry::new(NodeKind::Function, callee_name, file)
                .with_qualified_name(callee_name)
                .with_byte_range(110, 200),
        );
        graph.edges().add_edge(
            caller,
            callee,
            EdgeKind::Calls {
                argument_count: 0,
                is_async: false,
            },
            file,
        );
        graph
    }

    // Scenario A: valid snapshot on disk, no derived.sqry — cold-load plus
    // queries must never create the derived file (reader-only contract).
    {
        let dir = TempDir::new().expect("tempdir");
        let workspace_root = dir.path();
        let graph_dir = workspace_root.join(".sqry").join("graph");
        std::fs::create_dir_all(&graph_dir).expect("mkdir .sqry/graph");
        let (snapshot_arc, _file) = build_call_graph();
        let graph_for_save = sample_graph();
        let snapshot_path = graph_dir.join("snapshot.sqry");
        save_to_path(&graph_for_save, &snapshot_path).expect("save snapshot");
        assert!(snapshot_path.exists(), "scenario A: snapshot must exist");
        let derived_path = graph_dir.join("derived.sqry");
        assert!(
            !derived_path.exists(),
            "scenario A precondition: derived.sqry must not exist before cold-load"
        );
        let db = make_query_db_cold(Arc::clone(&snapshot_arc), workspace_root);
        let _callers = db.get::<CallersQuery>(&RelationKey::exact("main"));
        let _imports = db.get::<ImportsQuery>(&RelationKey::exact("main"));
        let _callees = db.get::<CalleesQuery>(&RelationKey::exact("helper"));
        assert!(
            !derived_path.exists(),
            "scenario A: make_query_db_cold + queries must NOT create derived.sqry; \
            reader-only contract violated (file appeared at {})",
            derived_path.display()
        );
    }

    // Scenario B: completely empty workspace — cold-load must not create
    // derived.sqry or even the .sqry/ directory.
    {
        let dir = TempDir::new().expect("tempdir");
        let workspace_root = dir.path();
        let snapshot_arc = empty_snapshot();
        let db = make_query_db_cold(Arc::clone(&snapshot_arc), workspace_root);
        let _callers = db.get::<CallersQuery>(&RelationKey::exact("main"));
        let _imports = db.get::<ImportsQuery>(&RelationKey::exact("main"));
        let derived_path = workspace_root
            .join(".sqry")
            .join("graph")
            .join("derived.sqry");
        assert!(
            !derived_path.exists(),
            "scenario B: make_query_db_cold against a workspace with no snapshot.sqry \
            must not create derived.sqry"
        );
        let sqry_dir = workspace_root.join(".sqry");
        assert!(
            !sqry_dir.exists(),
            "scenario B: cold-load must not create the .sqry/ directory either"
        );
    }

    // Scenario C: a valid derived.sqry already exists — cold-load plus
    // queries must leave its bytes AND its mtime untouched.
    {
        use sqry_db::persistence::{compute_file_sha256, save_derived};
        let dir = TempDir::new().expect("tempdir");
        let workspace_root = dir.path();
        let graph_dir = workspace_root.join(".sqry").join("graph");
        std::fs::create_dir_all(&graph_dir).expect("mkdir .sqry/graph");
        let mut graph_for_save = sample_graph();
        let snapshot_path = graph_dir.join("snapshot.sqry");
        save_to_path(&graph_for_save, &snapshot_path).expect("save snapshot");
        // Hash the on-disk snapshot so the derived file is valid for it.
        let on_disk_sha = compute_file_sha256(&snapshot_path).expect("hash snapshot");
        let snapshot_arc = Arc::new(graph_for_save.snapshot());
        let writer_db = QueryDb::new(Arc::clone(&snapshot_arc), QueryDbConfig::default());
        let _ = writer_db.get::<CallersQuery>(&RelationKey::exact("main"));
        let derived_path = graph_dir.join("derived.sqry");
        save_derived(&writer_db, on_disk_sha, &derived_path, workspace_root)
            .expect("setup: save_derived must succeed");
        assert!(
            derived_path.exists(),
            "scenario C precondition: derived.sqry must exist after setup"
        );
        let before_bytes = std::fs::read(&derived_path).expect("read derived");
        let before_meta = std::fs::metadata(&derived_path).expect("metadata derived");
        let before_mtime = before_meta
            .modified()
            .expect("mtime")
            .duration_since(SystemTime::UNIX_EPOCH)
            .expect("duration since epoch");
        // Sleep past the filesystem's mtime granularity (1s on some FSes)
        // so any rewrite would be observable in the timestamp.
        std::thread::sleep(std::time::Duration::from_millis(1100));
        let db = make_query_db_cold(Arc::clone(&snapshot_arc), workspace_root);
        let _ = db.get::<CallersQuery>(&RelationKey::exact("main"));
        let _ = db.get::<ImportsQuery>(&RelationKey::exact("main"));
        let _ = db.get::<CalleesQuery>(&RelationKey::exact("helper"));
        assert!(
            derived_path.exists(),
            "scenario C: derived.sqry must still exist after cold-load (it was valid)"
        );
        let after_bytes = std::fs::read(&derived_path).expect("read derived after");
        assert_eq!(
            before_bytes, after_bytes,
            "scenario C: derived.sqry bytes must be unchanged by make_query_db_cold + queries; \
            reader-only contract violated (file was rewritten)"
        );
        let after_meta = std::fs::metadata(&derived_path).expect("metadata derived after");
        let after_mtime = after_meta
            .modified()
            .expect("mtime")
            .duration_since(SystemTime::UNIX_EPOCH)
            .expect("duration since epoch");
        assert_eq!(
            before_mtime, after_mtime,
            "scenario C: derived.sqry mtime must be unchanged by make_query_db_cold + queries; \
            reader-only contract violated (file was touched)"
        );
    }
}