use sqlitegraph::backend::native::graph_ops::native_bfs_with_telemetry;
use sqlitegraph::backend::native::{
NativeNodeId, edge_store::EdgeStore, graph_file::GraphFile, node_store::NodeStore,
};
use tempfile::TempDir;
/// Builds a linear chain graph of `size` nodes connected head-to-tail by
/// "chain" edges, stored in a fresh graph file under `temp_dir`.
///
/// Returns the open graph file together with the allocated node IDs in
/// chain order. Panics (via `expect`) on any storage failure, which is
/// acceptable for a test helper.
fn create_chain_graph(size: usize, temp_dir: &TempDir) -> (GraphFile, Vec<NativeNodeId>) {
    let db_path = temp_dir.path().join("test_chain.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");
    let mut node_ids = Vec::with_capacity(size);

    // Construct the node store once, outside the loop — the original
    // re-created it on every iteration even though it is loop-invariant.
    // The scope ends its mutable borrow before EdgeStore borrows below.
    {
        let mut node_store = NodeStore::new(&mut graph_file);
        for i in 0..size {
            let node_id = node_store
                .allocate_node_id()
                .expect("Failed to allocate node ID");
            let record = sqlitegraph::backend::native::NodeRecord::new(
                node_id,
                "Node".to_string(),
                format!("node_{}", i),
                serde_json::json!({"id": i}),
            );
            node_store
                .write_node(&record)
                .expect("Failed to write node");
            node_ids.push(node_id);
        }
    }

    // Link consecutive nodes: node[i] -> node[i+1]. `saturating_sub`
    // keeps size == 0 from underflowing the range bound.
    let mut edge_store = EdgeStore::new(&mut graph_file);
    for i in 0..size.saturating_sub(1) {
        let edge = sqlitegraph::backend::native::EdgeRecord::new(
            i as i64 + 1,
            node_ids[i],
            node_ids[i + 1],
            "chain".to_string(),
            serde_json::json!({"order": i}),
        );
        edge_store
            .write_edge(&edge)
            .expect("Failed to write chain edge");
    }
    (graph_file, node_ids)
}
/// End-to-end check: BFS over a 500-node chain must produce telemetry
/// showing tracked, perfectly contiguous cluster reads (zero fragmentation,
/// zero gap bytes).
#[test]
fn test_bfs_uses_sequential_cluster_reads() {
    let chain_length = 500;
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let (mut graph_file, node_ids) = create_chain_graph(chain_length, &temp_dir);
    let start_node = node_ids[0];

    let (visited_nodes, telemetry_json) =
        native_bfs_with_telemetry(&mut graph_file, start_node, chain_length as u32)
            .expect("BFS traversal failed");
    let telemetry: serde_json::Value =
        serde_json::from_str(&telemetry_json).expect("Failed to parse telemetry");
    println!(
        "Telemetry: {}",
        serde_json::to_string_pretty(&telemetry).unwrap()
    );

    // The condition asserts len + 1 == chain_length, i.e. `visited_nodes`
    // excludes the start node itself; the old message claimed all nodes
    // appear in the list, contradicting the condition.
    // NOTE(review): confirm against native_bfs_with_telemetry's contract.
    assert_eq!(
        visited_nodes.len() + 1,
        chain_length,
        "Should visit all {} nodes (start node excluded from visited list)",
        chain_length
    );

    // Cluster metadata must actually be recorded during traversal.
    let cluster_offsets_count = telemetry["cluster_offsets_count"]
        .as_u64()
        .expect("cluster_offsets_count missing");
    assert!(
        cluster_offsets_count > 0,
        "Expected cluster_offsets_count > 0 after observe_with_cluster fix, got {}",
        cluster_offsets_count
    );
    println!(
        "✓ cluster_offsets_count = {} (cluster metadata IS being tracked!)",
        cluster_offsets_count
    );

    // A freshly written chain should be laid out back-to-back on disk.
    let fragmentation_score = telemetry["fragmentation_score"]
        .as_f64()
        .expect("fragmentation_score missing");
    let gap_bytes = telemetry["gap_bytes"].as_u64().expect("gap_bytes missing");
    assert_eq!(
        fragmentation_score, 0.0,
        "Expected fragmentation_score = 0.0 for contiguous clusters, got {}",
        fragmentation_score
    );
    assert_eq!(
        gap_bytes, 0,
        "Expected gap_bytes = 0 for contiguous clusters, got {}",
        gap_bytes
    );
    println!(
        "✓ fragmentation_score = {} (clusters are perfectly contiguous!)",
        fragmentation_score
    );
    println!("✓ gap_bytes = {} (no gaps between clusters!)", gap_bytes);

    // Informational fields: only verify presence and type, values vary.
    let time_total_ms = telemetry["time_total_ms"]
        .as_f64()
        .expect("time_total_ms missing");
    println!("✓ time_total_ms = {:.2}ms", time_total_ms);

    let chains_detected = telemetry["chains_detected"]
        .as_u64()
        .expect("chains_detected missing");
    println!(
        "✓ chains_detected = {} (expected 0 - record_chain not called during BFS)",
        chains_detected
    );
}
/// Feeds the LinearDetector ten degree-1 observations with back-to-back
/// cluster offsets and checks that linearity is confirmed and every
/// recorded cluster starts exactly where the previous one ends.
#[test]
fn test_linear_detector_confirms_chains() {
    use sqlitegraph::backend::native::adjacency::LinearDetector;

    let mut detector = LinearDetector::new();
    let base_offset = 100u64;
    let cluster_size = 8u32;

    for obs in 0..10 {
        let node_id = (obs + 1) as NativeNodeId;
        let degree = 1;
        let cluster_offset = base_offset + (obs as u64 * cluster_size as u64);
        let pattern = detector.observe_with_cluster(node_id, degree, cluster_offset, cluster_size);
        println!("Observation {}: pattern = {:?}", obs, pattern);

        // Linearity should be confirmed from the third observation onward.
        if obs >= 2 {
            assert!(
                detector.is_linear_confirmed(),
                "Expected linear confirmation after {} observations",
                obs + 1
            );
        }
    }

    let offsets = detector.cluster_offsets();
    assert_eq!(
        offsets.len(),
        10,
        "Expected 10 cluster offsets, got {}",
        offsets.len()
    );

    // Each cluster must begin exactly where its predecessor ends.
    for (idx, pair) in offsets.windows(2).enumerate() {
        let (cur_offset, cur_size) = pair[0];
        let (next_offset, _) = pair[1];
        let expected_next = cur_offset + cur_size as u64;
        assert_eq!(
            next_offset, expected_next,
            "Cluster {} not contiguous: expected offset {}, got {}",
            idx, expected_next, next_offset
        );
    }
    println!("✓ LinearDetector confirmed linear chain with contiguous clusters");
}
/// A freshly constructed SequentialClusterReader must start with every
/// metric counter zeroed.
#[test]
fn test_sequential_cluster_reader_engaged() {
    use sqlitegraph::backend::native::adjacency::SequentialClusterReader;

    let fresh = SequentialClusterReader::new();
    let metrics = &fresh.metrics;

    assert_eq!(metrics.read_time_ns, 0, "Initial read_time_ns should be 0");
    assert_eq!(metrics.total_bytes_read, 0, "Initial total_bytes_read should be 0");
    assert_eq!(metrics.clusters_read, 0, "Initial clusters_read should be 0");
    assert_eq!(metrics.extract_count, 0, "Initial extract_count should be 0");
    println!("✓ SequentialClusterReader metrics initialized correctly");
}
/// Verifies that a TraversalContext's embedded detector records cluster
/// offsets and confirms linearity after a run of contiguous observations.
#[test]
fn test_traversal_context_cluster_tracking() {
    use sqlitegraph::backend::native::graph_ops::TraversalContext;

    let mut ctx = TraversalContext::new();

    // A brand-new context has no linearity evidence and no recorded clusters.
    assert!(!ctx.detector.is_linear_confirmed());
    assert_eq!(ctx.detector.cluster_offsets().len(), 0);

    // Five degree-1 observations with back-to-back 8-byte clusters.
    for step in 0..5 {
        let node_id = (step + 1) as NativeNodeId;
        let degree = 1;
        let cluster_offset = 100 + (step * 8);
        let cluster_size = 8;
        let _pattern = ctx
            .detector
            .observe_with_cluster(node_id, degree, cluster_offset, cluster_size);
    }

    assert!(
        ctx.detector.is_linear_confirmed(),
        "Expected linear confirmation after 5 observations"
    );
    assert_eq!(ctx.detector.cluster_offsets().len(), 5);
    println!("✓ TraversalContext correctly tracks cluster offsets");
}