use sqlitegraph::backend::native::{
NativeNodeId,
adjacency::{LinearDetector, SequentialReadBuffer},
edge_store::EdgeStore,
graph_file::GraphFile,
graph_ops::{TraversalCache, TraversalCacheStats, TraversalContext, native_bfs},
node_store::NodeStore,
};
use std::time::Instant;
use tempfile::TempDir;
/// Builds a linear chain graph `node_0 -> node_1 -> ... -> node_{n-1}` in a
/// temporary on-disk database.
///
/// Returns the open `GraphFile`, the allocated node IDs in chain order, and
/// the `TempDir` guard — the caller must keep the guard alive so the backing
/// file is not deleted while the graph is still in use.
fn create_chain_graph(node_count: usize) -> (GraphFile, Vec<NativeNodeId>, TempDir) {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let db_path = temp_dir.path().join("test_chain.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");
    let mut node_ids = Vec::with_capacity(node_count);
    {
        // Create the node store once and reuse it for every node, mirroring
        // how the edge store below is used (the original rebuilt the store on
        // every loop iteration).
        let mut node_store = NodeStore::new(&mut graph_file);
        for i in 0..node_count {
            let node_id = node_store
                .allocate_node_id()
                .expect("Failed to allocate node ID");
            let record = sqlitegraph::backend::native::NodeRecord::new(
                node_id,
                "Node".to_string(),
                format!("node_{}", i),
                serde_json::json!({"id": i}),
            );
            node_store
                .write_node(&record)
                .expect("Failed to write node");
            node_ids.push(node_id);
        }
    }
    // Connect consecutive nodes: node_i -> node_{i+1}. saturating_sub keeps
    // node_count == 0 from underflowing.
    let mut edge_store = EdgeStore::new(&mut graph_file);
    for i in 0..node_count.saturating_sub(1) {
        let edge = sqlitegraph::backend::native::EdgeRecord::new(
            i as i64 + 1, // 1-based edge id
            node_ids[i],
            node_ids[i + 1],
            "chain".to_string(),
            serde_json::json!({"order": i}),
        );
        edge_store
            .write_edge(&edge)
            .expect("Failed to write chain edge");
    }
    (graph_file, node_ids, temp_dir)
}
/// Runs one prefetch from `start_node` with the given window size and
/// reports the elapsed wall-clock time plus how many nodes ended up cached.
fn measure_prefetch_timing(
    graph_file: &mut GraphFile,
    start_node: NativeNodeId,
    prefetch_window: usize,
) -> (std::time::Duration, usize) {
    let mut reader = SequentialReadBuffer::with_prefetch_window(prefetch_window);
    let timer = Instant::now();
    // The prefetch result itself is not under test here; only timing and
    // cache occupancy matter.
    let _ = reader.prefetch_from(graph_file, start_node);
    (timer.elapsed(), reader.len())
}
#[test]
fn test_prefetch_window_4_chain_500() {
    // A single prefetch with window 4 on a 500-node chain should cache
    // between 1 and 4 nodes.
    let chain_len = 500;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(chain_len);
    println!("\n=== Prefetch Window 4: Chain({}) ===", chain_len);
    let (elapsed, cached) = measure_prefetch_timing(&mut graph_file, node_ids[0], 4);
    println!("Window 4 prefetch: {:?}", elapsed);
    println!(" Cached nodes: {} (expected: ~4)", cached);
    println!(
        " Throughput: {:.2} nodes/sec",
        cached as f64 / elapsed.as_secs_f64()
    );
    assert!(cached > 0, "Prefetch should cache some nodes");
    assert!(cached <= 4, "Window 4 should cache at most 4 nodes");
}
#[test]
fn test_prefetch_window_8_chain_500() {
    // A single prefetch with window 8 on a 500-node chain should cache
    // between 1 and 8 nodes.
    let chain_len = 500;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(chain_len);
    println!("\n=== Prefetch Window 8: Chain({}) ===", chain_len);
    let (elapsed, cached) = measure_prefetch_timing(&mut graph_file, node_ids[0], 8);
    println!("Window 8 prefetch: {:?}", elapsed);
    println!(" Cached nodes: {} (expected: ~8)", cached);
    println!(
        " Throughput: {:.2} nodes/sec",
        cached as f64 / elapsed.as_secs_f64()
    );
    assert!(cached > 0, "Prefetch should cache some nodes");
    assert!(cached <= 8, "Window 8 should cache at most 8 nodes");
}
#[test]
fn test_prefetch_window_16_chain_500() {
    // A single prefetch with window 16 on a 500-node chain should cache
    // between 1 and 16 nodes.
    let chain_len = 500;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(chain_len);
    println!("\n=== Prefetch Window 16: Chain({}) ===", chain_len);
    let (elapsed, cached) = measure_prefetch_timing(&mut graph_file, node_ids[0], 16);
    println!("Window 16 prefetch: {:?}", elapsed);
    println!(" Cached nodes: {} (expected: ~16)", cached);
    println!(
        " Throughput: {:.2} nodes/sec",
        cached as f64 / elapsed.as_secs_f64()
    );
    assert!(cached > 0, "Prefetch should cache some nodes");
    assert!(
        cached <= 16,
        "Window 16 should cache at most 16 nodes"
    );
}
#[test]
fn test_prefetch_window_comparison_chain_500() {
    // Compare prefetch timing across window sizes 4, 8 and 16. Each window
    // gets its own freshly-built chain so one run cannot warm state for the
    // next; cached counts must grow (weakly) with the window size.
    let node_count = 500;
    println!(
        "\n=== Prefetch Window Comparison: Chain({}) ===",
        node_count
    );
    let (mut file_small, ids_small, _dir_small) = create_chain_graph(node_count);
    let (dur_small, hits_small) = measure_prefetch_timing(&mut file_small, ids_small[0], 4);
    let (mut file_mid, ids_mid, _dir_mid) = create_chain_graph(node_count);
    let (dur_mid, hits_mid) = measure_prefetch_timing(&mut file_mid, ids_mid[0], 8);
    let (mut file_large, ids_large, _dir_large) = create_chain_graph(node_count);
    let (dur_large, hits_large) = measure_prefetch_timing(&mut file_large, ids_large[0], 16);
    println!("\n=== Timing Comparison ===");
    println!("Window 4: {:?} (cached: {})", dur_small, hits_small);
    println!("Window 8: {:?} (cached: {})", dur_mid, hits_mid);
    println!("Window 16: {:?} (cached: {})", dur_large, hits_large);
    // Ratios are diagnostic only — no timing assertion, since wall-clock
    // comparisons are flaky in CI.
    let ratio_4_to_8 = dur_small.as_nanos() as f64 / dur_mid.as_nanos() as f64;
    let ratio_16_to_8 = dur_large.as_nanos() as f64 / dur_mid.as_nanos() as f64;
    println!("\n=== Timing Ratios (relative to window 8) ===");
    println!("Window 4 vs 8: {:.3}x", ratio_4_to_8);
    println!("Window 16 vs 8: {:.3}x", ratio_16_to_8);
    assert!(hits_small > 0);
    assert!(hits_mid > 0);
    assert!(hits_large > 0);
    assert!(
        hits_small <= hits_mid,
        "Window 8 should cache >= nodes as window 4"
    );
    assert!(
        hits_mid <= hits_large,
        "Window 16 should cache >= nodes as window 8"
    );
}
#[test]
fn test_memory_overhead_per_buffer_size() {
    // Freshly constructed buffers of any window size must start empty.
    println!("\n=== SequentialReadBuffer Memory Overhead ===");
    let narrow = SequentialReadBuffer::with_prefetch_window(4);
    let standard = SequentialReadBuffer::with_prefetch_window(8);
    let wide = SequentialReadBuffer::with_prefetch_window(16);
    println!("Empty buffer overhead:");
    println!(" Window 4: len={}, is_empty={}", narrow.len(), narrow.is_empty());
    println!(" Window 8: len={}, is_empty={}", standard.len(), standard.is_empty());
    println!(" Window 16: len={}, is_empty={}", wide.len(), wide.is_empty());
    assert_eq!(narrow.len(), 0);
    assert_eq!(standard.len(), 0);
    assert_eq!(wide.len(), 0);
}
#[test]
fn test_memory_overhead_full_buffer() {
    // Fill a window-8 buffer to capacity, print a rough memory estimate,
    // then verify clear() empties it again.
    println!("\n=== SequentialReadBuffer Full Buffer Memory ===");
    use sqlitegraph::backend::native::v2::node_record_v2::NodeRecordV2;
    // Rough per-record guess; used only for the diagnostic printout, not
    // asserted against.
    const ESTIMATED_RECORD_BYTES: usize = 200;
    let mut buf = SequentialReadBuffer::with_prefetch_window(8);
    let mut total_estimated_bytes = 0;
    for id in 1..=8 {
        buf.insert(NodeRecordV2::new(
            id,
            "TestNode".to_string(),
            format!("node_{}", id),
            serde_json::json!({"data": "x".repeat(50)}),
        ));
        total_estimated_bytes += ESTIMATED_RECORD_BYTES;
    }
    println!("Full buffer (8 slots):");
    println!(" Entries: {}", buf.len());
    println!(" Estimated memory: ~{} bytes", total_estimated_bytes);
    println!(
        " Per-slot average: ~{} bytes",
        total_estimated_bytes / buf.len()
    );
    assert_eq!(buf.len(), 8);
    assert!(!buf.is_empty());
    buf.clear();
    assert_eq!(buf.len(), 0);
    assert!(buf.is_empty());
}
#[test]
fn test_prefetch_window_sizes_chain_100() {
    // Sweep window sizes on a 100-node chain: larger windows must never
    // cache fewer nodes than smaller ones.
    let node_count = 100;
    println!("\n=== Prefetch Window Sizes: Chain({}) ===", node_count);
    let windows = [4usize, 8, 16];
    let mut results = Vec::new();
    for &window in windows.iter() {
        // Fresh graph per window so measurements are independent.
        let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(node_count);
        let (duration, cached) = measure_prefetch_timing(&mut graph_file, node_ids[0], window);
        println!("Window {}: {:?} (cached: {})", window, duration, cached);
        results.push((window, duration, cached));
    }
    for (_window, _duration, cached) in &results {
        assert!(*cached > 0, "Prefetch should cache nodes");
    }
    // Cached counts are monotonically non-decreasing in window size.
    assert!(results[0].2 <= results[1].2);
    assert!(results[1].2 <= results[2].2);
}
#[test]
fn test_buffer_contains_after_prefetch() {
    // After a prefetch the buffer must contain the start node and hold at
    // most `window` entries.
    let node_count = 20;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(node_count);
    let start_node = node_ids[0];
    let mut buffer = SequentialReadBuffer::with_prefetch_window(8);
    buffer
        .prefetch_from(&mut graph_file, start_node)
        .expect("Prefetch should succeed");
    assert!(
        buffer.contains(start_node),
        "Buffer should contain start node"
    );
    // `!is_empty()` instead of `len() > 0` (clippy::len_zero).
    assert!(!buffer.is_empty(), "Buffer should have cached nodes");
    assert!(buffer.len() <= 8, "Buffer should not exceed window size");
    println!("\n=== Buffer Containment After Prefetch ===");
    println!(" Start node: {}", start_node);
    println!(" Cached nodes: {}", buffer.len());
    println!(" Contains start: {}", buffer.contains(start_node));
}
#[test]
fn test_next_prefetch_start_tracking() {
    // After prefetching a window of 8 starting at the chain head, the buffer
    // should report the next prefetch position as start + window.
    let node_count = 20;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(node_count);
    let start_node = node_ids[0];
    let mut reader = SequentialReadBuffer::with_prefetch_window(8);
    reader
        .prefetch_from(&mut graph_file, start_node)
        .expect("Prefetch should succeed");
    let expected_next = start_node + 8;
    let next_start = reader.next_prefetch_start();
    assert!(
        next_start.is_some(),
        "Next prefetch start should be tracked"
    );
    assert_eq!(
        next_start.unwrap(),
        expected_next,
        "Next prefetch should be at start + window"
    );
    println!("\n=== Next Prefetch Start Tracking ===");
    println!(" Start node: {}", start_node);
    println!(" Window size: 8");
    println!(" Next prefetch start: {}", next_start.unwrap());
    println!(" Expected next: {}", expected_next);
}
#[test]
fn test_buffer_window_configuration() {
    // `with_prefetch_window` must report back exactly the requested window,
    // and `new()` must default to a window of 8.
    let narrow = SequentialReadBuffer::with_prefetch_window(4);
    let standard = SequentialReadBuffer::with_prefetch_window(8);
    let wide = SequentialReadBuffer::with_prefetch_window(16);
    assert_eq!(narrow.prefetch_window(), 4);
    assert_eq!(standard.prefetch_window(), 8);
    assert_eq!(wide.prefetch_window(), 16);
    let defaulted = SequentialReadBuffer::new();
    assert_eq!(defaulted.prefetch_window(), 8);
    println!("\nBuffer window configuration verified:");
    println!(" with_prefetch_window(4) -> {}", narrow.prefetch_window());
    println!(" with_prefetch_window(8) -> {}", standard.prefetch_window());
    println!(" with_prefetch_window(16) -> {}", wide.prefetch_window());
    println!(" SequentialReadBuffer::new() -> {}", defaulted.prefetch_window());
}
#[test]
fn test_traversal_context_custom_buffer() {
    // A TraversalContext should accept a replacement buffer with a custom
    // window and start out with zeroed hit/miss statistics.
    let mut ctx = TraversalContext::new();
    ctx.buffer = SequentialReadBuffer::with_prefetch_window(16);
    assert_eq!(ctx.buffer.prefetch_window(), 16);
    assert_eq!(ctx.buffer_hits, 0);
    assert_eq!(ctx.buffer_misses, 0);
    assert_eq!(ctx.combined_hit_rate(), 0.0);
    println!("\nTraversalContext custom buffer configuration verified:");
    println!(" Custom window 16 buffer works correctly");
}
#[test]
fn test_native_bfs_chain_500_with_default_window() {
    // Run a full-depth BFS over a 500-node chain using the default prefetch
    // window and verify every reachable node is visited.
    let node_count = 500;
    let (mut graph_file, node_ids, _temp_dir) = create_chain_graph(node_count);
    let start_node = node_ids[0];
    let depth = node_count as u32;
    println!(
        "\n=== Native BFS with Default Window 8: Chain({}) ===",
        node_count
    );
    let start_time = Instant::now();
    let result = native_bfs(&mut graph_file, start_node, depth);
    let duration = start_time.elapsed();
    assert!(result.is_ok(), "BFS should succeed");
    let visited = result.unwrap();
    println!("Native BFS: {:?}", duration);
    // BFS returns reachable nodes excluding the start node, so the expected
    // count is node_count - 1. (The original printout said node_count while
    // the assertion below checked node_count - 1 — diagnostic fixed to match.)
    println!(
        " Visited: {} nodes (expected: {})",
        visited.len(),
        node_count - 1
    );
    println!(
        " Throughput: {:.2} nodes/sec",
        node_count as f64 / duration.as_secs_f64()
    );
    assert_eq!(
        visited.len(),
        node_count - 1,
        "BFS should visit all reachable nodes (excluding start)"
    );
}
#[test]
fn test_buffer_clear_operations() {
    // Populate a buffer, then verify clear() removes every entry and resets
    // the next-prefetch tracking.
    use sqlitegraph::backend::native::v2::node_record_v2::NodeRecordV2;
    let mut buf = SequentialReadBuffer::with_prefetch_window(8);
    for id in 1..=5 {
        let record = NodeRecordV2::new(
            id,
            "Test".to_string(),
            format!("node_{}", id),
            serde_json::json!({}),
        );
        buf.insert(record);
    }
    assert_eq!(buf.len(), 5);
    buf.clear();
    assert!(buf.is_empty());
    assert_eq!(buf.len(), 0);
    assert!(buf.next_prefetch_start().is_none());
    println!("\nBuffer clear operations verified:");
    println!(" Clear removes all entries and resets tracking");
}