use sqlitegraph::{
backend::native::EdgeRecord,
backend::native::{
NativeNodeId,
adjacency::{Direction, SequentialReadBuffer},
edge_store::EdgeStore,
graph_file::GraphFile,
graph_ops::{TraversalContext, get_neighbors_optimized},
node_store::NodeStore,
v2::node_record_v2::NodeRecordV2,
},
};
use tempfile::TempDir;
/// Builds a 5-node chain graph A -> B -> C -> D -> E in a temp database.
///
/// Returns the open `GraphFile`, the allocated node IDs in chain order, and
/// the `TempDir` guard (kept so the backing file outlives the test).
fn create_test_chain_graph() -> (GraphFile, Vec<NativeNodeId>, TempDir) {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let db_path = temp_dir.path().join("test_chain.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");

    // Allocate and persist five nodes named node_1 .. node_5.
    let mut node_ids = Vec::with_capacity(5);
    for idx in 1..=5 {
        let mut node_store = NodeStore::new(&mut graph_file);
        let id = node_store
            .allocate_node_id()
            .expect("Failed to allocate node ID");
        let record = sqlitegraph::backend::native::NodeRecord::new(
            id,
            "test".to_string(),
            format!("node_{}", idx),
            serde_json::json!({}),
        );
        node_store.write_node(&record).expect("Failed to write node");
        node_ids.push(id);
    }

    // Link each consecutive pair with a "connects" edge (edge ids 1..=4).
    let mut edge_store = EdgeStore::new(&mut graph_file);
    for (edge_idx, pair) in node_ids.windows(2).enumerate() {
        let edge = EdgeRecord::new(
            (edge_idx + 1) as i64,
            pair[0],
            pair[1],
            "connects".to_string(),
            serde_json::json!({}),
        );
        edge_store.write_edge(&edge).expect("Failed to write edge");
    }

    (graph_file, node_ids, temp_dir)
}
/// Hand-inserts a synthetic `NodeRecordV2` into the L1 read buffer with the
/// given cluster metadata, so buffer behavior can be exercised without any
/// corresponding data on disk.
///
/// Edge counts are derived from the cluster sizes: a non-zero size is
/// reported as exactly one edge, a zero size as none.
fn populate_buffer_with_node(
    buffer: &mut SequentialReadBuffer,
    node_id: NativeNodeId,
    outgoing_offset: u64,
    outgoing_size: u32,
    incoming_offset: u64,
    incoming_size: u32,
) {
    // Hoist the derived counts so the struct literal stays declarative.
    let outgoing_edges = if outgoing_size > 0 { 1 } else { 0 };
    let incoming_edges = if incoming_size > 0 { 1 } else { 0 };

    let record = NodeRecordV2 {
        id: node_id,
        flags: sqlitegraph::backend::native::NodeFlags::empty(),
        kind: "Test".to_string(),
        name: format!("node_{}", node_id),
        data: serde_json::json!({}),
        outgoing_cluster_offset: outgoing_offset,
        outgoing_cluster_size: outgoing_size,
        outgoing_edge_count: outgoing_edges,
        incoming_cluster_offset: incoming_offset,
        incoming_cluster_size: incoming_size,
        incoming_edge_count: incoming_edges,
    };
    buffer.insert(record);
}
/// Builds a `TraversalContext` whose `LinearDetector` has already been fed
/// three fan-out-1 steps — enough to confirm a linear traversal, so tests
/// can exercise the L1 buffer fast path directly.
fn create_linear_context() -> TraversalContext {
    let mut ctx = TraversalContext::new();
    for step in 1..=3 {
        ctx.detector.observe(step, 1);
    }
    assert!(
        ctx.detector.is_linear_confirmed(),
        "LinearDetector should be confirmed after 3 linear steps"
    );
    ctx
}
// A prefetched node in linear mode should be answered from the L1 buffer
// and the result mirrored into the L2 cache.
#[test]
fn test_l1_buffer_returns_neighbors_from_buffer() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    let node_a = node_ids[0];

    // Warm the L1 buffer with node A before querying.
    ctx.buffer
        .prefetch_from(&mut graph_file, node_a)
        .expect("Prefetch should succeed");
    assert!(
        ctx.buffer.contains(node_a),
        "Node should be in buffer after prefetch"
    );

    let neighbors = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");

    assert_eq!(neighbors.len(), 1, "Node A should have 1 outgoing neighbor");
    assert_eq!(neighbors[0], node_ids[1], "Neighbor should be node B");
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
    assert_eq!(ctx.buffer_misses, 0, "Should have 0 buffer misses");
    assert!(
        ctx.cache.contains_key(&(node_a, Direction::Outgoing)),
        "L2 cache should contain the result"
    );
}
// A single L1 buffer hit should yield every outgoing neighbor of a
// fan-out node, not just the first edge in its cluster.
#[test]
fn test_l1_buffer_outgoing_direction() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let db_path = temp_dir.path().join("test_multi_outgoing.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");
    // Hub node that will carry three outgoing edges.
    let mut node_store = NodeStore::new(&mut graph_file);
    let node1 = node_store
        .allocate_node_id()
        .expect("Failed to allocate node ID");
    let record = sqlitegraph::backend::native::NodeRecord::new(
        node1,
        "hub".to_string(),
        "hub_node".to_string(),
        serde_json::json!({}),
    );
    node_store
        .write_node(&record)
        .expect("Failed to write node");
    // Three target nodes the hub points at.
    let mut targets = Vec::new();
    for i in 2..=4 {
        let mut node_store = NodeStore::new(&mut graph_file);
        let node_id = node_store
            .allocate_node_id()
            .expect("Failed to allocate node ID");
        let record = sqlitegraph::backend::native::NodeRecord::new(
            node_id,
            "target".to_string(),
            format!("target_{}", i),
            serde_json::json!({}),
        );
        node_store
            .write_node(&record)
            .expect("Failed to write node");
        targets.push(node_id);
    }
    // One hub -> target edge per target; the target's id doubles as the
    // edge id, which is unique here because node ids are distinct.
    let mut edge_store = EdgeStore::new(&mut graph_file);
    for &target in &targets {
        let edge = EdgeRecord::new(
            target, node1, target, "connects".to_string(),
            serde_json::json!({}),
        );
        edge_store.write_edge(&edge).expect("Failed to write edge");
    }
    // Linear mode confirmed + prefetch means the lookup is served from L1.
    let mut ctx = create_linear_context();
    ctx.buffer
        .prefetch_from(&mut graph_file, node1)
        .expect("Prefetch should succeed");
    let neighbors = get_neighbors_optimized(&mut graph_file, node1, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(
        neighbors.len(),
        3,
        "Hub node should have 3 outgoing neighbors"
    );
    for target in &targets {
        assert!(
            neighbors.contains(target),
            "Neighbors should contain target {}",
            target
        );
    }
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
}
// Mirror of the outgoing test: an L1 hit on the Incoming direction should
// return every source node that points at the buffered target.
#[test]
fn test_l1_buffer_incoming_direction() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let db_path = temp_dir.path().join("test_incoming.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");
    // Three source nodes that will each point at the shared target.
    let mut sources = Vec::new();
    for i in 1..=3 {
        let mut node_store = NodeStore::new(&mut graph_file);
        let node_id = node_store
            .allocate_node_id()
            .expect("Failed to allocate node ID");
        let record = sqlitegraph::backend::native::NodeRecord::new(
            node_id,
            "source".to_string(),
            format!("source_{}", i),
            serde_json::json!({}),
        );
        node_store
            .write_node(&record)
            .expect("Failed to write node");
        sources.push(node_id);
    }
    // The shared target node.
    let mut node_store = NodeStore::new(&mut graph_file);
    let node4 = node_store
        .allocate_node_id()
        .expect("Failed to allocate node ID");
    let record = sqlitegraph::backend::native::NodeRecord::new(
        node4,
        "target".to_string(),
        "target_node".to_string(),
        serde_json::json!({}),
    );
    node_store
        .write_node(&record)
        .expect("Failed to write node");
    // One source -> target edge per source; the source's id doubles as the
    // edge id, which is unique here because node ids are distinct.
    let mut edge_store = EdgeStore::new(&mut graph_file);
    for &source in &sources {
        let edge = EdgeRecord::new(
            source, source, node4, "connects".to_string(),
            serde_json::json!({}),
        );
        edge_store.write_edge(&edge).expect("Failed to write edge");
    }
    // Linear mode confirmed + prefetch: the Incoming lookup should hit L1.
    let mut ctx = create_linear_context();
    ctx.buffer
        .prefetch_from(&mut graph_file, node4)
        .expect("Prefetch should succeed");
    let neighbors = get_neighbors_optimized(&mut graph_file, node4, Direction::Incoming, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(
        neighbors.len(),
        3,
        "Target node should have 3 incoming neighbors"
    );
    for source in &sources {
        assert!(
            neighbors.contains(source),
            "Neighbors should contain source {}",
            source
        );
    }
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
}
// Without a prefetch the first lookup must miss L1, fall back to the slow
// path, and seed the L2 cache; the second identical lookup hits L2.
#[test]
fn test_l1_buffer_fallback_to_l2_on_miss() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    let node_a = node_ids[0];

    assert!(
        !ctx.buffer.contains(node_a),
        "Node should not be in buffer"
    );

    // First call: L1 miss, result still correct and cached in L2.
    let first = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(first.len(), 1, "Node A should have 1 outgoing neighbor");
    assert_eq!(first[0], node_ids[1], "Neighbor should be node B");
    assert_eq!(ctx.buffer_hits, 0, "Should have 0 buffer hits");
    assert_eq!(ctx.buffer_misses, 1, "Should have 1 buffer miss");
    assert!(
        ctx.cache.contains_key(&(node_a, Direction::Outgoing)),
        "L2 cache should contain the result"
    );

    // Second call: answered by the freshly populated L2 cache.
    let second = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(
        second, first,
        "Second call should return same result"
    );
    assert_eq!(
        ctx.stats.hits, 1,
        "Should have 1 L2 cache hit on second call"
    );
}
// A buffered node whose cluster metadata is all zeros (no edges) should
// still count as an L1 hit and cache an empty neighbor list in L2.
#[test]
fn test_l1_buffer_empty_cluster() {
    let temp_dir = TempDir::new().expect("Failed to create temp dir");
    let db_path = temp_dir.path().join("test_empty_cluster.db");
    let mut graph_file = GraphFile::create(&db_path).expect("Failed to create graph file");
    let mut node_store = NodeStore::new(&mut graph_file);
    let node1 = node_store
        .allocate_node_id()
        .expect("Failed to allocate node ID");
    let record = sqlitegraph::backend::native::NodeRecord::new(
        node1,
        "empty".to_string(),
        "empty_node".to_string(),
        serde_json::json!({}),
    );
    node_store
        .write_node(&record)
        .expect("Failed to write node");
    let mut ctx = create_linear_context();
    // Hand-insert the node into L1 with zeroed offsets/sizes so the lookup
    // never needs to touch the edge store.
    populate_buffer_with_node(
        &mut ctx.buffer,
        node1,
        0, 0, 0, 0, );
    assert!(ctx.buffer.contains(node1), "Node should be in buffer");
    let neighbors = get_neighbors_optimized(&mut graph_file, node1, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(
        neighbors.len(),
        0,
        "Node with no cluster should have 0 neighbors"
    );
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
    assert_eq!(ctx.buffer_misses, 0, "Should have 0 buffer misses");
    assert!(
        ctx.cache.contains_key(&(node1, Direction::Outgoing)),
        "L2 cache should contain empty result"
    );
}
// An L1-served lookup must also be written through to the L2 cache, so the
// same query still succeeds after the buffer is cleared.
#[test]
fn test_l1_buffer_populates_l2_cache() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    let node_a = node_ids[0];

    // First lookup is served from L1 and copied into L2.
    ctx.buffer
        .prefetch_from(&mut graph_file, node_a)
        .expect("Prefetch should succeed");
    let from_l1 = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert!(
        ctx.cache.contains_key(&(node_a, Direction::Outgoing)),
        "L2 cache should be populated after L1 hit"
    );

    // With L1 emptied, the identical query must be answered by L2.
    ctx.buffer.clear();
    let from_l2 = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(from_l2, from_l1, "L2 result should match L1 result");
    assert_eq!(ctx.stats.hits, 1, "Should have 1 L2 cache hit");
}
// Outgoing and Incoming lookups on the same buffered node are independent:
// each records its own buffer hit and its own (node, direction) L2 entry.
#[test]
fn test_l1_buffer_directions_are_independent() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    // Node B sits mid-chain (A -> B -> C), so it has both edge directions.
    let node_b = node_ids[1];
    ctx.buffer
        .prefetch_from(&mut graph_file, node_b)
        .expect("Prefetch should succeed");
    let outgoing = get_neighbors_optimized(&mut graph_file, node_b, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(outgoing.len(), 1, "Node B should have 1 outgoing neighbor");
    assert_eq!(outgoing[0], node_ids[2], "Outgoing neighbor should be C");
    let incoming = get_neighbors_optimized(&mut graph_file, node_b, Direction::Incoming, &mut ctx)
        .expect("get_neighbors_optimized should succeed");
    assert_eq!(incoming.len(), 1, "Node B should have 1 incoming neighbor");
    assert_eq!(incoming[0], node_ids[0], "Incoming neighbor should be A");
    // Both directional lookups were served from the same buffered record.
    assert_eq!(ctx.buffer_hits, 2, "Should have 2 buffer hits");
    assert!(
        ctx.cache.contains_key(&(node_b, Direction::Outgoing)),
        "L2 cache should contain outgoing result"
    );
    assert!(
        ctx.cache.contains_key(&(node_b, Direction::Incoming)),
        "L2 cache should contain incoming result"
    );
}
// Until the LinearDetector confirms a linear traversal, the L1 buffer is
// bypassed entirely: no buffer hits or misses are recorded even though the
// node is present in the buffer, and the lookup goes through the L2 path.
#[test]
fn test_l1_buffer_only_checked_after_linear_confirmed() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    // Fresh context: the detector has observed no traversal steps yet.
    let mut ctx = TraversalContext::new();
    ctx.buffer
        .prefetch_from(&mut graph_file, node_ids[0])
        .expect("Prefetch should succeed");
    assert!(ctx.buffer.contains(node_ids[0]), "Node should be in buffer");
    assert!(
        !ctx.detector.is_linear_confirmed(),
        "LinearDetector should not be confirmed yet"
    );
    let neighbors =
        get_neighbors_optimized(&mut graph_file, node_ids[0], Direction::Outgoing, &mut ctx)
            .expect("get_neighbors_optimized should succeed");
    assert_eq!(neighbors.len(), 1, "Node A should have 1 outgoing neighbor");
    assert_eq!(
        ctx.buffer_hits, 0,
        "Should have 0 buffer hits when not linear confirmed"
    );
    assert_eq!(
        ctx.buffer_misses, 0,
        "Should have 0 buffer misses when not linear confirmed"
    );
    // The lookup went straight to the (cold) L2 cache, recording a miss.
    assert_eq!(ctx.stats.misses, 1, "Should have 1 L2 cache miss");
}
// prefetch_clusters_from pre-loads node records together with their edge
// clusters, so the subsequent lookup is a pure L1 hit.
#[test]
fn test_cluster_cache_hit() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    let node_a = node_ids[0];

    ctx.buffer
        .prefetch_clusters_from(&mut graph_file, node_a)
        .expect("prefetch_clusters_from should succeed");
    assert!(
        ctx.buffer.contains(node_a),
        "Node should be in buffer after prefetch"
    );

    let neighbors = get_neighbors_optimized(&mut graph_file, node_a, Direction::Outgoing, &mut ctx)
        .expect("get_neighbors_optimized should succeed");

    assert_eq!(neighbors.len(), 1, "Node A should have 1 outgoing neighbor");
    assert_eq!(neighbors[0], node_ids[1], "Neighbor should be node B");
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
    assert_eq!(ctx.buffer_misses, 0, "Should have 0 buffer misses");
}
// prefetch_from loads node records only (no edge clusters); the neighbor
// lookup still counts as an L1 node hit and reads the cluster on demand.
#[test]
fn test_cluster_cache_miss() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    ctx.buffer
        .prefetch_from(&mut graph_file, node_ids[0])
        .expect("prefetch_from should succeed");
    assert!(ctx.buffer.contains(node_ids[0]), "Node should be in buffer");
    // The node record is cached, but its outgoing cluster is not.
    let node_record = ctx.buffer.get(node_ids[0]).unwrap();
    assert!(
        !ctx.buffer.has_cluster(node_record.outgoing_cluster_offset),
        "Cluster should not be cached when using prefetch_from"
    );
    let neighbors =
        get_neighbors_optimized(&mut graph_file, node_ids[0], Direction::Outgoing, &mut ctx)
            .expect("get_neighbors_optimized should succeed");
    assert_eq!(neighbors.len(), 1, "Node A should have 1 outgoing neighbor");
    assert_eq!(neighbors[0], node_ids[1], "Neighbor should be node B");
    assert_eq!(ctx.buffer_hits, 1, "Should have 1 buffer hit");
}
// prefetch_clusters_from should batch-load the whole chain: every node
// record plus the outgoing/incoming clusters those records reference.
#[test]
fn test_cluster_prefetch_batch() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    ctx.buffer
        .prefetch_clusters_from(&mut graph_file, node_ids[0])
        .expect("prefetch_clusters_from should succeed");
    // All five chain nodes should have been pulled into the buffer.
    for (i, &node_id) in node_ids.iter().enumerate() {
        assert!(
            ctx.buffer.contains(node_id),
            "Node {} should be in buffer",
            i + 1
        );
    }
    // A zero offset appears to mean "no cluster", so only assert when one
    // exists — TODO confirm against SequentialReadBuffer's contract.
    let node_record = ctx.buffer.get(node_ids[0]).unwrap();
    if node_record.outgoing_cluster_offset > 0 {
        assert!(
            ctx.buffer.has_cluster(node_record.outgoing_cluster_offset),
            "Outgoing cluster should be cached"
        );
        assert!(
            ctx.buffer
                .get_cluster(node_record.outgoing_cluster_offset)
                .is_some(),
            "Should be able to get cached cluster"
        );
    }
    // Node 2 sits mid-chain, so it can have clusters in both directions.
    let node_record = ctx.buffer.get(node_ids[1]).unwrap();
    if node_record.outgoing_cluster_offset > 0 {
        assert!(
            ctx.buffer.has_cluster(node_record.outgoing_cluster_offset),
            "Outgoing cluster for node 2 should be cached"
        );
    }
    if node_record.incoming_cluster_offset > 0 {
        assert!(
            ctx.buffer.has_cluster(node_record.incoming_cluster_offset),
            "Incoming cluster for node 2 should be cached"
        );
    }
}
// clear() must drop both the buffered node records and the cluster cache.
#[test]
fn test_cluster_cache_clear() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    let node_a = node_ids[0];

    ctx.buffer
        .prefetch_clusters_from(&mut graph_file, node_a)
        .expect("prefetch_clusters_from should succeed");
    assert!(ctx.buffer.contains(node_a), "Node should be in buffer");

    // Confirm a cluster actually got cached before clearing; an offset of 0
    // means the node has no outgoing cluster, so only check when one exists.
    let record = ctx.buffer.get(node_a).unwrap();
    if record.outgoing_cluster_offset > 0 {
        assert!(
            ctx.buffer.has_cluster(record.outgoing_cluster_offset),
            "Cluster should be cached"
        );
    }

    ctx.buffer.clear();

    assert!(!ctx.buffer.contains(node_a), "Node should be cleared");
    assert_eq!(ctx.buffer.len(), 0, "Buffer should be empty");
    assert_eq!(
        ctx.buffer.cluster_cache_len(),
        0,
        "Cluster cache should be empty"
    );
}
// Walks the chain A -> B -> C -> D hop by hop after a single cluster
// prefetch; the hops should be largely served from the L1 buffer.
#[test]
fn test_cluster_cache_with_linear_traversal() {
    let (mut graph_file, node_ids, _temp_dir) = create_test_chain_graph();
    let mut ctx = create_linear_context();
    ctx.buffer
        .prefetch_clusters_from(&mut graph_file, node_ids[0])
        .expect("First prefetch should succeed");
    // Hop 1: A -> B.
    let neighbors_a =
        get_neighbors_optimized(&mut graph_file, node_ids[0], Direction::Outgoing, &mut ctx)
            .expect("get_neighbors_optimized for A should succeed");
    assert_eq!(neighbors_a.len(), 1);
    assert_eq!(neighbors_a[0], node_ids[1]);
    // Hop 2: B -> C.
    let neighbors_b =
        get_neighbors_optimized(&mut graph_file, node_ids[1], Direction::Outgoing, &mut ctx)
            .expect("get_neighbors_optimized for B should succeed");
    assert_eq!(neighbors_b.len(), 1);
    assert_eq!(neighbors_b[0], node_ids[2]);
    // Hop 3: C -> D.
    let neighbors_c =
        get_neighbors_optimized(&mut graph_file, node_ids[2], Direction::Outgoing, &mut ctx)
            .expect("get_neighbors_optimized for C should succeed");
    assert_eq!(neighbors_c.len(), 1);
    assert_eq!(neighbors_c[0], node_ids[3]);
    // The exact hit count depends on how far the prefetch reached, so only
    // a lower bound is asserted.
    assert!(ctx.buffer_hits >= 2, "Should have at least 2 buffer hits");
}