use sqlitegraph::backend::native::v2::edge_cluster::{
CacheKey, CompactEdgeRecord, Direction, EdgeCluster,
cache::{ThreadSafeCache, TraversalAwareCache},
};
use std::sync::Arc;
/// Build an outgoing `EdgeCluster` for `node_id` containing `edge_count`
/// compact edge records. Targets are `node_id + 1 ..= node_id + edge_count`
/// and edge-type ids cycle through `0..1000`.
fn create_test_cluster_compact(node_id: i64, edge_count: u32) -> EdgeCluster {
    let mut compact_edges = Vec::with_capacity(edge_count as usize);
    for i in 1..=edge_count {
        let target = node_id + i as i64;
        let edge_type = (i % 1000) as u16;
        compact_edges.push(CompactEdgeRecord::new(target, edge_type, Vec::new()));
    }
    EdgeCluster::create_from_compact_edges(compact_edges, node_id, Direction::Outgoing).unwrap()
}
/// Simulate a 3-hop BFS from node 1: every newly visited node's cluster is
/// inserted into the cache and then immediately looked up, then the overall
/// hit ratio and counters are checked.
#[test]
fn test_cache_hit_ratio_traversal() {
    let cache = ThreadSafeCache::new(100);
    let mut visited = std::collections::HashSet::new();
    let mut frontier = vec![1_i64];
    for _hop in 0..3 {
        let mut next_frontier = Vec::new();
        for node_id in frontier {
            // `insert` returns false when the node was already present,
            // i.e. already expanded on an earlier hop.
            if !visited.insert(node_id) {
                continue;
            }
            let cluster = Arc::new(create_test_cluster_compact(node_id, 10));
            let neighbors: Vec<i64> = cluster.iter_neighbors().collect();
            let key = CacheKey::new(node_id, Direction::Outgoing);
            cache.insert(key, Arc::clone(&cluster));
            cache.get(key);
            // Bound the BFS to node ids below 1000 and skip revisits.
            next_frontier.extend(
                neighbors
                    .into_iter()
                    .filter(|id| !visited.contains(id) && *id < 1000),
            );
        }
        frontier = next_frontier;
    }
    let hit_ratio = cache.hit_ratio();
    assert!(
        hit_ratio > 0.6,
        "Expected hit ratio > 60%, got {:.2}%",
        hit_ratio * 100.0
    );
    println!(
        "Cache hit ratio for BFS traversal: {:.2}%",
        hit_ratio * 100.0
    );
    let stats = cache.stats();
    println!(
        "Stats: hits={}, misses={}, traversals={}, lookups={}",
        stats.hits, stats.misses, stats.traversals, stats.lookups
    );
}
/// One 100-edge hub plus 50 single-edge leaves overflow a 50-slot cache;
/// after one more insert forces eviction, the hub must survive.
#[test]
fn test_cache_high_degree_priority() {
    let cache = ThreadSafeCache::new(50);
    let hub_key = CacheKey::new(1, Direction::Outgoing);
    let hub_cluster = Arc::new(create_test_cluster_compact(1, 100));
    cache.insert(hub_key, Arc::clone(&hub_cluster));
    // Fill the remaining capacity with low-degree leaf nodes.
    for leaf_id in 2..52 {
        cache.insert(
            CacheKey::new(leaf_id, Direction::Outgoing),
            Arc::new(create_test_cluster_compact(leaf_id, 1)),
        );
    }
    // Trigger an eviction by inserting beyond capacity.
    cache.insert(
        CacheKey::new(1000, Direction::Outgoing),
        Arc::new(create_test_cluster_compact(1000, 1)),
    );
    assert!(
        cache.get(hub_key).is_some(),
        "High-degree hub node should not be evicted from cache"
    );
    println!("High-degree node priority test passed: hub node retained in cache");
}
/// Fill a 5-slot cache, touch entries 1 and 2 three times each, then insert
/// a sixth entry; LRU-2 protection must keep the hot entries resident.
#[test]
fn test_cache_lru_k_eviction() {
    let mut cache = TraversalAwareCache::new(5);
    for node_id in 1..=5 {
        cache.insert(
            CacheKey::new(node_id, Direction::Outgoing),
            Arc::new(create_test_cluster_compact(node_id, 10)),
        );
    }
    // Repeated accesses establish a second reference for entries 1 and 2.
    for _ in 0..3 {
        cache.get(CacheKey::new(1, Direction::Outgoing));
        cache.get(CacheKey::new(2, Direction::Outgoing));
    }
    // This insert exceeds capacity and must evict a cold entry instead.
    cache.insert(
        CacheKey::new(6, Direction::Outgoing),
        Arc::new(create_test_cluster_compact(6, 10)),
    );
    assert!(
        cache.get(CacheKey::new(1, Direction::Outgoing)).is_some(),
        "Entry 1 should still be in cache (LRU-2 protection)"
    );
    assert!(
        cache.get(CacheKey::new(2, Direction::Outgoing)).is_some(),
        "Entry 2 should still be in cache (LRU-2 protection)"
    );
    println!("LRU-K eviction test passed: frequently accessed entries retained");
}
/// Prefetching a node's neighbors through the supplied loader should land
/// the loadable clusters in the cache. The loader here only resolves ids
/// <= 11 — which covers all of node 1's neighbors (2..=11).
#[test]
fn test_prefetch_neighbors() {
    let cache = ThreadSafeCache::new(100);
    let node_id = 1;
    let cluster = Arc::new(create_test_cluster_compact(node_id, 10));
    let neighbors: Vec<i64> = cluster.iter_neighbors().collect();
    cluster.prefetch_neighbors(
        &cache,
        &neighbors,
        // The direction argument is unused by this loader; the leading
        // underscore silences the unused-variable warning.
        |neighbor_id, _direction| {
            if neighbor_id <= 11 {
                Some(create_test_cluster_compact(neighbor_id, 5))
            } else {
                None
            }
        },
        Direction::Outgoing,
    );
    // Count how many of the first 10 neighbors actually landed in the cache.
    let prefetch_count = neighbors
        .iter()
        .take(10)
        .filter(|&&id| cache.get(CacheKey::new(id, Direction::Outgoing)).is_some())
        .count();
    assert!(
        prefetch_count >= 5,
        "Expected at least 5 neighbors to be prefetched, got {}",
        prefetch_count
    );
    println!(
        "Prefetch test passed: {} neighbors preloaded into cache",
        prefetch_count
    );
}
/// Nodes with more than 1000 edges are deliberately excluded from the
/// cache to limit memory pressure; verify a 1500-edge node stays out.
#[test]
fn test_cache_high_degree_not_cached() {
    let cache = ThreadSafeCache::new(100);
    let key = CacheKey::new(1, Direction::Outgoing);
    let high_degree_cluster = Arc::new(create_test_cluster_compact(1, 1500));
    high_degree_cluster.get_neighbors_with_cache(&cache, 1, Direction::Outgoing);
    assert!(
        cache.get(key).is_none(),
        "Very high-degree node (>1000 edges) should not be cached"
    );
    println!("High-degree node exclusion test passed: node not cached to reduce memory pressure");
}
/// Insert nine clusters, look up a mix of present and absent keys, and
/// confirm both the hit and miss counters advanced.
#[test]
fn test_cache_statistics_tracking() {
    let cache = ThreadSafeCache::new(50);
    for node_id in 1..10 {
        cache.insert(
            CacheKey::new(node_id, Direction::Outgoing),
            Arc::new(create_test_cluster_compact(node_id, 5)),
        );
    }
    // Present keys -> hits.
    for node_id in 1..5 {
        cache.get(CacheKey::new(node_id, Direction::Outgoing));
    }
    // Absent keys -> misses.
    for node_id in 100..110 {
        cache.get(CacheKey::new(node_id, Direction::Outgoing));
    }
    let stats = cache.stats();
    assert!(stats.hits > 0, "Should have recorded some hits");
    assert!(stats.misses > 0, "Should have recorded some misses");
    assert!(
        stats.hits + stats.misses > 0,
        "Total accesses should be > 0"
    );
    println!(
        "Cache statistics: hits={}, misses={}, hit_ratio={:.2}%",
        stats.hits,
        stats.misses,
        cache.hit_ratio() * 100.0
    );
}
/// Four threads each insert and read 25 disjoint keys through a shared
/// cache handle; all joins must succeed and the stats must have advanced.
#[test]
fn test_cache_thread_safety() {
    use std::thread;
    let cache = Arc::new(ThreadSafeCache::new(100));
    let handles: Vec<_> = (0..4)
        .map(|thread_id| {
            let cache_clone = Arc::clone(&cache);
            thread::spawn(move || {
                // Each thread owns the disjoint id range [thread_id*25, thread_id*25+25).
                for i in 0..25 {
                    let node_id = thread_id * 25 + i;
                    let cluster = Arc::new(create_test_cluster_compact(node_id as i64, 5));
                    let key = CacheKey::new(node_id as i64, Direction::Outgoing);
                    cache_clone.insert(key, Arc::clone(&cluster));
                    cache_clone.get(key);
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }
    let stats = cache.stats();
    assert!(
        stats.hits > 0 || stats.misses > 0,
        "Cache should have recorded accesses"
    );
    println!(
        "Thread safety test passed: {} concurrent operations completed",
        stats.hits + stats.misses
    );
}
/// Insert twice the cache's capacity, then verify lookups still work and
/// the statistics counters keep advancing.
#[test]
fn test_cache_capacity_enforcement() {
    let cache = ThreadSafeCache::new(10);
    // 20 inserts into a 10-slot cache force repeated evictions.
    for node_id in 1..=20 {
        cache.insert(
            CacheKey::new(node_id, Direction::Outgoing),
            Arc::new(create_test_cluster_compact(node_id, 5)),
        );
    }
    let stats_before = cache.stats();
    for node_id in 1..=5 {
        cache.get(CacheKey::new(node_id, Direction::Outgoing));
    }
    let stats_after = cache.stats();
    assert!(
        stats_after.hits + stats_after.misses > stats_before.hits + stats_before.misses,
        "Cache should still be operational after exceeding capacity"
    );
    println!("Capacity enforcement test passed: cache operates correctly at capacity limit");
}