use crate::backend::native::NativeBackendError;
use crate::backend::native::NativeResult;
use crate::backend::native::types::NodeFlags;
use crate::backend::native::v3::index::{IndexPage, IndexPageType};
use crate::backend::native::v3::node::page::NodePage;
use crate::backend::native::v3::node::record::NodeRecordV3;
use crate::backend::native::v3::node::store::TraversalCache;
use std::sync::Arc;
// Shared fixture constants for this test module.
// NOTE(review): neither constant is referenced within this chunk — confirm
// sibling test files use them before considering removal.
pub const TEST_NODE_COUNT: usize = 100;
pub const TEST_PAGE_CAPACITY: usize = 20;
#[test]
fn test_btree_lookup_single_node() {
    // A freshly built leaf page carries its id and type and holds no entries.
    let leaf = IndexPage::new_leaf(1);
    assert_eq!(leaf.page_id(), 1);
    assert!(matches!(leaf.page_type(), IndexPageType::Leaf));
    assert_eq!(leaf.count(), 0);
}
#[test]
fn test_btree_lookup_multiple_nodes() {
    // Each leaf page in a range of ids reports its own id and starts empty.
    // Fixed: iterate the range directly instead of collecting into a Vec
    // first, and drop the vacuous `assert!(test_node_id >= 0)` — the value
    // `(i * 100) as i64` is derived from a usize index and is always
    // non-negative, so the assertion could never fail.
    for page_id in 1u64..=5 {
        let index_page = IndexPage::new_leaf(page_id);
        assert_eq!(index_page.page_id(), page_id);
        assert_eq!(index_page.count(), 0);
    }
}
#[test]
fn test_btree_lookup_nonexistent_node() {
    // Nothing has been inserted, so the new leaf reports zero entries.
    let empty_leaf = IndexPage::new_leaf(1);
    assert_eq!(empty_leaf.count(), 0);
}
#[test]
fn test_btree_index_page_creation() {
    // new_leaf produces a Leaf-typed page tagged with the requested id.
    let page = IndexPage::new_leaf(10);
    assert_eq!(page.page_id(), 10);
    assert!(matches!(page.page_type(), IndexPageType::Leaf));
}
#[test]
fn test_btree_page_type_discrimination() {
    // The leaf constructor must yield the Leaf discriminant.
    let page = IndexPage::new_leaf(1);
    assert!(matches!(page.page_type(), IndexPageType::Leaf));
}
#[test]
fn test_page_loading_decompression() {
    // Round-trip five inline records through pack/unpack and confirm the
    // per-record header fields survive serialization.
    let mut original = NodePage::new(1);
    for idx in 0..5u32 {
        let record = NodeRecordV3::new_inline(
            idx as i64,
            NodeFlags::empty(),
            idx as u16 * 10,
            idx as u16 * 20,
            vec![idx as u8; 32],
            idx as u64 * 1000,
            idx * 5,
            idx as u64 * 2000,
            idx * 3,
        );
        original.add_node(record).unwrap();
    }
    let packed = original.pack().unwrap();
    let restored = NodePage::unpack(&packed).unwrap();
    assert_eq!(restored.page_id, 1);
    assert_eq!(restored.node_count(), 5);
    for (idx, record) in restored.nodes.iter().enumerate() {
        assert_eq!(record.id(), idx as i64);
        assert_eq!(record.kind_offset, (idx * 10) as u16);
        assert_eq!(record.name_offset, (idx * 20) as u16);
    }
}
#[test]
fn test_page_checksum_validation() {
    // A packed page unpacks cleanly; flipping byte 28 — within the header
    // word that test_corruption_recovery reads as the checksum — must be
    // rejected as an InvalidHeader error.
    let mut page = NodePage::new(42);
    let record = NodeRecordV3::new_inline(
        123,
        NodeFlags::empty(),
        10,
        20,
        b"test data".to_vec(),
        1000,
        5,
        2000,
        3,
    );
    page.add_node(record).unwrap();
    let packed = page.pack().unwrap();
    assert!(NodePage::unpack(&packed).is_ok());

    let mut tampered = packed.clone();
    tampered[28] ^= 0xFF;
    let outcome = NodePage::unpack(&tampered);
    assert!(outcome.is_err(), "Corrupted checksum should fail validation");
    match outcome {
        Err(NativeBackendError::InvalidHeader { field, .. }) => {
            assert!(field.contains("checksum") || field.contains("node_page"));
        }
        _ => panic!("Expected InvalidHeader error for checksum mismatch"),
    }
}
#[test]
fn test_page_not_found_error() {
    // 100 bytes is far smaller than a full page image, so unpacking must
    // fail with an InvalidHeader error naming the node_page field.
    let short_data = vec![0u8; 100];
    let result = NodePage::unpack(&short_data);
    assert!(result.is_err(), "Short page data should return error");
    // Fixed: the original `if let` silently accepted any other error
    // variant; mirror test_page_checksum_validation and fail explicitly
    // when the wrong variant is returned.
    match result {
        Err(NativeBackendError::InvalidHeader { field, .. }) => {
            assert!(
                field.contains("node_page"),
                "Expected node_page error for short data"
            );
        }
        _ => panic!("Expected InvalidHeader error for short page data"),
    }
}
#[test]
fn test_page_overflow_handling() {
    // Fill a page until add_node reports it is full, then make sure the
    // accepted records survive a pack/unpack round trip.
    let mut page = NodePage::new(1);
    let mut accepted = 0usize;
    for id in 0..100 {
        let record = NodeRecordV3::new_inline(
            id as i64,
            NodeFlags::empty(),
            0,
            0,
            vec![id as u8; 50],
            0,
            0,
            0,
            0,
        );
        if page.add_node(record).is_err() {
            break;
        }
        accepted += 1;
    }
    assert!(
        accepted >= 20,
        "Should fit at least 20 nodes, got {}",
        accepted
    );
    let packed = page.pack().unwrap();
    let reloaded = NodePage::unpack(&packed).unwrap();
    assert_eq!(reloaded.node_count() as usize, accepted);
}
#[test]
fn test_page_empty_data() {
    // A record with an empty inline payload is preserved as Some(vec![])
    // across a round trip, not collapsed to None.
    let mut page = NodePage::new(1);
    let record = NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, Vec::new(), 0, 0, 0, 0);
    page.add_node(record).unwrap();
    let reloaded = NodePage::unpack(&page.pack().unwrap()).unwrap();
    assert_eq!(reloaded.node_count(), 1);
    assert_eq!(reloaded.nodes[0].data_inline, Some(vec![]));
}
#[test]
fn test_page_with_external_data() {
    // An external record keeps its externality flag and reported data
    // length across a pack/unpack round trip.
    let mut page = NodePage::new(1);
    let record =
        NodeRecordV3::new_external(1, NodeFlags::empty(), 100, 200, 5000, 200, 0, 5, 0, 3);
    page.add_node(record).unwrap();
    let reloaded = NodePage::unpack(&page.pack().unwrap()).unwrap();
    assert_eq!(reloaded.node_count(), 1);
    let restored = &reloaded.nodes[0];
    assert!(restored.is_external());
    assert_eq!(restored.data_len(), 200);
}
#[test]
fn test_cache_hit_miss_tracking() {
    // Counters start at zero; a lookup before insert counts as a miss,
    // a lookup after insert as a hit, and the miss count is retained.
    let mut cache = TraversalCache::new(4);
    assert_eq!(cache.len(), 0);
    assert_eq!(cache.hits(), 0);
    assert_eq!(cache.misses(), 0);

    assert!(cache.get(1).is_none());
    assert_eq!(cache.misses(), 1);

    cache.insert(1, Arc::new(NodePage::new(1)));
    assert!(cache.get(1).is_some());
    assert_eq!(cache.hits(), 1);
    assert_eq!(cache.misses(), 1);
}
#[test]
fn test_cache_lru_eviction() {
    // With capacity 2, inserting a third page evicts the least recently
    // used entry (page 1) while the two newer entries stay resident.
    let mut cache = TraversalCache::new(2);
    for id in 1..=2 {
        cache.insert(id, Arc::new(NodePage::new(id)));
    }
    assert_eq!(cache.len(), 2);
    cache.insert(3, Arc::new(NodePage::new(3)));
    assert_eq!(cache.len(), 2);
    assert!(cache.get(1).is_none());
    assert!(cache.get(2).is_some());
    assert!(cache.get(3).is_some());
}
#[test]
fn test_cache_invalidation() {
    // invalidate() removes exactly the named entry; other entries survive.
    let mut lru = TraversalCache::new(4);
    for id in 1..=2 {
        lru.insert(id, Arc::new(NodePage::new(id)));
    }
    assert_eq!(lru.len(), 2);
    lru.invalidate(1);
    assert_eq!(lru.len(), 1);
    assert!(lru.get(1).is_none());
    assert!(lru.get(2).is_some());
}
#[test]
fn test_cache_clear() {
    // clear() empties the cache entirely; every prior key then misses.
    let mut lru = TraversalCache::new(4);
    for id in 1..=3 {
        lru.insert(id, Arc::new(NodePage::new(id)));
    }
    assert_eq!(lru.len(), 3);
    lru.clear();
    assert_eq!(lru.len(), 0);
    for id in 1..=3 {
        assert!(lru.get(id).is_none());
    }
}
#[test]
fn test_cache_hit_rate_calculation() {
    // Ratio is 0.0 before any lookup, exactly 1.0 after only hits, and
    // about 10/12 once two misses are mixed in.
    let mut cache = TraversalCache::new(4);
    assert_eq!(cache.hit_ratio(), 0.0);

    cache.insert(1, Arc::new(NodePage::new(1)));
    for _ in 0..10 {
        cache.get(1);
    }
    assert_eq!(cache.hit_ratio(), 1.0);

    cache.get(2);
    cache.get(3);
    let hit_rate = cache.hit_ratio();
    assert!(
        hit_rate > 0.8 && hit_rate < 0.9,
        "Expected hit rate ~0.833, got {}",
        hit_rate
    );
}
#[test]
fn test_cache_sequential_access_pattern() {
    // Pre-populate then sweep the same ids: every lookup hits, so the
    // ratio is a perfect 1.0 with no misses recorded.
    let mut cache = TraversalCache::new(4);
    for id in 1..=4 {
        cache.insert(id, Arc::new(NodePage::new(id)));
    }
    for id in 1..=4 {
        let _ = cache.get(id);
    }
    assert_eq!(cache.hits(), 4);
    assert_eq!(cache.misses(), 0);
    assert_eq!(cache.hit_ratio(), 1.0);
}
#[test]
fn test_storage_error_propagation() {
    // A 10-byte buffer cannot hold a page header; the error variant and
    // its field text must identify the problem.
    let truncated = vec![0u8; 10];
    let outcome = NodePage::unpack(&truncated);
    assert!(outcome.is_err());
    if let Err(NativeBackendError::InvalidHeader { field, .. }) = outcome {
        assert!(field.contains("node_page") || field.contains("insufficient"));
    } else {
        panic!("Expected InvalidHeader error for insufficient bytes");
    }
}
#[test]
fn test_compression_error_handling() {
    // Flip two bytes in the packed body (past offset 32, just beyond the
    // checksum word at bytes 28..32); unpack must detect the damage.
    let mut page = NodePage::new(1);
    let record =
        NodeRecordV3::new_inline(1, NodeFlags::empty(), 0, 0, vec![1, 2, 3, 4], 0, 0, 0, 0);
    page.add_node(record).unwrap();
    let mut tampered = page.pack().unwrap();
    let body_start = 32;
    if body_start + 10 < tampered.len() {
        tampered[body_start] ^= 0xFF;
        tampered[body_start + 5] ^= 0xFF;
    }
    let outcome = NodePage::unpack(&tampered);
    assert!(outcome.is_err(), "Corrupted data should fail unpacking");
}
#[test]
fn test_corruption_recovery() {
    // Pack three valid records, flip one byte deep in the image, and
    // verify that unpacking rejects the corrupted page.
    let mut page = NodePage::new(1);
    for i in 0..3 {
        let node = NodeRecordV3::new_inline(
            i as i64,
            NodeFlags::empty(),
            i as u16 * 10,
            i as u16 * 20,
            vec![i as u8; 20],
            i as u64 * 100,
            i as u32 * 2,
            i as u64 * 200,
            i as u32 * 3,
        );
        page.add_node(node).unwrap();
    }
    // Fixed: removed the unused `_valid_checksum` extraction from bytes
    // 28..32 — it asserted nothing and duplicated knowledge of the header
    // layout in a place that would silently rot if the layout changed.
    let valid_bytes = page.pack().unwrap();
    let mut corrupted = valid_bytes.clone();
    corrupted[100] ^= 0xFF;
    let result = NodePage::unpack(&corrupted);
    assert!(result.is_err());
}
#[test]
fn test_invalid_node_id() {
    // Extreme i64 ids at both ends of the range are accepted by add_node.
    let mut page = NodePage::new(1);
    for &id in [i64::MIN, i64::MAX].iter() {
        let record = NodeRecordV3::new_inline(id, NodeFlags::empty(), 0, 0, vec![], 0, 0, 0, 0);
        assert!(page.add_node(record).is_ok());
    }
}
#[test]
fn test_edge_case_empty_page() {
    // An empty page still packs to a full 4096-byte image and round-trips
    // back to an empty page with the same id.
    let empty = NodePage::new(0);
    let packed = empty.pack().unwrap();
    assert_eq!(packed.len(), 4096);
    let reloaded = NodePage::unpack(&packed).unwrap();
    assert_eq!(reloaded.page_id, 0);
    assert_eq!(reloaded.node_count(), 0);
    assert!(reloaded.is_empty());
}
#[test]
fn test_edge_case_max_inline_data() {
    // A 64-byte inline payload (presumably the inline maximum, per the
    // test name — confirm against NodeRecordV3) survives a round trip.
    let payload = vec![0xABu8; 64];
    let mut page = NodePage::new(1);
    let record = NodeRecordV3::new_inline(
        1,
        NodeFlags::empty(),
        100,
        200,
        payload.clone(),
        0,
        0,
        0,
        0,
    );
    page.add_node(record).unwrap();
    let reloaded = NodePage::unpack(&page.pack().unwrap()).unwrap();
    assert_eq!(reloaded.nodes[0].data_inline, Some(payload));
}
/// Builds a deterministic inline test record whose fields are simple
/// functions of `id`, so verification code can recompute expectations.
pub fn create_test_node(id: i64) -> NodeRecordV3 {
    let kind_offset = (id % 1000) as u16;
    let name_offset = ((id % 100) + 100) as u16;
    NodeRecordV3::new_inline(
        id,
        NodeFlags::empty(),
        kind_offset,
        name_offset,
        vec![id as u8; 32],
        (id as u64) * 1000,
        ((id % 10) + 1) as u32,
        (id as u64) * 2000,
        ((id % 5) + 1) as u32,
    )
}
/// Builds a page holding up to `node_count` records from `create_test_node`,
/// stopping early (without error) if the page fills first.
pub fn create_test_page(page_id: u64, node_count: usize) -> NodePage {
    let mut page = NodePage::new(page_id);
    for id in 0..node_count {
        if page.add_node(create_test_node(id as i64)).is_err() {
            break;
        }
    }
    page
}
/// Packs `page`, unpacks the bytes, and asserts the restored page matches
/// the original on page id, node count, and per-record header fields.
/// Returns any pack/unpack error; field mismatches panic via assert.
pub fn verify_round_trip(page: &NodePage) -> NativeResult<()> {
    let packed = page.pack()?;
    let restored = NodePage::unpack(&packed)?;
    assert_eq!(restored.page_id, page.page_id);
    assert_eq!(restored.node_count(), page.node_count());
    assert_eq!(restored.nodes.len(), page.nodes.len());
    for (before, after) in page.nodes.iter().zip(restored.nodes.iter()) {
        assert_eq!(after.id(), before.id());
        assert_eq!(after.flags, before.flags);
        assert_eq!(after.kind_offset, before.kind_offset);
        assert_eq!(after.name_offset, before.name_offset);
    }
    Ok(())
}
#[test]
fn test_end_to_end_node_storage() {
    // A populated generated page survives a full pack/unpack cycle.
    let page = create_test_page(1, 10);
    assert!(page.node_count() > 0);
    verify_round_trip(&page).unwrap();
}
#[test]
fn test_multiple_pages_round_trip() {
    // Pages of differing sizes each round-trip independently.
    for &(id, count) in [(1u64, 5usize), (2, 10), (3, 15)].iter() {
        let page = create_test_page(id, count);
        verify_round_trip(&page).unwrap();
    }
}
#[test]
fn test_node_flags_preservation() {
    // Each flag value survives serialization unchanged.
    let cases = vec![NodeFlags::empty(), NodeFlags::DELETED, NodeFlags::NONE];
    for flags in cases {
        let mut page = NodePage::new(1);
        page.add_node(NodeRecordV3::new_inline(1, flags, 10, 20, vec![], 0, 0, 0, 0))
            .unwrap();
        let reloaded = NodePage::unpack(&page.pack().unwrap()).unwrap();
        assert_eq!(reloaded.nodes[0].flags, flags);
    }
}
#[test]
fn test_page_capacity_calculation() {
    // A fresh page exposes 4064 free bytes (4096-byte image minus what is
    // presumably a 32-byte header); adding a record shrinks the remainder.
    let mut page = NodePage::new(1);
    assert_eq!(page.remaining_capacity(), 4064);
    let record = NodeRecordV3::new_inline(
        1,
        NodeFlags::empty(),
        0,
        0,
        vec![1u8; 50],
        0,
        0,
        0,
        0,
    );
    page.add_node(record).unwrap();
    assert!(page.remaining_capacity() < 4064);
}
#[test]
fn test_space_efficiency_tracking() {
    // Efficiency reads 0.0 for an empty page and lands in (0, 1] once
    // records occupy space.
    let mut page = NodePage::new(1);
    assert_eq!(page.space_efficiency(), 0.0);
    for n in 0..5u32 {
        let record = NodeRecordV3::new_inline(
            n as i64,
            NodeFlags::empty(),
            n as u16 * 10,
            n as u16 * 20,
            vec![n as u8; 50],
            n as u64 * 100,
            n * 2,
            n as u64 * 200,
            n * 3,
        );
        page.add_node(record).unwrap();
    }
    let used = page.space_efficiency();
    assert!(used > 0.0);
    assert!(used <= 1.0);
}
#[test]
fn test_cache_with_various_page_ids() {
    // Keys spanning the u64 range are stored and retrieved intact.
    let mut cache = TraversalCache::new(16);
    let ids: Vec<u64> = vec![0, 1, 100, 1000, u64::MAX - 1];
    for &id in &ids {
        cache.insert(id, Arc::new(NodePage::new(id)));
    }
    assert_eq!(cache.len(), ids.len());
    for &id in &ids {
        assert!(cache.contains(&id));
        let hit = cache.get(id);
        assert!(hit.is_some());
        assert_eq!(hit.unwrap().page_id, id);
    }
}
#[test]
fn test_large_scale_page_operations() {
    // Fifty generated pages all round-trip; the first 32 then fill a
    // cache to exactly its configured capacity.
    let total = 50;
    let pages: Vec<NodePage> = (0..total).map(|id| create_test_page(id, 10)).collect();
    for page in &pages {
        verify_round_trip(page).unwrap();
    }
    let mut cache = TraversalCache::new(32);
    for (slot, page) in pages.iter().take(32).enumerate() {
        cache.insert(slot as u64, Arc::new(page.clone()));
    }
    assert_eq!(cache.len(), 32);
    assert_eq!(cache.capacity(), 32);
}