use sqlitegraph::backend::native::v2::storage::JsonLimits;
use sqlitegraph::backend::native::v2::wal::{V2WALConfig, V2WALHeader};
use sqlitegraph::backend::native::{NativeBackendError, NativeResult};
use std::path::Path;
use tempfile::tempdir;
#[test]
fn test_wal_config_validation() -> NativeResult<()> {
    let temp_dir = tempdir()?;
    // Baseline config with every field set explicitly so the assertions
    // below pin exactly what was written.
    let valid_config = V2WALConfig {
        graph_path: temp_dir.path().join("valid.db"),
        wal_path: temp_dir.path().join("valid.wal"),
        checkpoint_path: temp_dir.path().join("checkpoint.tracker"),
        max_wal_size: 64 * 1024 * 1024,
        buffer_size: 1024 * 1024,
        checkpoint_interval: 1000,
        group_commit_timeout_ms: 100,
        max_group_commit_size: 8,
        enable_compression: true,
        compression_level: 3,
        auto_checkpoint: false,
        background_checkpoint_thread: false,
        background_checkpoint_interval_secs: 0,
        json_limits: JsonLimits::default(),
    };
    assert_eq!(valid_config.max_wal_size, 64 * 1024 * 1024);
    assert_eq!(valid_config.buffer_size, 1024 * 1024);
    assert_eq!(valid_config.group_commit_timeout_ms, 100);
    assert_eq!(valid_config.max_group_commit_size, 8);
    assert!(valid_config.enable_compression);
    assert_eq!(valid_config.compression_level, 3);

    // Each variant overrides exactly one field. The original test built these
    // clones but never asserted anything about them (dead code); verify both
    // that the override took effect and that an untouched field is preserved.
    let mut config_variant1 = valid_config.clone();
    config_variant1.max_wal_size = 1024;
    assert_eq!(config_variant1.max_wal_size, 1024);
    assert_eq!(config_variant1.buffer_size, valid_config.buffer_size);

    let mut config_variant2 = valid_config.clone();
    config_variant2.buffer_size = 128 * 1024 * 1024;
    assert_eq!(config_variant2.buffer_size, 128 * 1024 * 1024);
    assert_eq!(config_variant2.max_wal_size, valid_config.max_wal_size);

    let mut config_variant3 = valid_config.clone();
    config_variant3.buffer_size = 1000;
    assert_eq!(config_variant3.buffer_size, 1000);

    let mut config_variant4 = valid_config.clone();
    config_variant4.group_commit_timeout_ms = 5;
    assert_eq!(config_variant4.group_commit_timeout_ms, 5);

    let mut config_variant5 = valid_config.clone();
    config_variant5.max_group_commit_size = 16;
    assert_eq!(config_variant5.max_group_commit_size, 16);
    Ok(())
}
#[test]
fn test_wal_header_operations() -> NativeResult<()> {
    // A freshly constructed header must carry the canonical magic/version
    // and start with its LSN counters at their initial values.
    let fresh = V2WALHeader::new();
    assert_eq!(fresh.magic, V2WALHeader::MAGIC);
    assert_eq!(fresh.version, V2WALHeader::VERSION);
    assert_eq!(fresh.current_lsn, 1);
    assert_eq!(fresh.committed_lsn, 0);
    assert_eq!(fresh.checkpointed_lsn, 0);

    // The magic bytes spell "V2WAL" padded with NULs; a corrupted first byte
    // must compare unequal to the canonical constant.
    let expected_magic = V2WALHeader::MAGIC;
    assert_eq!(expected_magic, [b'V', b'2', b'W', b'A', b'L', 0, 0, 0]);
    let corrupted_magic = [b'X', b'2', b'W', b'A', b'L', 0, 0, 0];
    assert_ne!(corrupted_magic, V2WALHeader::MAGIC);
    Ok(())
}
#[test]
fn test_lsn_validation() -> NativeResult<()> {
    // An LSN sequence step is valid iff it is strictly increasing.
    // The previous version also checked `next <= u64::MAX`, which is
    // tautologically true for every u64 and has been removed.
    fn is_valid_lsn_sequence(prev: u64, next: u64) -> bool {
        next > prev
    }
    assert!(
        is_valid_lsn_sequence(0, 1),
        "Valid forward sequence should pass"
    );
    assert!(
        is_valid_lsn_sequence(1000, 1001),
        "Valid increment by 1 should pass"
    );
    assert!(
        is_valid_lsn_sequence(1000, 1050),
        "Valid increment by more than 1 should pass"
    );
    assert!(
        !is_valid_lsn_sequence(1000, 999),
        "Backward sequence should fail"
    );
    assert!(!is_valid_lsn_sequence(1000, 1000), "Same LSN should fail");
    assert!(
        is_valid_lsn_sequence(0, u64::MAX),
        "Maximum jump should be valid"
    );
    assert!(
        is_valid_lsn_sequence(u64::MAX - 1, u64::MAX),
        "Maximum LSN increment should work"
    );
    Ok(())
}
#[test]
fn test_wal_size_estimation() -> NativeResult<()> {
    // Rough on-disk size model: fixed header + record payloads + ~20 bytes of
    // per-record framing overhead.
    fn calculate_wal_size_estimate(record_count: u64, avg_size: u64) -> u64 {
        let header = std::mem::size_of::<V2WALHeader>() as u64;
        header + record_count * avg_size + record_count * 20
    }

    let cases = [(100u64, 100u64), (1000, 500), (10000, 1000), (100000, 2000)];
    for (record_count, avg_size) in cases {
        let estimate = calculate_wal_size_estimate(record_count, avg_size);
        // Bounds computed entirely in u64 (the original mixed usize casts;
        // the resulting values are identical for these inputs).
        let lower = std::mem::size_of::<V2WALHeader>() as u64 + record_count * avg_size;
        let upper = lower + record_count * 20;
        assert!(
            estimate >= lower,
            "Size estimate should be at least minimum expected for {} records",
            record_count
        );
        assert!(
            estimate <= upper,
            "Size estimate should not exceed maximum expected for {} records",
            record_count
        );
    }
    Ok(())
}
#[test]
fn test_lsn_formatting_parsing() -> NativeResult<()> {
    // Round-trip helpers: LSNs are serialized as plain base-10 strings.
    let format_lsn = |lsn: u64| lsn.to_string();
    let parse_lsn = |s: &str| s.parse::<u64>();

    for lsn in [0u64, 1, 42, 1000, u64::MAX / 2, u64::MAX - 1] {
        let formatted = format_lsn(lsn);
        let round_tripped =
            parse_lsn(&formatted).map_err(|_| NativeBackendError::InvalidConfiguration {
                parameter: "lsn".to_string(),
                reason: "Parse error".to_string(),
            })?;
        assert_eq!(
            lsn, round_tripped,
            "LSN should be preserved through format/parse cycle"
        );
    }

    // Empty, non-numeric, negative, and > u64::MAX inputs must all be rejected.
    for invalid_str in ["", "not_a_number", "-1", "18446744073709551616"] {
        assert!(
            parse_lsn(invalid_str).is_err(),
            "Invalid LSN string '{}' should fail",
            invalid_str
        );
    }
    Ok(())
}
#[test]
fn test_config_serialization() -> NativeResult<()> {
    let temp_dir = tempdir()?;
    // NOTE(review): despite the name, no serialize/deserialize round-trip
    // happens here — the test only pins the values assigned to the config.
    // Consider adding a real round-trip if V2WALConfig supports one.
    let config = V2WALConfig {
        graph_path: temp_dir.path().join("test.db"),
        wal_path: temp_dir.path().join("test.wal"),
        checkpoint_path: temp_dir.path().join("checkpoint.tracker"),
        max_wal_size: 32 * 1024 * 1024,
        buffer_size: 512 * 1024,
        checkpoint_interval: 1000,
        group_commit_timeout_ms: 200,
        max_group_commit_size: 16,
        enable_compression: true,
        compression_level: 3,
        auto_checkpoint: false,
        background_checkpoint_thread: false,
        background_checkpoint_interval_secs: 0,
        json_limits: JsonLimits::default(),
    };

    // Every explicitly tuned field must keep its assigned value.
    assert_eq!(config.compression_level, 3);
    assert!(config.enable_compression);
    assert_eq!(config.max_group_commit_size, 16);
    assert_eq!(config.group_commit_timeout_ms, 200);
    assert_eq!(config.buffer_size, 512 * 1024);
    assert_eq!(config.max_wal_size, 32 * 1024 * 1024);
    Ok(())
}
#[test]
fn test_wal_file_creation() -> NativeResult<()> {
    let temp_dir = tempdir()?;
    let wal_path = temp_dir.path().join("test_creation.wal");
    assert!(!wal_path.exists(), "WAL file should not exist initially");

    // NOTE(review): no WAL file is actually created below — only the config
    // pointing at the path is built and its fields checked. Confirm whether
    // an actual open/create call was intended here.
    let creation_config = V2WALConfig {
        graph_path: temp_dir.path().join("test_creation.db"),
        wal_path: wal_path.clone(),
        checkpoint_path: temp_dir.path().join("creation_checkpoint.tracker"),
        max_wal_size: 16 * 1024 * 1024,
        buffer_size: 256 * 1024,
        checkpoint_interval: 1000,
        group_commit_timeout_ms: 100,
        max_group_commit_size: 8,
        enable_compression: false,
        compression_level: 3,
        auto_checkpoint: false,
        background_checkpoint_thread: false,
        background_checkpoint_interval_secs: 0,
        json_limits: JsonLimits::default(),
    };
    assert_eq!(creation_config.max_wal_size, 16 * 1024 * 1024);
    assert_eq!(creation_config.buffer_size, 256 * 1024);
    assert!(temp_dir.path().exists(), "Temp directory should exist");
    Ok(())
}
#[test]
fn test_error_handling_edge_cases() -> NativeResult<()> {
    // Config pointing at a directory that does not exist; only the path
    // plumbing is exercised here (nothing is opened on disk).
    let invalid_config = V2WALConfig {
        wal_path: Path::new("/non/existent/directory/test.wal").to_path_buf(),
        checkpoint_path: Path::new("/non/existent/directory/checkpoint.tracker").to_path_buf(),
        ..Default::default()
    };
    assert_eq!(
        invalid_config.wal_path,
        Path::new("/non/existent/directory/test.wal")
    );

    // Strictly-increasing LSN check at the extremes of the u64 range.
    let is_valid_lsn_sequence = |prev: u64, next: u64| next > prev;
    assert!(
        !is_valid_lsn_sequence(u64::MAX, u64::MAX),
        "Same LSN should fail"
    );
    assert!(
        !is_valid_lsn_sequence(u64::MAX, 0),
        "Wrap-around should fail"
    );

    // Size estimate at the largest inputs that cannot overflow u64:
    // (2^32 - 1)^2 leaves ~2^33 bytes of headroom above it for the header.
    let calculate_wal_size_estimate = |record_count: u64, avg_size: u64| {
        std::mem::size_of::<V2WALHeader>() as u64 + record_count * avg_size
    };
    let extreme_estimate = calculate_wal_size_estimate(u64::from(u32::MAX), u64::from(u32::MAX));
    assert!(extreme_estimate > 0, "Extreme estimate should be positive");
    Ok(())
}
#[test]
fn test_concurrent_access_patterns() -> NativeResult<()> {
    use std::sync::{Arc, Mutex};
    use std::thread;
    let temp_dir = tempdir()?;
    // Shared, read-only config: four workers validate it concurrently and
    // record (worker id, validation outcome) into a mutex-guarded vector.
    let shared_config = Arc::new(V2WALConfig {
        graph_path: temp_dir.path().join("concurrent_test.db"),
        wal_path: temp_dir.path().join("concurrent_test.wal"),
        checkpoint_path: temp_dir.path().join("concurrent_checkpoint.tracker"),
        max_wal_size: 16 * 1024 * 1024,
        buffer_size: 256 * 1024,
        checkpoint_interval: 1000,
        group_commit_timeout_ms: 50,
        max_group_commit_size: 4,
        enable_compression: false,
        compression_level: 3,
        auto_checkpoint: false,
        background_checkpoint_thread: false,
        background_checkpoint_interval_secs: 0,
        json_limits: JsonLimits::default(),
    });
    let outcomes = Arc::new(Mutex::new(Vec::new()));

    let workers: Vec<_> = (0..4)
        .map(|worker_id| {
            let cfg = Arc::clone(&shared_config);
            let sink = Arc::clone(&outcomes);
            thread::spawn(move || {
                let ok = cfg.max_wal_size > 0 && cfg.buffer_size > 0;
                sink.lock().unwrap().push((worker_id, ok));
            })
        })
        .collect();
    for worker in workers {
        worker.join().unwrap();
    }

    let recorded = outcomes.lock().unwrap();
    assert_eq!(recorded.len(), 4, "All threads should complete");
    for (thread_id, success) in recorded.iter() {
        assert!(*success, "Thread {} validation should succeed", thread_id);
    }
    Ok(())
}
#[test]
fn test_memory_resource_management() -> NativeResult<()> {
    let temp_dir = tempdir()?;
    // (max_wal_size, buffer_size) pairs spanning small to large deployments.
    let memory_profiles = [
        (64 * 1024, 8 * 1024),
        (1024 * 1024, 64 * 1024),
        (16 * 1024 * 1024, 1024 * 1024),
    ];
    for (wal_size, buffer_size) in memory_profiles {
        let config = V2WALConfig {
            graph_path: temp_dir.path().join(format!("memory_test_{}.db", wal_size)),
            wal_path: temp_dir
                .path()
                .join(format!("memory_test_{}.wal", wal_size)),
            checkpoint_path: temp_dir
                .path()
                .join(format!("memory_test_{}.checkpoint", wal_size)),
            max_wal_size: wal_size,
            buffer_size,
            checkpoint_interval: 1000,
            group_commit_timeout_ms: 10,
            max_group_commit_size: 8,
            enable_compression: false,
            compression_level: 3,
            auto_checkpoint: false,
            background_checkpoint_thread: false,
            background_checkpoint_interval_secs: 0,
            json_limits: JsonLimits::default(),
        };
        assert!(
            config.max_wal_size > 0 && config.buffer_size > 0,
            "Config with WAL size {} and buffer size {} should be valid",
            wal_size,
            buffer_size
        );
        // Keep the in-memory buffer to at most a quarter of the on-disk WAL.
        assert!(
            buffer_size <= (wal_size / 4) as usize,
            "Buffer size should be reasonable fraction of WAL size"
        );
    }
    Ok(())
}
#[test]
fn test_performance_requirements() -> NativeResult<()> {
    // Micro-benchmark: a strictly-increasing LSN check must sustain at least
    // 1M validations per second.
    //
    // Fixes over the previous version:
    // * the old check was `current_lsn > (current_lsn - 1)` — a tautology
    //   that compared a value against itself minus one and validated nothing;
    //   we now track the real previous LSN.
    // * `std::hint::black_box` keeps the optimizer from const-folding the
    //   whole loop away in release builds, which would make the measured
    //   ops/sec figure meaningless.
    let start_time = std::time::Instant::now();
    let iterations: u64 = 1_000_000;
    let mut prev_lsn: u64 = 0;
    let mut current_lsn: u64 = 0;
    for _ in 0..iterations {
        current_lsn += 1;
        let is_valid =
            std::hint::black_box(current_lsn) > std::hint::black_box(prev_lsn);
        assert!(is_valid, "LSN sequence should be valid");
        prev_lsn = current_lsn;
    }
    let elapsed = start_time.elapsed();
    let lsn_ops_per_second = iterations as f64 / elapsed.as_secs_f64();
    assert!(
        lsn_ops_per_second >= 1_000_000.0,
        "LSN validation should be fast: {:.0} ops/sec",
        lsn_ops_per_second
    );
    Ok(())
}