// On wasm32 the global stores are single-threaded RefCells, so "locking"
// is a fallible mutable borrow; a failed borrow means a re-entrancy bug.
#[allow(unused_macros)]
#[cfg(target_arch = "wasm32")]
macro_rules! lock_mutex {
($mutex:expr) => {
$mutex.try_borrow_mut().expect("RefCell borrow failed")
};
}
// Native variant: acquires the guard via borrow_mut() — assumes the
// global exposes a RefCell-like API here (confirm against vfs_sync).
#[allow(unused_macros)]
#[cfg(not(target_arch = "wasm32"))]
macro_rules! lock_mutex {
($mutex:expr) => {
$mutex.borrow_mut()
};
}
// wasm-only scaffolding: bring in the wasm-bindgen test harness and run
// the wasm tests inside a browser environment rather than Node.
#[cfg(target_arch = "wasm32")]
use wasm_bindgen_test::*;
#[cfg(target_arch = "wasm32")]
use wasm_bindgen::JsCast;
#[cfg(target_arch = "wasm32")]
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_valid() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // Minimal 100-byte header: valid magic, 4096-byte pages, 10 pages.
    let mut buf = vec![0u8; 100];
    buf[..16].copy_from_slice(b"SQLite format 3\0");
    buf[16..18].copy_from_slice(&4096u16.to_be_bytes());
    buf[24..28].copy_from_slice(&1u32.to_be_bytes());
    buf[28..32].copy_from_slice(&10u32.to_be_bytes());
    let parsed = parse_sqlite_header(&buf);
    assert!(parsed.is_ok());
    let (size, count) = parsed.unwrap();
    assert_eq!(size, 4096);
    assert_eq!(count, 10);
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_special_page_size() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // A page-size field of 0x0001 is SQLite's sentinel for 65536-byte pages.
    let mut buf = vec![0u8; 100];
    buf[..16].copy_from_slice(b"SQLite format 3\0");
    buf[16] = 0x00;
    buf[17] = 0x01;
    buf[28..32].copy_from_slice(&5u32.to_be_bytes());
    let parsed = parse_sqlite_header(&buf);
    assert!(parsed.is_ok());
    let (size, count) = parsed.unwrap();
    assert_eq!(size, 65536);
    assert_eq!(count, 5);
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_invalid_magic() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // Correct length but wrong magic bytes must be rejected.
    let mut buf = vec![0u8; 100];
    buf[..16].copy_from_slice(b"Invalid format!\0");
    let err = parse_sqlite_header(&buf).unwrap_err();
    assert!(err.message.contains("Invalid SQLite"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_insufficient_data() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // 50 bytes is shorter than the 100-byte SQLite header.
    let buf = vec![0u8; 50];
    let err = parse_sqlite_header(&buf).unwrap_err();
    assert!(err.message.contains("Header too small"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_various_page_sizes() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // Every legal power-of-two page size from 512 through 32768; the
    // big-endian field at offset 16 encodes the size directly.
    for expected in [512, 1024, 2048, 4096, 8192, 16384, 32768] {
        let mut buf = vec![0u8; 100];
        buf[..16].copy_from_slice(b"SQLite format 3\0");
        buf[16..18].copy_from_slice(&(expected as u16).to_be_bytes());
        buf[28..32].copy_from_slice(&1u32.to_be_bytes());
        let parsed = parse_sqlite_header(&buf);
        assert!(parsed.is_ok(), "Failed for page size {}", expected);
        assert_eq!(parsed.unwrap().0, expected);
    }
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_parse_sqlite_header_page_count() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // Sweep page counts from zero up to one million; the parser itself
    // accepts zero (validation rejects it elsewhere).
    for expected in [0u32, 1, 100, 1000, 65535, 1000000] {
        let mut buf = vec![0u8; 100];
        buf[..16].copy_from_slice(b"SQLite format 3\0");
        buf[16..18].copy_from_slice(&4096u16.to_be_bytes());
        buf[28..32].copy_from_slice(&expected.to_be_bytes());
        let parsed = parse_sqlite_header(&buf);
        assert!(parsed.is_ok());
        assert_eq!(parsed.unwrap().1, expected);
    }
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen_test]
// Browser test: creates a tiny database, exports it via export_to_file,
// and checks the result is a Uint8Array carrying the SQLite magic.
async fn test_database_export_to_file() {
use absurder_sql::{Database, DatabaseConfig};
// Raise the export cap to 2 GiB so the size guard cannot trip here.
let config = DatabaseConfig {
name: "test_export.db".to_string(),
version: None,
cache_size: None,
page_size: None,
auto_vacuum: None,
journal_mode: None,
max_export_size_bytes: Some(2 * 1024 * 1024 * 1024),
};
let mut db = Database::new(config)
.await
.expect("Failed to create database");
// Tests run without leader election, so writes must be explicitly allowed.
db.allow_non_leader_writes(true)
.await
.expect("Failed to allow non-leader writes");
db.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, value TEXT)")
.await
.expect("Failed to create table");
db.execute("INSERT INTO test (value) VALUES ('test data')")
.await
.expect("Failed to insert data");
let exported_bytes = db
.export_to_file()
.await
.expect("Failed to export database");
// The export surfaces as a JS Uint8Array object.
assert!(exported_bytes.is_object());
assert!(exported_bytes.is_instance_of::<js_sys::Uint8Array>());
let uint8_array = js_sys::Uint8Array::from(exported_bytes);
let exported_vec = uint8_array.to_vec();
// Must begin with the canonical 16-byte SQLite magic string.
assert!(exported_vec.len() >= 16);
assert_eq!(&exported_vec[0..16], b"SQLite format 3\0");
// At least one full default-size (4096-byte) page should be present.
assert!(exported_vec.len() >= 4096);
}
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen_test]
// Browser test: exports a database with two related tables and checks
// the result carries the SQLite magic and spans more than one page.
async fn test_export_multi_table_database() {
use absurder_sql::{Database, DatabaseConfig};
// Raise the export cap to 2 GiB so the size guard cannot trip here.
let config = DatabaseConfig {
name: "test_multi_export.db".to_string(),
version: None,
cache_size: None,
page_size: None,
auto_vacuum: None,
journal_mode: None,
max_export_size_bytes: Some(2 * 1024 * 1024 * 1024),
};
let mut db = Database::new(config)
.await
.expect("Failed to create database");
// Tests run without leader election, so writes must be explicitly allowed.
db.allow_non_leader_writes(true)
.await
.expect("Failed to allow non-leader writes");
db.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
.await
.expect("Failed to create users table");
db.execute("CREATE TABLE posts (id INTEGER PRIMARY KEY, user_id INTEGER, content TEXT)")
.await
.expect("Failed to create posts table");
db.execute("INSERT INTO users (name) VALUES ('Alice')")
.await
.expect("Failed to insert user");
db.execute("INSERT INTO posts (user_id, content) VALUES (1, 'Hello World')")
.await
.expect("Failed to insert post");
let exported_bytes = db
.export_to_file()
.await
.expect("Failed to export database");
let uint8_array = js_sys::Uint8Array::from(exported_bytes);
let exported_vec = uint8_array.to_vec();
// Magic bytes plus a size strictly greater than one 4096-byte page
// indicate that multiple pages (two tables) were exported.
assert_eq!(&exported_vec[0..16], b"SQLite format 3\0");
assert!(exported_vec.len() > 4096);
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_valid() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // Two 4096-byte pages whose header agrees: page_size=4096, page_count=2.
    let mut file = vec![0u8; 2 * 4096];
    file[..16].copy_from_slice(b"SQLite format 3\0");
    file[16..18].copy_from_slice(&4096u16.to_be_bytes());
    file[28..32].copy_from_slice(&2u32.to_be_bytes());
    assert!(validate_sqlite_file(&file).is_ok());
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_invalid_magic() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // Wrong magic string must be rejected regardless of the rest.
    let mut file = vec![0u8; 100];
    file[..16].copy_from_slice(b"Invalid format!\0");
    let err = validate_sqlite_file(&file).unwrap_err();
    assert!(err.message.contains("Invalid SQLite"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_too_small() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // 50 bytes cannot even hold the 100-byte header.
    let err = validate_sqlite_file(&[0u8; 50]).unwrap_err();
    assert!(err.message.contains("too small"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_invalid_page_size() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // 0x012C (300) is not a power of two, hence not a legal page size.
    let mut file = vec![0u8; 1000];
    file[..16].copy_from_slice(b"SQLite format 3\0");
    file[16..18].copy_from_slice(&300u16.to_be_bytes());
    file[28..32].copy_from_slice(&1u32.to_be_bytes());
    let err = validate_sqlite_file(&file).unwrap_err();
    assert!(err.message.contains("page size"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_size_mismatch() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // Header claims 10 pages of 4096 bytes, but only one page of data exists.
    let mut file = vec![0u8; 4096];
    file[..16].copy_from_slice(b"SQLite format 3\0");
    file[16..18].copy_from_slice(&4096u16.to_be_bytes());
    file[28..32].copy_from_slice(&10u32.to_be_bytes());
    let err = validate_sqlite_file(&file).unwrap_err();
    assert!(err.message.contains("size mismatch"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_sqlite_file_zero_pages() {
    use absurder_sql::storage::export::validate_sqlite_file;
    // A zero page count is invalid even with an otherwise sound header.
    // (The count field at 28..32 is left at its zeroed default.)
    let mut file = vec![0u8; 4096];
    file[..16].copy_from_slice(b"SQLite format 3\0");
    file[16..18].copy_from_slice(&4096u16.to_be_bytes());
    let err = validate_sqlite_file(&file).unwrap_err();
    assert!(err.message.contains("page count"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
// End-to-end check of clear_database_storage: seed blocks, metadata,
// a commit marker, and an allocation-map entry for one database, clear
// it, then verify every global structure dropped that database's data.
fn test_clear_database_storage() {
use absurder_sql::storage::import::clear_database_storage;
use absurder_sql::storage::vfs_sync::{
with_global_allocation_map, with_global_commit_marker, with_global_storage,
};
use std::collections::HashMap;
let db_name = "test_clear_db";
// Seed two data blocks into the global block store.
with_global_storage(|gs| {
let mut storage = lock_mutex!(gs);
let mut blocks = HashMap::new();
blocks.insert(0, vec![1, 2, 3, 4]);
blocks.insert(1, vec![5, 6, 7, 8]);
storage.insert(db_name.to_string(), blocks);
});
// The metadata global only exists in non-wasm debug/test builds without
// fs_persist, hence this narrower cfg gate.
#[cfg(all(
not(target_arch = "wasm32"),
any(test, debug_assertions),
not(feature = "fs_persist")
))]
{
use absurder_sql::storage::metadata::{BlockMetadataPersist, ChecksumAlgorithm};
use absurder_sql::storage::vfs_sync::with_global_metadata;
with_global_metadata(|gm| {
let mut metadata = gm.lock(); let mut meta_map = HashMap::new();
meta_map.insert(
0,
BlockMetadataPersist {
checksum: 123,
version: 1,
last_modified_ms: 1000,
algo: ChecksumAlgorithm::FastHash,
},
);
metadata.insert(db_name.to_string(), meta_map);
});
}
// Seed a commit marker and an allocation-map entry for the same name.
with_global_commit_marker(|gcm| {
let mut markers = lock_mutex!(gcm);
markers.insert(db_name.to_string(), 42);
});
with_global_allocation_map(|gam| {
let mut alloc = lock_mutex!(gam);
let mut ids = std::collections::HashSet::new();
ids.insert(0);
ids.insert(1);
alloc.insert(db_name.to_string(), ids);
});
// Sanity: the seeded blocks are visible before clearing.
with_global_storage(|gs| {
let storage = lock_mutex!(gs);
assert!(storage.contains_key(db_name));
assert_eq!(storage.get(db_name).unwrap().len(), 2);
});
let result = futures::executor::block_on(clear_database_storage(db_name));
assert!(result.is_ok());
// After clearing, the storage entry is either removed or emptied.
with_global_storage(|gs| {
let storage = lock_mutex!(gs);
assert!(!storage.contains_key(db_name) || storage.get(db_name).unwrap().is_empty());
});
#[cfg(all(
not(target_arch = "wasm32"),
any(test, debug_assertions),
not(feature = "fs_persist")
))]
{
use absurder_sql::storage::vfs_sync::with_global_metadata;
with_global_metadata(|gm| {
let metadata = gm.lock(); assert!(!metadata.contains_key(db_name) || metadata.get(db_name).unwrap().is_empty());
});
}
// Commit marker must be gone or reset to zero.
with_global_commit_marker(|gcm| {
let markers = lock_mutex!(gcm);
assert!(!markers.contains_key(db_name) || markers.get(db_name) == Some(&0));
});
// Allocation map must be gone or emptied.
with_global_allocation_map(|gam| {
let alloc = lock_mutex!(gam);
assert!(!alloc.contains_key(db_name) || alloc.get(db_name).unwrap().is_empty());
});
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_clear_nonexistent_database() {
    use absurder_sql::storage::import::clear_database_storage;
    // Clearing a name that was never stored must be a no-op, not an error.
    let outcome = futures::executor::block_on(clear_database_storage("nonexistent_db_12345"));
    assert!(outcome.is_ok());
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_export_large_database_warning() {
    use absurder_sql::storage::export::parse_sqlite_header;
    // 30_000 pages x 4096 bytes ~ 117 MiB, which crosses the 100 MiB mark.
    let mut buf = vec![0u8; 100];
    buf[..16].copy_from_slice(b"SQLite format 3\0");
    buf[16..18].copy_from_slice(&4096u16.to_be_bytes());
    buf[28..32].copy_from_slice(&30000u32.to_be_bytes());
    let (page_size, page_count) = parse_sqlite_header(&buf).expect("Valid header");
    assert_eq!(page_size, 4096);
    assert_eq!(page_count, 30000);
    let total_size = (page_size as u64) * (page_count as u64);
    assert!(total_size > 100 * 1024 * 1024, "Database should be >100MB");
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_export_excessively_large_database_error() {
    use absurder_sql::storage::export::{parse_sqlite_header, validate_export_size};
    // 600_000 pages x 4096 bytes ~ 2.4 GB, past the default 2 GiB cap.
    let mut buf = vec![0u8; 100];
    buf[..16].copy_from_slice(b"SQLite format 3\0");
    buf[16..18].copy_from_slice(&4096u16.to_be_bytes());
    buf[28..32].copy_from_slice(&600000u32.to_be_bytes());
    let (page_size, page_count) = parse_sqlite_header(&buf).expect("Valid header");
    assert_eq!(page_size, 4096);
    assert_eq!(page_count, 600000);
    let total_size = (page_size as u64) * (page_count as u64);
    assert!(total_size > 2 * 1024 * 1024 * 1024, "Database should be >2GB");
    // With no explicit limit the default 2 GiB cap applies; the error
    // message reports the cap in MB ("2048.00").
    let error = validate_export_size(total_size, None).unwrap_err();
    assert!(error.message.contains("too large"));
    assert!(error.message.contains("2048.00"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_export_size_limit_configurable() {
    use absurder_sql::storage::export::validate_export_size;
    // A 3 GiB database against the default, a larger, and a smaller limit.
    let gib: u64 = 1024 * 1024 * 1024;
    let three_gib = 3 * gib;
    assert!(
        validate_export_size(three_gib, None).is_err(),
        "Should error with default 2GB limit"
    );
    assert!(
        validate_export_size(three_gib, Some(5 * gib)).is_ok(),
        "Should pass with 5GB limit"
    );
    assert!(
        validate_export_size(three_gib, Some(gib)).is_err(),
        "Should error with 1GB limit"
    );
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_clear_database_isolation() {
    use absurder_sql::storage::import::clear_database_storage;
    use absurder_sql::storage::vfs_sync::with_global_storage;
    use std::collections::HashMap;
    let victim = "test_db1";
    let bystander = "test_db2";
    // Seed one block into each of two independent databases.
    with_global_storage(|gs| {
        let mut store = lock_mutex!(gs);
        store.insert(victim.to_string(), HashMap::from([(0, vec![1, 2, 3, 4])]));
        store.insert(bystander.to_string(), HashMap::from([(0, vec![5, 6, 7, 8])]));
    });
    // Clearing `victim` must succeed and must not touch `bystander`.
    assert!(futures::executor::block_on(clear_database_storage(victim)).is_ok());
    with_global_storage(|gs| {
        let store = lock_mutex!(gs);
        assert!(!store.contains_key(victim) || store.get(victim).unwrap().is_empty());
        assert!(store.contains_key(bystander));
        let kept = store.get(bystander).unwrap();
        assert_eq!(kept.len(), 1);
        assert_eq!(kept.get(&0).unwrap(), &vec![5, 6, 7, 8]);
    });
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_streaming_export_with_progress_callback() {
    use absurder_sql::storage::export::ExportOptions;
    use std::sync::{Arc, Mutex};
    // Shared sink a progress callback would append (exported, total) pairs to.
    let progress_calls = Arc::new(Mutex::new(Vec::new()));
    let sink = Arc::clone(&progress_calls);
    // NOTE(review): this closure is never wired into `options` below
    // (progress_callback stays None), so only option plumbing is exercised.
    let _progress_callback = move |done: u64, total: u64| {
        sink.lock().unwrap().push((done, total));
    };
    let options = ExportOptions {
        max_size_bytes: Some(1024 * 1024 * 1024),
        chunk_size_bytes: Some(10 * 1024 * 1024),
        progress_callback: None,
    };
    assert_eq!(options.max_size_bytes, Some(1024 * 1024 * 1024));
    assert_eq!(options.chunk_size_bytes, Some(10 * 1024 * 1024));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_export_chunk_size_parameter() {
    use absurder_sql::storage::export::ExportOptions;
    // The chunk size is caller-configurable; build two configurations
    // differing only in that field and read it back.
    let ten_mb = ExportOptions {
        max_size_bytes: None,
        chunk_size_bytes: Some(10 * 1024 * 1024),
        progress_callback: None,
    };
    let five_mb = ExportOptions {
        max_size_bytes: None,
        chunk_size_bytes: Some(5 * 1024 * 1024),
        progress_callback: None,
    };
    assert_eq!(ten_mb.chunk_size_bytes, Some(10 * 1024 * 1024));
    assert_eq!(five_mb.chunk_size_bytes, Some(5 * 1024 * 1024));
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_export_yields_between_batches() {
    use std::time::{Duration, Instant};
    // A sleeping task can only finish promptly if this task keeps yielding
    // control back to the executor.
    let t0 = Instant::now();
    let sleeper = tokio::spawn(async {
        tokio::time::sleep(Duration::from_millis(10)).await;
        Instant::now()
    });
    for _ in 0..5 {
        tokio::task::yield_now().await;
    }
    let woke_at = sleeper.await.unwrap();
    assert!(
        woke_at.duration_since(t0) < Duration::from_millis(100),
        "Concurrent task was blocked, yield_now not working"
    );
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_check_memory_availability() {
    use absurder_sql::utils::check_available_memory;
    // The probe must report something on native targets.
    let info = check_available_memory().expect("Should be able to check memory availability");
    assert!(info.available_bytes > 0, "Available memory should be positive");
    // total_bytes is optional; when present it must bound available_bytes.
    if let Some(total) = info.total_bytes {
        assert!(total >= info.available_bytes, "Total memory should be >= available");
    }
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_estimate_export_memory_requirement() {
    use absurder_sql::utils::estimate_export_memory_requirement;
    // The estimate for a 100 MiB database must be between 1x and 3x its size.
    let db_size = 100 * 1024 * 1024;
    let needed = estimate_export_memory_requirement(db_size);
    assert!(needed >= db_size, "Memory requirement should be >= database size");
    assert!(
        needed <= db_size * 3,
        "Memory requirement shouldn't be > 3x database size"
    );
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_validate_memory_for_export() {
    use absurder_sql::utils::validate_memory_for_export;
    // 10 MiB should normally fit; on a constrained host the error must at
    // least mention memory. Either outcome is acceptable here.
    if let Err(e) = validate_memory_for_export(10 * 1024 * 1024) {
        assert!(
            e.message.contains("memory") || e.message.contains("Memory"),
            "Error message should mention memory"
        );
    }
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
// Writes a 10-block database once, then runs five exports concurrently
// and requires that all succeed and produce byte-identical output.
async fn test_concurrent_export_attempts() {
use absurder_sql::storage::block_storage::BlockStorage;
use absurder_sql::storage::export::export_database_to_bytes;
use std::sync::Arc;
let db_name = "test_concurrent_exports";
let mut storage = BlockStorage::new(db_name).await.expect("create storage");
const NUM_BLOCKS: u64 = 10;
const BLOCK_SIZE: usize = 4096;
// Block 0 is the SQLite header page: magic, page-size field (0x1000),
// two version bytes, and the big-endian page count at offset 28.
let mut header = vec![0u8; BLOCK_SIZE];
header[0..16].copy_from_slice(b"SQLite format 3\0");
header[16] = 0x10; header[17] = 0x00; header[18] = 0x01; header[19] = 0x01; let page_count_bytes = (NUM_BLOCKS as u32).to_be_bytes();
header[28..32].copy_from_slice(&page_count_bytes);
// Sentinel bytes checked later to verify exported content integrity.
header[100] = 0xAB;
header[101] = 0xCD;
storage.write_block(0, header).await.expect("write header");
// Fill the remaining blocks with a deterministic per-block pattern.
for block_id in 1..NUM_BLOCKS {
let mut block = vec![0u8; BLOCK_SIZE];
let pattern = block_id as u8;
for (i, byte) in block.iter_mut().enumerate() {
*byte = pattern.wrapping_add((i % 256) as u8);
}
storage
.write_block(block_id, block)
.await
.unwrap_or_else(|_| panic!("write block {}", block_id));
}
storage.sync().await.expect("sync storage");
drop(storage);
let db_name_arc = Arc::new(db_name.to_string());
const NUM_CONCURRENT: usize = 5;
let mut tasks = vec![];
// Each task opens its own storage handle and exports independently.
for task_id in 0..NUM_CONCURRENT {
let db_name_clone = Arc::clone(&db_name_arc);
let task = tokio::spawn(async move {
println!("Task {} starting export", task_id);
let mut task_storage = BlockStorage::new(&db_name_clone)
.await
.unwrap_or_else(|_| panic!("Task {} create storage", task_id));
let export_result = export_database_to_bytes(&mut task_storage, None).await;
match export_result {
Ok(data) => {
println!("Task {} completed export: {} bytes", task_id, data.len());
// Exported size must be exactly pages x page-size.
assert_eq!(
data.len(),
(NUM_BLOCKS * BLOCK_SIZE as u64) as usize,
"Task {} export size should match",
task_id
);
// The sentinel bytes written above must survive the export.
assert_eq!(data[100], 0xAB, "Task {} header marker 1", task_id);
assert_eq!(data[101], 0xCD, "Task {} header marker 2", task_id);
Ok(data)
}
Err(e) => {
println!("Task {} export failed: {}", task_id, e.message);
Err(e)
}
}
});
tasks.push(task);
}
let results = futures::future::join_all(tasks).await;
let mut successful_exports = 0;
let mut first_export: Option<Vec<u8>> = None;
// Every export must succeed and match the first export byte-for-byte.
for (idx, result) in results.iter().enumerate() {
match result {
Ok(Ok(data)) => {
successful_exports += 1;
if let Some(ref first) = first_export {
assert_eq!(data, first, "Task {} export should match first export", idx);
} else {
first_export = Some(data.clone());
}
}
Ok(Err(e)) => {
panic!("Task {} failed with error: {}", idx, e.message);
}
Err(e) => {
panic!("Task {} panicked: {:?}", idx, e);
}
}
}
assert_eq!(
successful_exports, NUM_CONCURRENT,
"All {} concurrent exports should succeed",
NUM_CONCURRENT
);
println!(
"All {} concurrent exports completed successfully and produced identical results",
NUM_CONCURRENT
);
}