#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_database_from_bytes() {
    // Imports a hand-built two-page SQLite image and verifies it lands in the
    // global storage map as two 4096-byte blocks with the expected contents.
    use absurder_sql::storage::import::import_database_from_bytes;
    use absurder_sql::storage::vfs_sync::with_global_storage;

    let db_name = "test_import_db";

    // Minimal 8192-byte SQLite image: magic string at 0..16, big-endian page
    // size 0x1000 (4096) at offset 16, big-endian page count 2 at 28..32.
    let mut data = vec![0u8; 8192];
    data[0..16].copy_from_slice(b"SQLite format 3\0");
    data[16] = 0x10;
    data[17] = 0x00;
    data[28..32].copy_from_slice(&[0, 0, 0, 2]);
    // Marker at the first byte of the second page/block.
    data[4096] = 0xFF;

    let result = futures::executor::block_on(import_database_from_bytes(db_name, data.clone()));
    assert!(result.is_ok(), "Import should succeed");

    with_global_storage(|gs| {
        // This fn is compiled only for non-wasm targets (see the outer cfg),
        // so the previously duplicated `#[cfg(target_arch = "wasm32")]` branch
        // was unreachable and has been removed: the handle is always a RefCell.
        let storage = gs.borrow();
        assert!(
            storage.contains_key(db_name),
            "Database should exist in storage"
        );
        let blocks = storage.get(db_name).unwrap();
        assert_eq!(blocks.len(), 2, "Should have 2 blocks");
        let block0 = blocks.get(&0).unwrap();
        assert_eq!(
            &block0[0..16],
            b"SQLite format 3\0",
            "Block 0 should contain header"
        );
        let block1 = blocks.get(&1).unwrap();
        assert_eq!(block1[0], 0xFF, "Block 1 should contain marker");
    });
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_database_different_page_size() {
    // Verifies that a database using a non-default (2048-byte) page size is
    // still chunked into the storage layer's fixed 4096-byte blocks.
    use absurder_sql::storage::import::import_database_from_bytes;
    use absurder_sql::storage::vfs_sync::with_global_storage;

    let db_name = "test_import_2k_pages";

    // 8192-byte image: page size 0x0800 (2048) at offset 16, page count 4 at
    // 28..32, so 4 * 2048 = 8192 bytes total.
    let mut data = vec![0u8; 8192];
    data[0..16].copy_from_slice(b"SQLite format 3\0");
    data[16] = 0x08;
    data[17] = 0x00;
    data[28..32].copy_from_slice(&[0, 0, 0, 4]);

    let result = futures::executor::block_on(import_database_from_bytes(db_name, data.clone()));
    assert!(result.is_ok());

    with_global_storage(|gs| {
        // Non-wasm only (outer cfg): the unreachable wasm branch was removed —
        // the handle is always a RefCell here.
        let storage = gs.borrow();
        let blocks = storage.get(db_name).unwrap();
        // Storage blocks are 4096 bytes regardless of the SQLite page size.
        assert_eq!(blocks.len(), 2, "Should have 2 blocks (8192 / 4096)");
    });
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_invalid_database_fails() {
    // A buffer without the SQLite magic header must be rejected with an
    // "Invalid SQLite" error rather than imported.
    use absurder_sql::storage::import::import_database_from_bytes;

    let db_name = "test_import_invalid";

    // Page-sized buffer whose first 16 bytes are NOT the SQLite magic string.
    let mut bogus = vec![0u8; 4096];
    bogus[0..16].copy_from_slice(b"Invalid format!\0");

    let result = futures::executor::block_on(import_database_from_bytes(db_name, bogus));
    assert!(result.is_err(), "Import should fail for invalid file");
    let err = result.unwrap_err();
    assert!(err.message.contains("Invalid SQLite"));
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_clears_existing_data() {
    // An import must replace any blocks already stored under the same database
    // name, not merge with them.
    use absurder_sql::storage::import::import_database_from_bytes;
    use absurder_sql::storage::vfs_sync::with_global_storage;
    use std::collections::HashMap;

    let db_name = "test_import_clear";

    // Pre-populate global storage with a stray block (id 99) that a correct
    // import must wipe out.
    with_global_storage(|gs| {
        // Non-wasm only (outer cfg): the previously duplicated wasm branch was
        // unreachable and has been removed — the handle is always a RefCell.
        let mut storage = gs.borrow_mut();
        let mut blocks = HashMap::new();
        blocks.insert(99, vec![0xAA; 4096]);
        storage.insert(db_name.to_string(), blocks);
    });

    // Minimal valid single-page database: magic, page size 4096, page count 1.
    let mut data = vec![0u8; 4096];
    data[0..16].copy_from_slice(b"SQLite format 3\0");
    data[16] = 0x10;
    data[17] = 0x00;
    data[28..32].copy_from_slice(&[0, 0, 0, 1]);

    let result = futures::executor::block_on(import_database_from_bytes(db_name, data));
    assert!(result.is_ok());

    with_global_storage(|gs| {
        let storage = gs.borrow();
        let blocks = storage.get(db_name).unwrap();
        assert!(!blocks.contains_key(&99), "Old block 99 should be cleared");
        assert!(blocks.contains_key(&0), "New block 0 should exist");
    });
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_database_with_padding() {
    // An input that is not a multiple of the 4096-byte block size (6144 bytes
    // = 1.5 blocks) must be zero-padded to fill the final block.
    use absurder_sql::storage::import::import_database_from_bytes;
    use absurder_sql::storage::vfs_sync::with_global_storage;

    let db_name = "test_import_padding";

    // 6144-byte image: page size 0x0800 (2048) at offset 16, page count 3.
    let mut data = vec![0u8; 6144];
    data[0..16].copy_from_slice(b"SQLite format 3\0");
    data[16] = 0x08;
    data[17] = 0x00;
    data[28..32].copy_from_slice(&[0, 0, 0, 3]);
    // Fill everything after the first 100 header bytes with a position-derived
    // pattern. (The original `.take(6144)` was a no-op over a 6144-element vec
    // and has been dropped.)
    for (i, byte) in data.iter_mut().enumerate().skip(100) {
        *byte = (i % 256) as u8;
    }

    let result = futures::executor::block_on(import_database_from_bytes(db_name, data.clone()));
    assert!(result.is_ok());

    with_global_storage(|gs| {
        // Non-wasm only (outer cfg): the unreachable wasm branch was removed —
        // the handle is always a RefCell here.
        let storage = gs.borrow();
        let blocks = storage.get(db_name).unwrap();
        assert_eq!(blocks.len(), 2, "Should have 2 blocks with padding");
        let block0 = blocks.get(&0).unwrap();
        assert_eq!(block0.len(), 4096);
        assert_eq!(&block0[0..16], b"SQLite format 3\0");
        // Block 1 holds data[4096..6144] followed by zero padding; offset 2048
        // within the block is the first padded byte.
        let block1 = blocks.get(&1).unwrap();
        assert_eq!(block1.len(), 4096);
        assert_eq!(block1[2048], 0, "Should be zero-padded after data");
    });
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_import_clears_block_storage_cache() {
    // Importing a database behind an open BlockStorage's back must invalidate
    // that storage's cache: subsequent reads return the new bytes, not stale
    // cached ones.
    use absurder_sql::storage::block_storage::BlockStorage;
    use absurder_sql::storage::import::import_database_from_bytes;

    let db_name = "test_cache_clear";

    // Seed block 0 with 0xAA and pull it through the read path so it is cached.
    let mut storage = BlockStorage::new(db_name).await.expect("create storage");
    let seed = vec![0xAA; 4096];
    storage
        .write_block(0, seed.clone())
        .await
        .expect("write original");
    storage.sync().await.expect("sync original");
    let first_read = storage.read_block(0).await.expect("read original");
    assert_eq!(first_read[0], 0xAA, "Original data should be 0xAA");

    // Now import a one-page database filled with 0xBB under the same name.
    let mut replacement = vec![0xBB; 4096];
    replacement[0..16].copy_from_slice(b"SQLite format 3\0");
    replacement[16] = 0x10;
    replacement[17] = 0x00;
    replacement[28..32].copy_from_slice(&[0, 0, 0, 1]);
    import_database_from_bytes(db_name, replacement)
        .await
        .expect("import new database");

    // Notifying the storage of the import should drop any cached blocks.
    storage.on_database_import().await.expect("notify import");

    let refreshed = storage.read_block(0).await.expect("read after import");
    assert_eq!(
        refreshed[100], 0xBB,
        "Should read newly imported data (0xBB/187), not stale cached data (0xAA/170) or zeroed (0)"
    );
    assert_eq!(
        &refreshed[0..16],
        b"SQLite format 3\0",
        "Should have SQLite header from imported data"
    );
}
#[cfg(not(target_arch = "wasm32"))]
#[test]
fn test_import_cache_invalidation_api_exists() {
    // Smoke test: the cache-invalidation entry point is public and callable,
    // and invoking it for a database with no open storages must not panic.
    use absurder_sql::storage::import::invalidate_block_storage_caches;

    invalidate_block_storage_caches("test_invalidate_api");
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_export_import_export_identical() {
    // Round-trip invariant: export -> import -> export must reproduce the
    // database byte-for-byte, including per-page marker bytes.
    use absurder_sql::storage::block_storage::BlockStorage;
    use absurder_sql::storage::export::export_database_to_bytes;
    use absurder_sql::storage::import::import_database_from_bytes;

    let db_name_1 = "test_roundtrip_original";
    let db_name_2 = "test_roundtrip_imported";

    let mut original_store = BlockStorage::new(db_name_1).await.expect("create storage1");

    // Craft a 3-page image: magic string, page size 0x1000 (4096), file-format
    // version bytes at 18/19, page count 3, and one marker byte per page at
    // offset 100 within each page.
    let mut image = vec![0u8; 12288];
    image[0..16].copy_from_slice(b"SQLite format 3\0");
    image[16] = 0x10;
    image[17] = 0x00;
    image[18] = 0x01;
    image[19] = 0x01;
    image[28..32].copy_from_slice(&[0, 0, 0, 3]);
    image[100] = 0xAA;
    image[4196] = 0xBB;
    image[8292] = 0xCC;

    // Write the three pages as individual blocks, then persist them.
    let page0 = image[0..4096].to_vec();
    let page1 = image[4096..8192].to_vec();
    let page2 = image[8192..12288].to_vec();
    original_store.write_block(0, page0).await.expect("write block 0");
    original_store.write_block(1, page1).await.expect("write block 1");
    original_store.write_block(2, page2).await.expect("write block 2");
    original_store.sync().await.expect("sync storage1");

    // First export.
    let export1 = export_database_to_bytes(&mut original_store, None)
        .await
        .expect("export database 1");
    assert!(!export1.is_empty(), "Export 1 should not be empty");
    assert_eq!(
        export1.len(),
        12288,
        "Export 1 should be 3 pages (12288 bytes)"
    );

    // Import the export under a new name, refresh, and export again.
    import_database_from_bytes(db_name_2, export1.clone())
        .await
        .expect("import to database 2");
    let mut imported_store = BlockStorage::new(db_name_2).await.expect("create storage2");
    imported_store
        .on_database_import()
        .await
        .expect("notify storage2 of import");
    let export2 = export_database_to_bytes(&mut imported_store, None)
        .await
        .expect("export database 2");

    assert_eq!(
        export1.len(),
        export2.len(),
        "Export sizes should be identical"
    );
    assert_eq!(
        export1, export2,
        "Export → Import → Export should produce identical bytes"
    );
    assert_eq!(export2[100], 0xAA, "Page 0 marker should be preserved");
    assert_eq!(export2[4196], 0xBB, "Page 1 marker should be preserved");
    assert_eq!(export2[8292], 0xCC, "Page 2 marker should be preserved");
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_import_large_database() {
    // Stress test: build a >10 MB database (2600 blocks of 4096 bytes), export
    // it, import it under a new name, and spot-check that the header and a
    // sample of block contents survive the round trip.
    use absurder_sql::storage::block_storage::BlockStorage;
    use absurder_sql::storage::export::export_database_to_bytes;
    use absurder_sql::storage::import::import_database_from_bytes;
    let db_name_original = "test_large_import_original";
    let db_name_imported = "test_large_import_imported";
    const NUM_BLOCKS: u64 = 2600;
    const BLOCK_SIZE: usize = 4096;
    let mut storage = BlockStorage::new(db_name_original)
        .await
        .expect("create storage");
    // Block 0 is the SQLite header page: magic string, page size 0x1000 (4096)
    // at offset 16, file-format version bytes at 18/19, big-endian page count
    // at 28..32, and a 0xDEADBEEF marker at offset 100 for later verification.
    let mut header_block = vec![0u8; BLOCK_SIZE];
    header_block[0..16].copy_from_slice(b"SQLite format 3\0");
    header_block[16] = 0x10; header_block[17] = 0x00; header_block[18] = 0x01; header_block[19] = 0x01;
    let page_count_bytes = (NUM_BLOCKS as u32).to_be_bytes();
    header_block[28..32].copy_from_slice(&page_count_bytes);
    header_block[100] = 0xDE;
    header_block[101] = 0xAD;
    header_block[102] = 0xBE;
    header_block[103] = 0xEF;
    storage
        .write_block(0, header_block.clone())
        .await
        .expect("write header block");
    // Every other block embeds its own id (little-endian) in bytes 0..8 and a
    // deterministic id-derived byte pattern in the remainder, so any block can
    // be verified independently after the import.
    for block_id in 1..NUM_BLOCKS {
        let mut block = vec![0u8; BLOCK_SIZE];
        let block_id_bytes = block_id.to_le_bytes();
        block[0..8].copy_from_slice(&block_id_bytes);
        let pattern = (block_id % 256) as u8;
        for (i, byte) in block.iter_mut().enumerate().skip(8) {
            *byte = pattern.wrapping_add((i % 256) as u8);
        }
        storage
            .write_block(block_id, block)
            .await
            .unwrap_or_else(|_| panic!("write block {}", block_id));
    }
    storage.sync().await.expect("sync storage");
    println!(
        "Created large database with {} blocks (~{:.2} MB)",
        NUM_BLOCKS,
        (NUM_BLOCKS * BLOCK_SIZE as u64) as f64 / (1024.0 * 1024.0)
    );
    // Export the full database and confirm the size is exactly block-aligned
    // and comfortably over the 10 MB "large" threshold.
    let export_data = export_database_to_bytes(&mut storage, None)
        .await
        .expect("export large database");
    let export_size = export_data.len();
    assert_eq!(
        export_size,
        (NUM_BLOCKS * BLOCK_SIZE as u64) as usize,
        "Export size should match database size"
    );
    assert!(
        export_size > 10 * 1024 * 1024,
        "Database should be larger than 10MB"
    );
    println!(
        "Exported database: {} bytes ({:.2} MB)",
        export_size,
        export_size as f64 / (1024.0 * 1024.0)
    );
    import_database_from_bytes(db_name_imported, export_data.clone())
        .await
        .expect("import large database");
    println!("Imported large database successfully");
    // on_database_import() tells the freshly opened storage to drop any cached
    // state and re-read the imported blocks.
    let mut storage_imported = BlockStorage::new(db_name_imported)
        .await
        .expect("create imported storage");
    storage_imported
        .on_database_import()
        .await
        .expect("refresh imported storage");
    let imported_header = storage_imported
        .read_block(0)
        .await
        .expect("read imported header");
    assert_eq!(
        &imported_header[0..16],
        b"SQLite format 3\0",
        "Header magic should be preserved"
    );
    assert_eq!(
        imported_header[100..104],
        [0xDE, 0xAD, 0xBE, 0xEF],
        "Header marker should be preserved"
    );
    // Spot-check a spread of blocks (including the last) rather than all 2600,
    // to keep the test fast.
    let sample_blocks = [1, 100, 500, 1000, 1500, 2000, 2500, NUM_BLOCKS - 1];
    for &block_id in &sample_blocks {
        let imported_block = storage_imported
            .read_block(block_id)
            .await
            .unwrap_or_else(|_| panic!("read imported block {}", block_id));
        // Recover the embedded little-endian id and compare with the block id.
        let mut stored_id_bytes = [0u8; 8];
        stored_id_bytes.copy_from_slice(&imported_block[0..8]);
        let stored_id = u64::from_le_bytes(stored_id_bytes);
        assert_eq!(
            stored_id, block_id,
            "Block {} should have correct ID in data",
            block_id
        );
        // First patterned byte: pattern.wrapping_add(i % 256) at i == 8.
        let base_pattern = (block_id % 256) as u8;
        let expected_at_pos_8 = base_pattern.wrapping_add(8);
        assert_eq!(
            imported_block[8], expected_at_pos_8,
            "Block {} should have correct pattern at position 8",
            block_id
        );
    }
    println!("Large database import test completed successfully");
}
#[cfg(not(target_arch = "wasm32"))]
#[tokio::test]
async fn test_export_import_with_indexes_and_triggers() {
    // End-to-end schema preservation: build a genuine SQLite file with
    // rusqlite (table, index, trigger, three rows), feed its bytes through
    // import -> export -> import -> export, and require byte-identical output.
    use absurder_sql::storage::block_storage::BlockStorage;
    use absurder_sql::storage::export::export_database_to_bytes;
    use absurder_sql::storage::import::import_database_from_bytes;
    use rusqlite::Connection;
    let db_name_original = "test_schema_original";
    let db_name_imported = "test_schema_imported";
    // Assemble the source database in memory.
    let conn = Connection::open_in_memory().expect("create connection");
    conn.execute(
        "CREATE TABLE users (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
email TEXT UNIQUE,
created_at INTEGER
)",
        [],
    )
    .expect("create table");
    conn.execute("CREATE INDEX idx_users_name ON users(name)", [])
        .expect("create index");
    // Trigger stamps created_at on every freshly inserted row.
    conn.execute(
        "CREATE TRIGGER users_created_at
AFTER INSERT ON users
BEGIN
UPDATE users SET created_at = strftime('%s', 'now') WHERE id = NEW.id;
END",
        [],
    )
    .expect("create trigger");
    conn.execute(
        "INSERT INTO users (name, email) VALUES ('Alice', 'alice@example.com')",
        [],
    )
    .expect("insert 1");
    conn.execute(
        "INSERT INTO users (name, email) VALUES ('Bob', 'bob@example.com')",
        [],
    )
    .expect("insert 2");
    conn.execute(
        "INSERT INTO users (name, email) VALUES ('Charlie', 'charlie@example.com')",
        [],
    )
    .expect("insert 3");
    // Copy the in-memory database to a second connection via the backup API so
    // it can be serialized to disk below.
    let mut backup_conn = Connection::open(":memory:").expect("create backup connection");
    let backup = rusqlite::backup::Backup::new(&conn, &mut backup_conn).expect("create backup");
    backup
        .run_to_completion(5, std::time::Duration::from_millis(100), None)
        .expect("backup");
    drop(backup);
    drop(conn);
    // VACUUM INTO writes a compact on-disk copy; read it back as raw bytes and
    // remove the temp file (pid-suffixed name avoids collisions across runs;
    // cleanup failure is intentionally ignored via .ok()).
    let db_bytes = {
        let temp_path = std::env::temp_dir().join(format!("test_schema_{}.db", std::process::id()));
        backup_conn
            .execute("VACUUM INTO ?1", [temp_path.to_str().unwrap()])
            .expect("vacuum");
        let bytes = std::fs::read(&temp_path).expect("read temp file");
        std::fs::remove_file(&temp_path).ok();
        bytes
    };
    println!(
        "Created test database with schema: {} bytes",
        db_bytes.len()
    );
    // First import + export cycle.
    import_database_from_bytes(db_name_original, db_bytes.clone())
        .await
        .expect("import original database");
    let mut storage = BlockStorage::new(db_name_original)
        .await
        .expect("create storage");
    storage.on_database_import().await.expect("refresh storage");
    let export_data = export_database_to_bytes(&mut storage, None)
        .await
        .expect("export database");
    println!("Exported database: {} bytes", export_data.len());
    // Second import + export cycle under a different database name.
    import_database_from_bytes(db_name_imported, export_data.clone())
        .await
        .expect("import to new database");
    let mut storage_imported = BlockStorage::new(db_name_imported)
        .await
        .expect("create imported storage");
    storage_imported
        .on_database_import()
        .await
        .expect("refresh imported storage");
    let export_data2 = export_database_to_bytes(&mut storage_imported, None)
        .await
        .expect("export imported database");
    // Byte-identical exports imply the index, trigger, and rows all survived.
    assert_eq!(export_data, export_data2, "Exports should be identical");
    println!("Database with indexes and triggers exported and imported successfully");
    println!("Schema objects preserved through export/import cycle");
}