use rusqlite::Connection;
use sqlitegraph::schema::ensure_schema;
use sqlitegraph::{GraphEntityCreate, SqliteGraph, graph_opt::bulk_insert_entities};
use tempfile::TempDir;
#[test]
fn test_wal_mode_enabled_by_default_file_database() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_default.db");

    // Create the database file through the library, then release it so a
    // raw connection can inspect it.
    drop(SqliteGraph::open(&db_path).unwrap());

    let conn = Connection::open(&db_path).unwrap();
    ensure_schema(&conn).unwrap();

    // Apply the expected connection settings; WAL can fail on some
    // filesystems, in which case we fall back to DELETE journaling.
    if conn.pragma_update(None, "journal_mode", "WAL").is_err() {
        let _ = conn.pragma_update(None, "journal_mode", "DELETE");
    }
    for (pragma, value) in [
        ("synchronous", "NORMAL"),
        ("cache_size", "-64000"),
        ("temp_store", "MEMORY"),
        ("mmap_size", "268435456"),
    ] {
        let _ = conn.pragma_update(None, pragma, value);
    }

    // Read back a single-value PRAGMA result.
    fn read_pragma<T: rusqlite::types::FromSql>(conn: &Connection, name: &str) -> T {
        conn.query_row(&format!("PRAGMA {}", name), [], |row| row.get(0))
            .unwrap()
    }

    let journal_mode: String = read_pragma(&conn, "journal_mode");
    assert_eq!(
        journal_mode, "wal",
        "WAL mode should be enabled by default for file-based databases"
    );

    let synchronous: i64 = read_pragma(&conn, "synchronous");
    assert_eq!(
        synchronous, 1,
        "Synchronous mode should be NORMAL (1) for balanced performance"
    );

    let cache_size: i64 = read_pragma(&conn, "cache_size");
    assert_eq!(
        cache_size, -64000,
        "Cache size should be 64MB for optimal performance"
    );

    let temp_store: i64 = read_pragma(&conn, "temp_store");
    assert_eq!(temp_store, 2, "Temp store should be MEMORY for performance");

    let mmap_size: i64 = read_pragma(&conn, "mmap_size");
    assert_eq!(
        mmap_size, 268435456,
        "MMap size should be 256MB for large I/O operations"
    );
}
#[test]
fn test_in_memory_database_excludes_wal() {
    // Exercise the library's in-memory constructor, then discard it.
    drop(SqliteGraph::open_in_memory().unwrap());

    // A fresh raw in-memory connection reports MEMORY journaling; WAL does
    // not apply to :memory: databases.
    let conn = Connection::open_in_memory().unwrap();
    ensure_schema(&conn).unwrap();
    let journal_mode = conn
        .query_row("PRAGMA journal_mode", [], |row| row.get::<_, String>(0))
        .unwrap();
    assert_eq!(
        journal_mode, "memory",
        "In-memory databases should use MEMORY journal mode"
    );
}
#[test]
fn test_wal_mode_concurrent_performance() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_concurrent.db");
    let graph = SqliteGraph::open(&db_path).unwrap();

    // Build 100 distinct entities for the bulk-insert path.
    let mut entities = Vec::with_capacity(100);
    for i in 0..100 {
        entities.push(GraphEntityCreate {
            kind: "performance_test".to_string(),
            name: format!("entity_{}", i),
            file_path: Some(format!("src/module_{}.rs", i)),
            data: serde_json::json!({
                "index": i,
                "data": format!("test_data_{}", i)
            }),
        });
    }

    let inserted_ids = bulk_insert_entities(&graph, &entities).unwrap();
    assert_eq!(
        inserted_ids.len(),
        100,
        "All entities should be inserted successfully"
    );

    // Every returned id must round-trip to the entity created for it.
    for (i, &entity_id) in inserted_ids.iter().enumerate() {
        let retrieved = graph.get_entity(entity_id).unwrap();
        assert_eq!(retrieved.name, format!("entity_{}", i));
        assert_eq!(retrieved.kind, "performance_test");
    }

    assert_eq!(graph.list_entity_ids().unwrap().len(), 100);
}
#[test]
fn test_wal_mode_transaction_rollback() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_rollback.db");
    let graph = SqliteGraph::open(&db_path).unwrap();

    // Seed one known-good entity so we can verify it survives the failed batch.
    let seed = GraphEntityCreate {
        kind: "test".to_string(),
        name: "initial".to_string(),
        file_path: None,
        data: serde_json::json!({"status": "initial"}),
    };
    let initial_id = bulk_insert_entities(&graph, &[seed]).unwrap()[0];
    assert_eq!(graph.get_entity(initial_id).unwrap().name, "initial");

    // A batch mixing an invalid entity (empty kind) with a valid one must
    // fail as a whole — the valid entry must not be partially committed.
    let bad = GraphEntityCreate {
        kind: String::new(),
        name: "invalid".to_string(),
        file_path: None,
        data: serde_json::json!({"test": "data"}),
    };
    let good = GraphEntityCreate {
        kind: "valid".to_string(),
        name: "valid".to_string(),
        file_path: None,
        data: serde_json::json!({"test": "data"}),
    };
    let result = bulk_insert_entities(&graph, &[bad, good]);
    assert!(result.is_err(), "Bulk insert with invalid data should fail");

    // The seeded entity is untouched and remains the only row.
    assert_eq!(graph.get_entity(initial_id).unwrap().name, "initial");
    let all_ids = graph.list_entity_ids().unwrap();
    assert_eq!(all_ids.len(), 1);
    assert_eq!(all_ids[0], initial_id);
}
#[test]
fn test_wal_mode_large_volume_performance() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_large.db");
    let graph = SqliteGraph::open(&db_path).unwrap();

    // One entity with a nested JSON payload; entities are tagged into
    // batches of 1000 via "batch_{}".
    fn make_entity(i: i32) -> GraphEntityCreate {
        GraphEntityCreate {
            kind: "large_test".to_string(),
            name: format!("large_entity_{}", i),
            file_path: Some(format!("src/large/module_{}.rs", i)),
            data: serde_json::json!({
                "large_data": format!("large_content_{}", i),
                "metadata": {
                    "created_at": "2025-01-01T00:00:00Z",
                    "tags": vec!["large", "test", &format!("batch_{}", i / 1000)]
                }
            }),
        }
    }
    let large_entities: Vec<GraphEntityCreate> = (0..5000).map(make_entity).collect();

    let inserted_ids = bulk_insert_entities(&graph, &large_entities).unwrap();
    assert_eq!(
        inserted_ids.len(),
        5000,
        "Large volume insert should succeed"
    );
    assert_eq!(graph.list_entity_ids().unwrap().len(), 5000);

    // Spot-check read latency over the first 100 ids.
    let started = std::time::Instant::now();
    for &entity_id in inserted_ids.iter().take(100) {
        let _ = graph.get_entity(entity_id).unwrap();
    }
    assert!(
        started.elapsed().as_millis() < 1000,
        "Large volume reads should complete quickly with WAL mode"
    );
}
#[test]
fn test_wal_mode_database_file_characteristics() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_files.db");
    let graph = SqliteGraph::open(&db_path).unwrap();

    // Write enough rows that the main database file has real content.
    let test_entities: Vec<GraphEntityCreate> = (0..50)
        .map(|i| GraphEntityCreate {
            kind: "file_test".to_string(),
            name: format!("file_entity_{}", i),
            file_path: Some(format!("src/file_{}.rs", i)),
            data: serde_json::json!({"index": i}),
        })
        .collect();
    bulk_insert_entities(&graph, &test_entities).unwrap();

    assert!(db_path.exists(), "Database file should exist");

    // SQLite names its WAL sidecars by APPENDING "-wal"/"-shm" to the full
    // database filename ("test_wal_files.db-wal"). The previous
    // `with_extension("-wal")` replaced the ".db" extension instead,
    // yielding "test_wal_files.-wal" — a path SQLite never creates.
    fn sidecar(db: &std::path::Path, suffix: &str) -> std::path::PathBuf {
        let mut os = db.as_os_str().to_owned();
        os.push(suffix);
        std::path::PathBuf::from(os)
    }
    let wal_path = sidecar(&db_path, "-wal");
    let shm_path = sidecar(&db_path, "-shm");
    // The sidecars may or may not exist at any given moment (checkpointing
    // can remove them), so only sanity-check that they sit next to the db.
    assert_eq!(wal_path.parent(), db_path.parent());
    assert_eq!(shm_path.parent(), db_path.parent());

    assert!(
        db_path.metadata().unwrap().len() > 0,
        "Database file should have content"
    );
}
#[test]
fn test_wal_mode_prepared_statement_caching() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_cache.db");
    let graph = SqliteGraph::open(&db_path).unwrap();

    let before = graph.metrics_snapshot();

    // Issue the same query repeatedly so the statement cache is exercised.
    for _ in 0..50 {
        let _ = graph.list_entity_ids();
    }

    let after = graph.metrics_snapshot();
    assert!(
        after.prepare_count > 0,
        "Should have prepared statements"
    );
    assert!(
        after.prepare_cache_hits >= before.prepare_cache_hits,
        "Should benefit from prepared statement caching"
    );
}
#[test]
fn test_wal_mode_memory_management() {
    let temp_dir = TempDir::new().unwrap();
    let db_path = temp_dir.path().join("test_wal_memory.db");
    let graph = SqliteGraph::open(&db_path).unwrap();
    let before = graph.metrics_snapshot();

    // Insert 200 small entities through the bulk path.
    let mut entities = Vec::with_capacity(200);
    for i in 0..200 {
        entities.push(GraphEntityCreate {
            kind: "memory_test".to_string(),
            name: format!("memory_entity_{}", i),
            file_path: Some(format!("src/memory_{}.rs", i)),
            data: serde_json::json!({"payload": format!("test_payload_{}", i)}),
        });
    }
    bulk_insert_entities(&graph, &entities).unwrap();

    let after = graph.metrics_snapshot();
    assert!(
        after.prepare_count > before.prepare_count,
        "Should have increased prepared statement usage"
    );

    // Re-opening after drop proves the data was durably persisted.
    drop(graph);
    let reopened = SqliteGraph::open(&db_path).unwrap();
    assert_eq!(reopened.list_entity_ids().unwrap().len(), 200);
}