use sqry_core::cache::CacheKey;
use sqry_core::cache::{CacheConfig, CacheManager, GraphNodeSummary, PersistManager};
use sqry_core::graph::unified::node::NodeKind;
use sqry_core::hash::{Blake3Hash, hash_bytes};
use sqry_core::test_support::verbosity;
use std::fs;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::{Arc, Once};
use std::thread;
use std::time::Duration;
#[cfg(target_os = "linux")]
use std::time::Instant;
use tempfile::TempDir;
// Guards one-time logger initialization shared by every test in this binary.
static INIT: Once = Once::new();
/// Initialize test logging exactly once; safe (and cheap) to call from each test.
fn init_logging() {
    INIT.call_once(|| {
        verbosity::init(env!("CARGO_PKG_NAME"));
    });
}
/// Hash the UTF-8 bytes of `content` with the project's BLAKE3 helper.
fn make_content_hash(content: &str) -> Blake3Hash {
    let bytes = content.as_bytes();
    hash_bytes(bytes)
}
/// Build one function-kind `GraphNodeSummary` per entry in `names`, all
/// attributed to `file`. The span values (1:0 through 10:0) are fixed
/// placeholders — only the symbol names matter to these tests.
fn make_test_summaries(names: &[&str], file: &str) -> Vec<GraphNodeSummary> {
    let file_path = Path::new(file);
    let mut summaries = Vec::with_capacity(names.len());
    for &name in names {
        summaries.push(GraphNodeSummary::new(
            Arc::from(name),
            NodeKind::Function,
            Arc::from(file_path),
            1,
            0,
            10,
            0,
        ));
    }
    summaries
}
/// RAII guard for a manually-created lock file: removes the file when the
/// guard goes out of scope so tests cannot leave a lock behind on the
/// normal (non-killed) exit path.
struct ManualLockGuard {
    path: PathBuf,
    file: fs::File,
}
impl Drop for ManualLockGuard {
    fn drop(&mut self) {
        // Best-effort removal; the lock may already have been cleaned up.
        fs::remove_file(self.path.as_path()).ok();
    }
}
/// Outcome of a child-process cache operation, serialized to JSON so the
/// parent test can inspect it after the child exits.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
struct ChildResult {
    success: bool,
    message: String,
    data: Option<Vec<String>>,
}
impl ChildResult {
    /// Successful outcome with an optional payload (e.g. symbol names read).
    fn success(message: String, data: Option<Vec<String>>) -> Self {
        Self {
            success: true,
            message,
            data,
        }
    }
    /// Failed outcome; carries only the error description.
    fn failure(message: String) -> Self {
        Self {
            success: false,
            message,
            data: None,
        }
    }
    /// Serialize to pretty JSON and persist at `path`.
    fn write_to_file(&self, path: &Path) -> std::io::Result<()> {
        fs::write(path, serde_json::to_string_pretty(self)?)
    }
    /// Load and parse a result previously written with `write_to_file`.
    fn read_from_file(path: &Path) -> std::io::Result<Self> {
        let raw = fs::read_to_string(path)?;
        serde_json::from_str(&raw)
            .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))
    }
}
/// Re-invoke the current test binary as a child process that runs exactly one
/// `#[ignore]`d `cache_child_*` test, passing its inputs through environment
/// variables (`CACHE_DIR`, `RESULT_FILE`, `ARG_0`..`ARG_n`).
fn spawn_cache_child_process(
    _test_name: &str,
    cache_dir: &Path,
    result_file: &Path,
    operation: &str,
    args: &[&str],
) -> std::process::Child {
    let test_exe = std::env::current_exe().expect("Failed to get test executable path");
    let target_test = format!("cache_child_{operation}");
    let mut cmd = Command::new(test_exe);
    // `--ignored --exact <name>` restricts the harness to the single child test.
    cmd.args(["--nocapture", "--ignored", "--exact"])
        .arg(&target_test)
        .env("CACHE_CHILD_MODE", "1")
        .env("CACHE_DIR", cache_dir)
        .env("RESULT_FILE", result_file);
    for (index, value) in args.iter().enumerate() {
        cmd.env(format!("ARG_{index}"), value);
    }
    // stderr is captured so `wait_for_child` can surface it on failure.
    cmd.stdout(Stdio::null())
        .stderr(Stdio::piped())
        .spawn()
        .expect("Failed to spawn child process")
}
/// True when this process was launched by `spawn_cache_child_process`.
fn is_child_process() -> bool {
    matches!(std::env::var("CACHE_CHILD_MODE"), Ok(_))
}
/// Fetch a required environment variable, panicking with its name if absent.
fn get_child_env(key: &str) -> String {
    match std::env::var(key) {
        Ok(value) => value,
        Err(_) => panic!("Missing env var: {key}"),
    }
}
/// Wait for a spawned child test process and panic (with its captured stderr)
/// if it did not exit successfully.
///
/// The child is spawned with `stderr(Stdio::piped())`, so stderr MUST be
/// drained to EOF *before* `wait()`: if the child fills the OS pipe buffer
/// while the parent is blocked in `wait()`, both processes deadlock. Reading
/// to EOF first is safe — EOF arrives when the child exits or closes stderr,
/// after which `wait()` reaps it immediately.
fn wait_for_child(child: &mut std::process::Child, label: &str) {
    let mut stderr_output = String::new();
    if let Some(mut stderr) = child.stderr.take() {
        // Best-effort: a read error just leaves the diagnostic empty.
        let _ = stderr.read_to_string(&mut stderr_output);
    }
    let status = child
        .wait()
        .unwrap_or_else(|e| panic!("Failed to wait for {label}: {e}"));
    if !status.success() {
        panic!("{label} exited with status {status:?}. Stderr:\n{stderr_output}");
    }
}
/// Child-process body: insert a cache entry built from env-var inputs
/// (ARG_0 = file path, ARG_1 = content, ARG_2 = comma-separated symbol names)
/// and report the outcome through RESULT_FILE.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_write_entry() {
    // No-op when the harness runs ignored tests directly (not as our child).
    if !is_child_process() {
        return;
    }
    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");
    let symbol_names = get_child_env("ARG_2");
    let outcome: Result<ChildResult, String> = {
        let cache = CacheManager::new(
            CacheConfig::new()
                .with_cache_root(cache_dir)
                .with_persistence(true),
        );
        let names: Vec<&str> = symbol_names.split(',').collect();
        let hash = make_content_hash(&content);
        cache.insert(
            &file_path,
            "rust",
            hash,
            make_test_summaries(&names, &file_path),
        );
        Ok(ChildResult::success(
            format!("Wrote {} symbols to cache", names.len()),
            Some(names.iter().map(|n| (*n).to_string()).collect()),
        ))
    };
    outcome
        .unwrap_or_else(ChildResult::failure)
        .write_to_file(&result_file)
        .expect("Failed to write result file");
}
/// Child-process body: look up a cache entry keyed by ARG_0 (file path) and
/// ARG_1 (content, hashed) and report hit/miss through RESULT_FILE.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_read_entry() {
    // No-op when the harness runs ignored tests directly (not as our child).
    if !is_child_process() {
        return;
    }
    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");
    let outcome: Result<ChildResult, String> = {
        let cache = CacheManager::new(
            CacheConfig::new()
                .with_cache_root(cache_dir)
                .with_persistence(true),
        );
        let hash = make_content_hash(&content);
        // A miss is still a "successful" child run — the parent asserts on
        // the payload, not on hit/miss.
        let report = if let Some(summaries) = cache.get(&file_path, "rust", hash) {
            let names: Vec<String> = summaries.iter().map(|s| s.name.to_string()).collect();
            ChildResult::success(
                format!("Read {} symbols from cache", names.len()),
                Some(names),
            )
        } else {
            ChildResult::success("Cache miss".to_string(), None)
        };
        Ok(report)
    };
    outcome
        .unwrap_or_else(ChildResult::failure)
        .write_to_file(&result_file)
        .expect("Failed to write result file");
}
/// Child-process body for lock-contention tests: manually creates the cache
/// entry's `.lock` file (bypassing the normal locking API), holds it for
/// ARG_2 milliseconds, releases it, then performs a regular cache insert.
///
/// Env inputs: CACHE_DIR, RESULT_FILE, ARG_0 = file path, ARG_1 = content,
/// ARG_2 = hold duration in ms. Outcome is written to RESULT_FILE.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_hold_lock() -> Result<(), String> {
    // No-op when the harness runs ignored tests directly (not as our child).
    if !is_child_process() {
        return Ok(());
    }
    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");
    let hold_duration_ms: u64 = get_child_env("ARG_2").parse().expect("Invalid duration");
    let result: Result<ChildResult, String> = {
        let config = CacheConfig::new()
            .with_cache_root(cache_dir.clone())
            .with_persistence(true);
        let cache = CacheManager::new(config);
        let hash = make_content_hash(&content);
        let key = CacheKey::from_raw_path(PathBuf::from(&file_path), "rust", hash);
        // Recreate the on-disk layout the persistence layer uses so the lock
        // path below matches the one real writers will contend on.
        let persist = PersistManager::new(cache_dir.clone())
            .map_err(|e| format!("Failed to initialize persistence: {e}"))?;
        let entry_path = persist
            .user_cache_dir()
            .join(format!("{}.bin", key.storage_key()));
        // `<storage_key>.bin` -> `<storage_key>.bin.lock` (set_extension
        // replaces the trailing "bin" extension with "bin.lock").
        let mut lock_path = entry_path.clone();
        lock_path.set_extension("bin.lock");
        if let Some(parent) = lock_path.parent() {
            fs::create_dir_all(parent).map_err(|e| {
                format!(
                    "Failed to create lock directory {}: {}",
                    parent.display(),
                    e
                )
            })?;
        }
        // `create_new` fails if the lock already exists, so this child only
        // proceeds as the exclusive lock holder.
        let lock_file = fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&lock_path)
            .map_err(|e| format!("Failed to create lock file {}: {}", lock_path.display(), e))?;
        // Guard removes the lock file on drop (normal exit or panic — but not
        // on SIGKILL, which is what the stale-lock test exploits).
        let mut lock_guard = ManualLockGuard {
            path: lock_path.clone(),
            file: lock_file,
        };
        // Record our PID in the lock file and flush it to disk.
        // NOTE(review): presumably the cache layer's staleness check reads
        // this PID — confirm against the lock-cleanup implementation.
        writeln!(&mut lock_guard.file, "{}", std::process::id())
            .map_err(|e| format!("Failed to write lock PID: {e}"))?;
        lock_guard
            .file
            .sync_all()
            .map_err(|e| format!("Failed to sync lock file: {e}"))?;
        // Hold the lock for the requested window, then release it.
        thread::sleep(Duration::from_millis(hold_duration_ms));
        drop(lock_guard);
        // With the lock released, a normal insert should now succeed.
        let summaries = make_test_summaries(&["held_fn"], &file_path);
        cache.insert(&file_path, "rust", hash, summaries);
        drop(cache);
        // NOTE(review): brief pause before exiting — presumably gives the
        // persistence layer time to finish flushing; confirm in CacheManager.
        thread::sleep(Duration::from_millis(50));
        Ok(ChildResult::success(
            format!("Held lock for {hold_duration_ms}ms"),
            None,
        ))
    };
    let final_result = result.unwrap_or_else(ChildResult::failure);
    final_result
        .write_to_file(&result_file)
        .map_err(|e| e.to_string())?;
    Ok(())
}
/// Two child processes write different symbol sets for the same cache key at
/// the same time; afterwards the cache must hold one COMPLETE set, never an
/// interleaved mix of the two (i.e. writes are atomic, last writer wins).
#[test]
fn test_multiprocess_concurrent_writes() {
    init_logging();
    log::info!("Testing multi-process concurrent writes (lock contention)");
    // Safety net: a child invocation must not recurse into the parent test.
    if is_child_process() {
        return;
    }
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");
    // Both children target the same (path, language, content-hash) key.
    let file_path = "/test/file.rs";
    let content = "fn test() {}";
    let result_file_1 = tmp_cache_dir.path().join("result1.json");
    let result_file_2 = tmp_cache_dir.path().join("result2.json");
    log::debug!("Spawning child process 1 to write fn1,fn2,fn3");
    let mut child1 = spawn_cache_child_process(
        "concurrent_writes",
        &cache_dir,
        &result_file_1,
        "write_entry",
        &[file_path, content, "fn1,fn2,fn3"],
    );
    log::debug!("Spawning child process 2 to write fn4,fn5,fn6");
    let mut child2 = spawn_cache_child_process(
        "concurrent_writes",
        &cache_dir,
        &result_file_2,
        "write_entry",
        &[file_path, content, "fn4,fn5,fn6"],
    );
    log::debug!("Waiting for both child processes to complete");
    wait_for_child(&mut child1, "child 1 (concurrent_writes write_entry)");
    wait_for_child(&mut child2, "child 2 (concurrent_writes write_entry)");
    // Each child must report success regardless of which one won the race.
    let result1 = ChildResult::read_from_file(&result_file_1).expect("Failed to read result 1");
    let result2 = ChildResult::read_from_file(&result_file_2).expect("Failed to read result 2");
    assert!(
        result1.success,
        "Child 1 operation failed: {}",
        result1.message
    );
    assert!(
        result2.success,
        "Child 2 operation failed: {}",
        result2.message
    );
    // Re-open the cache from this (third) process and inspect the final state
    // as persisted on disk.
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);
    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);
    let cached_summaries = cache
        .get(file_path, "rust", hash)
        .expect("Cache should have entry after writes");
    assert_eq!(
        cached_summaries.len(),
        3,
        "Cache should have exactly 3 symbols from one complete write"
    );
    let names: Vec<String> = cached_summaries
        .iter()
        .map(|s| s.name.to_string())
        .collect();
    // Atomicity check: all three names must come from ONE child's set; a
    // mixture would indicate a torn/corrupted write.
    let is_child1_set = names
        .iter()
        .all(|n| n.starts_with("fn") && ["fn1", "fn2", "fn3"].contains(&n.as_str()));
    let is_child2_set = names
        .iter()
        .all(|n| n.starts_with("fn") && ["fn4", "fn5", "fn6"].contains(&n.as_str()));
    assert!(
        is_child1_set || is_child2_set,
        "Cache should contain complete set from one child, not a corrupted mix. Got: {names:?}"
    );
    log::info!("✓ Multi-process concurrent writes: No corruption detected. Final state: {names:?}");
}
/// An entry inserted by the parent process must be visible to a separate
/// child process reading the same cache root from disk.
#[test]
fn test_multiprocess_read_write_consistency() {
    init_logging();
    log::info!("Testing multi-process read-write consistency");
    // Safety net: a child invocation must not recurse into the parent test.
    if is_child_process() {
        return;
    }
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");
    let file_path = "/test/file.rs";
    let content = "fn test() {}";
    // Seed the persistent cache from this (parent) process.
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);
    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);
    cache.insert(
        file_path,
        "rust",
        hash,
        make_test_summaries(&["initial_fn"], file_path),
    );
    // A separate process must observe the entry written above.
    let result_file = tmp_cache_dir.path().join("result.json");
    let mut child = spawn_cache_child_process(
        "read_write",
        &cache_dir,
        &result_file,
        "read_entry",
        &[file_path, content],
    );
    wait_for_child(&mut child, "child (read_write read_entry)");
    let result = ChildResult::read_from_file(&result_file).expect("Failed to read result");
    assert!(result.success, "Child read failed: {}", result.message);
    let read_names = result.data.expect("Child should have read data");
    assert_eq!(read_names, vec!["initial_fn"], "Child read incorrect data");
    log::info!("✓ Multi-process read-write consistency verified: child read {read_names:?}");
}
/// Simulates a crash while holding the cache lock: child 1 is SIGKILLed
/// mid-hold (so its guard's Drop never runs), leaving the lock file behind.
/// A later writer must treat that lock as stale, clean it up, and complete.
/// Linux-only: relies on `kill` semantics for the crash simulation.
#[test]
#[cfg(target_os = "linux")]
fn test_multiprocess_stale_lock_cleanup() {
    init_logging();
    log::info!("Testing stale lock cleanup after process crash");
    // Safety net: a child invocation must not recurse into the parent test.
    if is_child_process() {
        return;
    }
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");
    let file_path = "/test/file.rs";
    let content = "fn test() {}";
    let result_file_1 = tmp_cache_dir.path().join("result1.json");
    // Child 1 takes the lock with a 100ms hold — we kill it mid-hold.
    let mut child1 = spawn_cache_child_process(
        "stale_lock",
        &cache_dir,
        &result_file_1,
        "hold_lock",
        &[file_path, content, "100"],
    );
    // Reconstruct the lock path the child uses (see cache_child_hold_lock).
    let hash = make_content_hash(content);
    let key = CacheKey::from_raw_path(PathBuf::from(file_path), "rust", hash);
    let persist_check =
        PersistManager::new(cache_dir.clone()).expect("Failed to create persist manager");
    let entry_path = persist_check
        .user_cache_dir()
        .join(format!("{}.bin", key.storage_key()));
    let mut lock_path = entry_path.clone();
    lock_path.set_extension("bin.lock");
    // Poll (up to ~2s) until the child has actually created the lock file.
    let mut lock_acquired = false;
    for _ in 0..200 {
        if lock_path.exists() {
            lock_acquired = true;
            break;
        }
        thread::sleep(Duration::from_millis(10));
    }
    assert!(lock_acquired, "Child 1 never acquired lock");
    log::debug!("Simulating process crash by killing child 1");
    child1.kill().expect("Failed to kill child process");
    let _ = child1.wait();
    thread::sleep(Duration::from_millis(100));
    // SIGKILL skips Drop, so the lock file must still be on disk.
    assert!(
        lock_path.exists(),
        "Lock file should still exist after process crash (before cleanup): {lock_path:?}"
    );
    // Child 2 performs a normal write; it must recover from the stale lock.
    // NOTE(review): the staleness-detection mechanism itself lives in the
    // cache layer and is not visible in this file.
    let result_file_2 = tmp_cache_dir.path().join("result2.json");
    let mut child2 = spawn_cache_child_process(
        "stale_lock",
        &cache_dir,
        &result_file_2,
        "write_entry",
        &[file_path, content, "cleanup_fn"],
    );
    wait_for_child(&mut child2, "child 2 (stale_lock write_entry)");
    let result2 = ChildResult::read_from_file(&result_file_2).expect("Failed to read result 2");
    assert!(
        result2.success,
        "Child 2 should successfully write after cleaning stale lock: {}",
        result2.message
    );
    assert!(
        !lock_path.exists(),
        "Stale lock should be cleaned up after child 2 completes: {lock_path:?}"
    );
    log::info!("✓ Stale lock cleanup verified: lock removed after process crash and recovery");
}
/// One child holds the entry lock for ~300ms while a second child tries to
/// write the same entry; the writer must block (retrying) until the lock is
/// released, then succeed. Linux-only (matches the `Instant` import gate).
#[test]
#[cfg(target_os = "linux")]
fn test_multiprocess_lock_retry_succeeds() {
    init_logging();
    log::info!("Testing multi-process lock retry mechanism");
    // Safety net: a child invocation must not recurse into the parent test.
    if is_child_process() {
        return;
    }
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");
    let file_path = "/test/file.rs";
    let content = "fn test() {}";
    let result_file_lock = tmp_cache_dir.path().join("lock_holder.json");
    // Child 1: grab the lock and hold it for 300ms.
    let mut lock_holder = spawn_cache_child_process(
        "lock_retry",
        &cache_dir,
        &result_file_lock,
        "hold_lock",
        &[file_path, content, "300"],
    );
    // Reconstruct the expected lock path (mirrors cache_child_hold_lock).
    let hash = make_content_hash(content);
    let key = CacheKey::from_raw_path(PathBuf::from(file_path), "rust", hash);
    let persist_check =
        PersistManager::new(cache_dir.clone()).expect("Failed to create persist manager");
    let entry_path = persist_check
        .user_cache_dir()
        .join(format!("{}.bin", key.storage_key()));
    let mut expected_lock_path = entry_path.clone();
    expected_lock_path.set_extension("bin.lock");
    // Poll (up to ~2s) until the holder has actually acquired the lock.
    let mut lock_ready = false;
    for _ in 0..200 {
        if expected_lock_path.exists() {
            lock_ready = true;
            break;
        }
        thread::sleep(Duration::from_millis(10));
    }
    assert!(
        lock_ready,
        "Lock holder never acquired lock at {expected_lock_path:?}"
    );
    // Child 2: a normal write that must retry until the lock is free.
    let result_file_writer = tmp_cache_dir.path().join("writer.json");
    let start = Instant::now();
    let mut writer = spawn_cache_child_process(
        "lock_retry",
        &cache_dir,
        &result_file_writer,
        "write_entry",
        &[file_path, content, "retry_fn"],
    );
    wait_for_child(&mut writer, "child 2 (lock_retry write_entry)");
    let elapsed = start.elapsed();
    // 200ms threshold (below the 300ms hold) leaves slack for spawn overhead
    // while still proving the writer was blocked rather than failing fast.
    assert!(
        elapsed >= Duration::from_millis(200),
        "Writer should block until lock released, proving retry loop executed (waited {elapsed:?})"
    );
    let writer_result =
        ChildResult::read_from_file(&result_file_writer).expect("Failed to read writer result");
    assert!(
        writer_result.success,
        "Writer should succeed after retries: {}",
        writer_result.message
    );
    wait_for_child(&mut lock_holder, "child 1 (lock_retry hold_lock)");
    let lock_result =
        ChildResult::read_from_file(&result_file_lock).expect("Failed to read lock holder result");
    assert!(
        lock_result.success,
        "Lock holder should succeed: {}",
        lock_result.message
    );
    // Final state: both children insert ("held_fn" / "retry_fn"); either may
    // have won the last write, but the entry must be one intact value.
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);
    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);
    let cached = cache
        .get(file_path, "rust", hash)
        .expect("Cache should have entry after both processes complete");
    let names: Vec<String> = cached.iter().map(|s| s.name.to_string()).collect();
    assert!(
        names == vec!["held_fn"] || names == vec!["retry_fn"],
        "Cache should contain data from one of the writers; got {names:?}"
    );
    log::info!(
        "✓ Multi-process lock retry succeeded after {elapsed:?} wait: final state {names:?}"
    );
}
/// Child 1 writes an entry and exits; child 2 starts afterwards (simulating a
/// process restart) and must read the same entry purely from the on-disk
/// cache — no in-memory state is shared between the two.
#[test]
fn test_multiprocess_cache_persistence_across_restarts() {
    init_logging();
    log::info!("Testing cache persistence across process restarts");
    // Safety net: a child invocation must not recurse into the parent test.
    if is_child_process() {
        return;
    }
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");
    let file_path = "/test/file.rs";
    let content = "fn test() {}";
    let result_file_1 = tmp_cache_dir.path().join("result1.json");
    // First "process lifetime": write one entry and exit.
    let mut child1 = spawn_cache_child_process(
        "persistence",
        &cache_dir,
        &result_file_1,
        "write_entry",
        &[file_path, content, "persisted_fn"],
    );
    wait_for_child(&mut child1, "child 1 (persistence write_entry)");
    // NOTE(review): short pause presumably lets persistence finish flushing
    // after child 1 exits — confirm against CacheManager's write path.
    thread::sleep(Duration::from_millis(100));
    // Second "process lifetime": read the entry back from disk.
    let result_file_2 = tmp_cache_dir.path().join("result2.json");
    let mut child2 = spawn_cache_child_process(
        "persistence",
        &cache_dir,
        &result_file_2,
        "read_entry",
        &[file_path, content],
    );
    wait_for_child(&mut child2, "child 2 (persistence read_entry)");
    let result2 = ChildResult::read_from_file(&result_file_2).expect("Failed to read result 2");
    assert!(
        result2.success,
        "Child 2 read operation failed: {}",
        result2.message
    );
    let read_names = result2
        .data
        .expect("Child 2 should have read data from disk");
    assert_eq!(
        read_names,
        vec!["persisted_fn"],
        "Child 2 should read data persisted by child 1"
    );
    log::info!(
        "✓ Cache persistence across process restarts verified: child 2 read {read_names:?} from disk"
    );
}