sqry-core 6.0.17

Core library for sqry - semantic code search engine
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
//! Multi-process integration tests for cache persistence layer.
//!
//! These tests verify that the cache module correctly handles concurrent access
//! from multiple separate processes, ensuring:
//! - Lock files prevent data corruption
//! - Stale locks are cleaned up after process crashes
//! - Concurrent reads and writes maintain data integrity
//! - Lock timeouts and retries work correctly
//!
//! # Test Infrastructure
//!
//! Each test spawns actual child processes that interact with the cache, then
//! verifies the results through IPC (inter-process communication via temp files).

use sqry_core::cache::CacheKey;
use sqry_core::cache::{CacheConfig, CacheManager, GraphNodeSummary, PersistManager};
use sqry_core::graph::unified::node::NodeKind;
use sqry_core::hash::{Blake3Hash, hash_bytes};
use sqry_core::test_support::verbosity;
use std::fs;
use std::io::{Read, Write};
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::{Arc, Once};
use std::thread;
use std::time::Duration;
#[cfg(target_os = "linux")]
use std::time::Instant;
use tempfile::TempDir;

// Initialize verbose logging once for all tests in this file
static INIT: Once = Once::new();

/// Install the verbose test logger exactly once for this test binary.
///
/// Safe to call at the top of every test; the `Once` guard makes all
/// invocations after the first no-ops.
fn init_logging() {
    INIT.call_once(|| verbosity::init(env!("CARGO_PKG_NAME")));
}

// ============================================================================
// Test Infrastructure
// ============================================================================

/// Hash test file content with BLAKE3 so tests can build cache keys.
fn make_content_hash(content: &str) -> Blake3Hash {
    let bytes = content.as_bytes();
    hash_bytes(bytes)
}

/// Build one `GraphNodeSummary` per symbol name, all attributed to `file`.
///
/// Every summary is a `Function` starting at line 1, column 0, spanning 10
/// lines — the positions are arbitrary; these tests only compare the names.
fn make_test_summaries(names: &[&str], file: &str) -> Vec<GraphNodeSummary> {
    let mut summaries = Vec::with_capacity(names.len());
    for name in names {
        summaries.push(GraphNodeSummary::new(
            Arc::from(*name),
            NodeKind::Function,
            Arc::from(Path::new(file)),
            1,
            0,
            10,
            0,
        ));
    }
    summaries
}

/// RAII guard around a hand-made lock file, used to simulate another process
/// holding a cache entry's lock.
///
/// The open `file` handle is kept for the guard's lifetime (the holder writes
/// its PID through it); dropping the guard deletes the lock file so the lock
/// is released even on panic or early return.
struct ManualLockGuard {
    path: PathBuf,
    file: fs::File,
}

impl Drop for ManualLockGuard {
    fn drop(&mut self) {
        // Best-effort cleanup: ignore errors (e.g. the file was already removed).
        let _ = fs::remove_file(&self.path);
    }
}

/// Result file for child process communication.
///
/// Parent and child share no memory, so the child serializes this struct to a
/// JSON file and the parent reads it back after the child exits.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
struct ChildResult {
    // Whether the child's cache operation completed successfully.
    success: bool,
    // Human-readable description of what the child did (or why it failed).
    message: String,
    data: Option<Vec<String>>, // Symbol names read from cache
}

impl ChildResult {
    fn success(message: String, data: Option<Vec<String>>) -> Self {
        Self {
            success: true,
            message,
            data,
        }
    }

    fn failure(message: String) -> Self {
        Self {
            success: false,
            message,
            data: None,
        }
    }

    fn write_to_file(&self, path: &Path) -> std::io::Result<()> {
        let json = serde_json::to_string_pretty(self)?;
        fs::write(path, json)
    }

    fn read_from_file(path: &Path) -> std::io::Result<Self> {
        let json = fs::read_to_string(path)?;
        serde_json::from_str(&json)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))
    }
}

/// Spawn a child process that runs a cache operation
///
/// The child is this same test binary, re-invoked with the harness filtered
/// down to the matching `cache_child_<operation>` entry point (those tests
/// are `#[ignore]`d so they never run during a normal pass). The child:
/// 1. Parses its inputs from environment variables
/// 2. Executes the requested cache operation
/// 3. Writes results to a JSON file for the parent to read
///
/// # Arguments
///
/// - `test_name`: Unique identifier for this test (unused, kept for logs)
/// - `cache_dir`: Path to the shared cache directory
/// - `result_file`: Path where child writes its results
/// - `operation`: The cache operation to perform
/// - `args`: Additional arguments (key, content, delay, etc.)
fn spawn_cache_child_process(
    _test_name: &str,
    cache_dir: &Path,
    result_file: &Path,
    operation: &str,
    args: &[&str],
) -> std::process::Child {
    let test_exe = std::env::current_exe().expect("Failed to get test executable path");
    let target_test = format!("cache_child_{operation}");

    let mut command = Command::new(test_exe);
    command
        .arg("--nocapture")
        .arg("--ignored") // Child processes run as ignored tests
        .arg("--exact")
        .arg(&target_test)
        .env("CACHE_CHILD_MODE", "1")
        .env("CACHE_DIR", cache_dir)
        .env("RESULT_FILE", result_file);

    // Operation-specific arguments travel as ARG_0, ARG_1, ... env vars.
    for (index, value) in args.iter().enumerate() {
        command.env(format!("ARG_{index}"), value);
    }

    // Keep stderr piped so `wait_for_child` can surface it on failure.
    command
        .stdout(Stdio::null())
        .stderr(Stdio::piped())
        .spawn()
        .expect("Failed to spawn child process")
}

/// True when this binary was re-invoked as a cache-test child process
/// (i.e. `CACHE_CHILD_MODE` was set by `spawn_cache_child_process`).
fn is_child_process() -> bool {
    matches!(std::env::var("CACHE_CHILD_MODE"), Ok(_))
}

/// Fetch a required child-process environment variable, panicking with the
/// variable's name if it is missing (a missing var means the parent spawned
/// the child incorrectly — a test bug, not a recoverable condition).
fn get_child_env(key: &str) -> String {
    match std::env::var(key) {
        Ok(value) => value,
        Err(_) => panic!("Missing env var: {key}"),
    }
}

/// Wait for a child process to finish, surfacing stderr if it fails.
fn wait_for_child(child: &mut std::process::Child, label: &str) {
    let status = child
        .wait()
        .unwrap_or_else(|e| panic!("Failed to wait for {label}: {e}"));

    if status.success() {
        return;
    }

    // Non-zero exit: pull whatever the child wrote to its piped stderr into
    // the panic message so the failure is diagnosable from parent test output.
    let mut stderr_output = String::new();
    if let Some(mut stderr) = child.stderr.take() {
        let _ = stderr.read_to_string(&mut stderr_output);
    }
    panic!("{label} exited with status {status:?}. Stderr:\n{stderr_output}");
}

// ============================================================================
// Child Process Entry Points
// ============================================================================

/// Child process: Write to cache
///
/// Reads its inputs (file path, content, comma-separated symbol names) from
/// env vars, inserts the symbols under the content hash, and reports the
/// outcome through the result file.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_write_entry() {
    if !is_child_process() {
        return;
    }

    // Inputs arrive via env vars set by `spawn_cache_child_process`.
    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");
    let symbol_names = get_child_env("ARG_2");

    let outcome: Result<ChildResult, String> = {
        // Point the cache at the directory shared with the parent test.
        let config = CacheConfig::new()
            .with_cache_root(cache_dir)
            .with_persistence(true);
        let cache = CacheManager::new(config);

        // Derive key material and the summaries to store.
        let hash = make_content_hash(&content);
        let names: Vec<&str> = symbol_names.split(',').collect();
        let summaries = make_test_summaries(&names, &file_path);

        cache.insert(&file_path, "rust", hash, summaries);

        Ok(ChildResult::success(
            format!("Wrote {} symbols to cache", names.len()),
            Some(names.iter().map(std::string::ToString::to_string).collect()),
        ))
    };

    // Report back to the parent through the result file.
    outcome
        .unwrap_or_else(ChildResult::failure)
        .write_to_file(&result_file)
        .expect("Failed to write result file");
}

/// Child process: Read from cache
///
/// Looks up the entry for (file path, "rust", content hash) and reports the
/// symbol names it found. A cache miss is still a *successful* child run —
/// the parent decides whether a miss constitutes a test failure.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_read_entry() {
    if !is_child_process() {
        return;
    }

    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");

    let outcome: Result<ChildResult, String> = {
        let config = CacheConfig::new()
            .with_cache_root(cache_dir)
            .with_persistence(true);
        let cache = CacheManager::new(config);
        let hash = make_content_hash(&content);

        let child_result = if let Some(summaries) = cache.get(&file_path, "rust", hash) {
            let names: Vec<String> = summaries.iter().map(|s| s.name.to_string()).collect();
            ChildResult::success(
                format!("Read {} symbols from cache", names.len()),
                Some(names),
            )
        } else {
            ChildResult::success("Cache miss".to_string(), None)
        };
        Ok(child_result)
    };

    outcome
        .unwrap_or_else(ChildResult::failure)
        .write_to_file(&result_file)
        .expect("Failed to write result file");
}

/// Child process: Hold lock for a duration
///
/// Manually creates the cache entry's `.bin.lock` file (writing its own PID
/// into it), sleeps for `ARG_2` milliseconds while holding it, releases the
/// lock via the guard's `Drop`, then performs a normal cache write so parents
/// can verify recovery after the lock is gone.
#[test]
#[ignore = "Only run when spawned as child process"]
fn cache_child_hold_lock() -> Result<(), String> {
    if !is_child_process() {
        return Ok(());
    }

    // Inputs arrive via env vars set by `spawn_cache_child_process`.
    let cache_dir = PathBuf::from(get_child_env("CACHE_DIR"));
    let result_file = PathBuf::from(get_child_env("RESULT_FILE"));
    let file_path = get_child_env("ARG_0");
    let content = get_child_env("ARG_1");
    let hold_duration_ms: u64 = get_child_env("ARG_2").parse().expect("Invalid duration");

    let result: Result<ChildResult, String> = {
        let config = CacheConfig::new()
            .with_cache_root(cache_dir.clone())
            .with_persistence(true);

        let cache = CacheManager::new(config);
        let hash = make_content_hash(&content);
        let key = CacheKey::from_raw_path(PathBuf::from(&file_path), "rust", hash);

        // Create lock file manually to simulate long-running write
        let persist = PersistManager::new(cache_dir.clone())
            .map_err(|e| format!("Failed to initialize persistence: {e}"))?;

        // H1 FIX: Use production lock path construction (entry_path + set_extension)
        let entry_path = persist
            .user_cache_dir()
            .join(format!("{}.bin", key.storage_key()));
        let mut lock_path = entry_path.clone();
        lock_path.set_extension("bin.lock");

        if let Some(parent) = lock_path.parent() {
            fs::create_dir_all(parent).map_err(|e| {
                format!(
                    "Failed to create lock directory {}: {}",
                    parent.display(),
                    e
                )
            })?;
        }

        // `create_new` fails if the lock already exists, mirroring the
        // exclusive-creation semantics of a real lock acquisition.
        let lock_file = fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&lock_path)
            .map_err(|e| format!("Failed to create lock file {}: {}", lock_path.display(), e))?;

        // Guard deletes the lock file on drop, even on panic or early `?` return.
        let mut lock_guard = ManualLockGuard {
            path: lock_path.clone(),
            file: lock_file,
        };

        // Write PID to lock file
        writeln!(&mut lock_guard.file, "{}", std::process::id())
            .map_err(|e| format!("Failed to write lock PID: {e}"))?;
        lock_guard
            .file
            .sync_all()
            .map_err(|e| format!("Failed to sync lock file: {e}"))?;

        // Hold the lock by sleeping
        thread::sleep(Duration::from_millis(hold_duration_ms));

        // Lock released automatically via Drop
        drop(lock_guard); // Explicit drop to ensure lock is released before cache write

        // After releasing the lock, write to cache so callers observe normal behavior
        let summaries = make_test_summaries(&["held_fn"], &file_path);
        cache.insert(&file_path, "rust", hash, summaries);

        // Ensure cache is dropped and persisted before process exits
        drop(cache);
        thread::sleep(Duration::from_millis(50));

        Ok(ChildResult::success(
            format!("Held lock for {hold_duration_ms}ms"),
            None,
        ))
    };

    let final_result = result.unwrap_or_else(ChildResult::failure);
    final_result
        .write_to_file(&result_file)
        .map_err(|e| e.to_string())?;
    Ok(())
}

// ============================================================================
// Multi-Process Integration Tests
// ============================================================================

/// Two separate processes write different symbol sets to the SAME cache key;
/// the surviving entry must be one complete set, never an interleaved mix.
#[test]
fn test_multiprocess_concurrent_writes() {
    init_logging();
    log::info!("Testing multi-process concurrent writes (lock contention)");

    // Skip if running as child process
    if is_child_process() {
        return;
    }

    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");

    let file_path = "/test/file.rs";
    let content = "fn test() {}";

    // Spawn two child processes that both try to write to the same cache key
    let result_file_1 = tmp_cache_dir.path().join("result1.json");
    let result_file_2 = tmp_cache_dir.path().join("result2.json");

    log::debug!("Spawning child process 1 to write fn1,fn2,fn3");
    let mut child1 = spawn_cache_child_process(
        "concurrent_writes",
        &cache_dir,
        &result_file_1,
        "write_entry",
        &[file_path, content, "fn1,fn2,fn3"],
    );

    log::debug!("Spawning child process 2 to write fn4,fn5,fn6");
    let mut child2 = spawn_cache_child_process(
        "concurrent_writes",
        &cache_dir,
        &result_file_2,
        "write_entry",
        &[file_path, content, "fn4,fn5,fn6"],
    );

    // Wait for both children to complete
    log::debug!("Waiting for both child processes to complete");
    wait_for_child(&mut child1, "child 1 (concurrent_writes write_entry)");
    wait_for_child(&mut child2, "child 2 (concurrent_writes write_entry)");

    // Read results
    let result1 = ChildResult::read_from_file(&result_file_1).expect("Failed to read result 1");
    let result2 = ChildResult::read_from_file(&result_file_2).expect("Failed to read result 2");

    // Both writers must succeed: lock contention should cause waiting/retry,
    // never a hard failure.
    assert!(
        result1.success,
        "Child 1 operation failed: {}",
        result1.message
    );
    assert!(
        result2.success,
        "Child 2 operation failed: {}",
        result2.message
    );

    // Verify final cache state in parent process
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);

    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);

    let cached_summaries = cache
        .get(file_path, "rust", hash)
        .expect("Cache should have entry after writes");

    // Should have symbols from ONE of the two writes (not corrupted mix)
    assert_eq!(
        cached_summaries.len(),
        3,
        "Cache should have exactly 3 symbols from one complete write"
    );

    // Verify symbols are either all from child1 OR all from child2
    let names: Vec<String> = cached_summaries
        .iter()
        .map(|s| s.name.to_string())
        .collect();

    let is_child1_set = names
        .iter()
        .all(|n| n.starts_with("fn") && ["fn1", "fn2", "fn3"].contains(&n.as_str()));
    let is_child2_set = names
        .iter()
        .all(|n| n.starts_with("fn") && ["fn4", "fn5", "fn6"].contains(&n.as_str()));

    // Last-writer-wins is acceptable; a mixture of the two sets is not.
    assert!(
        is_child1_set || is_child2_set,
        "Cache should contain complete set from one child, not a corrupted mix. Got: {names:?}"
    );

    log::info!("✓ Multi-process concurrent writes: No corruption detected. Final state: {names:?}");
}

/// The parent process writes an entry; a separate child process must read
/// back exactly that data for the same (path, language, content-hash) key.
#[test]
fn test_multiprocess_read_write_consistency() {
    init_logging();
    log::info!("Testing multi-process read-write consistency");

    if is_child_process() {
        return;
    }

    // Fresh cache directory shared between parent and child.
    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");

    let file_path = "/test/file.rs";
    let content = "fn test() {}";

    // Parent writes initial data
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);

    // FIX: `config` was previously cloned here although it is never used
    // again; pass it by value instead (clippy::redundant_clone).
    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);
    let initial_summaries = make_test_summaries(&["initial_fn"], file_path);
    cache.insert(file_path, "rust", hash, initial_summaries);

    // Spawn child process to read
    let result_file = tmp_cache_dir.path().join("result.json");
    let mut child = spawn_cache_child_process(
        "read_write",
        &cache_dir,
        &result_file,
        "read_entry",
        &[file_path, content],
    );

    wait_for_child(&mut child, "child (read_write read_entry)");

    let result = ChildResult::read_from_file(&result_file).expect("Failed to read result");
    assert!(result.success, "Child read failed: {}", result.message);

    // Verify child read the correct data
    let read_names = result.data.expect("Child should have read data");
    assert_eq!(read_names, vec!["initial_fn"], "Child read incorrect data");

    log::info!("✓ Multi-process read-write consistency verified: child read {read_names:?}");
}

/// After a lock-holding process is killed mid-hold, the orphaned lock file
/// must be detected as stale and cleaned up so a new process can acquire the
/// lock and write normally.
#[test]
#[cfg(target_os = "linux")] // This test requires process existence checking
fn test_multiprocess_stale_lock_cleanup() {
    init_logging();
    log::info!("Testing stale lock cleanup after process crash");

    if is_child_process() {
        return;
    }

    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");

    let file_path = "/test/file.rs";
    let content = "fn test() {}";

    // Start a child that will hold a lock
    let result_file_1 = tmp_cache_dir.path().join("result1.json");
    let mut child1 = spawn_cache_child_process(
        "stale_lock",
        &cache_dir,
        &result_file_1,
        "hold_lock",
        &[file_path, content, "100"], // Hold for 100ms
    );

    // M2 FIX: Wait until lock is acquired, then verify it exists before cleanup
    // The lock path below must match the child's construction exactly
    // (entry path + `.bin.lock` extension) or the polling never observes it.
    let hash = make_content_hash(content);
    let key = CacheKey::from_raw_path(PathBuf::from(file_path), "rust", hash);
    let persist_check =
        PersistManager::new(cache_dir.clone()).expect("Failed to create persist manager");
    let entry_path = persist_check
        .user_cache_dir()
        .join(format!("{}.bin", key.storage_key()));
    let mut lock_path = entry_path.clone();
    lock_path.set_extension("bin.lock");

    // Wait for lock to be acquired (2 seconds max - allows headroom for coverage instrumentation)
    let mut lock_acquired = false;
    for _ in 0..200 {
        if lock_path.exists() {
            lock_acquired = true;
            break;
        }
        thread::sleep(Duration::from_millis(10));
    }
    assert!(lock_acquired, "Child 1 never acquired lock");

    // Kill the child process (simulate crash)
    log::debug!("Simulating process crash by killing child 1");
    child1.kill().expect("Failed to kill child process");

    // Wait to ensure process is dead
    let _ = child1.wait();
    thread::sleep(Duration::from_millis(100));

    // M2 FIX: Verify lock file still exists (proving it's now stale)
    // The kill bypasses the child's Drop-based cleanup, so the file remains.
    assert!(
        lock_path.exists(),
        "Lock file should still exist after process crash (before cleanup): {lock_path:?}"
    );

    // Now try to acquire the same lock from a new process
    // This should succeed after detecting and cleaning up the stale lock
    let result_file_2 = tmp_cache_dir.path().join("result2.json");
    let mut child2 = spawn_cache_child_process(
        "stale_lock",
        &cache_dir,
        &result_file_2,
        "write_entry",
        &[file_path, content, "cleanup_fn"],
    );

    wait_for_child(&mut child2, "child 2 (stale_lock write_entry)");

    let result2 = ChildResult::read_from_file(&result_file_2).expect("Failed to read result 2");
    assert!(
        result2.success,
        "Child 2 should successfully write after cleaning stale lock: {}",
        result2.message
    );

    // M2 FIX: Verify lock was removed after successful acquisition and release
    assert!(
        !lock_path.exists(),
        "Stale lock should be cleaned up after child 2 completes: {lock_path:?}"
    );

    log::info!("✓ Stale lock cleanup verified: lock removed after process crash and recovery");
}

/// While one process holds the lock for 300ms, a second writer must block
/// and retry (rather than fail), eventually succeeding once the lock is
/// released; the elapsed time proves the retry loop actually ran.
#[test]
#[cfg(target_os = "linux")]
fn test_multiprocess_lock_retry_succeeds() {
    init_logging();
    log::info!("Testing multi-process lock retry mechanism");

    if is_child_process() {
        return;
    }

    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");

    let file_path = "/test/file.rs";
    let content = "fn test() {}";

    // Child 1: hold the lock for a while to force retries
    let result_file_lock = tmp_cache_dir.path().join("lock_holder.json");
    let mut lock_holder = spawn_cache_child_process(
        "lock_retry",
        &cache_dir,
        &result_file_lock,
        "hold_lock",
        &[file_path, content, "300"],
    );

    // M1 FIX: Poll until lock file actually exists (guarantees contention)
    // Lock path construction must mirror the child's exactly.
    let hash = make_content_hash(content);
    let key = CacheKey::from_raw_path(PathBuf::from(file_path), "rust", hash);
    let persist_check =
        PersistManager::new(cache_dir.clone()).expect("Failed to create persist manager");
    let entry_path = persist_check
        .user_cache_dir()
        .join(format!("{}.bin", key.storage_key()));
    let mut expected_lock_path = entry_path.clone();
    expected_lock_path.set_extension("bin.lock");

    let mut lock_ready = false;
    for _ in 0..200 {
        if expected_lock_path.exists() {
            lock_ready = true;
            break;
        }
        thread::sleep(Duration::from_millis(10));
    }
    assert!(
        lock_ready,
        "Lock holder never acquired lock at {expected_lock_path:?}"
    );

    // Child 2: attempt to write while lock is held; should retry until lock is released
    let result_file_writer = tmp_cache_dir.path().join("writer.json");
    let start = Instant::now();
    let mut writer = spawn_cache_child_process(
        "lock_retry",
        &cache_dir,
        &result_file_writer,
        "write_entry",
        &[file_path, content, "retry_fn"],
    );

    wait_for_child(&mut writer, "child 2 (lock_retry write_entry)");
    let elapsed = start.elapsed();

    // M1 FIX: Relax timing to allow 50ms variance for CI systems
    // M3 FIX: This timing check validates retry loop executed
    // Retry delay is 100ms, lock held for 300ms, so we should wait 200-300ms
    // This proves at least 2 retry attempts occurred (2 × 100ms = 200ms minimum)
    assert!(
        elapsed >= Duration::from_millis(200),
        "Writer should block until lock released, proving retry loop executed (waited {elapsed:?})"
    );

    let writer_result =
        ChildResult::read_from_file(&result_file_writer).expect("Failed to read writer result");
    assert!(
        writer_result.success,
        "Writer should succeed after retries: {}",
        writer_result.message
    );
    // Lock holder should also complete successfully
    wait_for_child(&mut lock_holder, "child 1 (lock_retry hold_lock)");
    let lock_result =
        ChildResult::read_from_file(&result_file_lock).expect("Failed to read lock holder result");
    assert!(
        lock_result.success,
        "Lock holder should succeed: {}",
        lock_result.message
    );

    // Parent verifies final cache state has data from one of the writers
    // Note: Since both lock holder and writer complete successfully, either may
    // be the final state depending on timing. The important thing is that both
    // processes successfully acquired the lock and wrote data without corruption.
    let config = CacheConfig::new()
        .with_cache_root(cache_dir.clone())
        .with_persistence(true);
    let cache = CacheManager::new(config);
    let hash = make_content_hash(content);

    let cached = cache
        .get(file_path, "rust", hash)
        .expect("Cache should have entry after both processes complete");
    let names: Vec<String> = cached.iter().map(|s| s.name.to_string()).collect();

    // Accept either "held_fn" (lock holder) or "retry_fn" (writer) as final state
    assert!(
        names == vec!["held_fn"] || names == vec!["retry_fn"],
        "Cache should contain data from one of the writers; got {names:?}"
    );

    log::info!(
        "✓ Multi-process lock retry succeeded after {elapsed:?} wait: final state {names:?}"
    );
}

/// Data written by one process lifetime must be readable by a brand-new
/// process loading the same cache directory from disk.
#[test]
fn test_multiprocess_cache_persistence_across_restarts() {
    init_logging();
    log::info!("Testing cache persistence across process restarts");

    if is_child_process() {
        return;
    }

    let tmp_cache_dir = TempDir::new().expect("Failed to create temp dir");
    let cache_dir = tmp_cache_dir.path().join("cache");
    fs::create_dir_all(&cache_dir).expect("Failed to create cache dir");

    let file_path = "/test/file.rs";
    let content = "fn test() {}";

    // First process lifetime: write one entry, then exit.
    let writer_result_file = tmp_cache_dir.path().join("result1.json");
    let mut writer = spawn_cache_child_process(
        "persistence",
        &cache_dir,
        &writer_result_file,
        "write_entry",
        &[file_path, content, "persisted_fn"],
    );
    wait_for_child(&mut writer, "child 1 (persistence write_entry)");

    // Give the writer's persistence layer a moment to flush to disk.
    thread::sleep(Duration::from_millis(100));

    // Second process lifetime: a fresh process must load the entry from disk.
    let reader_result_file = tmp_cache_dir.path().join("result2.json");
    let mut reader = spawn_cache_child_process(
        "persistence",
        &cache_dir,
        &reader_result_file,
        "read_entry",
        &[file_path, content],
    );
    wait_for_child(&mut reader, "child 2 (persistence read_entry)");

    let reader_result =
        ChildResult::read_from_file(&reader_result_file).expect("Failed to read result 2");
    assert!(
        reader_result.success,
        "Child 2 read operation failed: {}",
        reader_result.message
    );

    // The reader must see exactly what the writer persisted.
    let read_names = reader_result
        .data
        .expect("Child 2 should have read data from disk");
    assert_eq!(
        read_names,
        vec!["persisted_fn"],
        "Child 2 should read data persisted by child 1"
    );

    log::info!(
        "✓ Cache persistence across process restarts verified: child 2 read {read_names:?} from disk"
    );
}