moonpool_explorer/split_loop.rs

//! Timeline splitting loop.
//!
//! When a new assertion success is discovered, [`split_on_discovery`] forks
//! child processes with different seeds to explore alternate timelines from
//! that split point forward.
//!
//! # Process Model
//!
//! ```text
//! Parent timeline (seed S0, depth D)
//!   |-- Timeline 0 (seed S0', depth D+1) -> waitpid -> merge coverage
//!   |-- Timeline 1 (seed S1', depth D+1) -> waitpid -> merge coverage
//!   |-- ...
//!   `-- Timeline N (seed SN', depth D+1) -> waitpid -> merge coverage
//!   resume parent timeline
//! ```
//!
//! Each child returns from the split call and continues the simulation with
//! reseeded randomness. The parent reaps every child (sequentially by default,
//! or via a sliding window when parallelism is configured) before resuming.

use std::sync::atomic::Ordering;

#[cfg(unix)]
use std::collections::HashMap;

use crate::context::{
    self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
};
#[cfg(unix)]
use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
use crate::shared_stats::MAX_RECIPE_ENTRIES;

/// Compute a child seed by mixing the parent seed, assertion name, and child index.
///
/// Uses FNV-1a mixing to produce well-distributed seeds.
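///
/// A quick sanity sketch (mirroring the unit tests at the bottom of this file):
///
/// ```ignore
/// // Same inputs, same seed; changing any input perturbs the result.
/// assert_eq!(compute_child_seed(42, "test", 0), compute_child_seed(42, "test", 0));
/// assert_ne!(compute_child_seed(42, "test", 0), compute_child_seed(42, "test", 1));
/// ```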
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    let mut hash: u64 = 0xcbf29ce484222325;
    for &byte in mark_name.as_bytes() {
        hash ^= byte as u64;
        hash = hash.wrapping_mul(0x100000001b3);
    }
    hash ^= parent_seed;
    hash = hash.wrapping_mul(0x100000001b3);
    hash ^= child_idx as u64;
    hash = hash.wrapping_mul(0x100000001b3);
    hash
}

/// Controls how many children can run in parallel during splitting.
///
/// When set on [`crate::ExplorationConfig::parallelism`], the fork loop
/// uses a sliding window of this many concurrent children instead of the
/// default sequential fork→wait→fork→wait cycle.
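///
/// # Example
///
/// A minimal sketch of choosing a level; this assumes the
/// [`crate::ExplorationConfig::parallelism`] field referenced above is an
/// `Option<Parallelism>`, matching how the split loop reads it from context:
///
/// ```ignore
/// // Leave one core for the OS; resolves to at least 1.
/// config.parallelism = Some(Parallelism::MaxCoresMinus(1));
/// ```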
#[derive(Debug, Clone)]
pub enum Parallelism {
    /// Use all available CPU cores (`sysconf(_SC_NPROCESSORS_ONLN)`).
    MaxCores,
    /// Use half the available CPU cores.
    HalfCores,
    /// Use exactly this many concurrent children.
    Cores(usize),
    /// Use all available cores minus `n` (e.g., leave 1 for the OS).
    MaxCoresMinus(usize),
}

/// Resolve a [`Parallelism`] value to a concrete slot count (≥ 1).
#[cfg(unix)]
fn resolve_parallelism(p: &Parallelism) -> usize {
    let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
    let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
    let n = match p {
        Parallelism::MaxCores => ncpus,
        Parallelism::HalfCores => ncpus / 2,
        Parallelism::Cores(c) => *c,
        Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
    };
    n.max(1) // always at least 1
}

/// Get or initialize the per-process bitmap pool in shared memory.
///
/// Returns the pool base pointer, or null if allocation fails.
/// Each forked child resets this to null so it allocates its own pool
/// if it becomes a parent (avoids sharing pool slots with siblings).
#[cfg(unix)]
fn get_or_init_pool(slot_count: usize) -> *mut u8 {
    let existing = BITMAP_POOL.with(|c| c.get());
    let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());

    if !existing.is_null() && existing_slots >= slot_count {
        return existing;
    }

    // Free old pool if it exists but is too small
    if !existing.is_null() {
        // Safety: ptr was returned by alloc_shared with existing_slots * COVERAGE_MAP_SIZE
        unsafe {
            crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
        }
        BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
        BITMAP_POOL_SLOTS.with(|c| c.set(0));
    }

    match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
        Ok(ptr) => {
            BITMAP_POOL.with(|c| c.set(ptr));
            BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
            ptr
        }
        Err(_) => std::ptr::null_mut(),
    }
}

/// Return the pointer to slot `idx` within a bitmap pool.
#[cfg(unix)]
fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
    // Safety: caller ensures idx < slot_count and pool_base is valid
    unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
}

/// Common child-process setup after fork: reseed RNG, update context, bump counter.
///
/// Also resets the bitmap pool pointer so nested splits allocate a fresh pool.
#[cfg(unix)]
fn setup_child(
    child_seed: u64,
    split_call_count: u64,
    stats_ptr: *mut crate::shared_stats::SharedStats,
) {
    context::rng_reseed(child_seed);
    context::with_ctx_mut(|ctx| {
        ctx.is_child = true;
        ctx.depth += 1;
        ctx.current_seed = child_seed;
        ctx.recipe.push((split_call_count, child_seed));
    });
    if !stats_ptr.is_null() {
        // Safety: stats_ptr points to valid shared memory
        unsafe {
            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
        }
    }
    // Reset bitmap pool so nested splits allocate a fresh pool
    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
    BITMAP_POOL_SLOTS.with(|c| c.set(0));
}

/// Reap one finished child via `waitpid(-1)`, merge its coverage, check for bugs.
///
/// Removes the reaped PID from `active`, pushes its slot back to `free_slots`,
/// and sets `batch_has_new` if the child contributed new coverage bits.
#[cfg(unix)]
fn reap_one(
    active: &mut HashMap<libc::pid_t, (u64, usize)>,
    free_slots: &mut Vec<usize>,
    pool_base: *mut u8,
    vm_ptr: *mut u8,
    stats_ptr: *mut crate::shared_stats::SharedStats,
    split_call_count: u64,
    batch_has_new: &mut bool,
) {
    let mut status: libc::c_int = 0;
    // Safety: waitpid(-1) waits for any child of this process
    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
    if finished_pid <= 0 {
        return;
    }

    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
        return;
    };

    // Merge child's coverage bitmap into explored map
    if !vm_ptr.is_null() {
        // Safety: pool_base + slot offset is valid shared memory
        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        if vm.has_new_bits(&child_bm) {
            *batch_has_new = true;
        }
        vm.merge_from(&child_bm);
    }

    // Check if child found a bug (exit code 42)
    let exited_normally = libc::WIFEXITED(status);
    if exited_normally && libc::WEXITSTATUS(status) == 42 {
        if !stats_ptr.is_null() {
            // Safety: stats_ptr is valid shared memory
            unsafe {
                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
            }
        }
        save_bug_recipe(split_call_count, child_seed);
    }

    if !stats_ptr.is_null() {
        // Safety: stats_ptr is valid shared memory
        unsafe {
            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
        }
    }

    free_slots.push(slot);
}

/// Configuration for adaptive batch-based timeline splitting.
///
/// Instead of spawning a fixed number of timelines, the adaptive loop
/// spawns in batches and checks coverage yield between batches. Productive
/// marks (that find new coverage) get more timelines; barren marks stop
/// early and return their energy to the reallocation pool.
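///
/// # Example
///
/// A sketch with illustrative values; `4`/`1`/`16` mirror the fallback
/// defaults used by the batch loop below, while the energy figure is
/// arbitrary:
///
/// ```ignore
/// let cfg = AdaptiveConfig {
///     batch_size: 4,               // fork 4 children, then check coverage yield
///     min_timelines: 1,            // floor even for barren marks
///     max_timelines: 16,           // hard cap per mark
///     per_mark_energy: 64,         // illustrative starting budget
///     warm_min_timelines: Some(4), // one full batch on warm starts
/// };
/// ```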
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Number of children to fork per batch before checking coverage yield.
    pub batch_size: u32,
    /// Minimum total forks for a mark (even if barren after the first batch).
    pub min_timelines: u32,
    /// Maximum total forks for a mark (hard cap).
    pub max_timelines: u32,
    /// Initial per-mark energy budget.
    pub per_mark_energy: i64,
    /// Minimum timelines for marks during warm starts (when the explored map
    /// has prior coverage from previous seeds). Defaults to `batch_size` if
    /// `None`.
    pub warm_min_timelines: Option<u32>,
}

/// Dispatch to either adaptive or fixed-count splitting based on config.
///
/// If an energy budget is configured (adaptive mode), uses coverage-yield-driven
/// batching. Otherwise falls back to the fixed `timelines_per_split` behavior.
#[cfg(unix)]
pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
    let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
    if has_adaptive {
        adaptive_split_on_discovery(mark_name, slot_idx);
    } else {
        split_on_discovery(mark_name);
    }
}

/// No-op on non-unix platforms.
#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}

/// Adaptive split: spawn timelines in batches, check coverage yield, stop when barren.
///
/// When parallelism is configured, uses a sliding window of concurrent children
/// capped at the resolved slot count. Otherwise falls back to sequential forking.
#[cfg(unix)]
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    // Read context for guard checks
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Initialize per-mark budget on first use
    // Safety: budget_ptr is valid shared memory
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // Warm start: when the explored map has prior coverage from previous seeds,
    // barren marks stop after fewer timelines since they're re-treading
    // already-discovered paths.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    // Check parallelism
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Save parent bitmap (sequential only — parallel children use pool slots)
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    // Parallel state (only used when parallel == true)
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    // Batch loop
    loop {
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            // Safety: budget_ptr is valid
            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                break;
            }

            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                // Back-pressure: reap a finished child if all slots are busy
                while free_slots.is_empty() {
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                // Clear child bitmap slot
                // Safety: pool_base + slot offset is valid shared memory
                unsafe {
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                // Sequential path
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        let mut status: libc::c_int = 0;
                        // Safety: child_pid is valid
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }

                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                // Safety: stats_ptr is valid
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            // Safety: stats_ptr is valid
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Drain remaining active children before checking batch yield
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        // Batch complete — decide whether to continue
        if timelines_spawned >= max_timelines {
            break;
        }
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            // Barren — return remaining energy to pool
            // Safety: budget_ptr is valid
            unsafe {
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        // An incomplete batch means the energy budget ran out or a fork failed; stop
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            break;
        }
    }

    if parallel {
        // Restore parent's bitmap pointer
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
    } else {
        // Restore parent bitmap content
        if !bm_ptr.is_null() {
            // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
            unsafe {
                std::ptr::copy_nonoverlapping(
                    parent_bitmap_backup.as_ptr(),
                    bm_ptr,
                    COVERAGE_MAP_SIZE,
                );
            }
        }
    }
}

/// Split the simulation timeline at a discovery point.
///
/// Called when an assertion detects a new success (e.g. via `assertion_bool`
/// or `assertion_numeric`). Spawns `timelines_per_split` child timelines,
/// each with a different seed derived from the current seed and the mark name.
///
/// When parallelism is configured, uses a sliding window of concurrent children.
/// Otherwise falls back to sequential fork→wait→fork→wait.
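///
/// # Example
///
/// A hedged sketch of a call site; the guard variable is illustrative, and
/// the assertion helpers mentioned above normally make this call internally:
///
/// ```ignore
/// if newly_succeeded {
///     split_on_discovery("balance_stays_positive");
///     // Forked children return here with reseeded RNG and depth + 1;
///     // the parent resumes only after reaping every child.
/// }
/// ```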
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    if !ctx_active || depth >= max_depth {
        return;
    }

    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    // Safety: stats_ptr set during init, points to valid shared stats
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        return;
    }

    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    // Check parallelism
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Save parent bitmap (sequential only)
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    // Parallel state
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    let mut batch_has_new = false; // written by reap_one; fixed-count mode doesn't act on yield

    for child_idx in 0..timelines_per_split {
        if child_idx > 0 {
            // Safety: stats_ptr is valid
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            // Back-pressure: reap if all slots busy
            while free_slots.is_empty() {
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            // Safety: pool slot is valid shared memory
            unsafe {
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            // Sequential path
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    let mut status: libc::c_int = 0;
                    // Safety: child_pid is valid
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }

                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        // Safety: stats_ptr is valid
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    // Safety: stats_ptr is valid
                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Drain remaining active children
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    if parallel {
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
    } else if !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}

/// No-op on non-unix platforms.
#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}

/// Save a bug recipe to shared memory.
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
    if recipe_ptr.is_null() {
        return;
    }

    // Safety: recipe_ptr points to valid shared memory
    unsafe {
        let recipe = &mut *recipe_ptr;

        // Only save the first bug recipe (CAS from 0 to 1)
        if recipe
            .claimed
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            // Copy the current context's recipe plus this fork point
            context::with_ctx(|ctx| {
                let total_entries = ctx.recipe.len() + 1;
                let len = total_entries.min(MAX_RECIPE_ENTRIES);

                // Copy existing recipe entries
                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
                    recipe.entries[i] = entry;
                }
                // Add the current fork point
                if len > 0 {
                    recipe.entries[len - 1] = (split_call_count, child_seed);
                }
                recipe.len = len as u32;
            });
        }
    }
}

/// Exit the current child process with the given code.
///
/// Calls `libc::_exit()`, which skips atexit handlers and stdio flushing.
/// This is appropriate for forked child processes.
///
/// # Safety
///
/// This function terminates the process immediately. Only call it from a
/// forked child process.
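///
/// # Example
///
/// A sketch of signaling a bug from a child; 42 is the exit code the reap
/// loops above treat as "bug found":
///
/// ```ignore
/// exit_child(42);
/// ```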
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
    // Safety: _exit is always safe to call; it terminates the process.
    unsafe { libc::_exit(code) }
}

/// Falls back to `std::process::exit` on non-unix platforms (splitting is
/// unix-only, so this should never be reached).
#[cfg(not(unix))]
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_child_seed_deterministic() {
        let s1 = compute_child_seed(42, "test", 0);
        let s2 = compute_child_seed(42, "test", 0);
        assert_eq!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_index() {
        let s0 = compute_child_seed(42, "test", 0);
        let s1 = compute_child_seed(42, "test", 1);
        let s2 = compute_child_seed(42, "test", 2);
        assert_ne!(s0, s1);
        assert_ne!(s1, s2);
        assert_ne!(s0, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_name() {
        let s1 = compute_child_seed(42, "alpha", 0);
        let s2 = compute_child_seed(42, "beta", 0);
        assert_ne!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        let s1 = compute_child_seed(1, "test", 0);
        let s2 = compute_child_seed(2, "test", 0);
        assert_ne!(s1, s2);
    }
}