// moonpool_explorer/split_loop.rs
1//! Timeline splitting loop.
2//!
3//! When a new assertion success is discovered, [`split_on_discovery`] forks
4//! child processes with different seeds to explore alternate timelines from
5//! that splitpoint forward.
6//!
7//! # Process Model
8//!
9//! ```text
10//! Parent timeline (seed S0, depth D)
11//!   |-- Timeline 0 (seed S0', depth D+1) -> waitpid -> merge coverage
12//!   |-- Timeline 1 (seed S1', depth D+1) -> waitpid -> merge coverage
13//!   |-- ...
14//!   `-- Timeline N (seed SN', depth D+1) -> waitpid -> merge coverage
15//!   resume parent timeline
16//! ```
17//!
18//! Each child returns from this function and continues the simulation with
19//! reseeded randomness. The parent waits for each child sequentially.
20
21use std::sync::atomic::Ordering;
22
23#[cfg(unix)]
24use std::collections::HashMap;
25
26use crate::context::{
27    self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
28};
29#[cfg(unix)]
30use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
31use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
32use crate::shared_stats::MAX_RECIPE_ENTRIES;
33
/// Compute a child seed by mixing the parent seed, assertion name, and child index.
///
/// Uses FNV-1a mixing to produce well-distributed seeds: each name byte is
/// folded into the hash first, then the parent seed, then the child index,
/// each followed by a multiply with the FNV prime.
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;

    // Fold every byte of the mark name into the running hash.
    let name_mixed = mark_name
        .bytes()
        .fold(FNV_OFFSET_BASIS, |h, b| (h ^ b as u64).wrapping_mul(FNV_PRIME));
    // Then mix in the parent seed and the child index the same way.
    let seed_mixed = (name_mixed ^ parent_seed).wrapping_mul(FNV_PRIME);
    (seed_mixed ^ child_idx as u64).wrapping_mul(FNV_PRIME)
}
49
/// Controls how many children can run in parallel during splitting.
///
/// When set on [`crate::ExplorationConfig::parallelism`], the fork loop
/// uses a sliding window of this many concurrent children instead of the
/// default sequential fork→wait→fork→wait cycle.
///
/// Every variant is resolved to a concrete slot count of at least 1 by
/// `resolve_parallelism` at split time, so `Cores(0)` or `HalfCores` on a
/// single-core machine still yield one slot.
#[derive(Debug, Clone)]
pub enum Parallelism {
    /// Use all available CPU cores (`sysconf(_SC_NPROCESSORS_ONLN)`).
    MaxCores,
    /// Use half the available CPU cores.
    HalfCores,
    /// Use exactly this many concurrent children.
    Cores(usize),
    /// Use all available cores minus `n` (e.g., leave 1 for the OS).
    MaxCoresMinus(usize),
}
66
67/// Resolve a [`Parallelism`] value to a concrete slot count (≥ 1).
68#[cfg(unix)]
69fn resolve_parallelism(p: &Parallelism) -> usize {
70    // Safety: sysconf reads a system configuration value and does not
71    // dereference any pointers. It is always safe to call.
72    let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
73    let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
74    let n = match p {
75        Parallelism::MaxCores => ncpus,
76        Parallelism::HalfCores => ncpus / 2,
77        Parallelism::Cores(c) => *c,
78        Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
79    };
80    n.max(1) // always at least 1
81}
82
83/// Get or initialize the per-process bitmap pool in shared memory.
84///
85/// Returns the pool base pointer, or null if allocation fails.
86/// Each forked child resets this to null so it allocates its own pool
87/// if it becomes a parent (avoids sharing pool slots with siblings).
88#[cfg(unix)]
89fn get_or_init_pool(slot_count: usize) -> *mut u8 {
90    let existing = BITMAP_POOL.with(|c| c.get());
91    let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());
92
93    if !existing.is_null() && existing_slots >= slot_count {
94        return existing;
95    }
96
97    // Free old pool if it exists but is too small
98    if !existing.is_null() {
99        // Safety: ptr was returned by alloc_shared with existing_slots * COVERAGE_MAP_SIZE
100        unsafe {
101            crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
102        }
103        BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
104        BITMAP_POOL_SLOTS.with(|c| c.set(0));
105    }
106
107    match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
108        Ok(ptr) => {
109            BITMAP_POOL.with(|c| c.set(ptr));
110            BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
111            ptr
112        }
113        Err(_) => std::ptr::null_mut(),
114    }
115}
116
/// Return the pointer to slot `idx` within a bitmap pool.
///
/// Slots are laid out contiguously, `COVERAGE_MAP_SIZE` bytes apart, so this
/// is a plain offset computation with no bounds check.
#[cfg(unix)]
fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
    // Safety: caller ensures idx < slot_count and pool_base is valid
    unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
}
123
/// Common child-process setup after fork: reseed RNG, update context, bump counter.
///
/// Also resets the bitmap pool pointer so nested splits allocate a fresh pool.
///
/// Called only in the `pid == 0` arm of a fork; the caller returns immediately
/// afterwards so the child continues the simulation from the split point.
///
/// * `child_seed` — new RNG seed for this timeline.
/// * `split_call_count` — RNG call count at the split, recorded in the recipe
///   so the fork point can be replayed.
/// * `stats_ptr` — shared stats block; may be null (then no counter is bumped).
#[cfg(unix)]
fn setup_child(
    child_seed: u64,
    split_call_count: u64,
    stats_ptr: *mut crate::shared_stats::SharedStats,
) {
    context::rng_reseed(child_seed);
    context::with_ctx_mut(|ctx| {
        ctx.is_child = true;
        ctx.depth += 1;
        ctx.current_seed = child_seed;
        // Append this fork point so a bug recipe can reconstruct the path.
        ctx.recipe.push((split_call_count, child_seed));
    });
    if !stats_ptr.is_null() {
        // Safety: stats_ptr points to valid shared memory
        unsafe {
            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
        }
    }
    // Reset bitmap pool so nested splits allocate a fresh pool
    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
    BITMAP_POOL_SLOTS.with(|c| c.set(0));

    // Zero BSS counters so child captures only its OWN sancov edges
    crate::sancov::reset_bss_counters();
    // Reset sancov pool so nested splits allocate a fresh pool
    crate::sancov::SANCOV_POOL.with(|c| c.set(std::ptr::null_mut()));
    crate::sancov::SANCOV_POOL_SLOTS.with(|c| c.set(0));
}
156
/// Reap one finished child via `waitpid(-1)`, merge its coverage, check for bugs.
///
/// Removes the reaped PID from `active`, pushes its slot back to `free_slots`,
/// and sets `batch_has_new` if the child contributed new coverage bits.
///
/// NOTE(review): if `waitpid` returns a PID that is not in `active` (e.g. a
/// child created outside this loop), this returns without freeing a slot; a
/// caller spinning on `while free_slots.is_empty()` would then retry until a
/// tracked child finishes — confirm no other children can exist here.
#[cfg(unix)]
#[allow(clippy::too_many_arguments)]
fn reap_one(
    active: &mut HashMap<libc::pid_t, (u64, usize)>,
    free_slots: &mut Vec<usize>,
    pool_base: *mut u8,
    sancov_pool_base: *mut u8,
    vm_ptr: *mut u8,
    stats_ptr: *mut crate::shared_stats::SharedStats,
    split_call_count: u64,
    batch_has_new: &mut bool,
) {
    let mut status: libc::c_int = 0;
    // Safety: waitpid(-1) waits for any child of this process
    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
    // <= 0 means error (-1) or no state change; nothing to merge or free.
    if finished_pid <= 0 {
        return;
    }

    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
        return;
    };

    // Merge child's coverage bitmap into explored map
    if !vm_ptr.is_null() {
        // Safety: pool_base + slot offset is valid shared memory
        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        // Record yield BEFORE merging — merge_from would make the bits "old".
        if vm.has_new_bits(&child_bm) {
            *batch_has_new = true;
        }
        vm.merge_from(&child_bm);
    }

    // Check child's sancov coverage from its pool slot
    if !sancov_pool_base.is_null() {
        let sancov_slot = unsafe { crate::sancov::sancov_pool_slot(sancov_pool_base, slot) };
        if crate::sancov::has_new_sancov_coverage_from(sancov_slot) {
            *batch_has_new = true;
        }
    }

    // Check if child found a bug (exit code 42)
    let exited_normally = libc::WIFEXITED(status);
    if exited_normally && libc::WEXITSTATUS(status) == 42 {
        if !stats_ptr.is_null() {
            // Safety: stats_ptr is valid shared memory
            unsafe {
                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
            }
        }
        save_bug_recipe(split_call_count, child_seed);
    }

    // One fork point is accounted per reaped child, mirroring the sequential path.
    if !stats_ptr.is_null() {
        // Safety: stats_ptr is valid shared memory
        unsafe {
            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
        }
    }

    free_slots.push(slot);
}
224
/// Configuration for adaptive batch-based timeline splitting.
///
/// Instead of spawning a fixed number of timelines, the adaptive loop
/// spawns in batches and checks coverage yield between batches. Productive
/// marks (that find new coverage) get more timelines; barren marks stop
/// early and return their energy to the reallocation pool.
///
/// When no `AdaptiveConfig` is present on the context, the splitter falls
/// back to defaults of `(batch_size: 4, min_timelines: 1, max_timelines: 16)`.
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Number of children to fork per batch before checking coverage yield.
    pub batch_size: u32,
    /// Minimum total forks for a mark (even if barren after first batch).
    pub min_timelines: u32,
    /// Maximum total forks for a mark (hard cap).
    pub max_timelines: u32,
    /// Initial per-mark energy budget.
    pub per_mark_energy: i64,
    /// Minimum timelines for marks during warm starts (explored map has prior
    /// coverage from previous seeds). Defaults to `batch_size` if `None`.
    pub warm_min_timelines: Option<u32>,
}
245
246/// Dispatch to either adaptive or fixed-count splitting based on config.
247///
248/// If an energy budget is configured (adaptive mode), uses coverage-yield-driven
249/// batching. Otherwise falls back to the fixed `timelines_per_split` behavior.
250#[cfg(unix)]
251pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
252    let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
253    if has_adaptive {
254        adaptive_split_on_discovery(mark_name, slot_idx);
255    } else {
256        split_on_discovery(mark_name);
257    }
258}
259
/// No-op on non-unix platforms: timeline splitting relies on `fork(2)`,
/// which is unavailable, so discovery points are simply ignored.
#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}
263
/// Adaptive split: spawn timelines in batches, check coverage yield, stop when barren.
///
/// When parallelism is configured, uses a sliding window of concurrent children
/// capped at the resolved slot count. Otherwise falls back to sequential forking.
///
/// Forked children return from this function (the `pid == 0` arm) and continue
/// the simulation with reseeded randomness; only the parent reaches the
/// restore code at the bottom.
#[cfg(unix)]
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    // Read context for guard checks
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    // Splitting disabled, or fork-depth cap reached: do nothing.
    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Initialize per-mark budget on first use
    // Safety: budget_ptr is valid shared memory
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    // RNG call count identifies this fork point for recipe replay.
    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    // Batch parameters, with hard-coded defaults when no AdaptiveConfig is set.
    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // Warm start: when explored map has prior coverage from previous seeds,
    // barren marks stop after fewer timelines since they're re-treading
    // already-discovered paths.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    // Check parallelism; pool allocation failure silently degrades to sequential.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Sancov parallel pool (one slot per concurrent child for sancov counters)
    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    // Saved so the transfer pointer can be put back after the loop redirects it.
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // Save parent bitmap (sequential only — parallel children use pool slots)
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    // Parallel state (only used when parallel == true)
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    // Batch loop
    loop {
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        // Spawn up to batch_size children (subject to cap and energy).
        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            // Safety: budget_ptr is valid
            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                break;
            }

            // Index doubles as the per-mark child index for seed derivation.
            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                // Back-pressure: reap a finished child if all slots are busy
                while free_slots.is_empty() {
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        sancov_pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                // Clear child bitmap slot
                // Safety: pool_base + slot offset is valid shared memory
                unsafe {
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    // Redirect BEFORE forking so the child inherits its own slot.
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                // Clear child sancov slot and redirect transfer pointer
                if !sancov_pool_base.is_null() {
                    let sancov_len = crate::sancov::sancov_edge_count();
                    unsafe {
                        let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                        std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                        crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                    }
                }

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        // Fork failed: hand the slot back and stop this batch.
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        // Child: reseed and resume the simulation from here.
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                // Sequential path
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }
                crate::sancov::clear_transfer_buffer();

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        let mut status: libc::c_int = 0;
                        // Safety: child_pid is valid
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        // Yield check must precede merge_from, which absorbs the bits.
                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }
                        batch_has_new |= crate::sancov::has_new_sancov_coverage();

                        // Exit code 42 is the bug-found convention (see reap_one).
                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                // Safety: stats_ptr is valid
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            // Safety: stats_ptr is valid
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Drain remaining active children before checking batch yield
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                sancov_pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        // Batch complete — decide whether to continue
        if timelines_spawned >= max_timelines {
            break;
        }
        // Barren batch past the minimum: give unused energy back to the pool.
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            // Barren — return remaining energy to pool
            // Safety: budget_ptr is valid
            unsafe {
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        // Check if we ran out of energy mid-batch
        // (a short batch means decrement_mark_energy or fork said stop).
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            break;
        }
    }

    if parallel {
        // Restore parent's bitmap pointer
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        // Restore parent's sancov transfer pointer
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else {
        // Restore parent bitmap content
        if !bm_ptr.is_null() {
            // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
            unsafe {
                std::ptr::copy_nonoverlapping(
                    parent_bitmap_backup.as_ptr(),
                    bm_ptr,
                    COVERAGE_MAP_SIZE,
                );
            }
        }
    }
}
544
/// Split the simulation timeline at a discovery point.
///
/// Called when an assertion detects a new success (e.g. via `assertion_bool`
/// or `assertion_numeric`). Spawns `timelines_per_split` child timelines,
/// each with a different seed derived from the current seed and the mark name.
///
/// When parallelism is configured, uses a sliding window of concurrent children.
/// Otherwise falls back to sequential fork→wait→fork→wait.
///
/// Forked children return from this function (the `pid == 0` arm) and continue
/// the simulation with reseeded randomness; only the parent reaches the
/// restore code at the bottom.
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    // Splitting disabled, or fork-depth cap reached: do nothing.
    if !ctx_active || depth >= max_depth {
        return;
    }

    // Unlike the adaptive path, a null stats block aborts the split entirely;
    // later unsafe derefs of stats_ptr rely on this early return.
    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    // Safety: stats_ptr set during init, points to valid shared stats
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        return;
    }

    // RNG call count identifies this fork point for recipe replay.
    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    // Check parallelism; pool allocation failure silently degrades to sequential.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Sancov parallel pool (one slot per concurrent child for sancov counters)
    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    // Saved so the transfer pointer can be put back after the loop redirects it.
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // Save parent bitmap (sequential only)
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    // Parallel state
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    // Written by reap_one but never read in this fixed-count path; it exists
    // only to satisfy reap_one's signature (the adaptive path consumes it).
    let mut batch_has_new = false;

    for child_idx in 0..timelines_per_split {
        // The first child is already paid for by the decrement_energy above.
        if child_idx > 0 {
            // Safety: stats_ptr is valid
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            // Back-pressure: reap if all slots busy
            while free_slots.is_empty() {
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    sancov_pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            // Safety: pool slot is valid shared memory
            unsafe {
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                // Redirect BEFORE forking so the child inherits its own slot.
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            // Clear child sancov slot and redirect transfer pointer
            if !sancov_pool_base.is_null() {
                let sancov_len = crate::sancov::sancov_edge_count();
                unsafe {
                    let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                    std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                    crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                }
            }

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    // Fork failed: hand the slot back and stop spawning.
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    // Child: reseed and resume the simulation from here.
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            // Sequential path
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }
            crate::sancov::clear_transfer_buffer();

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    let mut status: libc::c_int = 0;
                    // Safety: child_pid is valid
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    // Merge child's coverage into the explored map. (No
                    // has_new_bits check here — yield is not tracked in the
                    // fixed-count path.)
                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }
                    batch_has_new |= crate::sancov::has_new_sancov_coverage();

                    // Exit code 42 is the bug-found convention (see reap_one).
                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        // Safety: stats_ptr is valid
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    // Safety: stats_ptr is valid
                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Drain remaining active children
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            sancov_pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    if parallel {
        // Restore parent's bitmap pointer and sancov transfer pointer.
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        // Restore parent's sancov transfer pointer
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else if !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}
767
/// No-op on non-unix platforms: timeline splitting relies on `fork(2)`,
/// which is unavailable, so discovery points are simply ignored.
#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}
771
/// Save a bug recipe to shared memory.
///
/// Records the chain of fork points (`(rng_call_count, seed)` pairs) that led
/// to the bug: the current context's recipe plus the fork point identified by
/// `split_call_count`/`child_seed`. A CAS on `claimed` ensures only the FIRST
/// bug across all processes writes the recipe; later winners are ignored.
///
/// NOTE(review): when the recipe overflows `MAX_RECIPE_ENTRIES`, the oldest
/// `len - 1` entries are kept and the middle is dropped in favor of the
/// current fork point — confirm that prefix-truncation is the intended replay
/// semantics.
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
    if recipe_ptr.is_null() {
        return;
    }

    // Safety: recipe_ptr points to valid shared memory
    unsafe {
        let recipe = &mut *recipe_ptr;

        // Only save the first bug recipe (CAS from 0 to 1)
        if recipe
            .claimed
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            // Copy the current context's recipe plus this fork point
            context::with_ctx(|ctx| {
                // total_entries >= 1, so len >= 1 and `len - 1` cannot underflow.
                let total_entries = ctx.recipe.len() + 1;
                let len = total_entries.min(MAX_RECIPE_ENTRIES);

                // Copy existing recipe entries
                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
                    recipe.entries[i] = entry;
                }
                // Add the current fork point
                if len > 0 {
                    recipe.entries[len - 1] = (split_call_count, child_seed);
                }
                recipe.len = len as u32;
            });
        }
    }
}
807
/// Exit the current child process with the given code.
///
/// Calls `libc::_exit()` which skips atexit handlers and stdio flushing.
/// This is appropriate for forked child processes.
///
/// Before exiting, copies this child's sancov counters into shared memory so
/// the parent can merge them when it reaps the child.
///
/// # Safety
///
/// This function terminates the process immediately. Only call from a
/// forked child process.
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
    crate::sancov::copy_counters_to_shared();
    // Safety: _exit is always safe to call; it terminates the process.
    unsafe { libc::_exit(code) }
}
823
/// Non-unix fallback: exits the process via `std::process::exit` (it does not
/// panic, despite never being expected to run — forking is unix-only, so no
/// child process exists on these platforms).
#[cfg(not(unix))]
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}
829
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_child_seed_deterministic() {
        // Identical inputs must always hash to the identical seed.
        assert_eq!(
            compute_child_seed(42, "test", 0),
            compute_child_seed(42, "test", 0)
        );
    }

    #[test]
    fn test_compute_child_seed_varies_by_index() {
        // Distinct child indices must produce pairwise-distinct seeds.
        let seeds: Vec<u64> = (0..3u32)
            .map(|idx| compute_child_seed(42, "test", idx))
            .collect();
        assert_ne!(seeds[0], seeds[1]);
        assert_ne!(seeds[1], seeds[2]);
        assert_ne!(seeds[0], seeds[2]);
    }

    #[test]
    fn test_compute_child_seed_varies_by_name() {
        // The mark name participates in the hash.
        assert_ne!(
            compute_child_seed(42, "alpha", 0),
            compute_child_seed(42, "beta", 0)
        );
    }

    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        // The parent seed participates in the hash.
        assert_ne!(
            compute_child_seed(1, "test", 0),
            compute_child_seed(2, "test", 0)
        );
    }
}
864}