// moonpool_explorer/split_loop.rs
//! Timeline splitting loop.
//!
//! When a new assertion success is discovered, [`split_on_discovery`] forks
//! child processes with different seeds to explore alternate timelines from
//! that split point forward.
//!
//! # Process Model
//!
//! ```text
//! Parent timeline (seed S0, depth D)
//!   |-- Timeline 0 (seed S0', depth D+1) -> waitpid -> merge coverage
//!   |-- Timeline 1 (seed S1', depth D+1) -> waitpid -> merge coverage
//!   |-- ...
//!   `-- Timeline N (seed SN', depth D+1) -> waitpid -> merge coverage
//!   resume parent timeline
//! ```
//!
//! Each child returns from this function and continues the simulation with
//! reseeded randomness. The parent waits for each child sequentially.
21use std::sync::atomic::Ordering;
22
23#[cfg(unix)]
24use std::collections::HashMap;
25
26use crate::context::{
27    self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
28};
29#[cfg(unix)]
30use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
31use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
32use crate::shared_stats::MAX_RECIPE_ENTRIES;
33
/// Derive a deterministic child seed from the parent seed, the mark name,
/// and the child's index within the split.
///
/// Implements FNV-1a: the mark name's bytes are folded in first, then the
/// parent seed, then the child index, so changing any one of the three
/// inputs produces a well-distributed, different seed.
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;

    let name_mixed = mark_name
        .as_bytes()
        .iter()
        .fold(FNV_OFFSET_BASIS, |h, &b| (h ^ b as u64).wrapping_mul(FNV_PRIME));
    let seed_mixed = (name_mixed ^ parent_seed).wrapping_mul(FNV_PRIME);
    (seed_mixed ^ child_idx as u64).wrapping_mul(FNV_PRIME)
}
49
/// Controls how many children can run in parallel during splitting.
///
/// When set on [`crate::ExplorationConfig::parallelism`], the fork loop
/// keeps a sliding window of up to this many concurrent children instead
/// of the default sequential fork→wait→fork→wait cycle.
#[derive(Debug, Clone)]
pub enum Parallelism {
    /// All available CPU cores, as reported by `sysconf(_SC_NPROCESSORS_ONLN)`.
    MaxCores,
    /// Half of the available CPU cores.
    HalfCores,
    /// An exact number of concurrent children.
    Cores(usize),
    /// All available cores minus `n` — useful to leave headroom for the OS.
    MaxCoresMinus(usize),
}
66
67/// Resolve a [`Parallelism`] value to a concrete slot count (≥ 1).
68#[cfg(unix)]
69fn resolve_parallelism(p: &Parallelism) -> usize {
70    let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
71    let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
72    let n = match p {
73        Parallelism::MaxCores => ncpus,
74        Parallelism::HalfCores => ncpus / 2,
75        Parallelism::Cores(c) => *c,
76        Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
77    };
78    n.max(1) // always at least 1
79}
80
81/// Get or initialize the per-process bitmap pool in shared memory.
82///
83/// Returns the pool base pointer, or null if allocation fails.
84/// Each forked child resets this to null so it allocates its own pool
85/// if it becomes a parent (avoids sharing pool slots with siblings).
86#[cfg(unix)]
87fn get_or_init_pool(slot_count: usize) -> *mut u8 {
88    let existing = BITMAP_POOL.with(|c| c.get());
89    let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());
90
91    if !existing.is_null() && existing_slots >= slot_count {
92        return existing;
93    }
94
95    // Free old pool if it exists but is too small
96    if !existing.is_null() {
97        // Safety: ptr was returned by alloc_shared with existing_slots * COVERAGE_MAP_SIZE
98        unsafe {
99            crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
100        }
101        BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
102        BITMAP_POOL_SLOTS.with(|c| c.set(0));
103    }
104
105    match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
106        Ok(ptr) => {
107            BITMAP_POOL.with(|c| c.set(ptr));
108            BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
109            ptr
110        }
111        Err(_) => std::ptr::null_mut(),
112    }
113}
114
115/// Return the pointer to slot `idx` within a bitmap pool.
116#[cfg(unix)]
117fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
118    // Safety: caller ensures idx < slot_count and pool_base is valid
119    unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
120}
121
122/// Common child-process setup after fork: reseed RNG, update context, bump counter.
123///
124/// Also resets the bitmap pool pointer so nested splits allocate a fresh pool.
125#[cfg(unix)]
126fn setup_child(
127    child_seed: u64,
128    split_call_count: u64,
129    stats_ptr: *mut crate::shared_stats::SharedStats,
130) {
131    context::rng_reseed(child_seed);
132    context::with_ctx_mut(|ctx| {
133        ctx.is_child = true;
134        ctx.depth += 1;
135        ctx.current_seed = child_seed;
136        ctx.recipe.push((split_call_count, child_seed));
137    });
138    if !stats_ptr.is_null() {
139        // Safety: stats_ptr points to valid shared memory
140        unsafe {
141            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
142        }
143    }
144    // Reset bitmap pool so nested splits allocate a fresh pool
145    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
146    BITMAP_POOL_SLOTS.with(|c| c.set(0));
147
148    // Zero BSS counters so child captures only its OWN sancov edges
149    crate::sancov::reset_bss_counters();
150    // Reset sancov pool so nested splits allocate a fresh pool
151    crate::sancov::SANCOV_POOL.with(|c| c.set(std::ptr::null_mut()));
152    crate::sancov::SANCOV_POOL_SLOTS.with(|c| c.set(0));
153}
154
155/// Reap one finished child via `waitpid(-1)`, merge its coverage, check for bugs.
156///
157/// Removes the reaped PID from `active`, pushes its slot back to `free_slots`,
158/// and sets `batch_has_new` if the child contributed new coverage bits.
159#[cfg(unix)]
160#[allow(clippy::too_many_arguments)]
161fn reap_one(
162    active: &mut HashMap<libc::pid_t, (u64, usize)>,
163    free_slots: &mut Vec<usize>,
164    pool_base: *mut u8,
165    sancov_pool_base: *mut u8,
166    vm_ptr: *mut u8,
167    stats_ptr: *mut crate::shared_stats::SharedStats,
168    split_call_count: u64,
169    batch_has_new: &mut bool,
170) {
171    let mut status: libc::c_int = 0;
172    // Safety: waitpid(-1) waits for any child of this process
173    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
174    if finished_pid <= 0 {
175        return;
176    }
177
178    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
179        return;
180    };
181
182    // Merge child's coverage bitmap into explored map
183    if !vm_ptr.is_null() {
184        // Safety: pool_base + slot offset is valid shared memory
185        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
186        let vm = unsafe { ExploredMap::new(vm_ptr) };
187        if vm.has_new_bits(&child_bm) {
188            *batch_has_new = true;
189        }
190        vm.merge_from(&child_bm);
191    }
192
193    // Check child's sancov coverage from its pool slot
194    if !sancov_pool_base.is_null() {
195        let sancov_slot = unsafe { crate::sancov::sancov_pool_slot(sancov_pool_base, slot) };
196        if crate::sancov::has_new_sancov_coverage_from(sancov_slot) {
197            *batch_has_new = true;
198        }
199    }
200
201    // Check if child found a bug (exit code 42)
202    let exited_normally = libc::WIFEXITED(status);
203    if exited_normally && libc::WEXITSTATUS(status) == 42 {
204        if !stats_ptr.is_null() {
205            // Safety: stats_ptr is valid shared memory
206            unsafe {
207                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
208            }
209        }
210        save_bug_recipe(split_call_count, child_seed);
211    }
212
213    if !stats_ptr.is_null() {
214        // Safety: stats_ptr is valid shared memory
215        unsafe {
216            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
217        }
218    }
219
220    free_slots.push(slot);
221}
222
/// Configuration for adaptive batch-based timeline splitting.
///
/// Rather than forking a fixed number of timelines per mark, the adaptive
/// loop forks in batches and inspects the coverage yield between batches:
/// productive marks (those still discovering new coverage) earn further
/// batches, while barren marks stop early and return their unused energy
/// to the shared reallocation pool.
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Children to fork per batch before the coverage yield is checked.
    pub batch_size: u32,
    /// Floor on total forks per mark, even if the first batch is barren.
    pub min_timelines: u32,
    /// Hard ceiling on total forks per mark.
    pub max_timelines: u32,
    /// Initial per-mark energy budget.
    pub per_mark_energy: i64,
    /// Minimum timelines for marks during warm starts (the explored map
    /// already holds coverage from previous seeds). `None` falls back to
    /// `batch_size`.
    pub warm_min_timelines: Option<u32>,
}
243
244/// Dispatch to either adaptive or fixed-count splitting based on config.
245///
246/// If an energy budget is configured (adaptive mode), uses coverage-yield-driven
247/// batching. Otherwise falls back to the fixed `timelines_per_split` behavior.
248#[cfg(unix)]
249pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
250    let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
251    if has_adaptive {
252        adaptive_split_on_discovery(mark_name, slot_idx);
253    } else {
254        split_on_discovery(mark_name);
255    }
256}
257
/// No-op on non-unix platforms, where `fork` is unavailable.
#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}
261
/// Adaptive split: spawn timelines in batches, check coverage yield, stop when barren.
///
/// `mark_name` identifies the assertion that triggered the split; `slot_idx`
/// selects this mark's entry in the shared per-mark energy-budget table.
///
/// When parallelism is configured, uses a sliding window of concurrent children
/// capped at the resolved slot count. Otherwise falls back to sequential forking.
///
/// Forked children return from this function (after [`setup_child`]) and keep
/// simulating with reseeded randomness; only the parent runs the batch loop,
/// reaps children, and merges their coverage.
#[cfg(unix)]
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    // Read context for guard checks
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    // Splitting disabled, or the fork tree is already at its depth limit.
    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Initialize per-mark budget on first use
    // Safety: budget_ptr is valid shared memory
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    // RNG call count at the split point — recorded in recipes so the
    // timeline can be replayed from this exact decision.
    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    // Batch parameters; (4, 1, 16) are the defaults when no adaptive config is set.
    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // Warm start: when explored map has prior coverage from previous seeds,
    // barren marks stop after fewer timelines since they're re-treading
    // already-discovered paths.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    // Check parallelism; a failed pool allocation falls back to sequential mode.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Sancov parallel pool (one slot per concurrent child for sancov counters)
    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // Save parent bitmap (sequential only — parallel children use pool slots,
    // while sequential children clobber the shared bitmap at bm_ptr).
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    // Parallel state (only used when parallel == true):
    // PID -> (child seed, pool slot index).
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    // Batch loop
    loop {
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            // Safety: budget_ptr is valid
            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                break;
            }

            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                // Back-pressure: reap a finished child if all slots are busy
                while free_slots.is_empty() {
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        sancov_pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                // Clear child bitmap slot and point the (soon-forked) child at it
                // Safety: pool_base + slot offset is valid shared memory
                unsafe {
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                // Clear child sancov slot and redirect transfer pointer
                if !sancov_pool_base.is_null() {
                    let sancov_len = crate::sancov::sancov_edge_count();
                    unsafe {
                        let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                        std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                        crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                    }
                }

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        // Fork failed: return the slot and stop this batch.
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        // Child: reseed and resume the simulation from here.
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                // Sequential path
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }
                crate::sancov::clear_transfer_buffer();

                // Safety: single-threaded, no real I/O
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        // Child: reseed and resume the simulation from here.
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        let mut status: libc::c_int = 0;
                        // Safety: child_pid is valid
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        // Merge the child's coverage into the explored map and
                        // note whether it contributed anything new.
                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }
                        batch_has_new |= crate::sancov::has_new_sancov_coverage();

                        // Check if child found a bug (exit code 42)
                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                // Safety: stats_ptr is valid
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            // Safety: stats_ptr is valid
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Drain remaining active children before checking batch yield
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                sancov_pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        // Batch complete — decide whether to continue
        if timelines_spawned >= max_timelines {
            break;
        }
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            // Barren — return remaining energy to pool
            // Safety: budget_ptr is valid
            unsafe {
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        // Check if we ran out of energy mid-batch (inner loop exited early
        // for a reason other than hitting max_timelines)
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            break;
        }
    }

    if parallel {
        // Restore parent's bitmap pointer (children were redirected to pool slots)
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        // Restore parent's sancov transfer pointer
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else {
        // Restore parent bitmap content clobbered by sequential children
        if !bm_ptr.is_null() {
            // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
            unsafe {
                std::ptr::copy_nonoverlapping(
                    parent_bitmap_backup.as_ptr(),
                    bm_ptr,
                    COVERAGE_MAP_SIZE,
                );
            }
        }
    }
}
542
/// Split the simulation timeline at a discovery point.
///
/// Called when an assertion detects a new success (e.g. via `assertion_bool`
/// or `assertion_numeric`). Spawns `timelines_per_split` child timelines,
/// each with a different seed derived from the current seed and the mark name.
///
/// When parallelism is configured, uses a sliding window of concurrent children.
/// Otherwise falls back to sequential fork→wait→fork→wait.
///
/// Forked children return from this function (after [`setup_child`]) and keep
/// simulating with reseeded randomness; only the parent waits and merges.
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    // Splitting disabled, or the fork tree is already at its depth limit.
    if !ctx_active || depth >= max_depth {
        return;
    }

    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    // Global energy gate — this decrement pays for the first child (index 0);
    // subsequent children pay inside the loop below.
    // Safety: stats_ptr set during init, points to valid shared stats
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        return;
    }

    // RNG call count at the split point — recorded in recipes for replay.
    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    // Check parallelism; a failed pool allocation falls back to sequential mode.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Sancov parallel pool (one slot per concurrent child for sancov counters)
    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // Save parent bitmap (sequential only — sequential children clobber bm_ptr)
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    // Parallel state: PID -> (child seed, pool slot index)
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    let mut batch_has_new = false;

    for child_idx in 0..timelines_per_split {
        if child_idx > 0 {
            // Child 0 was paid for by the gate above; pay per child after that.
            // Safety: stats_ptr is valid
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            // Back-pressure: reap if all slots busy
            while free_slots.is_empty() {
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    sancov_pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            // Clear child bitmap slot and point the (soon-forked) child at it
            // Safety: pool slot is valid shared memory
            unsafe {
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            // Clear child sancov slot and redirect transfer pointer
            if !sancov_pool_base.is_null() {
                let sancov_len = crate::sancov::sancov_edge_count();
                unsafe {
                    let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                    std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                    crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                }
            }

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    // Fork failed: return the slot and stop spawning.
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    // Child: reseed and resume the simulation from here.
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            // Sequential path
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }
            crate::sancov::clear_transfer_buffer();

            // Safety: single-threaded, no real I/O
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    // Child: reseed and resume the simulation from here.
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    let mut status: libc::c_int = 0;
                    // Safety: child_pid is valid
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    // Merge the child's coverage into the explored map.
                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }
                    batch_has_new |= crate::sancov::has_new_sancov_coverage();

                    // Check if child found a bug (exit code 42)
                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        // Safety: stats_ptr is valid
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    // Safety: stats_ptr is valid
                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Drain remaining active children
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            sancov_pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    if parallel {
        // Restore parent's bitmap pointer (children used pool slots)
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        // Restore parent's sancov transfer pointer
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else if !bm_ptr.is_null() {
        // Restore parent bitmap content clobbered by sequential children
        // Safety: bm_ptr points to COVERAGE_MAP_SIZE bytes
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}
765
/// No-op on non-unix platforms, where `fork` is unavailable.
#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}
769
770/// Save a bug recipe to shared memory.
771fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
772    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
773    if recipe_ptr.is_null() {
774        return;
775    }
776
777    // Safety: recipe_ptr points to valid shared memory
778    unsafe {
779        let recipe = &mut *recipe_ptr;
780
781        // Only save the first bug recipe (CAS from 0 to 1)
782        if recipe
783            .claimed
784            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
785            .is_ok()
786        {
787            // Copy the current context's recipe plus this fork point
788            context::with_ctx(|ctx| {
789                let total_entries = ctx.recipe.len() + 1;
790                let len = total_entries.min(MAX_RECIPE_ENTRIES);
791
792                // Copy existing recipe entries
793                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
794                    recipe.entries[i] = entry;
795                }
796                // Add the current fork point
797                if len > 0 {
798                    recipe.entries[len - 1] = (split_call_count, child_seed);
799                }
800                recipe.len = len as u32;
801            });
802        }
803    }
804}
805
/// Exit the current child process with the given code.
///
/// Flushes this child's sancov edge counters to the shared transfer buffer
/// first, so the parent can inspect them after `waitpid`, then calls
/// `libc::_exit()`, which skips atexit handlers and stdio flushing. This is
/// appropriate for forked child processes.
///
/// # Safety
///
/// This function terminates the process immediately. Only call from a
/// forked child process.
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
    // Publish this child's coverage before the process disappears.
    crate::sancov::copy_counters_to_shared();
    // Safety: _exit is always safe to call; it terminates the process.
    unsafe { libc::_exit(code) }
}
821
/// Exit the current process on non-unix platforms.
///
/// There is no forking on these platforms, so this path is expected to be
/// unreachable in practice; it falls back to `std::process::exit` (which,
/// unlike `_exit`, does run cleanup) rather than panicking.
#[cfg(not(unix))]
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}
827
#[cfg(test)]
mod tests {
    use super::*;

    /// Same inputs must always yield the same seed.
    #[test]
    fn test_compute_child_seed_deterministic() {
        assert_eq!(
            compute_child_seed(42, "test", 0),
            compute_child_seed(42, "test", 0)
        );
    }

    /// Different child indices must yield pairwise-distinct seeds.
    #[test]
    fn test_compute_child_seed_varies_by_index() {
        let distinct: std::collections::HashSet<u64> =
            (0..3).map(|i| compute_child_seed(42, "test", i)).collect();
        assert_eq!(distinct.len(), 3);
    }

    /// Different mark names must yield different seeds.
    #[test]
    fn test_compute_child_seed_varies_by_name() {
        assert_ne!(
            compute_child_seed(42, "alpha", 0),
            compute_child_seed(42, "beta", 0)
        );
    }

    /// Different parent seeds must yield different child seeds.
    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        assert_ne!(
            compute_child_seed(1, "test", 0),
            compute_child_seed(2, "test", 0)
        );
    }
}