//! Timeline splitting: fork-based exploration of alternate executions.
//!
//! When a discovery mark fires, the current process forks child timelines
//! with deterministically derived RNG seeds, tracks their coverage in
//! shared memory, and merges the results back into the parent's explored map.
#[cfg(unix)]
use std::collections::HashMap;
use std::sync::atomic::Ordering;
use crate::context::{
self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
};
#[cfg(unix)]
use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
use crate::shared_stats::MAX_RECIPE_ENTRIES;
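/// Derives a deterministic child seed: FNV-1a over the mark name, with the
/// parent seed and child index mixed in using the same FNV prime, so each
/// `(parent, mark, index)` triple maps to a stable, well-distributed seed.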
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
let mut hash: u64 = 0xcbf29ce484222325;
for &byte in mark_name.as_bytes() {
hash ^= byte as u64;
hash = hash.wrapping_mul(0x100000001b3);
}
hash ^= parent_seed;
hash = hash.wrapping_mul(0x100000001b3);
hash ^= child_idx as u64;
hash = hash.wrapping_mul(0x100000001b3);
hash
}
#[derive(Debug, Clone)]
pub enum Parallelism {
MaxCores,
HalfCores,
Cores(usize),
MaxCoresMinus(usize),
}
#[cfg(unix)]
fn resolve_parallelism(p: &Parallelism) -> usize {
let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
let n = match p {
Parallelism::MaxCores => ncpus,
Parallelism::HalfCores => ncpus / 2,
Parallelism::Cores(c) => *c,
Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
};
    n.max(1)
}
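/// Returns a shared-memory pool holding `slot_count` coverage bitmaps,
/// reusing the thread-local cached pool when it is already large enough and
/// reallocating it otherwise. Returns a null pointer if allocation fails.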
#[cfg(unix)]
fn get_or_init_pool(slot_count: usize) -> *mut u8 {
let existing = BITMAP_POOL.with(|c| c.get());
let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());
if !existing.is_null() && existing_slots >= slot_count {
return existing;
}
if !existing.is_null() {
unsafe {
crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
}
BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
BITMAP_POOL_SLOTS.with(|c| c.set(0));
}
match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
Ok(ptr) => {
BITMAP_POOL.with(|c| c.set(ptr));
BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
ptr
}
Err(_) => std::ptr::null_mut(),
}
}
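/// Pointer to the `idx`-th coverage bitmap inside the pool. The caller must
/// ensure `idx` is below the pool's allocated slot count.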
#[cfg(unix)]
fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
}
#[cfg(unix)]
fn setup_child(
child_seed: u64,
split_call_count: u64,
stats_ptr: *mut crate::shared_stats::SharedStats,
) {
context::rng_reseed(child_seed);
context::with_ctx_mut(|ctx| {
ctx.is_child = true;
ctx.depth += 1;
ctx.current_seed = child_seed;
ctx.recipe.push((split_call_count, child_seed));
});
if !stats_ptr.is_null() {
unsafe {
(*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
}
}
BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
BITMAP_POOL_SLOTS.with(|c| c.set(0));
crate::sancov::reset_bss_counters();
crate::sancov::SANCOV_POOL.with(|c| c.set(std::ptr::null_mut()));
crate::sancov::SANCOV_POOL_SLOTS.with(|c| c.set(0));
}
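/// Blocks in `waitpid` until one child exits, then merges that child's
/// coverage slot into the explored map, records whether it contributed new
/// edges (including sancov edges), counts a bug if it exited with code 42,
/// and returns its slot to the free list.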
#[cfg(unix)]
#[allow(clippy::too_many_arguments)]
fn reap_one(
active: &mut HashMap<libc::pid_t, (u64, usize)>,
free_slots: &mut Vec<usize>,
pool_base: *mut u8,
sancov_pool_base: *mut u8,
vm_ptr: *mut u8,
stats_ptr: *mut crate::shared_stats::SharedStats,
split_call_count: u64,
batch_has_new: &mut bool,
) {
let mut status: libc::c_int = 0;
let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
if finished_pid <= 0 {
return;
}
let Some((child_seed, slot)) = active.remove(&finished_pid) else {
return;
};
if !vm_ptr.is_null() {
let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
let vm = unsafe { ExploredMap::new(vm_ptr) };
if vm.has_new_bits(&child_bm) {
*batch_has_new = true;
}
vm.merge_from(&child_bm);
}
if !sancov_pool_base.is_null() {
let sancov_slot = unsafe { crate::sancov::sancov_pool_slot(sancov_pool_base, slot) };
if crate::sancov::has_new_sancov_coverage_from(sancov_slot) {
*batch_has_new = true;
}
}
let exited_normally = libc::WIFEXITED(status);
if exited_normally && libc::WEXITSTATUS(status) == 42 {
if !stats_ptr.is_null() {
unsafe {
(*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
}
}
save_bug_recipe(split_call_count, child_seed);
}
if !stats_ptr.is_null() {
unsafe {
(*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
}
}
free_slots.push(slot);
}
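/// Tuning knobs for adaptive splitting. Children are spawned in batches of
/// `batch_size`; after a batch with no new coverage, splitting stops once
/// at least `min_timelines` have run (or `warm_min_timelines`, defaulting
/// to one batch, while the context is warm-starting). `max_timelines` caps
/// the total fanout, and `per_mark_energy` is the initial energy budget
/// granted to each mark.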
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
pub batch_size: u32,
pub min_timelines: u32,
pub max_timelines: u32,
pub per_mark_energy: i64,
pub warm_min_timelines: Option<u32>,
}
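/// Routes a discovery mark to the adaptive strategy when an energy-budget
/// region is mapped, and to the fixed `timelines_per_split` strategy
/// otherwise.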
#[cfg(unix)]
pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
if has_adaptive {
adaptive_split_on_discovery(mark_name, slot_idx);
} else {
split_on_discovery(mark_name);
}
}
#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}
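/// Adaptive splitting with a per-mark energy budget.
///
/// Children are forked in batches of `batch_size`, each child paying one
/// unit of the mark's energy. After a batch completes, splitting stops and
/// the remaining energy is returned to the shared pool if no child produced
/// new coverage and at least the (warm-start-adjusted) minimum number of
/// timelines has run; `max_timelines` caps the total fanout. In parallel
/// mode each child writes coverage into its own shared-memory slot; in
/// single-process mode children reuse the parent's bitmap sequentially.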
#[cfg(unix)]
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
let (ctx_active, depth, max_depth, current_seed) =
context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));
if !ctx_active || depth >= max_depth {
return;
}
let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
if budget_ptr.is_null() {
return;
}
unsafe {
crate::energy::init_mark_budget(budget_ptr, slot_idx);
}
let split_call_count = context::rng_get_count();
let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
let stats_ptr = SHARED_STATS.with(|c| c.get());
let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
ctx.adaptive
.as_ref()
.map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
.unwrap_or((4, 1, 16))
});
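    // While warm-starting, require a larger minimum fanout (defaulting to a
    // full batch) before the no-new-coverage early exit may trigger.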
let effective_min_timelines = {
let (is_warm, warm_min) = context::with_ctx(|ctx| {
let wm = ctx
.adaptive
.as_ref()
.and_then(|a| a.warm_min_timelines)
.unwrap_or(batch_size);
(ctx.warm_start, wm)
});
if is_warm { warm_min } else { min_timelines }
};
let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
let (slot_count, pool_base) = if let Some(ref p) = parallelism {
let sc = resolve_parallelism(p);
let pb = get_or_init_pool(sc);
if pb.is_null() {
(0, std::ptr::null_mut())
} else {
(sc, pb)
}
} else {
(0, std::ptr::null_mut())
};
let parallel = slot_count > 0;
let sancov_pool_base = if parallel {
crate::sancov::get_or_init_sancov_pool(slot_count)
} else {
std::ptr::null_mut()
};
let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
} else {
std::ptr::null_mut()
};
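    // Single-process mode: children write into the parent's own bitmap, so
    // snapshot it here and restore it once all children have been reaped.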
let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
if !parallel && !bm_ptr.is_null() {
unsafe {
std::ptr::copy_nonoverlapping(
bm_ptr,
parent_bitmap_backup.as_mut_ptr(),
COVERAGE_MAP_SIZE,
);
}
}
let mut timelines_spawned: u32 = 0;
let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
let mut free_slots: Vec<usize> = if parallel {
(0..slot_count).collect()
} else {
Vec::new()
};
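    // Fork children in batches; after each batch, decide whether the
    // coverage gained justifies spending more of this mark's energy.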
loop {
let mut batch_has_new = false;
let batch_start = timelines_spawned;
while timelines_spawned - batch_start < batch_size {
if timelines_spawned >= max_timelines {
break;
}
if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
break;
}
let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
timelines_spawned += 1;
if parallel {
while free_slots.is_empty() {
reap_one(
&mut active,
&mut free_slots,
pool_base,
sancov_pool_base,
vm_ptr,
stats_ptr,
split_call_count,
&mut batch_has_new,
);
}
let slot = match free_slots.pop() {
Some(s) => s,
None => break,
};
unsafe {
std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
}
if !sancov_pool_base.is_null() {
let sancov_len = crate::sancov::sancov_edge_count();
unsafe {
let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
std::ptr::write_bytes(sancov_slot, 0, sancov_len);
crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
}
}
let pid = unsafe { libc::fork() };
match pid {
-1 => {
free_slots.push(slot);
break;
}
0 => {
setup_child(child_seed, split_call_count, stats_ptr);
return;
}
child_pid => {
active.insert(child_pid, (child_seed, slot));
}
}
} else {
if !bm_ptr.is_null() {
let bm = unsafe { CoverageBitmap::new(bm_ptr) };
bm.clear();
}
crate::sancov::clear_transfer_buffer();
let pid = unsafe { libc::fork() };
match pid {
-1 => break,
0 => {
setup_child(child_seed, split_call_count, stats_ptr);
return;
}
child_pid => {
let mut status: libc::c_int = 0;
unsafe {
libc::waitpid(child_pid, &mut status, 0);
}
if !bm_ptr.is_null() && !vm_ptr.is_null() {
let bm = unsafe { CoverageBitmap::new(bm_ptr) };
let vm = unsafe { ExploredMap::new(vm_ptr) };
if vm.has_new_bits(&bm) {
batch_has_new = true;
}
vm.merge_from(&bm);
}
batch_has_new |= crate::sancov::has_new_sancov_coverage();
let exited_normally = libc::WIFEXITED(status);
if exited_normally && libc::WEXITSTATUS(status) == 42 {
if !stats_ptr.is_null() {
unsafe {
(*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
}
}
save_bug_recipe(split_call_count, child_seed);
}
if !stats_ptr.is_null() {
unsafe {
(*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
}
}
}
}
}
}
while !active.is_empty() {
reap_one(
&mut active,
&mut free_slots,
pool_base,
sancov_pool_base,
vm_ptr,
stats_ptr,
split_call_count,
&mut batch_has_new,
);
}
if timelines_spawned >= max_timelines {
break;
}
if !batch_has_new && timelines_spawned >= effective_min_timelines {
unsafe {
crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
}
break;
}
if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
break;
}
}
if parallel {
COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
if !sancov_pool_base.is_null() {
crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
}
    } else if !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                parent_bitmap_backup.as_ptr(),
                bm_ptr,
                COVERAGE_MAP_SIZE,
            );
        }
    }
}
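/// Fixed-fanout splitting driven by the global energy budget in
/// `SharedStats`: forks up to `timelines_per_split` children at this mark,
/// paying one unit of energy per child, and merges each child's coverage
/// into the shared explored map. Bugs are signaled by child exit code 42.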
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
context::with_ctx(|ctx| {
(
ctx.active,
ctx.depth,
ctx.max_depth,
ctx.timelines_per_split,
ctx.current_seed,
)
});
if !ctx_active || depth >= max_depth {
return;
}
let stats_ptr = SHARED_STATS.with(|c| c.get());
if stats_ptr.is_null() {
return;
}
if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
return;
}
let split_call_count = context::rng_get_count();
let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
let (slot_count, pool_base) = if let Some(ref p) = parallelism {
let sc = resolve_parallelism(p);
let pb = get_or_init_pool(sc);
if pb.is_null() {
(0, std::ptr::null_mut())
} else {
(sc, pb)
}
} else {
(0, std::ptr::null_mut())
};
let parallel = slot_count > 0;
let sancov_pool_base = if parallel {
crate::sancov::get_or_init_sancov_pool(slot_count)
} else {
std::ptr::null_mut()
};
let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
} else {
std::ptr::null_mut()
};
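    // Single-process mode: children write into the parent's own bitmap, so
    // snapshot it here and restore it after the loop.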
let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
if !parallel && !bm_ptr.is_null() {
unsafe {
std::ptr::copy_nonoverlapping(
bm_ptr,
parent_bitmap_backup.as_mut_ptr(),
COVERAGE_MAP_SIZE,
);
}
}
let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
let mut free_slots: Vec<usize> = if parallel {
(0..slot_count).collect()
} else {
Vec::new()
};
let mut batch_has_new = false;
for child_idx in 0..timelines_per_split {
        if child_idx > 0 && !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
            break;
        }
let child_seed = compute_child_seed(current_seed, mark_name, child_idx);
if parallel {
while free_slots.is_empty() {
reap_one(
&mut active,
&mut free_slots,
pool_base,
sancov_pool_base,
vm_ptr,
stats_ptr,
split_call_count,
&mut batch_has_new,
);
}
let slot = match free_slots.pop() {
Some(s) => s,
None => break,
};
unsafe {
std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
}
if !sancov_pool_base.is_null() {
let sancov_len = crate::sancov::sancov_edge_count();
unsafe {
let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
std::ptr::write_bytes(sancov_slot, 0, sancov_len);
crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
}
}
let pid = unsafe { libc::fork() };
match pid {
-1 => {
free_slots.push(slot);
break;
}
0 => {
setup_child(child_seed, split_call_count, stats_ptr);
return;
}
child_pid => {
active.insert(child_pid, (child_seed, slot));
}
}
} else {
if !bm_ptr.is_null() {
let bm = unsafe { CoverageBitmap::new(bm_ptr) };
bm.clear();
}
crate::sancov::clear_transfer_buffer();
let pid = unsafe { libc::fork() };
match pid {
-1 => break,
0 => {
setup_child(child_seed, split_call_count, stats_ptr);
return;
}
child_pid => {
let mut status: libc::c_int = 0;
unsafe {
libc::waitpid(child_pid, &mut status, 0);
}
if !bm_ptr.is_null() && !vm_ptr.is_null() {
let bm = unsafe { CoverageBitmap::new(bm_ptr) };
let vm = unsafe { ExploredMap::new(vm_ptr) };
vm.merge_from(&bm);
}
batch_has_new |= crate::sancov::has_new_sancov_coverage();
let exited_normally = libc::WIFEXITED(status);
if exited_normally && libc::WEXITSTATUS(status) == 42 {
unsafe {
(*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
}
save_bug_recipe(split_call_count, child_seed);
}
unsafe {
(*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
}
}
}
}
}
while !active.is_empty() {
reap_one(
&mut active,
&mut free_slots,
pool_base,
sancov_pool_base,
vm_ptr,
stats_ptr,
split_call_count,
&mut batch_has_new,
);
}
if parallel {
COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
if !sancov_pool_base.is_null() {
crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
}
} else if !bm_ptr.is_null() {
unsafe {
std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
}
}
}
#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}
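/// Records the reproduction recipe for the first bug found: the parent's
/// accumulated fork decisions plus this child's final
/// `(split_call_count, child_seed)` entry, truncated to
/// `MAX_RECIPE_ENTRIES`. The `claimed` flag ensures only the first
/// reporting timeline writes the shared recipe.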
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
if recipe_ptr.is_null() {
return;
}
unsafe {
let recipe = &mut *recipe_ptr;
if recipe
.claimed
.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
{
context::with_ctx(|ctx| {
let total_entries = ctx.recipe.len() + 1;
let len = total_entries.min(MAX_RECIPE_ENTRIES);
for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
recipe.entries[i] = entry;
}
if len > 0 {
recipe.entries[len - 1] = (split_call_count, child_seed);
}
recipe.len = len as u32;
});
}
}
}
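/// Terminates a child timeline. On Unix the child's sancov counters are
/// copied to shared memory first, and `_exit` is used so the parent's
/// atexit handlers do not run in the child.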
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
crate::sancov::copy_counters_to_shared();
unsafe { libc::_exit(code) }
}
#[cfg(not(unix))]
pub fn exit_child(code: i32) -> ! {
std::process::exit(code)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_compute_child_seed_deterministic() {
let s1 = compute_child_seed(42, "test", 0);
let s2 = compute_child_seed(42, "test", 0);
assert_eq!(s1, s2);
}
#[test]
fn test_compute_child_seed_varies_by_index() {
let s0 = compute_child_seed(42, "test", 0);
let s1 = compute_child_seed(42, "test", 1);
let s2 = compute_child_seed(42, "test", 2);
assert_ne!(s0, s1);
assert_ne!(s1, s2);
assert_ne!(s0, s2);
}
#[test]
fn test_compute_child_seed_varies_by_name() {
let s1 = compute_child_seed(42, "alpha", 0);
let s2 = compute_child_seed(42, "beta", 0);
assert_ne!(s1, s2);
}
#[test]
fn test_compute_child_seed_varies_by_parent() {
let s1 = compute_child_seed(1, "test", 0);
let s2 = compute_child_seed(2, "test", 0);
assert_ne!(s1, s2);
}
}