1use std::sync::atomic::Ordering;
22
23#[cfg(unix)]
24use std::collections::HashMap;
25
26use crate::context::{
27 self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
28};
29#[cfg(unix)]
30use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
31use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
32use crate::shared_stats::MAX_RECIPE_ENTRIES;
33
/// Derive a deterministic per-child RNG seed from the parent's seed, the
/// mark name, and the child's index, using FNV-1a-style mixing so that any
/// change in either input produces a different seed.
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    const FNV_OFFSET_BASIS: u64 = 0xcbf29ce484222325;
    const FNV_PRIME: u64 = 0x100000001b3;

    // One FNV-1a absorption step: xor the value in, then multiply.
    let mix = |acc: u64, v: u64| (acc ^ v).wrapping_mul(FNV_PRIME);

    // Hash the mark name byte by byte, then fold in the parent seed and
    // finally the child index.
    let name_hash = mark_name
        .bytes()
        .fold(FNV_OFFSET_BASIS, |acc, b| mix(acc, b as u64));
    let with_parent = mix(name_hash, parent_seed);
    mix(with_parent, child_idx as u64)
}
49
#[derive(Debug, Clone)]
/// Policy for how many concurrent forked children (and coverage-pool slots)
/// to use. Resolved to a concrete count by `resolve_parallelism`, which
/// clamps the result to at least 1.
pub enum Parallelism {
    /// Use every online CPU core.
    MaxCores,
    /// Use half of the online CPU cores.
    HalfCores,
    /// Use an explicit, fixed core count.
    Cores(usize),
    /// Use all online cores minus the given reserve (saturating subtraction,
    /// then clamped to 1 during resolution).
    MaxCoresMinus(usize),
}
66
67#[cfg(unix)]
69fn resolve_parallelism(p: &Parallelism) -> usize {
70 let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
71 let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
72 let n = match p {
73 Parallelism::MaxCores => ncpus,
74 Parallelism::HalfCores => ncpus / 2,
75 Parallelism::Cores(c) => *c,
76 Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
77 };
78 n.max(1) }
80
81#[cfg(unix)]
87fn get_or_init_pool(slot_count: usize) -> *mut u8 {
88 let existing = BITMAP_POOL.with(|c| c.get());
89 let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());
90
91 if !existing.is_null() && existing_slots >= slot_count {
92 return existing;
93 }
94
95 if !existing.is_null() {
97 unsafe {
99 crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
100 }
101 BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
102 BITMAP_POOL_SLOTS.with(|c| c.set(0));
103 }
104
105 match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
106 Ok(ptr) => {
107 BITMAP_POOL.with(|c| c.set(ptr));
108 BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
109 ptr
110 }
111 Err(_) => std::ptr::null_mut(),
112 }
113}
114
115#[cfg(unix)]
117fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
118 unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
120}
121
#[cfg(unix)]
/// Runs in the freshly-forked child: reseed the RNG, mark the context as a
/// child one level deeper, extend the recipe, bump the shared timeline
/// counter, and drop inherited pool pointers so the child never frees or
/// reuses the parent's shared-memory pools.
fn setup_child(
    child_seed: u64,
    split_call_count: u64,
    stats_ptr: *mut crate::shared_stats::SharedStats,
) {
    context::rng_reseed(child_seed);
    context::with_ctx_mut(|ctx| {
        ctx.is_child = true;
        ctx.depth += 1;
        ctx.current_seed = child_seed;
        // (rng call count at the split, child seed) — consumed later by
        // save_bug_recipe to record how this timeline was reached.
        ctx.recipe.push((split_call_count, child_seed));
    });
    if !stats_ptr.is_null() {
        unsafe {
            // SAFETY: non-null checked above; the field update is atomic.
            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
        }
    }
    // Forget the inherited bitmap pool: it belongs to the parent, and the
    // child must not free or resize it.
    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
    BITMAP_POOL_SLOTS.with(|c| c.set(0));

    // Start the child with clean SanCov counters and no inherited SanCov pool.
    crate::sancov::reset_bss_counters();
    crate::sancov::SANCOV_POOL.with(|c| c.set(std::ptr::null_mut()));
    crate::sancov::SANCOV_POOL_SLOTS.with(|c| c.set(0));
}
154
#[cfg(unix)]
/// Block until one child exits, then fold its results back into the parent.
///
/// On return: the child's coverage bitmap and SanCov counters have been
/// merged (setting `*batch_has_new` if they contained unseen coverage),
/// bug/fork statistics have been updated in `stats_ptr` (when non-null),
/// and the child's pool slot has been pushed back onto `free_slots`.
#[allow(clippy::too_many_arguments)]
fn reap_one(
    active: &mut HashMap<libc::pid_t, (u64, usize)>,
    free_slots: &mut Vec<usize>,
    pool_base: *mut u8,
    sancov_pool_base: *mut u8,
    vm_ptr: *mut u8,
    stats_ptr: *mut crate::shared_stats::SharedStats,
    split_call_count: u64,
    batch_has_new: &mut bool,
) {
    let mut status: libc::c_int = 0;
    // pid -1 waits for *any* child; <= 0 means no children (or an error),
    // so there is nothing to account for.
    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
    if finished_pid <= 0 {
        return;
    }

    // Ignore pids we did not spawn (or have already accounted for).
    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
        return;
    };

    // Merge the child's coverage bitmap (written into its pool slot) into
    // the shared explored map, noting whether anything new was discovered.
    if !vm_ptr.is_null() {
        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        if vm.has_new_bits(&child_bm) {
            *batch_has_new = true;
        }
        vm.merge_from(&child_bm);
    }

    // Same novelty check for the child's SanCov counters, if a pool exists.
    if !sancov_pool_base.is_null() {
        let sancov_slot = unsafe { crate::sancov::sancov_pool_slot(sancov_pool_base, slot) };
        if crate::sancov::has_new_sancov_coverage_from(sancov_slot) {
            *batch_has_new = true;
        }
    }

    // Exit status 42 is counted as a found bug; record its recipe.
    let exited_normally = libc::WIFEXITED(status);
    if exited_normally && libc::WEXITSTATUS(status) == 42 {
        if !stats_ptr.is_null() {
            unsafe {
                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
            }
        }
        save_bug_recipe(split_call_count, child_seed);
    }

    if !stats_ptr.is_null() {
        unsafe {
            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
        }
    }

    // The slot is now free for the next fork.
    free_slots.push(slot);
}
222
#[derive(Debug, Clone)]
/// Tuning parameters for `adaptive_split_on_discovery`.
pub struct AdaptiveConfig {
    /// Number of children forked per batch before coverage is re-evaluated.
    pub batch_size: u32,
    /// Minimum total timelines to spawn before a no-new-coverage batch may
    /// end the split early (and return leftover energy to the pool).
    pub min_timelines: u32,
    /// Hard cap on timelines spawned at one split point.
    pub max_timelines: u32,
    /// Energy budget associated with each mark.
    /// NOTE(review): not referenced in this file — presumably consumed by
    /// `crate::energy::init_mark_budget` / `decrement_mark_energy`; confirm there.
    pub per_mark_energy: i64,
    /// Override for `min_timelines` while the context is in warm start;
    /// defaults to `batch_size` when `None`.
    pub warm_min_timelines: Option<u32>,
}
243
244#[cfg(unix)]
249pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
250 let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
251 if has_adaptive {
252 adaptive_split_on_discovery(mark_name, slot_idx);
253 } else {
254 split_on_discovery(mark_name);
255 }
256}
257
#[cfg(not(unix))]
/// No-op on non-Unix targets: timeline splitting is implemented via `fork(2)`.
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}
261
#[cfg(unix)]
/// Energy-budgeted splitting: fork children in batches at this mark, and
/// keep spending energy only while batches keep finding new coverage.
///
/// Forked children return from this function (after `setup_child`) so they
/// continue executing the caller's code; the parent loops here until the
/// mark's energy budget, `max_timelines`, or a coverage plateau stops it.
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    // Inactive context or fork-depth limit reached: no splitting.
    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Lazily set up this mark's slot in the shared energy table.
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    // RNG call count at the split point; stored in recipe entries.
    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    // Adaptive knobs, with defaults when no AdaptiveConfig was provided.
    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // During warm start a different (defaulting to batch_size) minimum
    // applies before the no-new-coverage early-out may trigger.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    // A parallelism setting plus a successfully mapped bitmap pool enables
    // the parallel (slot-based) path; otherwise children run one at a time
    // against the single shared bitmap.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    // Remember the parent's SanCov transfer pointer so it can be restored
    // after children have redirected it at their pool slots.
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // The sequential path clears the shared bitmap per child, so back up
    // the parent's bitmap contents for restoration at the end.
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    // pid -> (child seed, pool slot) for in-flight children (parallel path).
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    // Outer loop: one iteration per batch.
    loop {
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        // Inner loop: spawn up to batch_size children, stopping early on
        // the max-timelines cap or an exhausted energy budget.
        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                break;
            }

            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                // All slots busy: reap until one frees up.
                while free_slots.is_empty() {
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        sancov_pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                // Zero the slot and point the thread-local bitmap pointer
                // at it BEFORE forking, so the child writes coverage there.
                unsafe {
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                // Same pre-fork redirection for the SanCov transfer buffer.
                if !sancov_pool_base.is_null() {
                    let sancov_len = crate::sancov::sancov_edge_count();
                    unsafe {
                        let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                        std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                        crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                    }
                }

                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        // Fork failed: return the slot and stop this batch.
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        // Child: initialize, then resume the caller's code.
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                // Sequential path: the child reuses the shared bitmap, so
                // clear it first (the parent's copy is restored at the end).
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }
                crate::sancov::clear_transfer_buffer();

                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        // Parent waits for this one child to finish...
                        let mut status: libc::c_int = 0;
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        // ...then merges its coverage into the explored map.
                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }
                        batch_has_new |= crate::sancov::has_new_sancov_coverage();

                        // Exit status 42 is counted as a found bug.
                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Batch boundary: drain all in-flight children so batch_has_new
        // reflects the whole batch before deciding whether to continue.
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                sancov_pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        if timelines_spawned >= max_timelines {
            break;
        }
        // Coverage plateau after the minimum effort: hand the remaining
        // energy back to the shared pool and stop.
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            unsafe {
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        // A short batch means energy ran out (or a fork failed) mid-batch.
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            break;
        }
    }

    // Parent cleanup: restore the bitmap / SanCov transfer pointers
    // (parallel) or the saved bitmap contents (sequential).
    if parallel {
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else {
        if !bm_ptr.is_null() {
            unsafe {
                std::ptr::copy_nonoverlapping(
                    parent_bitmap_backup.as_ptr(),
                    bm_ptr,
                    COVERAGE_MAP_SIZE,
                );
            }
        }
    }
}
542
#[cfg(unix)]
/// Fixed-count splitting: fork `timelines_per_split` children at this mark,
/// gated by the global shared energy counter (no per-mark budget).
///
/// Forked children return from this function (after `setup_child`) and
/// resume the caller's code; the parent waits for all of them — in parallel
/// pool slots or one at a time — before returning.
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    // Inactive context or fork-depth limit reached: no splitting.
    if !ctx_active || depth >= max_depth {
        return;
    }

    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    // Global energy gate: one unit pays for the first child.
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        return;
    }

    // RNG call count at the split point; stored in recipe entries.
    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    // A parallelism setting plus a successfully mapped bitmap pool enables
    // the parallel (slot-based) path; otherwise children run sequentially.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    // Remember the parent's SanCov transfer pointer for restoration later.
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // The sequential path clears the shared bitmap per child, so back up
    // the parent's bitmap contents for restoration at the end.
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    // pid -> (child seed, pool slot) for in-flight children (parallel path).
    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    // Required by reap_one's signature; not consulted in this fixed-count mode.
    let mut batch_has_new = false;

    for child_idx in 0..timelines_per_split {
        // Each additional child costs one more unit of global energy.
        if child_idx > 0 {
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            // All slots busy: reap until one frees up.
            while free_slots.is_empty() {
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    sancov_pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            // Zero the slot and point the thread-local bitmap pointer at it
            // BEFORE forking, so the child writes coverage there.
            unsafe {
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            // Same pre-fork redirection for the SanCov transfer buffer.
            if !sancov_pool_base.is_null() {
                let sancov_len = crate::sancov::sancov_edge_count();
                unsafe {
                    let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                    std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                    crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                }
            }

            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    // Fork failed: return the slot and stop spawning.
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    // Child: initialize, then resume the caller's code.
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            // Sequential path: the child reuses the shared bitmap, so clear
            // it first (the parent's copy is restored at the end).
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }
            crate::sancov::clear_transfer_buffer();

            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    // Parent waits for this one child to finish...
                    let mut status: libc::c_int = 0;
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    // ...then merges its coverage into the explored map.
                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }
                    batch_has_new |= crate::sancov::has_new_sancov_coverage();

                    // Exit status 42 is counted as a found bug.
                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Drain any in-flight children before returning to the caller.
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            sancov_pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    // Parent cleanup: restore the bitmap / SanCov transfer pointers
    // (parallel) or the saved bitmap contents (sequential).
    if parallel {
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else if !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}
765
#[cfg(not(unix))]
/// No-op on non-Unix targets: timeline splitting is implemented via `fork(2)`.
pub fn split_on_discovery(_mark_name: &str) {}
769
/// Persist this timeline's recipe (the parent chain of fork decisions plus a
/// terminal entry for this child) into the shared recipe slot — but only if
/// no other process has claimed the slot first.
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
    if recipe_ptr.is_null() {
        return;
    }

    unsafe {
        let recipe = &mut *recipe_ptr;

        // First claimant wins: CAS 0 -> 1 so concurrently-exiting children
        // cannot overwrite each other's recipe.
        if recipe
            .claimed
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            context::with_ctx(|ctx| {
                // Parent chain plus one terminal entry for this child.
                let total_entries = ctx.recipe.len() + 1;
                let len = total_entries.min(MAX_RECIPE_ENTRIES);

                // If the chain exceeds MAX_RECIPE_ENTRIES, the tail of the
                // parent chain is dropped to keep room for the final entry.
                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
                    recipe.entries[i] = entry;
                }
                if len > 0 {
                    recipe.entries[len - 1] = (split_call_count, child_seed);
                }
                recipe.len = len as u32;
            });
        }
    }
}
805
#[cfg(unix)]
/// Terminate a child timeline without unwinding or running atexit handlers.
///
/// Copies the child's SanCov counters into the shared transfer buffer first
/// so the parent can inspect them, then exits via `_exit` (which, unlike
/// `exit`, does not flush stdio buffers inherited from the parent).
pub fn exit_child(code: i32) -> ! {
    crate::sancov::copy_counters_to_shared();
    unsafe { libc::_exit(code) }
}
821
#[cfg(not(unix))]
/// Non-Unix fallback: plain process exit (there is no forked-child model or
/// SanCov counter transfer on these targets).
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}
827
#[cfg(test)]
mod tests {
    use super::*;

    /// Identical inputs must always produce the identical seed.
    #[test]
    fn test_compute_child_seed_deterministic() {
        assert_eq!(
            compute_child_seed(42, "test", 0),
            compute_child_seed(42, "test", 0)
        );
    }

    /// Distinct child indices must produce pairwise-distinct seeds.
    #[test]
    fn test_compute_child_seed_varies_by_index() {
        let seeds = [
            compute_child_seed(42, "test", 0),
            compute_child_seed(42, "test", 1),
            compute_child_seed(42, "test", 2),
        ];
        for i in 0..seeds.len() {
            for j in (i + 1)..seeds.len() {
                assert_ne!(seeds[i], seeds[j]);
            }
        }
    }

    /// Different mark names must produce different seeds.
    #[test]
    fn test_compute_child_seed_varies_by_name() {
        assert_ne!(
            compute_child_seed(42, "alpha", 0),
            compute_child_seed(42, "beta", 0)
        );
    }

    /// Different parent seeds must produce different seeds.
    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        assert_ne!(
            compute_child_seed(1, "test", 0),
            compute_child_seed(2, "test", 0)
        );
    }
}