use std::sync::atomic::Ordering;

#[cfg(unix)]
use std::collections::HashMap;

use crate::context::{
    self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
};
#[cfg(unix)]
use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
use crate::shared_stats::MAX_RECIPE_ENTRIES;

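/// Derives a deterministic child seed from the parent seed, the mark name,
/// and the child index via FNV-1a-style mixing, so each (parent, mark,
/// index) triple maps to a stable, well-spread seed.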
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    let mut hash: u64 = 0xcbf29ce484222325;
    for &byte in mark_name.as_bytes() {
        hash ^= byte as u64;
        hash = hash.wrapping_mul(0x100000001b3);
    }
    hash ^= parent_seed;
    hash = hash.wrapping_mul(0x100000001b3);
    hash ^= child_idx as u64;
    hash = hash.wrapping_mul(0x100000001b3);
    hash
}

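/// How many child processes a split may keep in flight at once, resolved
/// against the number of online CPUs by `resolve_parallelism` below (e.g.
/// `Parallelism::MaxCoresMinus(1)` leaves one core free, clamped to at
/// least one).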
#[derive(Debug, Clone)]
pub enum Parallelism {
    /// Use every online core.
    MaxCores,
    /// Use half of the online cores.
    HalfCores,
    /// Use exactly this many cores.
    Cores(usize),
    /// Use all online cores minus the given count.
    MaxCoresMinus(usize),
}

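/// Resolves a [`Parallelism`] setting against the number of online CPUs,
/// always returning at least 1.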
#[cfg(unix)]
fn resolve_parallelism(p: &Parallelism) -> usize {
    let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
    // sysconf returns -1 on error; fall back to a single core.
    let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
    let n = match p {
        Parallelism::MaxCores => ncpus,
        Parallelism::HalfCores => ncpus / 2,
        Parallelism::Cores(c) => *c,
        Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
    };
    n.max(1)
}

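/// Returns a shared-memory pool with at least `slot_count` coverage-bitmap
/// slots of `COVERAGE_MAP_SIZE` bytes each, reallocating if the cached pool
/// is too small. Returns null if allocation fails.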
#[cfg(unix)]
fn get_or_init_pool(slot_count: usize) -> *mut u8 {
    let existing = BITMAP_POOL.with(|c| c.get());
    let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());

    if !existing.is_null() && existing_slots >= slot_count {
        return existing;
    }

    if !existing.is_null() {
        // The existing pool is too small; release it before reallocating.
        unsafe {
            crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
        }
        BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
        BITMAP_POOL_SLOTS.with(|c| c.set(0));
    }

    match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
        Ok(ptr) => {
            BITMAP_POOL.with(|c| c.set(ptr));
            BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
            ptr
        }
        Err(_) => std::ptr::null_mut(),
    }
}

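/// Returns a pointer to slot `idx` within the coverage bitmap pool.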
#[cfg(unix)]
fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
    unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
}

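/// Reinitializes a freshly forked child: reseeds the RNG, marks the
/// context as a child one level deeper, appends this split to the recipe,
/// bumps the shared timeline counter, and clears inherited pool pointers
/// so a nested split allocates fresh ones.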
#[cfg(unix)]
fn setup_child(
    child_seed: u64,
    split_call_count: u64,
    stats_ptr: *mut crate::shared_stats::SharedStats,
) {
    context::rng_reseed(child_seed);
    context::with_ctx_mut(|ctx| {
        ctx.is_child = true;
        ctx.depth += 1;
        ctx.current_seed = child_seed;
        ctx.recipe.push((split_call_count, child_seed));
    });
    if !stats_ptr.is_null() {
        unsafe {
            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
        }
    }
    // Forget the parent's pools: the child writes only to its own slot and
    // must allocate fresh pools if it splits again.
    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
    BITMAP_POOL_SLOTS.with(|c| c.set(0));

    crate::sancov::reset_bss_counters();
    crate::sancov::SANCOV_POOL.with(|c| c.set(std::ptr::null_mut()));
    crate::sancov::SANCOV_POOL_SLOTS.with(|c| c.set(0));
}

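/// Blocks until one child exits, then merges that child's coverage bitmap
/// (and SanCov counters, when enabled) into the shared explored map,
/// records bug and fork-point statistics, and returns the child's slot to
/// `free_slots`.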
#[cfg(unix)]
#[allow(clippy::too_many_arguments)]
fn reap_one(
    active: &mut HashMap<libc::pid_t, (u64, usize)>,
    free_slots: &mut Vec<usize>,
    pool_base: *mut u8,
    sancov_pool_base: *mut u8,
    vm_ptr: *mut u8,
    stats_ptr: *mut crate::shared_stats::SharedStats,
    split_call_count: u64,
    batch_has_new: &mut bool,
) {
    let mut status: libc::c_int = 0;
    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
    if finished_pid <= 0 {
        return;
    }

    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
        return;
    };

    if !vm_ptr.is_null() {
        // Fold the child's coverage into the shared explored map.
        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        if vm.has_new_bits(&child_bm) {
            *batch_has_new = true;
        }
        vm.merge_from(&child_bm);
    }

    if !sancov_pool_base.is_null() {
        let sancov_slot = unsafe { crate::sancov::sancov_pool_slot(sancov_pool_base, slot) };
        if crate::sancov::has_new_sancov_coverage_from(sancov_slot) {
            *batch_has_new = true;
        }
    }

    // Exit code 42 is the sentinel a child uses to report a found bug.
    let exited_normally = libc::WIFEXITED(status);
    if exited_normally && libc::WEXITSTATUS(status) == 42 {
        if !stats_ptr.is_null() {
            unsafe {
                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
            }
        }
        save_bug_recipe(split_call_count, child_seed);
    }

    if !stats_ptr.is_null() {
        unsafe {
            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
        }
    }

    free_slots.push(slot);
}

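/// Tuning knobs for adaptive, coverage-guided splitting.
///
/// Children are spawned in batches of `batch_size`; splitting stops once a
/// batch finds no new coverage and at least the minimum number of
/// timelines has run, or when `max_timelines` or the mark's energy budget
/// is exhausted.
///
/// A plausible configuration (the values here are illustrative, not
/// defaults):
///
/// ```ignore
/// let cfg = AdaptiveConfig {
///     batch_size: 4,
///     min_timelines: 1,
///     max_timelines: 16,
///     per_mark_energy: 64,
///     warm_min_timelines: None,
/// };
/// ```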
#[derive(Debug, Clone)]
pub struct AdaptiveConfig {
    /// Number of children to spawn between coverage checks.
    pub batch_size: u32,
    /// Minimum timelines to run before giving up on a mark.
    pub min_timelines: u32,
    /// Hard cap on timelines spawned per mark.
    pub max_timelines: u32,
    /// Energy budget granted to each mark.
    pub per_mark_energy: i64,
    /// Minimum timelines during warm start; falls back to `batch_size`
    /// when `None`.
    pub warm_min_timelines: Option<u32>,
}

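/// Routes a discovery mark to adaptive splitting when an energy-budget
/// region is mapped, and to the fixed-count legacy path otherwise.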
#[cfg(unix)]
pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
    let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
    if has_adaptive {
        adaptive_split_on_discovery(mark_name, slot_idx);
    } else {
        split_on_discovery(mark_name);
    }
}

/// Splitting requires `fork`; on non-Unix targets this is a no-op.
#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}

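/// Coverage-guided splitting: forks children in batches of `batch_size`,
/// merging each child's coverage into the shared explored map, and keeps
/// spawning only while batches still surface new coverage (bounded by the
/// mark's energy budget and `max_timelines`, with `effective_min_timelines`
/// as the floor before giving up).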
#[cfg(unix)]
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Seed this mark's budget on first encounter.
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // During warm start, require warm_min_timelines (batch_size by default)
    // before trusting a "no new coverage" signal.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // In sequential mode children share the parent's bitmap, so save it for
    // restoration afterwards.
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    loop {
        // Each iteration spawns one batch, then checks whether it found
        // anything new.
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                // Out of energy for this mark.
                break;
            }

            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                while free_slots.is_empty() {
                    // All slots busy: block until one child finishes.
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        sancov_pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                unsafe {
                    // Zero the child's slot and point the soon-to-be child at
                    // it; the fork below inherits this pointer.
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                if !sancov_pool_base.is_null() {
                    // Likewise give the child a private SanCov counter slot.
                    let sancov_len = crate::sancov::sancov_edge_count();
                    unsafe {
                        let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                        std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                        crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                    }
                }

                // 0 = child, -1 = error, otherwise the child's pid.
                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                // Sequential mode: the child reuses the parent's bitmap,
                // cleared first.
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }
                crate::sancov::clear_transfer_buffer();

                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        let mut status: libc::c_int = 0;
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }
                        batch_has_new |= crate::sancov::has_new_sancov_coverage();

                        // Exit code 42 is the sentinel for a found bug.
                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Drain any children still running from this batch.
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                sancov_pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        if timelines_spawned >= max_timelines {
            // Hit the hard cap.
            break;
        }
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            unsafe {
                // This mark looks exhausted; return its remaining energy to
                // the shared pool.
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            // The batch ended early (energy exhausted or fork failed), so stop.
            break;
        }
    }

    if parallel {
        // Restore the parent's bitmap pointer and SanCov transfer buffer.
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else if !bm_ptr.is_null() {
        // Sequential mode scribbled over the parent's bitmap; restore it.
        unsafe {
            std::ptr::copy_nonoverlapping(
                parent_bitmap_backup.as_ptr(),
                bm_ptr,
                COVERAGE_MAP_SIZE,
            );
        }
    }
}

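/// Legacy (non-adaptive) splitting: forks a fixed number of child
/// timelines (`timelines_per_split`) at a discovery mark, charging each
/// one against the global energy budget. Children run in parallel when a
/// [`Parallelism`] setting is configured and a coverage pool could be
/// allocated; otherwise they run one at a time against the parent's own
/// bitmap.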
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    if !ctx_active || depth >= max_depth {
        return;
    }

    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        // Global energy budget is spent; skip this split.
        return;
    }

    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    let sancov_pool_base = if parallel {
        crate::sancov::get_or_init_sancov_pool(slot_count)
    } else {
        std::ptr::null_mut()
    };
    let parent_sancov_transfer = if parallel && !sancov_pool_base.is_null() {
        crate::sancov::SANCOV_TRANSFER.with(|c| c.get())
    } else {
        std::ptr::null_mut()
    };

    // In sequential mode children share the parent's bitmap, so save it for
    // restoration afterwards.
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    let mut batch_has_new = false;

    for child_idx in 0..timelines_per_split {
        if child_idx > 0 {
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                // Each additional child costs another unit of energy.
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            while free_slots.is_empty() {
                // All slots busy: block until one child finishes.
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    sancov_pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            unsafe {
                // Zero the child's slot and point the soon-to-be child at it.
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            if !sancov_pool_base.is_null() {
                // Likewise give the child a private SanCov counter slot.
                let sancov_len = crate::sancov::sancov_edge_count();
                unsafe {
                    let sancov_slot = crate::sancov::sancov_pool_slot(sancov_pool_base, slot);
                    std::ptr::write_bytes(sancov_slot, 0, sancov_len);
                    crate::sancov::SANCOV_TRANSFER.with(|c| c.set(sancov_slot));
                }
            }

            // 0 = child, -1 = error, otherwise the child's pid.
            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            // Sequential mode: the child reuses the parent's bitmap,
            // cleared first.
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }
            crate::sancov::clear_transfer_buffer();

            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    let mut status: libc::c_int = 0;
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }
                    batch_has_new |= crate::sancov::has_new_sancov_coverage();

                    // Exit code 42 is the sentinel for a found bug.
                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Drain any children still running.
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            sancov_pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    if parallel {
        // Restore the parent's bitmap pointer and SanCov transfer buffer.
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
        if !sancov_pool_base.is_null() {
            crate::sancov::SANCOV_TRANSFER.with(|c| c.set(parent_sancov_transfer));
        }
    } else if !bm_ptr.is_null() {
        // Sequential mode scribbled over the parent's bitmap; restore it.
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}

/// Splitting requires `fork`; on non-Unix targets this is a no-op.
#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}

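/// Writes the winning recipe (this child's split history plus the final
/// (call count, seed) pair) into the shared recipe region. Only the first
/// finder wins: later children lose the `claimed` compare-exchange and
/// write nothing.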
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
    if recipe_ptr.is_null() {
        return;
    }

    unsafe {
        let recipe = &mut *recipe_ptr;

        // First finder wins: only the child that claims the slot writes.
        if recipe
            .claimed
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            context::with_ctx(|ctx| {
                // The full recipe is this child's split history plus the
                // final split, truncated to MAX_RECIPE_ENTRIES.
                let total_entries = ctx.recipe.len() + 1;
                let len = total_entries.min(MAX_RECIPE_ENTRIES);

                // Copy as many historical entries as fit, reserving the
                // last slot for the split that found the bug.
                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
                    recipe.entries[i] = entry;
                }
                if len > 0 {
                    recipe.entries[len - 1] = (split_call_count, child_seed);
                }
                recipe.len = len as u32;
            });
        }
    }
}

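/// Exits a child timeline with `code` (42 is the sentinel the reaping
/// parent interprets as "bug found"). SanCov counters are flushed to the
/// shared transfer buffer first; `_exit` then bypasses atexit handlers,
/// which a forked child must not run.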
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
    crate::sancov::copy_counters_to_shared();
    // _exit skips atexit handlers and stdio flushes inherited from the parent.
    unsafe { libc::_exit(code) }
}

/// On non-Unix targets there is no shared buffer to flush; exit normally.
#[cfg(not(unix))]
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_child_seed_deterministic() {
        let s1 = compute_child_seed(42, "test", 0);
        let s2 = compute_child_seed(42, "test", 0);
        assert_eq!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_index() {
        let s0 = compute_child_seed(42, "test", 0);
        let s1 = compute_child_seed(42, "test", 1);
        let s2 = compute_child_seed(42, "test", 2);
        assert_ne!(s0, s1);
        assert_ne!(s1, s2);
        assert_ne!(s0, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_name() {
        let s1 = compute_child_seed(42, "alpha", 0);
        let s2 = compute_child_seed(42, "beta", 0);
        assert_ne!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        let s1 = compute_child_seed(1, "test", 0);
        let s2 = compute_child_seed(2, "test", 0);
        assert_ne!(s1, s2);
    }
}