//! Timeline splitting via `fork(2)`: at a discovery mark the process forks
//! child timelines with derived seeds, collects their coverage bitmaps, and
//! records a replayable recipe when a child reports a bug.

use std::sync::atomic::Ordering;

#[cfg(unix)]
use std::collections::HashMap;

use crate::context::{
    self, COVERAGE_BITMAP_PTR, ENERGY_BUDGET_PTR, EXPLORED_MAP_PTR, SHARED_RECIPE, SHARED_STATS,
};
#[cfg(unix)]
use crate::context::{BITMAP_POOL, BITMAP_POOL_SLOTS};
use crate::coverage::{COVERAGE_MAP_SIZE, CoverageBitmap, ExploredMap};
use crate::shared_stats::MAX_RECIPE_ENTRIES;

/// Derives a deterministic child seed from the parent seed, the mark name,
/// and the child index.
fn compute_child_seed(parent_seed: u64, mark_name: &str, child_idx: u32) -> u64 {
    // FNV-1a over the mark name, then fold in the parent seed and child index.
    let mut hash: u64 = 0xcbf29ce484222325;
    for &byte in mark_name.as_bytes() {
        hash ^= byte as u64;
        hash = hash.wrapping_mul(0x100000001b3);
    }
    hash ^= parent_seed;
    hash = hash.wrapping_mul(0x100000001b3);
    hash ^= child_idx as u64;
    hash = hash.wrapping_mul(0x100000001b3);
    hash
}

/// How many worker processes may run concurrently during a split.
#[derive(Debug, Clone)]
pub enum Parallelism {
    /// Use every online core.
    MaxCores,
    /// Use half of the online cores.
    HalfCores,
    /// Use an explicit worker count.
    Cores(usize),
    /// Use all online cores minus the given number.
    MaxCoresMinus(usize),
}

#[cfg(unix)]
/// Resolves a `Parallelism` policy to a concrete worker count, clamped to
/// at least one.
fn resolve_parallelism(p: &Parallelism) -> usize {
    let ncpus = unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) };
    let ncpus = if ncpus > 0 { ncpus as usize } else { 1 };
    let n = match p {
        Parallelism::MaxCores => ncpus,
        Parallelism::HalfCores => ncpus / 2,
        Parallelism::Cores(c) => *c,
        Parallelism::MaxCoresMinus(minus) => ncpus.saturating_sub(*minus),
    };
    n.max(1)
}

#[cfg(unix)]
/// Returns the base pointer of the shared coverage-bitmap pool,
/// (re)allocating when the requested slot count exceeds the current
/// allocation. Returns null if allocation fails.
fn get_or_init_pool(slot_count: usize) -> *mut u8 {
    let existing = BITMAP_POOL.with(|c| c.get());
    let existing_slots = BITMAP_POOL_SLOTS.with(|c| c.get());

    if !existing.is_null() && existing_slots >= slot_count {
        return existing;
    }

    if !existing.is_null() {
        // Pool is too small: release it before allocating a larger one.
        unsafe {
            crate::shared_mem::free_shared(existing, existing_slots * COVERAGE_MAP_SIZE);
        }
        BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
        BITMAP_POOL_SLOTS.with(|c| c.set(0));
    }

    match crate::shared_mem::alloc_shared(slot_count * COVERAGE_MAP_SIZE) {
        Ok(ptr) => {
            BITMAP_POOL.with(|c| c.set(ptr));
            BITMAP_POOL_SLOTS.with(|c| c.set(slot_count));
            ptr
        }
        Err(_) => std::ptr::null_mut(),
    }
}

#[cfg(unix)]
/// Returns a pointer to coverage slot `idx` within the pool.
fn pool_slot(pool_base: *mut u8, idx: usize) -> *mut u8 {
    unsafe { pool_base.add(idx * COVERAGE_MAP_SIZE) }
}

#[cfg(unix)]
/// Runs in a freshly forked child: reseeds the RNG, marks the context as a
/// child one level deeper, appends this split to the recipe, and counts the
/// new timeline.
fn setup_child(
    child_seed: u64,
    split_call_count: u64,
    stats_ptr: *mut crate::shared_stats::SharedStats,
) {
    context::rng_reseed(child_seed);
    context::with_ctx_mut(|ctx| {
        ctx.is_child = true;
        ctx.depth += 1;
        ctx.current_seed = child_seed;
        ctx.recipe.push((split_call_count, child_seed));
    });
    if !stats_ptr.is_null() {
        unsafe {
            (*stats_ptr).total_timelines.fetch_add(1, Ordering::Relaxed);
        }
    }
    // Detach from the parent's bitmap pool so the child never frees or
    // reuses it.
    BITMAP_POOL.with(|c| c.set(std::ptr::null_mut()));
    BITMAP_POOL_SLOTS.with(|c| c.set(0));
}

#[cfg(unix)]
/// Blocks until one child exits, merges that child's coverage slot into the
/// explored map, records bug exits and fork points, and returns the slot to
/// the free list.
fn reap_one(
    active: &mut HashMap<libc::pid_t, (u64, usize)>,
    free_slots: &mut Vec<usize>,
    pool_base: *mut u8,
    vm_ptr: *mut u8,
    stats_ptr: *mut crate::shared_stats::SharedStats,
    split_call_count: u64,
    batch_has_new: &mut bool,
) {
    let mut status: libc::c_int = 0;
    let finished_pid = unsafe { libc::waitpid(-1, &mut status, 0) };
    // waitpid(-1) reports any child; bail out on error or no children.
    if finished_pid <= 0 {
        return;
    }

    let Some((child_seed, slot)) = active.remove(&finished_pid) else {
        return;
    };

    if !vm_ptr.is_null() {
        // Merge the child's private coverage slot into the explored map,
        // noting whether it contributed anything new.
        let child_bm = unsafe { CoverageBitmap::new(pool_slot(pool_base, slot)) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        if vm.has_new_bits(&child_bm) {
            *batch_has_new = true;
        }
        vm.merge_from(&child_bm);
    }

    // Exit code 42 is the child's signal that it found a bug.
    let exited_normally = libc::WIFEXITED(status);
    if exited_normally && libc::WEXITSTATUS(status) == 42 {
        if !stats_ptr.is_null() {
            unsafe {
                (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
            }
        }
        save_bug_recipe(split_call_count, child_seed);
    }

    if !stats_ptr.is_null() {
        unsafe {
            (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
        }
    }

    free_slots.push(slot);
}

#[derive(Debug, Clone)]
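/// Tuning knobs for the adaptive, energy-driven split strategy.
///
/// A sketch with illustrative values (these are not defaults from this
/// crate; pick values for your own workload):
///
/// ```ignore
/// let cfg = AdaptiveConfig {
///     batch_size: 4,               // children per batch between coverage checks
///     min_timelines: 2,            // floor per mark
///     max_timelines: 32,           // hard cap per mark
///     per_mark_energy: 64,         // energy granted to each mark
///     warm_min_timelines: Some(8), // higher floor during warm start
/// };
/// ```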
pub struct AdaptiveConfig {
    /// Children to spawn per batch before re-checking for new coverage.
    pub batch_size: u32,
    /// Floor on timelines per mark, even when batches find nothing new.
    pub min_timelines: u32,
    /// Hard cap on timelines per mark.
    pub max_timelines: u32,
    /// Energy budget granted to each mark.
    pub per_mark_energy: i64,
    /// Floor used instead of `min_timelines` during a warm start; defaults
    /// to `batch_size` when unset.
    pub warm_min_timelines: Option<u32>,
}

/// Routes a split to the adaptive strategy when an energy budget is
/// present, otherwise to the plain strategy.
#[cfg(unix)]
pub(crate) fn dispatch_split(mark_name: &str, slot_idx: usize) {
    let has_adaptive = ENERGY_BUDGET_PTR.with(|c| !c.get().is_null());
    if has_adaptive {
        adaptive_split_on_discovery(mark_name, slot_idx);
    } else {
        split_on_discovery(mark_name);
    }
}

#[cfg(not(unix))]
pub(crate) fn dispatch_split(_mark_name: &str, _slot_idx: usize) {}

#[cfg(unix)]
/// Adaptive split: spawns children in batches, continuing only while
/// batches keep discovering new coverage, energy remains, and the per-mark
/// timeline bounds allow.
fn adaptive_split_on_discovery(mark_name: &str, slot_idx: usize) {
    let (ctx_active, depth, max_depth, current_seed) =
        context::with_ctx(|ctx| (ctx.active, ctx.depth, ctx.max_depth, ctx.current_seed));

    if !ctx_active || depth >= max_depth {
        return;
    }

    let budget_ptr = ENERGY_BUDGET_PTR.with(|c| c.get());
    if budget_ptr.is_null() {
        return;
    }

    // Ensure this mark's energy budget is initialized before drawing from it.
    unsafe {
        crate::energy::init_mark_budget(budget_ptr, slot_idx);
    }

    let split_call_count = context::rng_get_count();

    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());
    let stats_ptr = SHARED_STATS.with(|c| c.get());

    let (batch_size, min_timelines, max_timelines) = context::with_ctx(|ctx| {
        ctx.adaptive
            .as_ref()
            .map(|a| (a.batch_size, a.min_timelines, a.max_timelines))
            .unwrap_or((4, 1, 16))
    });

    // During a warm start, raise the floor (default: one full batch) so
    // early marks get a fair number of tries before being abandoned.
    let effective_min_timelines = {
        let (is_warm, warm_min) = context::with_ctx(|ctx| {
            let wm = ctx
                .adaptive
                .as_ref()
                .and_then(|a| a.warm_min_timelines)
                .unwrap_or(batch_size);
            (ctx.warm_start, wm)
        });
        if is_warm { warm_min } else { min_timelines }
    };

    // Parallel mode needs a pool with one coverage slot per worker; fall
    // back to sequential mode if the pool cannot be allocated.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Sequential children write into the parent's own bitmap, so snapshot
    // it here and restore it after all children have run.
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut timelines_spawned: u32 = 0;

    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };

    loop {
        let mut batch_has_new = false;
        let batch_start = timelines_spawned;

        while timelines_spawned - batch_start < batch_size {
            if timelines_spawned >= max_timelines {
                break;
            }

            // Every child costs one unit of this mark's energy.
            if !unsafe { crate::energy::decrement_mark_energy(budget_ptr, slot_idx) } {
                break;
            }

            let child_seed = compute_child_seed(current_seed, mark_name, timelines_spawned);
            timelines_spawned += 1;

            if parallel {
                // All worker slots busy: block until one child finishes.
                while free_slots.is_empty() {
                    reap_one(
                        &mut active,
                        &mut free_slots,
                        pool_base,
                        vm_ptr,
                        stats_ptr,
                        split_call_count,
                        &mut batch_has_new,
                    );
                }
                let slot = match free_slots.pop() {
                    Some(s) => s,
                    None => break,
                };

                // Zero the slot and point coverage writes at it; the child
                // inherits this pointer across fork.
                unsafe {
                    std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                    COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
                }

                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => {
                        free_slots.push(slot);
                        break;
                    }
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        active.insert(child_pid, (child_seed, slot));
                    }
                }
            } else {
                if !bm_ptr.is_null() {
                    let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                    bm.clear();
                }

                let pid = unsafe { libc::fork() };
                match pid {
                    -1 => break,
                    0 => {
                        setup_child(child_seed, split_call_count, stats_ptr);
                        return;
                    }
                    child_pid => {
                        let mut status: libc::c_int = 0;
                        unsafe {
                            libc::waitpid(child_pid, &mut status, 0);
                        }

                        if !bm_ptr.is_null() && !vm_ptr.is_null() {
                            let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                            let vm = unsafe { ExploredMap::new(vm_ptr) };
                            if vm.has_new_bits(&bm) {
                                batch_has_new = true;
                            }
                            vm.merge_from(&bm);
                        }

                        // Exit code 42 is the child's signal that it found a bug.
                        let exited_normally = libc::WIFEXITED(status);
                        if exited_normally && libc::WEXITSTATUS(status) == 42 {
                            if !stats_ptr.is_null() {
                                unsafe {
                                    (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                                }
                            }
                            save_bug_recipe(split_call_count, child_seed);
                        }

                        if !stats_ptr.is_null() {
                            unsafe {
                                (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                }
            }
        }

        // Drain remaining children so batch_has_new reflects the whole batch.
        while !active.is_empty() {
            reap_one(
                &mut active,
                &mut free_slots,
                pool_base,
                vm_ptr,
                stats_ptr,
                split_call_count,
                &mut batch_has_new,
            );
        }

        if timelines_spawned >= max_timelines {
            break;
        }
        if !batch_has_new && timelines_spawned >= effective_min_timelines {
            // Nothing new and the floor is met: return unused energy to the
            // shared pool and stop splitting at this mark.
            unsafe {
                crate::energy::return_mark_energy_to_pool(budget_ptr, slot_idx);
            }
            break;
        }
        // An incomplete batch below max_timelines means energy ran out or a
        // fork failed, so stop.
        if timelines_spawned - batch_start < batch_size && timelines_spawned < max_timelines {
            break;
        }
    }

    if parallel {
        // Restore the parent's own bitmap pointer; it was redirected at
        // pool slots while spawning.
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
    } else if !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                parent_bitmap_backup.as_ptr(),
                bm_ptr,
                COVERAGE_MAP_SIZE,
            );
        }
    }
}

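/// Plain (non-adaptive) split: forks `timelines_per_split` children at a
/// discovery mark, charging each fork against the shared energy pool.
///
/// A sketch of a call site (the mark name and the surrounding check are
/// hypothetical, not part of this crate):
///
/// ```ignore
/// if reached_interesting_state {
///     split_on_discovery("parser_deep_state");
/// }
/// ```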
#[cfg(unix)]
pub fn split_on_discovery(mark_name: &str) {
    let (ctx_active, depth, max_depth, timelines_per_split, current_seed) =
        context::with_ctx(|ctx| {
            (
                ctx.active,
                ctx.depth,
                ctx.max_depth,
                ctx.timelines_per_split,
                ctx.current_seed,
            )
        });

    if !ctx_active || depth >= max_depth {
        return;
    }

    let stats_ptr = SHARED_STATS.with(|c| c.get());
    if stats_ptr.is_null() {
        return;
    }
    // The first child also costs one unit from the shared energy pool.
    if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
        return;
    }

    let split_call_count = context::rng_get_count();
    let bm_ptr = COVERAGE_BITMAP_PTR.with(|c| c.get());
    let vm_ptr = EXPLORED_MAP_PTR.with(|c| c.get());

    // Parallel mode needs a pool slot per worker; fall back to sequential
    // mode if the pool cannot be allocated.
    let parallelism = context::with_ctx(|ctx| ctx.parallelism.clone());
    let (slot_count, pool_base) = if let Some(ref p) = parallelism {
        let sc = resolve_parallelism(p);
        let pb = get_or_init_pool(sc);
        if pb.is_null() {
            (0, std::ptr::null_mut())
        } else {
            (sc, pb)
        }
    } else {
        (0, std::ptr::null_mut())
    };
    let parallel = slot_count > 0;

    // Snapshot the parent's bitmap for sequential mode (children share it).
    let mut parent_bitmap_backup = [0u8; COVERAGE_MAP_SIZE];
    if !parallel && !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(
                bm_ptr,
                parent_bitmap_backup.as_mut_ptr(),
                COVERAGE_MAP_SIZE,
            );
        }
    }

    let mut active: HashMap<libc::pid_t, (u64, usize)> = HashMap::new();
    let mut free_slots: Vec<usize> = if parallel {
        (0..slot_count).collect()
    } else {
        Vec::new()
    };
    let mut batch_has_new = false;

    for child_idx in 0..timelines_per_split {
        if child_idx > 0 {
            // Each additional child costs another unit of shared energy.
            if !unsafe { crate::shared_stats::decrement_energy(stats_ptr) } {
                break;
            }
        }

        let child_seed = compute_child_seed(current_seed, mark_name, child_idx);

        if parallel {
            // All worker slots busy: block until one child finishes.
            while free_slots.is_empty() {
                reap_one(
                    &mut active,
                    &mut free_slots,
                    pool_base,
                    vm_ptr,
                    stats_ptr,
                    split_call_count,
                    &mut batch_has_new,
                );
            }
            let slot = match free_slots.pop() {
                Some(s) => s,
                None => break,
            };

            // Zero the slot and point the child's coverage writes at it.
            unsafe {
                std::ptr::write_bytes(pool_slot(pool_base, slot), 0, COVERAGE_MAP_SIZE);
                COVERAGE_BITMAP_PTR.with(|c| c.set(pool_slot(pool_base, slot)));
            }

            let pid = unsafe { libc::fork() };
            match pid {
                -1 => {
                    free_slots.push(slot);
                    break;
                }
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    active.insert(child_pid, (child_seed, slot));
                }
            }
        } else {
            if !bm_ptr.is_null() {
                let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                bm.clear();
            }

            let pid = unsafe { libc::fork() };
            match pid {
                -1 => break,
                0 => {
                    setup_child(child_seed, split_call_count, stats_ptr);
                    return;
                }
                child_pid => {
                    let mut status: libc::c_int = 0;
                    unsafe {
                        libc::waitpid(child_pid, &mut status, 0);
                    }

                    if !bm_ptr.is_null() && !vm_ptr.is_null() {
                        let bm = unsafe { CoverageBitmap::new(bm_ptr) };
                        let vm = unsafe { ExploredMap::new(vm_ptr) };
                        vm.merge_from(&bm);
                    }

                    // Exit code 42 is the child's signal that it found a bug.
                    let exited_normally = libc::WIFEXITED(status);
                    if exited_normally && libc::WEXITSTATUS(status) == 42 {
                        unsafe {
                            (*stats_ptr).bug_found.fetch_add(1, Ordering::Relaxed);
                        }
                        save_bug_recipe(split_call_count, child_seed);
                    }

                    unsafe {
                        (*stats_ptr).fork_points.fetch_add(1, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    // Reap any children still running in parallel mode.
    while !active.is_empty() {
        reap_one(
            &mut active,
            &mut free_slots,
            pool_base,
            vm_ptr,
            stats_ptr,
            split_call_count,
            &mut batch_has_new,
        );
    }

    if parallel {
        // Restore the parent's own bitmap pointer.
        COVERAGE_BITMAP_PTR.with(|c| c.set(bm_ptr));
    } else if !bm_ptr.is_null() {
        unsafe {
            std::ptr::copy_nonoverlapping(parent_bitmap_backup.as_ptr(), bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}

#[cfg(not(unix))]
pub fn split_on_discovery(_mark_name: &str) {}

/// Records the recipe that reproduces a bug: the parent's chain of
/// (split_call_count, seed) entries plus the failing child's own entry.
/// Only the first claimant wins; later bugs in the same run are ignored.
fn save_bug_recipe(split_call_count: u64, child_seed: u64) {
    let recipe_ptr = SHARED_RECIPE.with(|c| c.get());
    if recipe_ptr.is_null() {
        return;
    }

    unsafe {
        let recipe = &mut *recipe_ptr;

        // Claim the shared recipe slot exactly once across all processes.
        if recipe
            .claimed
            .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
            .is_ok()
        {
            context::with_ctx(|ctx| {
                let total_entries = ctx.recipe.len() + 1;
                let len = total_entries.min(MAX_RECIPE_ENTRIES);

                // Copy the parent chain, then append this split as the
                // final entry.
                for (i, &entry) in ctx.recipe.iter().take(len - 1).enumerate() {
                    recipe.entries[i] = entry;
                }
                if len > 0 {
                    recipe.entries[len - 1] = (split_call_count, child_seed);
                }
                recipe.len = len as u32;
            });
        }
    }
}

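/// Terminates the current child timeline immediately with `code`.
///
/// On Unix this calls `libc::_exit`, so destructors, `atexit` handlers, and
/// stdio flushing are all skipped; the child cannot disturb state shared
/// with the parent on its way out. Exiting with code 42 is how a child
/// reports a found bug (see `reap_one`). A sketch of a hypothetical harness
/// check:
///
/// ```ignore
/// if invariant_violated {
///     exit_child(42); // parent counts the bug and saves the recipe
/// }
/// ```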
#[cfg(unix)]
pub fn exit_child(code: i32) -> ! {
    unsafe { libc::_exit(code) }
}

#[cfg(not(unix))]
/// Non-Unix fallback: a plain process exit.
pub fn exit_child(code: i32) -> ! {
    std::process::exit(code)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_compute_child_seed_deterministic() {
        let s1 = compute_child_seed(42, "test", 0);
        let s2 = compute_child_seed(42, "test", 0);
        assert_eq!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_index() {
        let s0 = compute_child_seed(42, "test", 0);
        let s1 = compute_child_seed(42, "test", 1);
        let s2 = compute_child_seed(42, "test", 2);
        assert_ne!(s0, s1);
        assert_ne!(s1, s2);
        assert_ne!(s0, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_name() {
        let s1 = compute_child_seed(42, "alpha", 0);
        let s2 = compute_child_seed(42, "beta", 0);
        assert_ne!(s1, s2);
    }

    #[test]
    fn test_compute_child_seed_varies_by_parent() {
        let s1 = compute_child_seed(1, "test", 0);
        let s2 = compute_child_seed(2, "test", 0);
        assert_ne!(s1, s2);
    }
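
    // Two added checks, grounded only in the code above: the first pins
    // compute_child_seed to its FNV-1a mixing order, the second pins the
    // `n.max(1)` clamp in resolve_parallelism.
    #[test]
    fn test_compute_child_seed_matches_fnv1a_derivation() {
        // Re-derive the seed with the same constants and mixing order.
        let mut hash: u64 = 0xcbf29ce484222325;
        for &byte in b"mark" {
            hash ^= byte as u64;
            hash = hash.wrapping_mul(0x100000001b3);
        }
        hash ^= 7;
        hash = hash.wrapping_mul(0x100000001b3);
        hash ^= 3;
        hash = hash.wrapping_mul(0x100000001b3);
        assert_eq!(compute_child_seed(7, "mark", 3), hash);
    }

    #[cfg(unix)]
    #[test]
    fn test_resolve_parallelism_never_returns_zero() {
        // Degenerate policies must still resolve to at least one worker.
        assert_eq!(resolve_parallelism(&Parallelism::Cores(0)), 1);
        assert_eq!(
            resolve_parallelism(&Parallelism::MaxCoresMinus(usize::MAX)),
            1
        );
    }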
}