use std::sync::atomic::{AtomicI64, AtomicU8, AtomicU32, AtomicU64, Ordering};
14
/// Maximum number of distinct assertion slots the shared table can hold.
pub const MAX_ASSERTION_SLOTS: usize = 128;

/// Byte capacity of the per-slot message buffer (stored NUL-terminated,
/// so at most `SLOT_MSG_LEN - 1` message bytes are kept).
const SLOT_MSG_LEN: usize = 64;

/// Total byte size of the shared assertion table: an 8-byte header (a `u32`
/// atomic slot counter in the first 4 bytes, then padding) followed by the
/// slot array. See `find_or_alloc_slot` for the layout.
pub const ASSERTION_TABLE_MEM_SIZE: usize =
    8 + MAX_ASSERTION_SLOTS * std::mem::size_of::<AssertionSlot>();
26
/// Kind discriminant for an assertion slot, stored as a raw `u8` in shared
/// memory (`AssertionSlot::kind`). The numeric values cross a process
/// boundary — keep them stable and in sync with `from_u8`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertKind {
    /// Condition must hold on every evaluation (first failure is reported).
    Always = 0,
    /// Handled like `Always` by `assertion_bool`; the name suggests it is
    /// also considered satisfied if never reached — confirm with the reporter.
    AlwaysOrUnreachable = 1,
    /// Condition should hold at least once; first pass dispatches a split.
    Sometimes = 2,
    /// Call site should be reached at least once; first hit dispatches a split.
    Reachable = 3,
    /// Call site must never be reached (first hit is reported).
    Unreachable = 4,
    /// Numeric comparison that must hold on every evaluation.
    NumericAlways = 5,
    /// Numeric comparison tracked via watermarks; improvements dispatch splits.
    NumericSometimes = 6,
    /// Set of named booleans; tracks the best simultaneous true-count
    /// in `AssertionSlot::frontier`.
    BooleanSometimesAll = 7,
}
48
49impl AssertKind {
50 pub fn from_u8(v: u8) -> Option<Self> {
52 match v {
53 0 => Some(Self::Always),
54 1 => Some(Self::AlwaysOrUnreachable),
55 2 => Some(Self::Sometimes),
56 3 => Some(Self::Reachable),
57 4 => Some(Self::Unreachable),
58 5 => Some(Self::NumericAlways),
59 6 => Some(Self::NumericSometimes),
60 7 => Some(Self::BooleanSometimesAll),
61 _ => None,
62 }
63 }
64}
65
/// Comparison operator for numeric assertions (`assertion_numeric`),
/// passed as a stable `u8` discriminant.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertCmp {
    /// `left > right`
    Gt = 0,
    /// `left >= right`
    Ge = 1,
    /// `left < right`
    Lt = 2,
    /// `left <= right`
    Le = 3,
}
79
/// One entry in the shared-memory assertion table.
///
/// `#[repr(C)]` because the table lives in shared memory: field order and
/// padding are part of the ABI (total size pinned to 112 bytes by a test).
/// Mutable fields are updated through atomic views cast from raw pointers,
/// never through `&mut` references.
#[repr(C)]
pub struct AssertionSlot {
    /// FNV-1a hash of the message; 0 marks an empty (or abandoned) slot.
    pub msg_hash: u32,
    /// Raw `AssertKind` discriminant.
    pub kind: u8,
    /// must-hit flag as 0/1; presumably "expected to be evaluated at least
    /// once" — confirm against the consumer of snapshots.
    pub must_hit: u8,
    /// Watermark direction for numeric assertions: 1 = maximize, 0 = minimize.
    pub maximize: u8,
    /// Set to 1 once a split has been dispatched for this slot.
    pub split_triggered: u8,
    /// Number of passing evaluations.
    pub pass_count: u64,
    /// Number of failing evaluations.
    pub fail_count: u64,
    /// Extremal `left` value observed (per `maximize`).
    pub watermark: i64,
    /// Extremal value that last triggered a split (`NumericSometimes`).
    pub split_watermark: i64,
    /// Best simultaneous true-count seen (`BooleanSometimesAll`).
    pub frontier: u8,
    /// Explicit padding so `msg` starts 8-byte aligned and the size is stable.
    pub _pad: [u8; 7],
    /// NUL-terminated, possibly truncated copy of the assertion message.
    pub msg: [u8; SLOT_MSG_LEN],
}
110
111impl AssertionSlot {
112 pub fn msg_str(&self) -> &str {
114 let len = self
115 .msg
116 .iter()
117 .position(|&b| b == 0)
118 .unwrap_or(SLOT_MSG_LEN);
119 std::str::from_utf8(&self.msg[..len]).unwrap_or("???")
120 }
121}
122
/// 32-bit FNV-1a hash of `msg`, used as the slot identity key.
///
/// Note: a result of 0 is possible in principle, and 0 doubles as the
/// "empty slot" sentinel in the table.
pub fn msg_hash(msg: &str) -> u32 {
    msg.bytes().fold(0x811c9dc5_u32, |acc, byte| {
        (acc ^ u32::from(byte)).wrapping_mul(0x0100_0193)
    })
}
132
/// Finds the slot whose `msg_hash` equals `hash`, allocating and
/// initializing a fresh slot when none exists yet.
///
/// Returns the slot pointer and its index, or `(null, 0)` when the table is
/// full. Table layout: an 8-byte header whose first 4 bytes are an atomic
/// slot counter, followed by the `AssertionSlot` array.
///
/// # Safety
/// `table_ptr` must point to a live, suitably aligned assertion table of at
/// least `ASSERTION_TABLE_MEM_SIZE` bytes that all concurrent writers share.
///
/// NOTE(review): the hash is published (Release) *before* the remaining
/// fields are written, so a concurrent caller matching on the hash may see a
/// partially initialized slot — confirm readers tolerate zeroed fields.
/// Also: when a racing duplicate is detected, the reserved index is zeroed
/// but never returned to the counter (one slot is permanently leaked), and a
/// message whose FNV hash happens to be 0 is indistinguishable from an
/// empty slot.
unsafe fn find_or_alloc_slot(
    table_ptr: *mut u8,
    hash: u32,
    kind: AssertKind,
    must_hit: u8,
    maximize: u8,
    msg: &str,
) -> (*mut AssertionSlot, usize) {
    unsafe {
        // First 4 bytes of the header are the count of allocated slots.
        let next_atomic = &*(table_ptr as *const AtomicU32);
        let count = next_atomic.load(Ordering::Acquire) as usize;
        let base = table_ptr.add(8) as *mut AssertionSlot;

        // Fast path: scan already-allocated slots for a matching hash.
        for i in 0..count.min(MAX_ASSERTION_SLOTS) {
            let slot = base.add(i);
            let h = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
            if h.load(Ordering::Acquire) == hash {
                return (slot, i);
            }
        }

        // Not found: reserve the next index; undo if the table is full.
        let new_idx = next_atomic.fetch_add(1, Ordering::AcqRel) as usize;
        if new_idx >= MAX_ASSERTION_SLOTS {
            next_atomic.fetch_sub(1, Ordering::AcqRel);
            return (std::ptr::null_mut(), 0);
        }

        // Publish our hash immediately so racing allocators of the same
        // message can detect the claim (see the ordering caveat above).
        let slot = base.add(new_idx);
        let hash_atomic = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
        hash_atomic.store(hash, Ordering::Release);

        // Re-scan the slots below ours: another thread may have allocated
        // the same hash between our first scan and our reservation. If so,
        // abandon our slot (clear its hash, zero its memory) and use theirs.
        for i in 0..new_idx {
            let existing = base.add(i);
            let existing_hash = &*(std::ptr::addr_of!((*existing).msg_hash) as *const AtomicU32);
            if existing_hash.load(Ordering::Acquire) == hash {
                hash_atomic.store(0, Ordering::Release);
                std::ptr::write_bytes(slot as *mut u8, 0, std::mem::size_of::<AssertionSlot>());
                return (existing, i);
            }
        }

        // Initialize the freshly claimed slot. The message is truncated to
        // SLOT_MSG_LEN - 1 bytes so the buffer stays NUL-terminated.
        let mut msg_buf = [0u8; SLOT_MSG_LEN];
        let n = msg.len().min(SLOT_MSG_LEN - 1);
        msg_buf[..n].copy_from_slice(&msg.as_bytes()[..n]);

        (*slot).kind = kind as u8;
        (*slot).must_hit = must_hit;
        (*slot).maximize = maximize;
        (*slot).split_triggered = 0;
        (*slot).pass_count = 0;
        (*slot).fail_count = 0;
        // Watermarks start at the worst value for the chosen direction so
        // the first observation always counts as an improvement.
        (*slot).watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).split_watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).frontier = 0;
        (*slot)._pad = [0; 7];
        (*slot).msg = msg_buf;

        (slot, new_idx)
    }
}
210
211fn assertion_split(slot_idx: usize, hash: u32) {
216 let bm_ptr = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
218 if !bm_ptr.is_null() {
219 let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
222 bm.set_bit(hash as usize);
223 }
224
225 let vm_ptr = crate::context::EXPLORED_MAP_PTR.with(|c| c.get());
227 if !vm_ptr.is_null() {
228 let vm = unsafe { crate::coverage::ExploredMap::new(vm_ptr) };
231 let bm_ptr2 = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
232 if !bm_ptr2.is_null() {
233 let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr2) };
235 vm.merge_from(&bm);
236 }
237 }
238
239 if crate::context::explorer_is_active() {
241 crate::split_loop::dispatch_split("", slot_idx % MAX_ASSERTION_SLOTS);
242 }
243}
244
245pub fn assertion_bool(kind: AssertKind, must_hit: bool, condition: bool, msg: &str) {
253 let table_ptr = crate::context::assertion_table_ptr();
254 if table_ptr.is_null() {
255 return;
256 }
257
258 let hash = msg_hash(msg);
259 let must_hit_u8 = if must_hit { 1 } else { 0 };
260
261 let (slot, slot_idx) =
263 unsafe { find_or_alloc_slot(table_ptr, hash, kind, must_hit_u8, 0, msg) };
264 if slot.is_null() {
265 return;
266 }
267
268 unsafe {
270 match kind {
271 AssertKind::Always | AssertKind::AlwaysOrUnreachable | AssertKind::NumericAlways => {
272 if condition {
273 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
274 pc.fetch_add(1, Ordering::Relaxed);
275 } else {
276 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
277 let prev = fc.fetch_add(1, Ordering::Relaxed);
278 if prev == 0 {
279 eprintln!("[ASSERTION FAILED] {} (kind={:?})", msg, kind);
280 }
281 }
282 }
283 AssertKind::Sometimes | AssertKind::Reachable => {
284 if condition {
285 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
286 pc.fetch_add(1, Ordering::Relaxed);
287
288 let ft = &*((&(*slot).split_triggered) as *const u8 as *const AtomicU8);
290 if ft
291 .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
292 .is_ok()
293 {
294 assertion_split(slot_idx, hash);
295 }
296 } else {
297 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
298 fc.fetch_add(1, Ordering::Relaxed);
299 }
300 }
301 AssertKind::Unreachable => {
302 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
305 let prev = pc.fetch_add(1, Ordering::Relaxed);
306 if prev == 0 {
307 eprintln!("[UNREACHABLE REACHED] {}", msg);
308 }
309 }
310 _ => {}
311 }
312 }
313}
314
315pub fn assertion_numeric(
326 kind: AssertKind,
327 cmp: AssertCmp,
328 maximize: bool,
329 left: i64,
330 right: i64,
331 msg: &str,
332) {
333 let table_ptr = crate::context::assertion_table_ptr();
334 if table_ptr.is_null() {
335 return;
336 }
337
338 let hash = msg_hash(msg);
339 let maximize_u8 = if maximize { 1 } else { 0 };
340
341 let (slot, slot_idx) =
343 unsafe { find_or_alloc_slot(table_ptr, hash, kind, 1, maximize_u8, msg) };
344 if slot.is_null() {
345 return;
346 }
347
348 let passes = match cmp {
350 AssertCmp::Gt => left > right,
351 AssertCmp::Ge => left >= right,
352 AssertCmp::Lt => left < right,
353 AssertCmp::Le => left <= right,
354 };
355
356 unsafe {
358 if passes {
359 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
360 pc.fetch_add(1, Ordering::Relaxed);
361 } else {
362 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
363 let prev = fc.fetch_add(1, Ordering::Relaxed);
364 if kind == AssertKind::NumericAlways && prev == 0 {
365 eprintln!(
366 "[NUMERIC ASSERTION FAILED] {} (left={}, right={}, cmp={:?})",
367 msg, left, right, cmp
368 );
369 }
370 }
371
372 let wm = &*((&(*slot).watermark) as *const i64 as *const AtomicI64);
374 let mut current = wm.load(Ordering::Relaxed);
375 loop {
376 let is_better = if maximize {
377 left > current
378 } else {
379 left < current
380 };
381 if !is_better {
382 break;
383 }
384 match wm.compare_exchange_weak(current, left, Ordering::Relaxed, Ordering::Relaxed) {
385 Ok(_) => break,
386 Err(actual) => current = actual,
387 }
388 }
389
390 if kind == AssertKind::NumericSometimes {
392 let fw = &*((&(*slot).split_watermark) as *const i64 as *const AtomicI64);
393 let mut fork_current = fw.load(Ordering::Relaxed);
394 loop {
395 let is_better = if maximize {
396 left > fork_current
397 } else {
398 left < fork_current
399 };
400 if !is_better {
401 break;
402 }
403 match fw.compare_exchange_weak(
404 fork_current,
405 left,
406 Ordering::Relaxed,
407 Ordering::Relaxed,
408 ) {
409 Ok(_) => {
410 assertion_split(slot_idx, hash);
411 break;
412 }
413 Err(actual) => fork_current = actual,
414 }
415 }
416 }
417 }
418}
419
420pub fn assertion_sometimes_all(msg: &str, named_bools: &[(&str, bool)]) {
427 let table_ptr = crate::context::assertion_table_ptr();
428 if table_ptr.is_null() {
429 return;
430 }
431
432 let hash = msg_hash(msg);
433
434 let (slot, slot_idx) =
436 unsafe { find_or_alloc_slot(table_ptr, hash, AssertKind::BooleanSometimesAll, 1, 0, msg) };
437 if slot.is_null() {
438 return;
439 }
440
441 let true_count = named_bools.iter().filter(|(_, v)| *v).count() as u8;
443
444 unsafe {
446 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
448 pc.fetch_add(1, Ordering::Relaxed);
449
450 let fr = &*((&(*slot).frontier) as *const u8 as *const AtomicU8);
452 let mut current = fr.load(Ordering::Relaxed);
453 loop {
454 if true_count <= current {
455 break;
456 }
457 match fr.compare_exchange_weak(
458 current,
459 true_count,
460 Ordering::Relaxed,
461 Ordering::Relaxed,
462 ) {
463 Ok(_) => {
464 assertion_split(slot_idx, hash);
465 break;
466 }
467 Err(actual) => current = actual,
468 }
469 }
470 }
471}
472
473pub fn assertion_read_all() -> Vec<AssertionSlotSnapshot> {
477 let table_ptr = crate::context::assertion_table_ptr();
478 if table_ptr.is_null() {
479 return Vec::new();
480 }
481
482 unsafe {
489 let count = (*(table_ptr as *const u32)) as usize;
490 let count = count.min(MAX_ASSERTION_SLOTS);
491 let base = table_ptr.add(8) as *const AssertionSlot;
492
493 (0..count)
494 .filter_map(|i| {
495 let slot = &*base.add(i);
496 if slot.msg_hash == 0 {
498 return None;
499 }
500 Some(AssertionSlotSnapshot {
501 msg: slot.msg_str().to_string(),
502 kind: slot.kind,
503 must_hit: slot.must_hit,
504 pass_count: slot.pass_count,
505 fail_count: slot.fail_count,
506 watermark: slot.watermark,
507 frontier: slot.frontier,
508 })
509 })
510 .collect()
511 }
512}
513
/// Owned, plain-data copy of one `AssertionSlot`, as produced by
/// `assertion_read_all`. Safe to hold after the shared table changes.
#[derive(Debug, Clone)]
pub struct AssertionSlotSnapshot {
    /// Assertion message decoded from the slot's fixed buffer.
    pub msg: String,
    /// Raw `AssertKind` discriminant (decode with `AssertKind::from_u8`).
    pub kind: u8,
    /// Raw must-hit flag copied from the slot.
    pub must_hit: u8,
    /// Passing evaluation count at snapshot time.
    pub pass_count: u64,
    /// Failing evaluation count at snapshot time.
    pub fail_count: u64,
    /// Extremal observed numeric value at snapshot time.
    pub watermark: i64,
    /// Best simultaneous true-count at snapshot time.
    pub frontier: u8,
}
532
#[cfg(test)]
mod tests {
    use super::*;

    /// The hash is a pure function of the message.
    #[test]
    fn test_msg_hash_deterministic() {
        assert_eq!(msg_hash("test_assertion"), msg_hash("test_assertion"));
    }

    /// A handful of realistic names must not collide pairwise.
    #[test]
    fn test_msg_hash_no_collision() {
        let names = ["a", "b", "c", "timeout", "connect", "retry"];
        for (i, a) in names.iter().enumerate() {
            for b in &names[i + 1..] {
                assert_ne!(msg_hash(a), msg_hash(b), "{} and {} collide", a, b);
            }
        }
    }

    /// The slot layout is shared-memory ABI; its size must never drift.
    #[test]
    fn test_slot_size_stable() {
        assert_eq!(std::mem::size_of::<AssertionSlot>(), 112);
    }

    /// With no table installed, recording must be a silent no-op.
    #[test]
    fn test_assertion_bool_noop_when_inactive() {
        assertion_bool(AssertKind::Sometimes, true, true, "test");
        assertion_bool(AssertKind::Always, true, false, "test2");
    }

    /// Same no-op guarantee for the numeric entry point.
    #[test]
    fn test_assertion_numeric_noop_when_inactive() {
        assertion_numeric(AssertKind::NumericAlways, AssertCmp::Gt, false, 10, 5, "test");
    }

    /// With no table installed, reading yields nothing.
    #[test]
    fn test_assertion_read_all_when_inactive() {
        assert!(assertion_read_all().is_empty());
    }

    /// Round-trip of the raw kind discriminants, plus out-of-range rejection.
    #[test]
    fn test_assert_kind_from_u8() {
        assert_eq!(AssertKind::from_u8(0), Some(AssertKind::Always));
        assert_eq!(AssertKind::from_u8(7), Some(AssertKind::BooleanSometimesAll));
        assert_eq!(AssertKind::from_u8(8), None);
    }
}