use std::sync::atomic::{AtomicI64, AtomicU32, AtomicU64, AtomicU8, Ordering};
14
/// Maximum number of assertion slots the shared table can hold.
pub const MAX_ASSERTION_SLOTS: usize = 128;

/// Fixed capacity of each slot's inline, NUL-padded message buffer.
const SLOT_MSG_LEN: usize = 64;

/// Total bytes of the shared assertion table: an 8-byte header (whose first
/// 4 bytes hold the atomic slot counter) followed by the slot array.
pub const ASSERTION_TABLE_MEM_SIZE: usize =
    8 + MAX_ASSERTION_SLOTS * std::mem::size_of::<AssertionSlot>();
26
/// Kind of assertion recorded in a slot.
///
/// The numeric discriminants are part of the shared-memory ABI (stored raw
/// in `AssertionSlot::kind`), so they must stay stable.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertKind {
    /// Condition must hold on every evaluation; first failure is logged.
    Always = 0,
    /// Handled like `Always` by `assertion_bool`; presumably also satisfied
    /// if the site is never reached — name-derived, confirm with callers.
    AlwaysOrUnreachable = 1,
    /// Condition must hold on at least one evaluation; first pass splits.
    Sometimes = 2,
    /// Site must be reached at least once; treated like `Sometimes` here.
    Reachable = 3,
    /// Site must never be reached; first hit is logged.
    Unreachable = 4,
    /// Numeric comparison that must hold every time; first failure logged.
    NumericAlways = 5,
    /// Numeric comparison tracked by watermark; improvements dispatch splits.
    NumericSometimes = 6,
    /// "Sometimes all": tracks the max number of simultaneously-true
    /// conditions via the slot's `frontier` field.
    BooleanSometimesAll = 7,
}
48
49impl AssertKind {
50 pub fn from_u8(v: u8) -> Option<Self> {
52 match v {
53 0 => Some(Self::Always),
54 1 => Some(Self::AlwaysOrUnreachable),
55 2 => Some(Self::Sometimes),
56 3 => Some(Self::Reachable),
57 4 => Some(Self::Unreachable),
58 5 => Some(Self::NumericAlways),
59 6 => Some(Self::NumericSometimes),
60 7 => Some(Self::BooleanSometimesAll),
61 _ => None,
62 }
63 }
64}
65
/// Comparison operator for numeric assertions, applied as `left <op> right`.
/// `#[repr(u8)]` keeps the discriminants ABI-stable.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertCmp {
    /// `left > right`
    Gt = 0,
    /// `left >= right`
    Ge = 1,
    /// `left < right`
    Lt = 2,
    /// `left <= right`
    Le = 3,
}
79
/// One shared-memory record for a single assertion site.
///
/// `#[repr(C)]` pins the layout because the table is shared across threads
/// (and presumably processes — confirm with the allocator of the region).
/// Writers access the counter/watermark fields through atomic views cast
/// over the raw field pointers, so plain field reads are best-effort
/// snapshots only.
#[repr(C)]
pub struct AssertionSlot {
    /// FNV-1a hash of the message; 0 marks an unused (or rolled-back) slot.
    pub msg_hash: u32,
    /// Raw `AssertKind` discriminant.
    pub kind: u8,
    /// 1 if the assertion is expected to be hit at least once.
    pub must_hit: u8,
    /// 1 to maximize the numeric watermark, 0 to minimize.
    pub maximize: u8,
    /// One-shot flag: set once the first split for this slot has fired.
    pub split_triggered: u8,
    /// Number of passing evaluations.
    pub pass_count: u64,
    /// Number of failing evaluations.
    pub fail_count: u64,
    /// Best numeric value seen (max if `maximize` is 1, else min).
    pub watermark: i64,
    /// Separate watermark used to decide when to dispatch a new split.
    pub split_watermark: i64,
    /// Max simultaneously-true count for `BooleanSometimesAll`.
    pub frontier: u8,
    /// Explicit padding to an 8-byte boundary ahead of `msg`.
    pub _pad: [u8; 7],
    /// NUL-padded copy of the assertion message (possibly truncated).
    pub msg: [u8; SLOT_MSG_LEN],
}
110
111impl AssertionSlot {
112 pub fn msg_str(&self) -> &str {
114 let len = self
115 .msg
116 .iter()
117 .position(|&b| b == 0)
118 .unwrap_or(SLOT_MSG_LEN);
119 std::str::from_utf8(&self.msg[..len]).unwrap_or("???")
120 }
121}
122
/// FNV-1a 32-bit hash of `msg`, used as the slot identity key.
///
/// Standard parameters: offset basis 0x811c9dc5, prime 0x01000193.
/// Note that elsewhere in the table a hash of 0 marks an empty slot.
pub fn msg_hash(msg: &str) -> u32 {
    msg.bytes().fold(0x811c9dc5_u32, |acc, byte| {
        (acc ^ u32::from(byte)).wrapping_mul(0x01000193)
    })
}
132
/// Finds the slot whose `msg_hash` equals `hash`, allocating and
/// initializing a fresh slot when no match exists.
///
/// Returns `(slot_ptr, slot_index)`, or `(null, 0)` when the table is full.
///
/// # Safety
/// `table_ptr` must point to a live, writable region of at least
/// `ASSERTION_TABLE_MEM_SIZE` bytes laid out as an 8-byte header (atomic
/// u32 slot counter in the first 4 bytes) followed by the slot array.
/// Assumes the region starts zeroed — TODO confirm with the allocator.
unsafe fn find_or_alloc_slot(
    table_ptr: *mut u8,
    hash: u32,
    kind: AssertKind,
    must_hit: u8,
    maximize: u8,
    msg: &str,
) -> (*mut AssertionSlot, usize) {
    unsafe {
        // Header: atomic count of slots handed out so far.
        let next_atomic = &*(table_ptr as *const AtomicU32);
        let count = next_atomic.load(Ordering::Acquire) as usize;
        let base = table_ptr.add(8) as *mut AssertionSlot;

        // Fast path: linear scan of already-published slots by hash.
        for i in 0..count.min(MAX_ASSERTION_SLOTS) {
            let slot = base.add(i);
            let h = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
            if h.load(Ordering::Acquire) == hash {
                return (slot, i);
            }
        }

        // Claim a fresh index; roll back and bail out when the table is full.
        let new_idx = next_atomic.fetch_add(1, Ordering::AcqRel) as usize;
        if new_idx >= MAX_ASSERTION_SLOTS {
            next_atomic.fetch_sub(1, Ordering::AcqRel);
            return (std::ptr::null_mut(), 0);
        }

        let slot = base.add(new_idx);
        // Publish our hash immediately so a concurrent allocator of the same
        // message can see us in its duplicate scan (mirroring ours below).
        // NOTE(review): this publishes the slot before its other fields are
        // initialized, so a racing reader that matches the hash can briefly
        // observe zeroed counters/watermarks — presumably tolerated as
        // best-effort telemetry; confirm.
        let hash_atomic = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
        hash_atomic.store(hash, Ordering::Release);

        // Duplicate scan: if another thread raced us and published the same
        // hash at a lower index, retract our slot and yield to theirs.
        // The counter is NOT decremented here, so the claimed index is
        // permanently leaked — bounded by the number of such races.
        for i in 0..new_idx {
            let existing = base.add(i);
            let existing_hash = &*(std::ptr::addr_of!((*existing).msg_hash) as *const AtomicU32);
            if existing_hash.load(Ordering::Acquire) == hash {
                hash_atomic.store(0, Ordering::Release);
                std::ptr::write_bytes(slot as *mut u8, 0, std::mem::size_of::<AssertionSlot>());
                return (existing, i);
            }
        }

        // Copy the message, truncated to keep at least one trailing NUL.
        // NOTE(review): byte-level truncation may split a multi-byte UTF-8
        // sequence, in which case msg_str() falls back to "???".
        let mut msg_buf = [0u8; SLOT_MSG_LEN];
        let n = msg.len().min(SLOT_MSG_LEN - 1);
        msg_buf[..n].copy_from_slice(&msg.as_bytes()[..n]);

        // Initialize the remaining fields with plain writes (see note above).
        (*slot).kind = kind as u8;
        (*slot).must_hit = must_hit;
        (*slot).maximize = maximize;
        (*slot).split_triggered = 0;
        (*slot).pass_count = 0;
        (*slot).fail_count = 0;
        // Watermarks start at the worst possible value for their direction.
        (*slot).watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).split_watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).frontier = 0;
        (*slot)._pad = [0; 7];
        (*slot).msg = msg_buf;

        (slot, new_idx)
    }
}
210
211fn assertion_split(slot_idx: usize, hash: u32) {
216 let bm_ptr = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
218 if !bm_ptr.is_null() {
219 let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
220 bm.set_bit(hash as usize);
221 }
222
223 let vm_ptr = crate::context::EXPLORED_MAP_PTR.with(|c| c.get());
225 if !vm_ptr.is_null() {
226 let vm = unsafe { crate::coverage::ExploredMap::new(vm_ptr) };
227 let bm_ptr2 = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
228 if !bm_ptr2.is_null() {
229 let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr2) };
230 vm.merge_from(&bm);
231 }
232 }
233
234 if crate::context::explorer_is_active() {
236 crate::split_loop::dispatch_split("", slot_idx % MAX_ASSERTION_SLOTS);
237 }
238}
239
240pub fn assertion_bool(kind: AssertKind, must_hit: bool, condition: bool, msg: &str) {
248 let table_ptr = crate::context::get_assertion_table_ptr();
249 if table_ptr.is_null() {
250 return;
251 }
252
253 let hash = msg_hash(msg);
254 let must_hit_u8 = if must_hit { 1 } else { 0 };
255
256 let (slot, slot_idx) =
258 unsafe { find_or_alloc_slot(table_ptr, hash, kind, must_hit_u8, 0, msg) };
259 if slot.is_null() {
260 return;
261 }
262
263 unsafe {
265 match kind {
266 AssertKind::Always | AssertKind::AlwaysOrUnreachable | AssertKind::NumericAlways => {
267 if condition {
268 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
269 pc.fetch_add(1, Ordering::Relaxed);
270 } else {
271 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
272 let prev = fc.fetch_add(1, Ordering::Relaxed);
273 if prev == 0 {
274 eprintln!("[ASSERTION FAILED] {} (kind={:?})", msg, kind);
275 }
276 }
277 }
278 AssertKind::Sometimes | AssertKind::Reachable => {
279 if condition {
280 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
281 pc.fetch_add(1, Ordering::Relaxed);
282
283 let ft = &*((&(*slot).split_triggered) as *const u8 as *const AtomicU8);
285 if ft
286 .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
287 .is_ok()
288 {
289 assertion_split(slot_idx, hash);
290 }
291 } else {
292 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
293 fc.fetch_add(1, Ordering::Relaxed);
294 }
295 }
296 AssertKind::Unreachable => {
297 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
300 let prev = pc.fetch_add(1, Ordering::Relaxed);
301 if prev == 0 {
302 eprintln!("[UNREACHABLE REACHED] {}", msg);
303 }
304 }
305 _ => {}
306 }
307 }
308}
309
310pub fn assertion_numeric(
321 kind: AssertKind,
322 cmp: AssertCmp,
323 maximize: bool,
324 left: i64,
325 right: i64,
326 msg: &str,
327) {
328 let table_ptr = crate::context::get_assertion_table_ptr();
329 if table_ptr.is_null() {
330 return;
331 }
332
333 let hash = msg_hash(msg);
334 let maximize_u8 = if maximize { 1 } else { 0 };
335
336 let (slot, slot_idx) =
338 unsafe { find_or_alloc_slot(table_ptr, hash, kind, 1, maximize_u8, msg) };
339 if slot.is_null() {
340 return;
341 }
342
343 let passes = match cmp {
345 AssertCmp::Gt => left > right,
346 AssertCmp::Ge => left >= right,
347 AssertCmp::Lt => left < right,
348 AssertCmp::Le => left <= right,
349 };
350
351 unsafe {
353 if passes {
354 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
355 pc.fetch_add(1, Ordering::Relaxed);
356 } else {
357 let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
358 let prev = fc.fetch_add(1, Ordering::Relaxed);
359 if kind == AssertKind::NumericAlways && prev == 0 {
360 eprintln!(
361 "[NUMERIC ASSERTION FAILED] {} (left={}, right={}, cmp={:?})",
362 msg, left, right, cmp
363 );
364 }
365 }
366
367 let wm = &*((&(*slot).watermark) as *const i64 as *const AtomicI64);
369 let mut current = wm.load(Ordering::Relaxed);
370 loop {
371 let is_better = if maximize {
372 left > current
373 } else {
374 left < current
375 };
376 if !is_better {
377 break;
378 }
379 match wm.compare_exchange_weak(current, left, Ordering::Relaxed, Ordering::Relaxed) {
380 Ok(_) => break,
381 Err(actual) => current = actual,
382 }
383 }
384
385 if kind == AssertKind::NumericSometimes {
387 let fw = &*((&(*slot).split_watermark) as *const i64 as *const AtomicI64);
388 let mut fork_current = fw.load(Ordering::Relaxed);
389 loop {
390 let is_better = if maximize {
391 left > fork_current
392 } else {
393 left < fork_current
394 };
395 if !is_better {
396 break;
397 }
398 match fw.compare_exchange_weak(
399 fork_current,
400 left,
401 Ordering::Relaxed,
402 Ordering::Relaxed,
403 ) {
404 Ok(_) => {
405 assertion_split(slot_idx, hash);
406 break;
407 }
408 Err(actual) => fork_current = actual,
409 }
410 }
411 }
412 }
413}
414
415pub fn assertion_sometimes_all(msg: &str, named_bools: &[(&str, bool)]) {
422 let table_ptr = crate::context::get_assertion_table_ptr();
423 if table_ptr.is_null() {
424 return;
425 }
426
427 let hash = msg_hash(msg);
428
429 let (slot, slot_idx) =
431 unsafe { find_or_alloc_slot(table_ptr, hash, AssertKind::BooleanSometimesAll, 1, 0, msg) };
432 if slot.is_null() {
433 return;
434 }
435
436 let true_count = named_bools.iter().filter(|(_, v)| *v).count() as u8;
438
439 unsafe {
441 let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
443 pc.fetch_add(1, Ordering::Relaxed);
444
445 let fr = &*((&(*slot).frontier) as *const u8 as *const AtomicU8);
447 let mut current = fr.load(Ordering::Relaxed);
448 loop {
449 if true_count <= current {
450 break;
451 }
452 match fr.compare_exchange_weak(
453 current,
454 true_count,
455 Ordering::Relaxed,
456 Ordering::Relaxed,
457 ) {
458 Ok(_) => {
459 assertion_split(slot_idx, hash);
460 break;
461 }
462 Err(actual) => current = actual,
463 }
464 }
465 }
466}
467
468pub fn assertion_read_all() -> Vec<AssertionSlotSnapshot> {
472 let table_ptr = crate::context::get_assertion_table_ptr();
473 if table_ptr.is_null() {
474 return Vec::new();
475 }
476
477 unsafe {
478 let count = (*(table_ptr as *const u32)) as usize;
479 let count = count.min(MAX_ASSERTION_SLOTS);
480 let base = table_ptr.add(8) as *const AssertionSlot;
481
482 (0..count)
483 .filter_map(|i| {
484 let slot = &*base.add(i);
485 if slot.msg_hash == 0 {
487 return None;
488 }
489 Some(AssertionSlotSnapshot {
490 msg: slot.msg_str().to_string(),
491 kind: slot.kind,
492 must_hit: slot.must_hit,
493 pass_count: slot.pass_count,
494 fail_count: slot.fail_count,
495 watermark: slot.watermark,
496 frontier: slot.frontier,
497 })
498 })
499 .collect()
500 }
501}
502
/// Owned, plain-data copy of one `AssertionSlot`, produced by
/// `assertion_read_all` for reporting outside the shared table.
#[derive(Debug, Clone)]
pub struct AssertionSlotSnapshot {
    /// Assertion message (the possibly-truncated copy stored in the slot).
    pub msg: String,
    /// Raw `AssertKind` discriminant.
    pub kind: u8,
    /// 1 if the assertion was registered as must-hit.
    pub must_hit: u8,
    /// Passing evaluations at snapshot time.
    pub pass_count: u64,
    /// Failing evaluations at snapshot time.
    pub fail_count: u64,
    /// Best numeric value observed (direction depends on the slot's
    /// `maximize` flag, which is not carried in the snapshot).
    pub watermark: i64,
    /// Max simultaneously-true count for `BooleanSometimesAll` slots.
    pub frontier: u8,
}
521
#[cfg(test)]
mod tests {
    use super::*;

    /// The FNV-1a hash must be a pure function of its input.
    #[test]
    fn test_msg_hash_deterministic() {
        assert_eq!(msg_hash("test_assertion"), msg_hash("test_assertion"));
    }

    /// A small set of realistic assertion names must hash pairwise-distinct.
    #[test]
    fn test_msg_hash_no_collision() {
        let names = ["a", "b", "c", "timeout", "connect", "retry"];
        let hashes: Vec<u32> = names.iter().map(|n| msg_hash(n)).collect();
        for (i, &left) in hashes.iter().enumerate() {
            for (j, &right) in hashes.iter().enumerate().skip(i + 1) {
                assert_ne!(left, right, "{} and {} collide", names[i], names[j]);
            }
        }
    }

    /// The slot layout is shared-memory ABI; its size must never drift.
    #[test]
    fn test_slot_size_stable() {
        assert_eq!(std::mem::size_of::<AssertionSlot>(), 112);
    }

    /// Without an attached table, boolean assertions are silent no-ops.
    #[test]
    fn test_assertion_bool_noop_when_inactive() {
        assertion_bool(AssertKind::Sometimes, true, true, "test");
        assertion_bool(AssertKind::Always, true, false, "test2");
    }

    /// Without an attached table, numeric assertions are silent no-ops.
    #[test]
    fn test_assertion_numeric_noop_when_inactive() {
        assertion_numeric(AssertKind::NumericAlways, AssertCmp::Gt, false, 10, 5, "test");
    }

    /// Reading the table with no table attached yields nothing.
    #[test]
    fn test_assertion_read_all_when_inactive() {
        assert!(assertion_read_all().is_empty());
    }

    /// Discriminant round-trip: valid values map back, invalid to None.
    #[test]
    fn test_assert_kind_from_u8() {
        assert_eq!(AssertKind::from_u8(0), Some(AssertKind::Always));
        assert_eq!(AssertKind::from_u8(7), Some(AssertKind::BooleanSometimesAll));
        assert_eq!(AssertKind::from_u8(8), None);
    }
}