use std::sync::atomic::{AtomicI64, AtomicU32, AtomicU64, AtomicU8, Ordering};
/// Maximum number of distinct assertion slots the shared table can hold.
pub const MAX_ASSERTION_SLOTS: usize = 128;
/// Capacity of each slot's inline, NUL-terminated message buffer.
const SLOT_MSG_LEN: usize = 64;
/// Total bytes required for the table: an 8-byte header (a `u32` next-slot
/// counter plus padding — see `find_or_alloc_slot`) followed by the slot array.
pub const ASSERTION_TABLE_MEM_SIZE: usize =
8 + MAX_ASSERTION_SLOTS * std::mem::size_of::<AssertionSlot>();
/// Family tag for an assertion, stored in a slot's raw `kind` byte.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertKind {
    Always = 0,
    AlwaysOrUnreachable = 1,
    Sometimes = 2,
    Reachable = 3,
    Unreachable = 4,
    NumericAlways = 5,
    NumericSometimes = 6,
    BooleanSometimesAll = 7,
}

impl AssertKind {
    /// Decodes a raw discriminant byte; yields `None` outside `0..=7`.
    pub fn from_u8(v: u8) -> Option<Self> {
        Some(match v {
            0 => Self::Always,
            1 => Self::AlwaysOrUnreachable,
            2 => Self::Sometimes,
            3 => Self::Reachable,
            4 => Self::Unreachable,
            5 => Self::NumericAlways,
            6 => Self::NumericSometimes,
            7 => Self::BooleanSometimesAll,
            _ => return None,
        })
    }
}
/// Comparison operator applied by numeric assertions (`left <op> right`).
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertCmp {
Gt = 0,
Ge = 1,
Lt = 2,
Le = 3,
}
/// One entry in the shared assertion table.
///
/// `#[repr(C)]` because the table lives in raw memory addressed by pointer
/// arithmetic (`table_ptr.add(8)` etc.). Counter and watermark fields are
/// declared as plain integers but are mutated through atomic views by the
/// recording functions in this file.
#[repr(C)]
pub struct AssertionSlot {
/// FNV-1a hash of `msg`; doubles as the slot identity key (0 = unused).
pub msg_hash: u32,
/// Raw `AssertKind` discriminant.
pub kind: u8,
/// 1 when the assertion is expected to be hit at least once.
pub must_hit: u8,
/// 1 when watermarks track the maximum, 0 for the minimum.
pub maximize: u8,
/// Set to 1 once a split has been dispatched for this slot.
pub split_triggered: u8,
/// Number of passing evaluations.
pub pass_count: u64,
/// Number of failing evaluations.
pub fail_count: u64,
/// Best `left` value observed (max or min depending on `maximize`).
pub watermark: i64,
/// Best value that has triggered a split (NumericSometimes only).
pub split_watermark: i64,
/// Highest simultaneous true-count seen (BooleanSometimesAll only).
pub frontier: u8,
/// Explicit padding so `msg` starts on an 8-byte boundary.
pub _pad: [u8; 7],
/// NUL-terminated (possibly truncated) copy of the assertion message.
pub msg: [u8; SLOT_MSG_LEN],
}
impl AssertionSlot {
pub fn msg_str(&self) -> &str {
let len = self
.msg
.iter()
.position(|&b| b == 0)
.unwrap_or(SLOT_MSG_LEN);
std::str::from_utf8(&self.msg[..len]).unwrap_or("???")
}
}
/// 32-bit FNV-1a hash of `msg`; the stable identity key for a slot.
///
/// Uses the standard FNV offset basis (0x811c9dc5) and prime (0x01000193).
pub fn msg_hash(msg: &str) -> u32 {
    msg.bytes().fold(0x811c9dc5u32, |acc, byte| {
        (acc ^ u32::from(byte)).wrapping_mul(0x01000193)
    })
}
/// Finds the slot whose `msg_hash` equals `hash`, or allocates and
/// initializes a new one. Returns `(null, 0)` when the table is full.
///
/// The first 4 bytes of the table are an `AtomicU32` "next slot" counter;
/// slots start at byte offset 8.
///
/// # Safety
/// `table_ptr` must point to a live, 8-byte-aligned assertion table of at
/// least `ASSERTION_TABLE_MEM_SIZE` bytes, shared with other threads only
/// through the access patterns used in this file.
///
/// NOTE(review): this allocation is lock-free but has two known windows —
/// TODO confirm they are acceptable for this use case:
/// 1. the new slot's hash is published (`hash_atomic.store`) *before* the
///    remaining fields are written below, so a concurrent lookup matching
///    the hash can briefly observe a partially-initialized slot;
/// 2. when the post-allocation duplicate scan hits, the slot is zeroed but
///    the `next` counter is not decremented, permanently leaking one index.
unsafe fn find_or_alloc_slot(
table_ptr: *mut u8,
hash: u32,
kind: AssertKind,
must_hit: u8,
maximize: u8,
msg: &str,
) -> (*mut AssertionSlot, usize) {
unsafe {
// Header counter: number of slots handed out so far.
let next_atomic = &*(table_ptr as *const AtomicU32);
let count = next_atomic.load(Ordering::Acquire) as usize;
let base = table_ptr.add(8) as *mut AssertionSlot;
// Fast path: linear scan of already-published slots by hash.
for i in 0..count.min(MAX_ASSERTION_SLOTS) {
let slot = base.add(i);
let h = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
if h.load(Ordering::Acquire) == hash {
return (slot, i);
}
}
// Claim a fresh index; roll back the counter if the table is full.
let new_idx = next_atomic.fetch_add(1, Ordering::AcqRel) as usize;
if new_idx >= MAX_ASSERTION_SLOTS {
next_atomic.fetch_sub(1, Ordering::AcqRel);
return (std::ptr::null_mut(), 0);
}
let slot = base.add(new_idx);
let hash_atomic = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
hash_atomic.store(hash, Ordering::Release);
// Re-scan lower indices: another thread may have allocated the same
// hash between our fast-path scan and our claim. On a duplicate,
// abandon our slot (zero it) and use the earlier one.
for i in 0..new_idx {
let existing = base.add(i);
let existing_hash = &*(std::ptr::addr_of!((*existing).msg_hash) as *const AtomicU32);
if existing_hash.load(Ordering::Acquire) == hash {
hash_atomic.store(0, Ordering::Release);
std::ptr::write_bytes(slot as *mut u8, 0, std::mem::size_of::<AssertionSlot>());
return (existing, i);
}
}
// Copy the message, truncated to leave a trailing NUL byte.
let mut msg_buf = [0u8; SLOT_MSG_LEN];
let n = msg.len().min(SLOT_MSG_LEN - 1);
msg_buf[..n].copy_from_slice(&msg.as_bytes()[..n]);
(*slot).kind = kind as u8;
(*slot).must_hit = must_hit;
(*slot).maximize = maximize;
(*slot).split_triggered = 0;
(*slot).pass_count = 0;
(*slot).fail_count = 0;
// Watermarks start at the identity for max/min so any observation improves them.
(*slot).watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
(*slot).split_watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
(*slot).frontier = 0;
(*slot)._pad = [0; 7];
(*slot).msg = msg_buf;
(slot, new_idx)
}
}
/// Records a "split point" for slot `slot_idx`: sets the assertion's hash
/// bit in the coverage bitmap, folds current coverage into the explored
/// map, and — when an explorer is attached — dispatches a split request.
fn assertion_split(slot_idx: usize, hash: u32) {
    let bm_ptr = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
    if !bm_ptr.is_null() {
        let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
        bm.set_bit(hash as usize);
    }
    let vm_ptr = crate::context::EXPLORED_MAP_PTR.with(|c| c.get());
    // Reuse the bitmap pointer fetched above instead of re-reading the
    // thread-local a second time (the original fetched it twice).
    if !vm_ptr.is_null() && !bm_ptr.is_null() {
        let vm = unsafe { crate::coverage::ExploredMap::new(vm_ptr) };
        let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
        vm.merge_from(&bm);
    }
    if crate::context::explorer_is_active() {
        crate::split_loop::dispatch_split("", slot_idx % MAX_ASSERTION_SLOTS);
    }
}
pub fn assertion_bool(kind: AssertKind, must_hit: bool, condition: bool, msg: &str) {
let table_ptr = crate::context::assertion_table_ptr();
if table_ptr.is_null() {
return;
}
let hash = msg_hash(msg);
let must_hit_u8 = if must_hit { 1 } else { 0 };
let (slot, slot_idx) =
unsafe { find_or_alloc_slot(table_ptr, hash, kind, must_hit_u8, 0, msg) };
if slot.is_null() {
return;
}
unsafe {
match kind {
AssertKind::Always | AssertKind::AlwaysOrUnreachable | AssertKind::NumericAlways => {
if condition {
let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
pc.fetch_add(1, Ordering::Relaxed);
} else {
let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
let prev = fc.fetch_add(1, Ordering::Relaxed);
if prev == 0 {
eprintln!("[ASSERTION FAILED] {} (kind={:?})", msg, kind);
}
}
}
AssertKind::Sometimes | AssertKind::Reachable => {
if condition {
let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
pc.fetch_add(1, Ordering::Relaxed);
let ft = &*((&(*slot).split_triggered) as *const u8 as *const AtomicU8);
if ft
.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
.is_ok()
{
assertion_split(slot_idx, hash);
}
} else {
let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
fc.fetch_add(1, Ordering::Relaxed);
}
}
AssertKind::Unreachable => {
let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
let prev = pc.fetch_add(1, Ordering::Relaxed);
if prev == 0 {
eprintln!("[UNREACHABLE REACHED] {}", msg);
}
}
_ => {}
}
}
}
pub fn assertion_numeric(
kind: AssertKind,
cmp: AssertCmp,
maximize: bool,
left: i64,
right: i64,
msg: &str,
) {
let table_ptr = crate::context::assertion_table_ptr();
if table_ptr.is_null() {
return;
}
let hash = msg_hash(msg);
let maximize_u8 = if maximize { 1 } else { 0 };
let (slot, slot_idx) =
unsafe { find_or_alloc_slot(table_ptr, hash, kind, 1, maximize_u8, msg) };
if slot.is_null() {
return;
}
let passes = match cmp {
AssertCmp::Gt => left > right,
AssertCmp::Ge => left >= right,
AssertCmp::Lt => left < right,
AssertCmp::Le => left <= right,
};
unsafe {
if passes {
let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
pc.fetch_add(1, Ordering::Relaxed);
} else {
let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
let prev = fc.fetch_add(1, Ordering::Relaxed);
if kind == AssertKind::NumericAlways && prev == 0 {
eprintln!(
"[NUMERIC ASSERTION FAILED] {} (left={}, right={}, cmp={:?})",
msg, left, right, cmp
);
}
}
let wm = &*((&(*slot).watermark) as *const i64 as *const AtomicI64);
let mut current = wm.load(Ordering::Relaxed);
loop {
let is_better = if maximize {
left > current
} else {
left < current
};
if !is_better {
break;
}
match wm.compare_exchange_weak(current, left, Ordering::Relaxed, Ordering::Relaxed) {
Ok(_) => break,
Err(actual) => current = actual,
}
}
if kind == AssertKind::NumericSometimes {
let fw = &*((&(*slot).split_watermark) as *const i64 as *const AtomicI64);
let mut fork_current = fw.load(Ordering::Relaxed);
loop {
let is_better = if maximize {
left > fork_current
} else {
left < fork_current
};
if !is_better {
break;
}
match fw.compare_exchange_weak(
fork_current,
left,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => {
assertion_split(slot_idx, hash);
break;
}
Err(actual) => fork_current = actual,
}
}
}
}
}
pub fn assertion_sometimes_all(msg: &str, named_bools: &[(&str, bool)]) {
let table_ptr = crate::context::assertion_table_ptr();
if table_ptr.is_null() {
return;
}
let hash = msg_hash(msg);
let (slot, slot_idx) =
unsafe { find_or_alloc_slot(table_ptr, hash, AssertKind::BooleanSometimesAll, 1, 0, msg) };
if slot.is_null() {
return;
}
let true_count = named_bools.iter().filter(|(_, v)| *v).count() as u8;
unsafe {
let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
pc.fetch_add(1, Ordering::Relaxed);
let fr = &*((&(*slot).frontier) as *const u8 as *const AtomicU8);
let mut current = fr.load(Ordering::Relaxed);
loop {
if true_count <= current {
break;
}
match fr.compare_exchange_weak(
current,
true_count,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => {
assertion_split(slot_idx, hash);
break;
}
Err(actual) => current = actual,
}
}
}
}
pub fn assertion_read_all() -> Vec<AssertionSlotSnapshot> {
let table_ptr = crate::context::assertion_table_ptr();
if table_ptr.is_null() {
return Vec::new();
}
unsafe {
let count = (*(table_ptr as *const u32)) as usize;
let count = count.min(MAX_ASSERTION_SLOTS);
let base = table_ptr.add(8) as *const AssertionSlot;
(0..count)
.filter_map(|i| {
let slot = &*base.add(i);
if slot.msg_hash == 0 {
return None;
}
Some(AssertionSlotSnapshot {
msg: slot.msg_str().to_string(),
kind: slot.kind,
must_hit: slot.must_hit,
pass_count: slot.pass_count,
fail_count: slot.fail_count,
watermark: slot.watermark,
frontier: slot.frontier,
})
})
.collect()
}
}
/// Owned, race-free copy of one `AssertionSlot`, produced by
/// `assertion_read_all` for reporting outside the shared table.
#[derive(Debug, Clone)]
pub struct AssertionSlotSnapshot {
/// Assertion message (decoded from the slot's NUL-terminated buffer).
pub msg: String,
/// Raw `AssertKind` discriminant (decode with `AssertKind::from_u8`).
pub kind: u8,
/// 1 when the assertion was registered as must-hit.
pub must_hit: u8,
/// Passing evaluations at snapshot time.
pub pass_count: u64,
/// Failing evaluations at snapshot time.
pub fail_count: u64,
/// Best observed numeric value (max or min per the slot's `maximize`).
pub watermark: i64,
/// Highest simultaneous true-count (BooleanSometimesAll only).
pub frontier: u8,
}
#[cfg(test)]
mod tests {
use super::*;
// msg_hash must be a pure function of its input.
#[test]
fn test_msg_hash_deterministic() {
let h1 = msg_hash("test_assertion");
let h2 = msg_hash("test_assertion");
assert_eq!(h1, h2);
}
// Spot-check that typical short assertion names do not collide, since a
// collision would silently merge two distinct assertions into one slot.
#[test]
fn test_msg_hash_no_collision() {
let names = ["a", "b", "c", "timeout", "connect", "retry"];
let hashes: Vec<u32> = names.iter().map(|n| msg_hash(n)).collect();
for i in 0..hashes.len() {
for j in (i + 1)..hashes.len() {
assert_ne!(
hashes[i], hashes[j],
"{} and {} collide",
names[i], names[j]
);
}
}
}
// The slot layout is part of the shared-memory ABI; pin it.
// 4 (hash) + 4*1 (flags) + 4*8 (counters/watermarks) + 1 + 7 (pad) + 64 (msg) = 112.
#[test]
fn test_slot_size_stable() {
assert_eq!(std::mem::size_of::<AssertionSlot>(), 112);
}
// With no table attached, recording must be a silent no-op (no panic).
#[test]
fn test_assertion_bool_noop_when_inactive() {
assertion_bool(AssertKind::Sometimes, true, true, "test");
assertion_bool(AssertKind::Always, true, false, "test2");
}
#[test]
fn test_assertion_numeric_noop_when_inactive() {
assertion_numeric(
AssertKind::NumericAlways,
AssertCmp::Gt,
false,
10,
5,
"test",
);
}
#[test]
fn test_assertion_read_all_when_inactive() {
let slots = assertion_read_all();
assert!(slots.is_empty());
}
// Round-trip of the raw kind byte, including the out-of-range case.
#[test]
fn test_assert_kind_from_u8() {
assert_eq!(AssertKind::from_u8(0), Some(AssertKind::Always));
assert_eq!(
AssertKind::from_u8(7),
Some(AssertKind::BooleanSometimesAll)
);
assert_eq!(AssertKind::from_u8(8), None);
}
}