// moonpool_explorer/assertion_slots.rs
//! Rich assertion slot tracking for the Antithesis-style assertion suite.
//!
//! Maintains a fixed-size table of assertion slots in shared memory.
//! Supports boolean assertions (always/sometimes/reachable/unreachable),
//! numeric guidance assertions (with watermark tracking), and compound
//! boolean assertions (sometimes-all with frontier tracking).
//!
//! Each slot is accessed via raw pointer arithmetic on `MAP_SHARED` memory.
//! With `Parallelism::Cores(N)`, multiple fork children run concurrently,
//! so `find_or_alloc_slot` claims slots by atomically writing `msg_hash`
//! before re-scanning, ensuring concurrent allocators see each other.

use std::sync::atomic::{AtomicI64, AtomicU8, AtomicU32, AtomicU64, Ordering};

/// Maximum number of tracked assertion slots.
pub const MAX_ASSERTION_SLOTS: usize = 128;

/// Maximum length of the assertion message stored in a slot.
///
/// Messages longer than this are truncated on storage (null terminator kept).
const SLOT_MSG_LEN: usize = 64;

/// Total size of the assertion table memory region in bytes.
///
/// Layout: `[next_slot: u32, _pad: u32, slots: [AssertionSlot; MAX_ASSERTION_SLOTS]]`
///
/// The 8-byte header holds the atomically-updated allocation counter (u32)
/// plus 4 bytes of padding so the slot array is 8-byte aligned.
pub const ASSERTION_TABLE_MEM_SIZE: usize =
    8 + MAX_ASSERTION_SLOTS * std::mem::size_of::<AssertionSlot>();

/// The kind of assertion being tracked.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertKind {
    /// Invariant that must always hold when reached.
    Always = 0,
    /// Invariant that must hold when reached, but need not be reached.
    AlwaysOrUnreachable = 1,
    /// Condition that should sometimes be true.
    Sometimes = 2,
    /// Code path that should be reached at least once.
    Reachable = 3,
    /// Code path that should never be reached.
    Unreachable = 4,
    /// Numeric invariant that must always hold (e.g., val > threshold).
    NumericAlways = 5,
    /// Numeric condition that should sometimes hold.
    NumericSometimes = 6,
    /// Compound boolean: all named bools should sometimes be true simultaneously.
    BooleanSometimesAll = 7,
}

impl AssertKind {
    /// Convert from raw u8 to AssertKind, returning None for invalid values.
    pub fn from_u8(v: u8) -> Option<Self> {
        // Discriminants are dense in 0..=7, so a lookup table indexed by the
        // raw value is equivalent to an exhaustive match over 0..=7.
        const BY_DISCRIMINANT: [AssertKind; 8] = [
            AssertKind::Always,
            AssertKind::AlwaysOrUnreachable,
            AssertKind::Sometimes,
            AssertKind::Reachable,
            AssertKind::Unreachable,
            AssertKind::NumericAlways,
            AssertKind::NumericSometimes,
            AssertKind::BooleanSometimesAll,
        ];
        BY_DISCRIMINANT.get(usize::from(v)).copied()
    }
}

/// Comparison operator for numeric assertions.
///
/// Explicit `#[repr(u8)]` discriminants keep the encoding stable.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertCmp {
    /// Greater than.
    Gt = 0,
    /// Greater than or equal to.
    Ge = 1,
    /// Less than.
    Lt = 2,
    /// Less than or equal to.
    Le = 3,
}

/// A single assertion tracking slot in shared memory.
///
/// All fields are accessed via raw pointer arithmetic on `MAP_SHARED` memory;
/// the hash and counter fields are additionally reinterpreted as atomics by
/// the accessor functions in this module, so the `#[repr(C)]` layout
/// (112 bytes, asserted in this module's tests) must remain stable.
#[repr(C)]
pub struct AssertionSlot {
    /// FNV-1a hash of the assertion message (u32). A value of 0 marks a
    /// tombstoned slot abandoned by `find_or_alloc_slot`'s duplicate check.
    pub msg_hash: u32,
    /// The kind of assertion (AssertKind as u8).
    pub kind: u8,
    /// Whether this assertion must be hit (1) or not (0).
    pub must_hit: u8,
    /// Whether to maximize (1) or minimize (0) the watermark value.
    pub maximize: u8,
    /// Whether a fork has been triggered for this assertion (0 = no, 1 = yes).
    pub split_triggered: u8,
    /// Total number of times this assertion passed.
    pub pass_count: u64,
    /// Total number of times this assertion failed.
    pub fail_count: u64,
    /// Numeric watermark: best value observed (for guidance assertions).
    pub watermark: i64,
    /// Watermark value at last fork (for detecting improvement).
    pub split_watermark: i64,
    /// Frontier: number of simultaneously true bools (for BooleanSometimesAll).
    pub frontier: u8,
    /// Padding for alignment.
    pub _pad: [u8; 7],
    /// Assertion message string (null-terminated).
    pub msg: [u8; SLOT_MSG_LEN],
}

111impl AssertionSlot {
112    /// Get the assertion message as a string slice.
113    pub fn msg_str(&self) -> &str {
114        let len = self
115            .msg
116            .iter()
117            .position(|&b| b == 0)
118            .unwrap_or(SLOT_MSG_LEN);
119        std::str::from_utf8(&self.msg[..len]).unwrap_or("???")
120    }
121}
122
/// FNV-1a hash of a message string to a stable u32.
///
/// Uses the standard 32-bit FNV offset basis and prime, so the same message
/// always maps to the same slot hash across processes and runs.
pub fn msg_hash(msg: &str) -> u32 {
    msg.bytes().fold(0x811c_9dc5_u32, |acc, byte| {
        (acc ^ u32::from(byte)).wrapping_mul(0x0100_0193)
    })
}

/// Find an existing slot or allocate a new one by msg_hash.
///
/// Returns a pointer to the slot and its index, or null if the table is full.
///
/// # Safety
///
/// `table_ptr` must point to a valid assertion table region of at least
/// `ASSERTION_TABLE_MEM_SIZE` bytes.
unsafe fn find_or_alloc_slot(
    table_ptr: *mut u8,
    hash: u32,
    kind: AssertKind,
    must_hit: u8,
    maximize: u8,
    msg: &str,
) -> (*mut AssertionSlot, usize) {
    unsafe {
        // Header word: count of allocated slots, shared by all processes.
        let next_atomic = &*(table_ptr as *const AtomicU32);
        let count = next_atomic.load(Ordering::Acquire) as usize;
        // Slots begin after the 8-byte header (u32 counter + u32 padding).
        let base = table_ptr.add(8) as *mut AssertionSlot;

        // Search existing slots (atomic load to see concurrent writers).
        for i in 0..count.min(MAX_ASSERTION_SLOTS) {
            let slot = base.add(i);
            let h = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
            if h.load(Ordering::Acquire) == hash {
                return (slot, i);
            }
        }

        // Allocate new slot atomically.
        let new_idx = next_atomic.fetch_add(1, Ordering::AcqRel) as usize;
        if new_idx >= MAX_ASSERTION_SLOTS {
            // Table full: roll back the reservation so the counter stays
            // near the cap instead of growing without bound.
            next_atomic.fetch_sub(1, Ordering::AcqRel);
            return (std::ptr::null_mut(), 0);
        }

        // Claim our slot by writing msg_hash atomically BEFORE re-scanning.
        // This makes our claim visible to any concurrent process doing
        // its own re-scan, preventing the TOCTOU duplicate slot race.
        let slot = base.add(new_idx);
        let hash_atomic = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
        hash_atomic.store(hash, Ordering::Release);

        // Re-scan 0..new_idx for a concurrent registration of the same hash.
        // Lower index always wins — if we find a match, tombstone ourselves.
        for i in 0..new_idx {
            let existing = base.add(i);
            let existing_hash = &*(std::ptr::addr_of!((*existing).msg_hash) as *const AtomicU32);
            if existing_hash.load(Ordering::Acquire) == hash {
                // Another process claimed a lower slot. Tombstone ours.
                // NOTE(review): the tombstone marker is hash value 0, so a
                // message whose FNV-1a hash is exactly 0 would be
                // indistinguishable from a tombstone — astronomically
                // unlikely, but worth confirming.
                hash_atomic.store(0, Ordering::Release);
                std::ptr::write_bytes(slot as *mut u8, 0, std::mem::size_of::<AssertionSlot>());
                return (existing, i);
            }
        }

        // No duplicate found — write remaining slot fields (msg_hash already set).
        // NOTE(review): these are plain (non-atomic) stores published after the
        // atomic hash write, so a concurrent reader may briefly observe zeroed
        // fields for a freshly claimed slot.
        let mut msg_buf = [0u8; SLOT_MSG_LEN];
        // Reserve one byte so the stored message is always null-terminated.
        let n = msg.len().min(SLOT_MSG_LEN - 1);
        msg_buf[..n].copy_from_slice(&msg.as_bytes()[..n]);

        (*slot).kind = kind as u8;
        (*slot).must_hit = must_hit;
        (*slot).maximize = maximize;
        (*slot).split_triggered = 0;
        (*slot).pass_count = 0;
        (*slot).fail_count = 0;
        // Watermarks start at the worst value for the chosen direction so any
        // real observation improves them.
        (*slot).watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).split_watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).frontier = 0;
        (*slot)._pad = [0; 7];
        (*slot).msg = msg_buf;

        (slot, new_idx)
    }
}

211/// Trigger forking for a slot that discovered something new.
212///
213/// Writes to coverage bitmap and explored map (if pointers are non-null),
214/// then calls `dispatch_split()` if exploration is active.
215fn assertion_split(slot_idx: usize, hash: u32) {
216    // Mark coverage bitmap
217    let bm_ptr = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
218    if !bm_ptr.is_null() {
219        let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
220        bm.set_bit(hash as usize);
221    }
222
223    // Mark explored map
224    let vm_ptr = crate::context::EXPLORED_MAP_PTR.with(|c| c.get());
225    if !vm_ptr.is_null() {
226        let vm = unsafe { crate::coverage::ExploredMap::new(vm_ptr) };
227        let bm_ptr2 = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
228        if !bm_ptr2.is_null() {
229            let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr2) };
230            vm.merge_from(&bm);
231        }
232    }
233
234    // Dispatch to fork loop if explorer is active
235    if crate::context::explorer_is_active() {
236        crate::split_loop::dispatch_split("", slot_idx % MAX_ASSERTION_SLOTS);
237    }
238}
239
240/// Boolean assertion backing function.
241///
242/// Handles Always, AlwaysOrUnreachable, Sometimes, Reachable, and Unreachable.
243/// Gets or allocates a slot, increments pass/fail counts, and triggers forking
244/// for Sometimes/Reachable assertions on first success.
245///
246/// This is a no-op if the assertion table is not initialized.
247pub fn assertion_bool(kind: AssertKind, must_hit: bool, condition: bool, msg: &str) {
248    let table_ptr = crate::context::get_assertion_table_ptr();
249    if table_ptr.is_null() {
250        return;
251    }
252
253    let hash = msg_hash(msg);
254    let must_hit_u8 = if must_hit { 1 } else { 0 };
255
256    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes of shared memory.
257    let (slot, slot_idx) =
258        unsafe { find_or_alloc_slot(table_ptr, hash, kind, must_hit_u8, 0, msg) };
259    if slot.is_null() {
260        return;
261    }
262
263    // Safety: slot points to valid shared memory.
264    unsafe {
265        match kind {
266            AssertKind::Always | AssertKind::AlwaysOrUnreachable | AssertKind::NumericAlways => {
267                if condition {
268                    let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
269                    pc.fetch_add(1, Ordering::Relaxed);
270                } else {
271                    let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
272                    let prev = fc.fetch_add(1, Ordering::Relaxed);
273                    if prev == 0 {
274                        eprintln!("[ASSERTION FAILED] {} (kind={:?})", msg, kind);
275                    }
276                }
277            }
278            AssertKind::Sometimes | AssertKind::Reachable => {
279                if condition {
280                    let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
281                    pc.fetch_add(1, Ordering::Relaxed);
282
283                    // CAS split_triggered from 0 → 1 on first success
284                    let ft = &*((&(*slot).split_triggered) as *const u8 as *const AtomicU8);
285                    if ft
286                        .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
287                        .is_ok()
288                    {
289                        assertion_split(slot_idx, hash);
290                    }
291                } else {
292                    let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
293                    fc.fetch_add(1, Ordering::Relaxed);
294                }
295            }
296            AssertKind::Unreachable => {
297                // Being reached at all is a "pass" (the assertion is that we should NOT reach)
298                // We track it as pass_count = times reached (bad), fail_count unused
299                let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
300                let prev = pc.fetch_add(1, Ordering::Relaxed);
301                if prev == 0 {
302                    eprintln!("[UNREACHABLE REACHED] {}", msg);
303                }
304            }
305            _ => {}
306        }
307    }
308}
309
310/// Numeric guidance assertion backing function.
311///
312/// Evaluates a comparison (left `cmp` right), tracks pass/fail counts,
313/// and maintains a watermark of the best observed value of `left`.
314/// For NumericSometimes, forks when the watermark improves past the
315/// last fork watermark.
316///
317/// `maximize` determines whether improving means getting larger (true) or smaller (false).
318///
319/// This is a no-op if the assertion table is not initialized.
320pub fn assertion_numeric(
321    kind: AssertKind,
322    cmp: AssertCmp,
323    maximize: bool,
324    left: i64,
325    right: i64,
326    msg: &str,
327) {
328    let table_ptr = crate::context::get_assertion_table_ptr();
329    if table_ptr.is_null() {
330        return;
331    }
332
333    let hash = msg_hash(msg);
334    let maximize_u8 = if maximize { 1 } else { 0 };
335
336    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes.
337    let (slot, slot_idx) =
338        unsafe { find_or_alloc_slot(table_ptr, hash, kind, 1, maximize_u8, msg) };
339    if slot.is_null() {
340        return;
341    }
342
343    // Evaluate the comparison
344    let passes = match cmp {
345        AssertCmp::Gt => left > right,
346        AssertCmp::Ge => left >= right,
347        AssertCmp::Lt => left < right,
348        AssertCmp::Le => left <= right,
349    };
350
351    // Safety: slot points to valid shared memory.
352    unsafe {
353        if passes {
354            let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
355            pc.fetch_add(1, Ordering::Relaxed);
356        } else {
357            let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
358            let prev = fc.fetch_add(1, Ordering::Relaxed);
359            if kind == AssertKind::NumericAlways && prev == 0 {
360                eprintln!(
361                    "[NUMERIC ASSERTION FAILED] {} (left={}, right={}, cmp={:?})",
362                    msg, left, right, cmp
363                );
364            }
365        }
366
367        // Update watermark: track best value of `left`
368        let wm = &*((&(*slot).watermark) as *const i64 as *const AtomicI64);
369        let mut current = wm.load(Ordering::Relaxed);
370        loop {
371            let is_better = if maximize {
372                left > current
373            } else {
374                left < current
375            };
376            if !is_better {
377                break;
378            }
379            match wm.compare_exchange_weak(current, left, Ordering::Relaxed, Ordering::Relaxed) {
380                Ok(_) => break,
381                Err(actual) => current = actual,
382            }
383        }
384
385        // For NumericSometimes: fork when watermark improves past split_watermark
386        if kind == AssertKind::NumericSometimes {
387            let fw = &*((&(*slot).split_watermark) as *const i64 as *const AtomicI64);
388            let mut fork_current = fw.load(Ordering::Relaxed);
389            loop {
390                let is_better = if maximize {
391                    left > fork_current
392                } else {
393                    left < fork_current
394                };
395                if !is_better {
396                    break;
397                }
398                match fw.compare_exchange_weak(
399                    fork_current,
400                    left,
401                    Ordering::Relaxed,
402                    Ordering::Relaxed,
403                ) {
404                    Ok(_) => {
405                        assertion_split(slot_idx, hash);
406                        break;
407                    }
408                    Err(actual) => fork_current = actual,
409                }
410            }
411        }
412    }
413}
414
415/// Compound boolean assertion backing function (sometimes-all).
416///
417/// Counts how many of the named booleans are simultaneously true.
418/// Maintains a frontier (max count seen). Forks when the frontier advances.
419///
420/// This is a no-op if the assertion table is not initialized.
421pub fn assertion_sometimes_all(msg: &str, named_bools: &[(&str, bool)]) {
422    let table_ptr = crate::context::get_assertion_table_ptr();
423    if table_ptr.is_null() {
424        return;
425    }
426
427    let hash = msg_hash(msg);
428
429    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes.
430    let (slot, slot_idx) =
431        unsafe { find_or_alloc_slot(table_ptr, hash, AssertKind::BooleanSometimesAll, 1, 0, msg) };
432    if slot.is_null() {
433        return;
434    }
435
436    // Count simultaneously true bools
437    let true_count = named_bools.iter().filter(|(_, v)| *v).count() as u8;
438
439    // Safety: slot points to valid shared memory.
440    unsafe {
441        // Increment pass_count (always, for statistics)
442        let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
443        pc.fetch_add(1, Ordering::Relaxed);
444
445        // CAS loop on frontier — fork when it advances
446        let fr = &*((&(*slot).frontier) as *const u8 as *const AtomicU8);
447        let mut current = fr.load(Ordering::Relaxed);
448        loop {
449            if true_count <= current {
450                break;
451            }
452            match fr.compare_exchange_weak(
453                current,
454                true_count,
455                Ordering::Relaxed,
456                Ordering::Relaxed,
457            ) {
458                Ok(_) => {
459                    assertion_split(slot_idx, hash);
460                    break;
461                }
462                Err(actual) => current = actual,
463            }
464        }
465    }
466}
467
468/// Read all allocated assertion slots from shared memory.
469///
470/// Returns an empty vector if the assertion table is not initialized.
471pub fn assertion_read_all() -> Vec<AssertionSlotSnapshot> {
472    let table_ptr = crate::context::get_assertion_table_ptr();
473    if table_ptr.is_null() {
474        return Vec::new();
475    }
476
477    unsafe {
478        let count = (*(table_ptr as *const u32)) as usize;
479        let count = count.min(MAX_ASSERTION_SLOTS);
480        let base = table_ptr.add(8) as *const AssertionSlot;
481
482        (0..count)
483            .filter_map(|i| {
484                let slot = &*base.add(i);
485                // Skip tombstones (msg_hash == 0) left by the duplicate-slot race fix.
486                if slot.msg_hash == 0 {
487                    return None;
488                }
489                Some(AssertionSlotSnapshot {
490                    msg: slot.msg_str().to_string(),
491                    kind: slot.kind,
492                    must_hit: slot.must_hit,
493                    pass_count: slot.pass_count,
494                    fail_count: slot.fail_count,
495                    watermark: slot.watermark,
496                    frontier: slot.frontier,
497                })
498            })
499            .collect()
500    }
501}
502
/// A snapshot of an assertion slot for reporting.
///
/// Produced by `assertion_read_all`; values are copied out of shared memory
/// and represent a point-in-time, best-effort view.
#[derive(Debug, Clone)]
pub struct AssertionSlotSnapshot {
    /// The assertion message.
    pub msg: String,
    /// The kind of assertion (AssertKind as u8); decode with `AssertKind::from_u8`.
    pub kind: u8,
    /// Whether this assertion must be hit.
    pub must_hit: u8,
    /// Number of times the assertion passed.
    pub pass_count: u64,
    /// Number of times the assertion failed.
    pub fail_count: u64,
    /// Best watermark value (for numeric assertions).
    pub watermark: i64,
    /// Frontier value (for BooleanSometimesAll).
    pub frontier: u8,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_msg_hash_deterministic() {
        // The same message must always hash to the same slot key.
        assert_eq!(msg_hash("test_assertion"), msg_hash("test_assertion"));
    }

    #[test]
    fn test_msg_hash_no_collision() {
        // A handful of realistic assertion names must not collide.
        let names = ["a", "b", "c", "timeout", "connect", "retry"];
        let hashes: Vec<u32> = names.iter().map(|n| msg_hash(n)).collect();
        for (i, &left) in hashes.iter().enumerate() {
            for (j, &right) in hashes.iter().enumerate().skip(i + 1) {
                assert_ne!(left, right, "{} and {} collide", names[i], names[j]);
            }
        }
    }

    #[test]
    fn test_slot_size_stable() {
        // Verify AssertionSlot size for shared memory layout stability.
        // msg_hash(4) + kind(1) + must_hit(1) + maximize(1) + split_triggered(1) +
        // pass_count(8) + fail_count(8) + watermark(8) + split_watermark(8) +
        // frontier(1) + _pad(7) + msg(64) = 112
        assert_eq!(std::mem::size_of::<AssertionSlot>(), 112);
    }

    #[test]
    fn test_assertion_bool_noop_when_inactive() {
        // Must not panic when the assertion table is not initialized.
        assertion_bool(AssertKind::Sometimes, true, true, "test");
        assertion_bool(AssertKind::Always, true, false, "test2");
    }

    #[test]
    fn test_assertion_numeric_noop_when_inactive() {
        // Must not panic when the assertion table is not initialized.
        assertion_numeric(AssertKind::NumericAlways, AssertCmp::Gt, false, 10, 5, "test");
    }

    #[test]
    fn test_assertion_read_all_when_inactive() {
        // Must return an empty snapshot list when not initialized.
        assert!(assertion_read_all().is_empty());
    }

    #[test]
    fn test_assert_kind_from_u8() {
        assert_eq!(AssertKind::from_u8(0), Some(AssertKind::Always));
        assert_eq!(AssertKind::from_u8(7), Some(AssertKind::BooleanSometimesAll));
        assert_eq!(AssertKind::from_u8(8), None);
    }
}