// moonpool_explorer/assertion_slots.rs
//! Rich assertion slot tracking for the Antithesis-style assertion suite.
//!
//! Maintains a fixed-size table of assertion slots in shared memory.
//! Supports boolean assertions (always/sometimes/reachable/unreachable),
//! numeric guidance assertions (with watermark tracking), and compound
//! boolean assertions (sometimes-all with frontier tracking).
//!
//! Each slot is accessed via raw pointer arithmetic on `MAP_SHARED` memory.
//! With `Parallelism::Cores(N)`, multiple fork children run concurrently,
//! so `find_or_alloc_slot` claims slots by atomically writing `msg_hash`
//! before re-scanning, ensuring concurrent allocators see each other.

use std::sync::atomic::{AtomicI64, AtomicU8, AtomicU32, Ordering};

/// Maximum number of tracked assertion slots.
pub const MAX_ASSERTION_SLOTS: usize = 128;

/// Maximum length of the assertion message stored in a slot.
///
/// The buffer is NUL-terminated, so at most `SLOT_MSG_LEN - 1` message
/// bytes are kept (see the truncation in `find_or_alloc_slot`).
const SLOT_MSG_LEN: usize = 64;

/// Total size of the assertion table memory region in bytes.
///
/// Layout: `[next_slot: u32, _pad: u32, slots: [AssertionSlot; MAX_ASSERTION_SLOTS]]`
/// The 8-byte header keeps the slot array aligned for its u64/i64 fields.
pub const ASSERTION_TABLE_MEM_SIZE: usize =
    8 + MAX_ASSERTION_SLOTS * std::mem::size_of::<AssertionSlot>();
/// The kind of assertion being tracked.
///
/// Stored as a raw `u8` inside shared-memory slots, so the explicit
/// discriminants below are a stable wire format and must stay in sync
/// with `AssertKind::from_u8`.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertKind {
    /// Invariant that must always hold when reached.
    Always = 0,
    /// Invariant that must hold when reached, but need not be reached.
    AlwaysOrUnreachable = 1,
    /// Condition that should sometimes be true.
    Sometimes = 2,
    /// Code path that should be reached at least once.
    Reachable = 3,
    /// Code path that should never be reached.
    Unreachable = 4,
    /// Numeric invariant that must always hold (e.g., val > threshold).
    NumericAlways = 5,
    /// Numeric condition that should sometimes hold.
    NumericSometimes = 6,
    /// Compound boolean: all named bools should sometimes be true simultaneously.
    BooleanSometimesAll = 7,
}
48
49impl AssertKind {
50    /// Convert from raw u8 to AssertKind, returning None for invalid values.
51    pub fn from_u8(v: u8) -> Option<Self> {
52        match v {
53            0 => Some(Self::Always),
54            1 => Some(Self::AlwaysOrUnreachable),
55            2 => Some(Self::Sometimes),
56            3 => Some(Self::Reachable),
57            4 => Some(Self::Unreachable),
58            5 => Some(Self::NumericAlways),
59            6 => Some(Self::NumericSometimes),
60            7 => Some(Self::BooleanSometimesAll),
61            _ => None,
62        }
63    }
64}
65
/// Comparison operator for numeric assertions.
///
/// `#[repr(u8)]` with explicit discriminants so the value can be passed
/// through FFI/shared-memory boundaries without reinterpretation.
#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssertCmp {
    /// Greater than.
    Gt = 0,
    /// Greater than or equal to.
    Ge = 1,
    /// Less than.
    Lt = 2,
    /// Less than or equal to.
    Le = 3,
}
79
/// A single assertion tracking slot in shared memory.
///
/// All fields are accessed via raw pointer arithmetic on `MAP_SHARED` memory.
/// `#[repr(C)]` keeps the layout stable across processes; the total size is
/// pinned at 112 bytes by `test_slot_size_stable`.
#[repr(C)]
pub struct AssertionSlot {
    /// FNV-1a hash of the assertion message (u32).
    /// A value of 0 marks an empty or tombstoned slot.
    pub msg_hash: u32,
    /// The kind of assertion (AssertKind as u8).
    pub kind: u8,
    /// Whether this assertion must be hit (1) or not (0).
    pub must_hit: u8,
    /// Whether to maximize (1) or minimize (0) the watermark value.
    pub maximize: u8,
    /// Whether a fork has been triggered for this assertion (0 = no, 1 = yes).
    pub split_triggered: u8,
    /// Total number of times this assertion passed.
    pub pass_count: u64,
    /// Total number of times this assertion failed.
    pub fail_count: u64,
    /// Numeric watermark: best value observed (for guidance assertions).
    pub watermark: i64,
    /// Watermark value at last fork (for detecting improvement).
    pub split_watermark: i64,
    /// Frontier: number of simultaneously true bools (for BooleanSometimesAll).
    pub frontier: u8,
    /// Padding for alignment (brings the struct up to an 8-byte multiple).
    pub _pad: [u8; 7],
    /// Assertion message string (null-terminated).
    pub msg: [u8; SLOT_MSG_LEN],
}
110
111impl AssertionSlot {
112    /// Get the assertion message as a string slice.
113    pub fn msg_str(&self) -> &str {
114        let len = self
115            .msg
116            .iter()
117            .position(|&b| b == 0)
118            .unwrap_or(SLOT_MSG_LEN);
119        std::str::from_utf8(&self.msg[..len]).unwrap_or("???")
120    }
121}
122
/// FNV-1a hash of a message string to a stable u32.
///
/// Standard 32-bit FNV-1a (offset basis `0x811c9dc5`, prime `0x01000193`).
/// Deterministic across processes and runs, which is what makes it usable
/// as a slot key in shared memory.
pub fn msg_hash(msg: &str) -> u32 {
    msg.bytes().fold(0x811c_9dc5_u32, |acc, byte| {
        (acc ^ u32::from(byte)).wrapping_mul(0x0100_0193)
    })
}
132
/// Find an existing slot or allocate a new one by msg_hash.
///
/// Returns a pointer to the slot and its index, or null if the table is full.
///
/// Concurrency: multiple fork children may run this simultaneously on the
/// same `MAP_SHARED` table, so the protocol is scan → reserve an index with
/// `fetch_add` → publish `msg_hash` with Release → re-scan lower indices for
/// a concurrent duplicate (lower index wins; the loser tombstones itself).
///
/// # Safety
///
/// `table_ptr` must point to a valid assertion table region of at least
/// `ASSERTION_TABLE_MEM_SIZE` bytes.
unsafe fn find_or_alloc_slot(
    table_ptr: *mut u8,
    hash: u32,
    kind: AssertKind,
    must_hit: u8,
    maximize: u8,
    msg: &str,
) -> (*mut AssertionSlot, usize) {
    unsafe {
        // First 4 bytes of the region hold the allocated-slot count.
        let next_atomic = &*(table_ptr as *const AtomicU32);
        let count = next_atomic.load(Ordering::Acquire) as usize;
        // Slot array starts after the 8-byte header (count + padding).
        let base = table_ptr.add(8) as *mut AssertionSlot;

        // Search existing slots (atomic load to see concurrent writers).
        // NOTE(review): a message whose FNV-1a hash is exactly 0 would be
        // indistinguishable from an empty/tombstoned slot — assumed not to
        // occur in practice; confirm if messages are attacker-controlled.
        for i in 0..count.min(MAX_ASSERTION_SLOTS) {
            let slot = base.add(i);
            let h = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
            if h.load(Ordering::Acquire) == hash {
                return (slot, i);
            }
        }

        // Allocate new slot atomically.
        let new_idx = next_atomic.fetch_add(1, Ordering::AcqRel) as usize;
        if new_idx >= MAX_ASSERTION_SLOTS {
            // Table full: undo the reservation so the counter stays bounded.
            next_atomic.fetch_sub(1, Ordering::AcqRel);
            return (std::ptr::null_mut(), 0);
        }

        // Claim our slot by writing msg_hash atomically BEFORE re-scanning.
        // This makes our claim visible to any concurrent process doing
        // its own re-scan, preventing the TOCTOU duplicate slot race.
        let slot = base.add(new_idx);
        let hash_atomic = &*(std::ptr::addr_of!((*slot).msg_hash) as *const AtomicU32);
        hash_atomic.store(hash, Ordering::Release);

        // Re-scan 0..new_idx for a concurrent registration of the same hash.
        // Lower index always wins — if we find a match, tombstone ourselves.
        for i in 0..new_idx {
            let existing = base.add(i);
            let existing_hash = &*(std::ptr::addr_of!((*existing).msg_hash) as *const AtomicU32);
            if existing_hash.load(Ordering::Acquire) == hash {
                // Another process claimed a lower slot. Tombstone ours.
                // Readers key only off msg_hash == 0 (stored atomically first);
                // the bulk zeroing below is a plain write of our own slot.
                hash_atomic.store(0, Ordering::Release);
                std::ptr::write_bytes(slot as *mut u8, 0, std::mem::size_of::<AssertionSlot>());
                return (existing, i);
            }
        }

        // No duplicate found — write remaining slot fields (msg_hash already set).
        // Message truncated to SLOT_MSG_LEN - 1 bytes so at least one NUL
        // terminator remains. NOTE(review): byte-level truncation can split a
        // multi-byte UTF-8 char; msg_str() then falls back to "???".
        let mut msg_buf = [0u8; SLOT_MSG_LEN];
        let n = msg.len().min(SLOT_MSG_LEN - 1);
        msg_buf[..n].copy_from_slice(&msg.as_bytes()[..n]);

        (*slot).kind = kind as u8;
        (*slot).must_hit = must_hit;
        (*slot).maximize = maximize;
        (*slot).split_triggered = 0;
        (*slot).pass_count = 0;
        (*slot).fail_count = 0;
        // Watermarks start at the worst possible value so the first
        // observation always counts as an improvement.
        (*slot).watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).split_watermark = if maximize == 1 { i64::MIN } else { i64::MAX };
        (*slot).frontier = 0;
        (*slot)._pad = [0; 7];
        (*slot).msg = msg_buf;

        (slot, new_idx)
    }
}
210
/// Trigger forking for a slot that discovered something new.
///
/// Writes to coverage bitmap and explored map (if pointers are non-null),
/// then calls `dispatch_split()` if exploration is active.
fn assertion_split(slot_idx: usize, hash: u32) {
    // Mark coverage bitmap
    let bm_ptr = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
    if !bm_ptr.is_null() {
        // Safety: bm_ptr is non-null (checked above) and points to
        // COVERAGE_MAP_SIZE bytes of shared memory set during init().
        let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr) };
        bm.set_bit(hash as usize);
    }

    // Mark explored map
    let vm_ptr = crate::context::EXPLORED_MAP_PTR.with(|c| c.get());
    if !vm_ptr.is_null() {
        // Safety: vm_ptr is non-null (checked above) and points to
        // COVERAGE_MAP_SIZE bytes of shared memory set during init().
        let vm = unsafe { crate::coverage::ExploredMap::new(vm_ptr) };
        // The bitmap pointer is deliberately re-read from the thread-local
        // here rather than reusing `bm_ptr` above.
        let bm_ptr2 = crate::context::COVERAGE_BITMAP_PTR.with(|c| c.get());
        if !bm_ptr2.is_null() {
            // Safety: same invariant as bm_ptr above.
            let bm = unsafe { crate::coverage::CoverageBitmap::new(bm_ptr2) };
            vm.merge_from(&bm);
        }
    }

    // Dispatch to fork loop if explorer is active
    if crate::context::explorer_is_active() {
        // `% MAX_ASSERTION_SLOTS` clamps the id defensively, even though
        // valid slot indices are already below MAX_ASSERTION_SLOTS.
        crate::split_loop::dispatch_split("", slot_idx % MAX_ASSERTION_SLOTS);
    }
}
244
/// Boolean assertion backing function.
///
/// Handles Always, AlwaysOrUnreachable, Sometimes, Reachable, and Unreachable.
/// Gets or allocates a slot, increments pass/fail counts, and triggers forking
/// for Sometimes/Reachable assertions on first success.
///
/// This is a no-op if the assertion table is not initialized.
pub fn assertion_bool(kind: AssertKind, must_hit: bool, condition: bool, msg: &str) {
    let table_ptr = crate::context::assertion_table_ptr();
    if table_ptr.is_null() {
        return;
    }

    let hash = msg_hash(msg);
    let must_hit_u8 = if must_hit { 1 } else { 0 };

    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes of shared memory.
    let (slot, slot_idx) =
        unsafe { find_or_alloc_slot(table_ptr, hash, kind, must_hit_u8, 0, msg) };
    if slot.is_null() {
        return;
    }

    // Safety: slot points to valid shared memory.
    // The u64 counters are accessed through AtomicI64 (same size and layout);
    // wrapping fetch_add is bit-identical in two's complement, and the
    // `prev == 0` first-event checks are unaffected at realistic counts.
    unsafe {
        match kind {
            // NOTE(review): NumericAlways is accepted in this boolean path too —
            // presumably for callers that evaluate the comparison themselves;
            // confirm against the macro layer.
            AssertKind::Always | AssertKind::AlwaysOrUnreachable | AssertKind::NumericAlways => {
                if condition {
                    let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
                    pc.fetch_add(1, Ordering::Relaxed);
                } else {
                    let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
                    let prev = fc.fetch_add(1, Ordering::Relaxed);
                    // Print only on the first failure to avoid log spam.
                    if prev == 0 {
                        eprintln!("[ASSERTION FAILED] {} (kind={:?})", msg, kind);
                    }
                }
            }
            AssertKind::Sometimes | AssertKind::Reachable => {
                if condition {
                    let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
                    pc.fetch_add(1, Ordering::Relaxed);

                    // CAS split_triggered from 0 → 1 on first success; only the
                    // CAS winner forks, so the split fires exactly once.
                    let ft = &*((&(*slot).split_triggered) as *const u8 as *const AtomicU8);
                    if ft
                        .compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
                        .is_ok()
                    {
                        assertion_split(slot_idx, hash);
                    }
                } else {
                    let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
                    fc.fetch_add(1, Ordering::Relaxed);
                }
            }
            AssertKind::Unreachable => {
                // Being reached at all is a "pass" (the assertion is that we should NOT reach)
                // We track it as pass_count = times reached (bad), fail_count unused
                let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
                let prev = pc.fetch_add(1, Ordering::Relaxed);
                if prev == 0 {
                    eprintln!("[UNREACHABLE REACHED] {}", msg);
                }
            }
            // Numeric/compound kinds have their own entry points below.
            _ => {}
        }
    }
}
314
/// Numeric guidance assertion backing function.
///
/// Evaluates a comparison (left `cmp` right), tracks pass/fail counts,
/// and maintains a watermark of the best observed value of `left`.
/// For NumericSometimes, forks when the watermark improves past the
/// last fork watermark.
///
/// `maximize` determines whether improving means getting larger (true) or smaller (false).
///
/// This is a no-op if the assertion table is not initialized.
pub fn assertion_numeric(
    kind: AssertKind,
    cmp: AssertCmp,
    maximize: bool,
    left: i64,
    right: i64,
    msg: &str,
) {
    let table_ptr = crate::context::assertion_table_ptr();
    if table_ptr.is_null() {
        return;
    }

    let hash = msg_hash(msg);
    let maximize_u8 = if maximize { 1 } else { 0 };

    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes.
    let (slot, slot_idx) =
        unsafe { find_or_alloc_slot(table_ptr, hash, kind, 1, maximize_u8, msg) };
    if slot.is_null() {
        return;
    }

    // Evaluate the comparison
    let passes = match cmp {
        AssertCmp::Gt => left > right,
        AssertCmp::Ge => left >= right,
        AssertCmp::Lt => left < right,
        AssertCmp::Le => left <= right,
    };

    // Safety: slot points to valid shared memory.
    // The u64 counters are accessed through AtomicI64 (same size/layout,
    // bit-identical wrapping add).
    unsafe {
        if passes {
            let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
            pc.fetch_add(1, Ordering::Relaxed);
        } else {
            let fc = &*((&(*slot).fail_count) as *const u64 as *const AtomicI64);
            let prev = fc.fetch_add(1, Ordering::Relaxed);
            // Report only the first failure to avoid log spam.
            if kind == AssertKind::NumericAlways && prev == 0 {
                eprintln!(
                    "[NUMERIC ASSERTION FAILED] {} (left={}, right={}, cmp={:?})",
                    msg, left, right, cmp
                );
            }
        }

        // Update watermark: track best value of `left`.
        // CAS loop: retry only while `left` still improves on the freshest
        // value some other process may have published.
        let wm = &*((&(*slot).watermark) as *const i64 as *const AtomicI64);
        let mut current = wm.load(Ordering::Relaxed);
        loop {
            let is_better = if maximize {
                left > current
            } else {
                left < current
            };
            if !is_better {
                break;
            }
            match wm.compare_exchange_weak(current, left, Ordering::Relaxed, Ordering::Relaxed) {
                Ok(_) => break,
                Err(actual) => current = actual,
            }
        }

        // For NumericSometimes: fork when watermark improves past split_watermark.
        // Only the process that wins the CAS calls assertion_split, so each
        // improvement triggers at most one fork.
        if kind == AssertKind::NumericSometimes {
            let fw = &*((&(*slot).split_watermark) as *const i64 as *const AtomicI64);
            let mut fork_current = fw.load(Ordering::Relaxed);
            loop {
                let is_better = if maximize {
                    left > fork_current
                } else {
                    left < fork_current
                };
                if !is_better {
                    break;
                }
                match fw.compare_exchange_weak(
                    fork_current,
                    left,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        assertion_split(slot_idx, hash);
                        break;
                    }
                    Err(actual) => fork_current = actual,
                }
            }
        }
    }
}
419
/// Compound boolean assertion backing function (sometimes-all).
///
/// Counts how many of the named booleans are simultaneously true.
/// Maintains a frontier (max count seen). Forks when the frontier advances.
///
/// This is a no-op if the assertion table is not initialized.
pub fn assertion_sometimes_all(msg: &str, named_bools: &[(&str, bool)]) {
    let table_ptr = crate::context::assertion_table_ptr();
    if table_ptr.is_null() {
        return;
    }

    let hash = msg_hash(msg);

    // Safety: table_ptr points to ASSERTION_TABLE_MEM_SIZE bytes.
    let (slot, slot_idx) =
        unsafe { find_or_alloc_slot(table_ptr, hash, AssertKind::BooleanSometimesAll, 1, 0, msg) };
    if slot.is_null() {
        return;
    }

    // Count simultaneously true bools
    let true_count = named_bools.iter().filter(|(_, v)| *v).count() as u8;

    // Safety: slot points to valid shared memory.
    // pass_count (u64) is accessed through AtomicI64 (same size/layout).
    unsafe {
        // Increment pass_count (always, for statistics)
        let pc = &*((&(*slot).pass_count) as *const u64 as *const AtomicI64);
        pc.fetch_add(1, Ordering::Relaxed);

        // CAS loop on frontier — fork when it advances. Only the CAS winner
        // forks, so each new frontier value triggers at most one split even
        // across concurrent processes.
        let fr = &*((&(*slot).frontier) as *const u8 as *const AtomicU8);
        let mut current = fr.load(Ordering::Relaxed);
        loop {
            if true_count <= current {
                break;
            }
            match fr.compare_exchange_weak(
                current,
                true_count,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    assertion_split(slot_idx, hash);
                    break;
                }
                Err(actual) => current = actual,
            }
        }
    }
}
472
473/// Read all allocated assertion slots from shared memory.
474///
475/// Returns an empty vector if the assertion table is not initialized.
476pub fn assertion_read_all() -> Vec<AssertionSlotSnapshot> {
477    let table_ptr = crate::context::assertion_table_ptr();
478    if table_ptr.is_null() {
479        return Vec::new();
480    }
481
482    // Safety: table_ptr was allocated during init() with ASSERTION_TABLE_MEM_SIZE bytes.
483    // - The first 4 bytes hold the slot count (u32), capped at MAX_ASSERTION_SLOTS.
484    // - base = table_ptr + 8 is the start of the AssertionSlot array.
485    // - Loop bound 0..count ensures base.add(i) stays within the allocated region.
486    // - AssertionSlot fields are read through a shared reference; tombstoned slots
487    //   (msg_hash == 0) are skipped.
488    unsafe {
489        let count = (*(table_ptr as *const u32)) as usize;
490        let count = count.min(MAX_ASSERTION_SLOTS);
491        let base = table_ptr.add(8) as *const AssertionSlot;
492
493        (0..count)
494            .filter_map(|i| {
495                let slot = &*base.add(i);
496                // Skip tombstones (msg_hash == 0) left by the duplicate-slot race fix.
497                if slot.msg_hash == 0 {
498                    return None;
499                }
500                Some(AssertionSlotSnapshot {
501                    msg: slot.msg_str().to_string(),
502                    kind: slot.kind,
503                    must_hit: slot.must_hit,
504                    pass_count: slot.pass_count,
505                    fail_count: slot.fail_count,
506                    watermark: slot.watermark,
507                    frontier: slot.frontier,
508                })
509            })
510            .collect()
511    }
512}
513
/// A snapshot of an assertion slot for reporting.
///
/// A plain owned copy of the slot's fields, decoupled from the shared
/// memory region so it can be collected and printed after the fact.
#[derive(Debug, Clone)]
pub struct AssertionSlotSnapshot {
    /// The assertion message.
    pub msg: String,
    /// The kind of assertion (AssertKind as u8).
    pub kind: u8,
    /// Whether this assertion must be hit.
    pub must_hit: u8,
    /// Number of times the assertion passed.
    pub pass_count: u64,
    /// Number of times the assertion failed.
    pub fail_count: u64,
    /// Best watermark value (for numeric assertions).
    pub watermark: i64,
    /// Frontier value (for BooleanSometimesAll).
    pub frontier: u8,
}
532
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_msg_hash_deterministic() {
        // Hashing is pure: the same message must always map to the same slot key.
        let h1 = msg_hash("test_assertion");
        let h2 = msg_hash("test_assertion");
        assert_eq!(h1, h2);
    }

    #[test]
    fn test_msg_hash_no_collision() {
        // Spot-check that typical assertion names don't collide pairwise.
        let names = ["a", "b", "c", "timeout", "connect", "retry"];
        let hashes: Vec<u32> = names.iter().map(|n| msg_hash(n)).collect();
        for i in 0..hashes.len() {
            for j in (i + 1)..hashes.len() {
                assert_ne!(
                    hashes[i], hashes[j],
                    "{} and {} collide",
                    names[i], names[j]
                );
            }
        }
    }

    #[test]
    fn test_slot_size_stable() {
        // Verify AssertionSlot size for shared memory layout stability.
        // msg_hash(4) + kind(1) + must_hit(1) + maximize(1) + split_triggered(1) +
        // pass_count(8) + fail_count(8) + watermark(8) + split_watermark(8) +
        // frontier(1) + _pad(7) + msg(64) = 112
        assert_eq!(std::mem::size_of::<AssertionSlot>(), 112);
    }

    #[test]
    fn test_assertion_bool_noop_when_inactive() {
        // Should not panic when assertion table is not initialized.
        assertion_bool(AssertKind::Sometimes, true, true, "test");
        assertion_bool(AssertKind::Always, true, false, "test2");
    }

    #[test]
    fn test_assertion_numeric_noop_when_inactive() {
        // Should not panic when assertion table is not initialized.
        assertion_numeric(
            AssertKind::NumericAlways,
            AssertCmp::Gt,
            false,
            10,
            5,
            "test",
        );
    }

    #[test]
    fn test_assertion_read_all_when_inactive() {
        // Should return empty when not initialized.
        let slots = assertion_read_all();
        assert!(slots.is_empty());
    }

    #[test]
    fn test_assert_kind_from_u8() {
        // Round-trip boundary values and reject the first out-of-range value.
        assert_eq!(AssertKind::from_u8(0), Some(AssertKind::Always));
        assert_eq!(
            AssertKind::from_u8(7),
            Some(AssertKind::BooleanSometimesAll)
        );
        assert_eq!(AssertKind::from_u8(8), None);
    }
}