// moonpool_explorer/coverage.rs

//! Coverage tracking for exploration novelty detection.
//!
//! Tracks which assertion paths have been explored across all timelines.
//! The [`ExploredMap`] lives in shared memory and accumulates coverage
//! from every timeline. Each timeline gets a fresh [`CoverageBitmap`]
//! that is merged into the explored map after the timeline finishes.

/// Size of coverage bitmaps in bytes (8192 bit positions).
pub const COVERAGE_MAP_SIZE: usize = 1024;

/// Per-timeline coverage bitmap, cleared before each split.
///
/// Tracks which assertion paths were hit during this timeline's execution.
/// After the timeline finishes, the parent merges this into the [`ExploredMap`].
pub struct CoverageBitmap {
    // Raw pointer into shared memory; must remain valid and writable for
    // COVERAGE_MAP_SIZE bytes for the lifetime of this value (see `new`).
    ptr: *mut u8,
}

impl CoverageBitmap {
    /// Wrap a shared-memory pointer as a coverage bitmap.
    ///
    /// # Safety
    ///
    /// `ptr` must point to at least [`COVERAGE_MAP_SIZE`] bytes of valid,
    /// writable shared memory that outlives the returned bitmap.
    pub unsafe fn new(ptr: *mut u8) -> Self {
        Self { ptr }
    }

    /// Set the bit at the given index (mod total bits).
    ///
    /// Out-of-range indices wrap around rather than going out of bounds,
    /// so any `usize` (e.g. a hash of an assertion path) is accepted.
    pub fn set_bit(&self, index: usize) {
        let bit_index = index % (COVERAGE_MAP_SIZE * 8);
        let byte = bit_index / 8;
        let bit = bit_index % 8;
        // SAFETY: self.ptr points to COVERAGE_MAP_SIZE bytes (constructor invariant).
        // The modular arithmetic (bit_index % (COVERAGE_MAP_SIZE * 8)) guarantees
        // byte = bit_index / 8 < COVERAGE_MAP_SIZE, so ptr.add(byte) is in bounds.
        unsafe {
            *self.ptr.add(byte) |= 1 << bit;
        }
    }

    /// Clear all bits to zero.
    pub fn clear(&self) {
        // SAFETY: self.ptr points to COVERAGE_MAP_SIZE bytes of writable shared
        // memory (constructor invariant). write_bytes zeroes exactly that region.
        unsafe {
            std::ptr::write_bytes(self.ptr, 0, COVERAGE_MAP_SIZE);
        }
    }

    /// Get a read-only pointer to the underlying data.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr
    }
}
57
58/// Cross-process coverage map, OR'd by all timelines.
59///
60/// Lives in `MAP_SHARED` memory so all forked timelines contribute.
61/// A bit set to 1 means "this assertion path has been explored."
62pub struct ExploredMap {
63    ptr: *mut u8,
64}
65
66impl ExploredMap {
67    /// Wrap a shared-memory pointer as a explored map.
68    ///
69    /// # Safety
70    ///
71    /// `ptr` must point to at least [`COVERAGE_MAP_SIZE`] bytes of valid,
72    /// writable shared memory.
73    pub unsafe fn new(ptr: *mut u8) -> Self {
74        Self { ptr }
75    }
76
77    /// Merge a timeline's coverage bitmap into this explored map (bitwise OR).
78    pub fn merge_from(&self, other: &CoverageBitmap) {
79        // Safety: both pointers are valid for COVERAGE_MAP_SIZE bytes
80        // (constructor invariants of ExploredMap and CoverageBitmap).
81        // Loop bound 0..COVERAGE_MAP_SIZE ensures all ptr.add(i) are in bounds.
82        unsafe {
83            for i in 0..COVERAGE_MAP_SIZE {
84                *self.ptr.add(i) |= *other.as_ptr().add(i);
85            }
86        }
87    }
88
89    /// Count the number of set bits in the explored map (population count).
90    ///
91    /// Returns the total number of unique assertion paths explored across
92    /// all timelines.
93    pub fn count_bits_set(&self) -> u32 {
94        let mut count: u32 = 0;
95        // Safety: self.ptr points to COVERAGE_MAP_SIZE bytes (constructor invariant).
96        // Loop bound 0..COVERAGE_MAP_SIZE ensures ptr.add(i) is in bounds.
97        unsafe {
98            for i in 0..COVERAGE_MAP_SIZE {
99                count += (*self.ptr.add(i)).count_ones();
100            }
101        }
102        count
103    }
104
105    /// Check if a timeline's bitmap contains any bits not yet in the explored map.
106    pub fn has_new_bits(&self, other: &CoverageBitmap) -> bool {
107        // Safety: both pointers are valid for COVERAGE_MAP_SIZE bytes
108        // (constructor invariants of ExploredMap and CoverageBitmap).
109        // Loop bound 0..COVERAGE_MAP_SIZE ensures all ptr.add(i) are in bounds.
110        unsafe {
111            for i in 0..COVERAGE_MAP_SIZE {
112                let explored = *self.ptr.add(i);
113                let child = *other.as_ptr().add(i);
114                // Child has bits that explored map doesn't
115                if (child & !explored) != 0 {
116                    return true;
117                }
118            }
119        }
120        false
121    }
122}
123
#[cfg(test)]
mod tests {
    use super::*;
    use crate::shared_mem;

    #[test]
    fn test_set_bit_and_check() {
        let ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let explored_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let bm = unsafe { CoverageBitmap::new(ptr) };
        let vm = unsafe { ExploredMap::new(explored_ptr) };

        // Initially no new bits
        assert!(!vm.has_new_bits(&bm));

        // Set a bit in bitmap
        bm.set_bit(42);
        assert!(vm.has_new_bits(&bm));

        // Merge into explored map
        vm.merge_from(&bm);
        // Now no new bits (already merged)
        assert!(!vm.has_new_bits(&bm));

        unsafe {
            shared_mem::free_shared(ptr, COVERAGE_MAP_SIZE);
            shared_mem::free_shared(explored_ptr, COVERAGE_MAP_SIZE);
        }
    }

    #[test]
    fn test_clear() {
        let ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let bm = unsafe { CoverageBitmap::new(ptr) };

        bm.set_bit(0);
        bm.set_bit(100);
        bm.set_bit(8000);

        bm.clear();

        // Verify all zeroed
        unsafe {
            for i in 0..COVERAGE_MAP_SIZE {
                assert_eq!(*ptr.add(i), 0);
            }
            shared_mem::free_shared(ptr, COVERAGE_MAP_SIZE);
        }
    }

    #[test]
    fn test_merge_accumulates() {
        let bm1_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let bm2_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let vm_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");

        let bm1 = unsafe { CoverageBitmap::new(bm1_ptr) };
        let bm2 = unsafe { CoverageBitmap::new(bm2_ptr) };
        let vm = unsafe { ExploredMap::new(vm_ptr) };

        bm1.set_bit(10);
        bm2.set_bit(20);

        vm.merge_from(&bm1);
        // bm2 has bit 20 which is new
        assert!(vm.has_new_bits(&bm2));

        vm.merge_from(&bm2);
        // Now neither has new bits
        assert!(!vm.has_new_bits(&bm1));
        assert!(!vm.has_new_bits(&bm2));

        unsafe {
            shared_mem::free_shared(bm1_ptr, COVERAGE_MAP_SIZE);
            shared_mem::free_shared(bm2_ptr, COVERAGE_MAP_SIZE);
            shared_mem::free_shared(vm_ptr, COVERAGE_MAP_SIZE);
        }
    }

    #[test]
    fn test_count_bits_set() {
        let vm_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let bm_ptr = shared_mem::alloc_shared(COVERAGE_MAP_SIZE).expect("alloc failed");
        let vm = unsafe { ExploredMap::new(vm_ptr) };
        let bm = unsafe { CoverageBitmap::new(bm_ptr) };

        assert_eq!(vm.count_bits_set(), 0);

        bm.set_bit(0);
        bm.set_bit(42);
        bm.set_bit(8000);
        vm.merge_from(&bm);

        assert_eq!(vm.count_bits_set(), 3);

        unsafe {
            shared_mem::free_shared(vm_ptr, COVERAGE_MAP_SIZE);
            shared_mem::free_shared(bm_ptr, COVERAGE_MAP_SIZE);
        }
    }
}