revm_bytecode/legacy/
jump_map.rs

use bitvec::vec::BitVec;
use once_cell::race::OnceBox;
use primitives::hex;
use std::{fmt::Debug, sync::Arc};

/// A table of valid `jump` destinations. Cheap to clone and memory efficient, one bit per opcode.
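///
/// # Example
///
/// A minimal sketch of constructing and querying a table; the bitmap bytes
/// here are illustrative, not taken from real bytecode:
///
/// ```ignore
/// // Bits 0, 2, and 3 of the byte are set (0x0D = 0b0000_1101), so those
/// // program counters are valid jump destinations.
/// let table = JumpTable::from_slice(&[0x0D], 8);
/// assert!(table.is_valid(0));
/// assert!(!table.is_valid(1));
/// assert!(table.is_valid(3));
/// ```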
#[derive(Clone, PartialEq, Eq, Hash, Ord, PartialOrd)]
pub struct JumpTable {
    /// Actual bit vec
    pub table: Arc<BitVec<u8>>,
    /// Fast pointer that skips Arc overhead
    table_ptr: *const u8,
    /// Number of bits in the table
    pub len: usize,
}

#[cfg(feature = "serde")]
impl serde::Serialize for JumpTable {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.table.serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for JumpTable {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bitvec = BitVec::deserialize(deserializer)?;
        Ok(Self::new(bitvec))
    }
}

// SAFETY: BitVec data is immutable through Arc, pointer won't be invalidated
unsafe impl Send for JumpTable {}
unsafe impl Sync for JumpTable {}

impl Debug for JumpTable {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("JumpTable")
            .field("map", &hex::encode(self.table.as_raw_slice()))
            .finish()
    }
}

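// `default()` hands out clones of a single lazily-initialized empty table, so
// repeated default construction shares one `Arc` allocation instead of
// building a fresh `BitVec` each call.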
impl Default for JumpTable {
    #[inline]
    fn default() -> Self {
        static DEFAULT: OnceBox<JumpTable> = OnceBox::new();
        DEFAULT
            .get_or_init(|| Self::new(BitVec::default()).into())
            .clone()
    }
}

impl JumpTable {
    /// Creates a new `JumpTable` directly from an existing `BitVec`.
    pub fn new(jumps: BitVec<u8>) -> Self {
        let table = Arc::new(jumps);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    /// Gets the raw bytes of the jump map.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        self.table.as_raw_slice()
    }

    /// Constructs a jump map from raw bytes and a bit length.
    ///
    /// `bit_len` is the number of bits of the slice that are actually used.
    ///
    /// # Panics
    ///
    /// Panics if the number of bits in the slice is less than `bit_len`.
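    ///
    /// # Example
    ///
    /// A quick sketch of the length rule: one byte carries 8 bits, so a
    /// one-byte slice accepts any `bit_len` up to 8.
    ///
    /// ```ignore
    /// let table = JumpTable::from_slice(&[0b0000_0101], 3); // ok: 3 <= 8
    /// assert!(table.is_valid(0));
    /// assert!(table.is_valid(2));
    /// // JumpTable::from_slice(&[0x00], 10); // would panic: only 8 bits available
    /// ```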
    #[inline]
    pub fn from_slice(slice: &[u8], bit_len: usize) -> Self {
        const BYTE_LEN: usize = 8;
        assert!(
            slice.len() * BYTE_LEN >= bit_len,
            "slice bit length {} is less than bit_len {}",
            slice.len() * BYTE_LEN,
            bit_len
        );
        let mut bitvec = BitVec::from_slice(slice);
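        // SAFETY: the assert above guarantees the backing buffer holds at
        // least `bit_len` bits, so truncating the logical length stays in bounds.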
        unsafe { bitvec.set_len(bit_len) };

        let table = Arc::new(bitvec);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    /// Checks if `pc` is a valid jump destination.
    ///
    /// Uses the cached raw pointer and bit operations for faster access.
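    ///
    /// The lookup is plain bit indexing: `pc >> 3` selects the byte and
    /// `1 << (pc & 7)` masks the bit within it, matching `bitvec`'s default
    /// `Lsb0` ordering, where bit `i` of a byte sits at `1 << i`.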
    #[inline]
    pub fn is_valid(&self, pc: usize) -> bool {
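        // SAFETY: the short-circuiting bounds check keeps `pc >> 3` within the
        // backing byte slice `table_ptr` points into, and the `table` Arc keeps
        // that allocation alive for as long as `self` exists.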
        pc < self.len && unsafe { *self.table_ptr.add(pc >> 3) & (1 << (pc & 7)) != 0 }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[should_panic(expected = "slice bit length 8 is less than bit_len 10")]
    fn test_jump_table_from_slice_panic() {
        let slice = &[0x00];
        let _ = JumpTable::from_slice(slice, 10);
    }

    #[test]
    fn test_jump_table_from_slice() {
        let slice = &[0x00];
        let jumptable = JumpTable::from_slice(slice, 3);
        assert_eq!(jumptable.len, 3);
    }

    #[test]
    fn test_is_valid() {
        let jump_table = JumpTable::from_slice(&[0x0D, 0x06], 13);

        assert_eq!(jump_table.len, 13);

        assert!(jump_table.is_valid(0)); // valid
        assert!(!jump_table.is_valid(1));
        assert!(jump_table.is_valid(2)); // valid
        assert!(jump_table.is_valid(3)); // valid
        assert!(!jump_table.is_valid(4));
        assert!(!jump_table.is_valid(5));
        assert!(!jump_table.is_valid(6));
        assert!(!jump_table.is_valid(7));
        assert!(!jump_table.is_valid(8));
        assert!(jump_table.is_valid(9)); // valid
        assert!(jump_table.is_valid(10)); // valid
        assert!(!jump_table.is_valid(11));
        assert!(!jump_table.is_valid(12));
    }
}

#[cfg(test)]
mod bench_is_valid {
    use super::*;
    use std::time::Instant;

    const ITERATIONS: usize = 1_000_000;
    const TEST_SIZE: usize = 10_000;

    fn create_test_table() -> BitVec<u8> {
        let mut bitvec = BitVec::from_vec(vec![0u8; TEST_SIZE.div_ceil(8)]);
        bitvec.resize(TEST_SIZE, false);
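        // Mark every third position valid so roughly a third of lookups hit.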
        for i in (0..TEST_SIZE).step_by(3) {
            bitvec.set(i, true);
        }
        bitvec
    }

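    /// Baseline variant that dereferences the `Arc` and uses `bitvec`'s bit
    /// accessor on every lookup, for comparison against the cached-pointer
    /// fast path in `JumpTable::is_valid`.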
    #[derive(Clone)]
    pub(super) struct JumpTableWithArcDeref(pub Arc<BitVec<u8>>);

    impl JumpTableWithArcDeref {
        #[inline]
        pub(super) fn is_valid(&self, pc: usize) -> bool {
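            // SAFETY: the short-circuiting length check keeps `pc` in bounds
            // for `get_unchecked`.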
            pc < self.0.len() && unsafe { *self.0.get_unchecked(pc) }
        }
    }

    fn benchmark_implementation<F>(name: &str, table: &F, test_fn: impl Fn(&F, usize) -> bool)
    where
        F: Clone,
    {
        // Warmup
        for i in 0..10_000 {
            std::hint::black_box(test_fn(table, i % TEST_SIZE));
        }

        let start = Instant::now();
        let mut count = 0;

        for i in 0..ITERATIONS {
            if test_fn(table, i % TEST_SIZE) {
                count += 1;
            }
        }

        let duration = start.elapsed();
        let ns_per_op = duration.as_nanos() as f64 / ITERATIONS as f64;
        let ops_per_sec = ITERATIONS as f64 / duration.as_secs_f64();

        println!("{} Performance:", name);
        println!("  Time per op: {:.2} ns", ns_per_op);
        println!("  Ops per sec: {:.0}", ops_per_sec);
        println!("  True count: {}", count);
        println!();

        std::hint::black_box(count);
    }

    #[test]
    fn bench_is_valid() {
        println!("JumpTable is_valid() Benchmark Comparison");
        println!("=========================================");

        let bitvec = create_test_table();

        // Test cached pointer implementation
        let cached_table = JumpTable::new(bitvec.clone());
        benchmark_implementation("JumpTable (Cached Pointer)", &cached_table, |table, pc| {
            table.is_valid(pc)
        });

        // Test Arc deref implementation
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));
        benchmark_implementation("JumpTableWithArcDeref (Arc)", &arc_table, |table, pc| {
            table.is_valid(pc)
        });

        println!("Benchmark completed successfully!");
    }

    #[test]
    fn bench_different_access_patterns() {
        let bitvec = create_test_table();
        let cached_table = JumpTable::new(bitvec.clone());
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));

        println!("Access Pattern Comparison");
        println!("========================");

        // Sequential access
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid(i % TEST_SIZE));
        }
        let cached_sequential = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid(i % TEST_SIZE));
        }
        let arc_sequential = start.elapsed();

        // Random access
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid((i * 17) % TEST_SIZE));
        }
        let cached_random = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid((i * 17) % TEST_SIZE));
        }
        let arc_random = start.elapsed();

        println!("Sequential Access:");
        println!(
            "  Cached: {:.2} ns/op",
            cached_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Arc:    {:.2} ns/op",
            arc_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Speedup: {:.1}x",
            arc_sequential.as_nanos() as f64 / cached_sequential.as_nanos() as f64
        );

        println!();
        println!("Random Access:");
        println!(
            "  Cached: {:.2} ns/op",
            cached_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Arc:    {:.2} ns/op",
            arc_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Speedup: {:.1}x",
            arc_random.as_nanos() as f64 / cached_random.as_nanos() as f64
        );
    }
}