revm_bytecode/legacy/jump_map.rs

use bitvec::vec::BitVec;
use core::{
    cmp::Ordering,
    hash::{Hash, Hasher},
};
use once_cell::race::OnceBox;
use primitives::hex;
use std::{fmt::Debug, sync::Arc};

/// A table of valid jump destinations, stored as one bit per byte of bytecode.
#[derive(Clone, Eq)]
pub struct JumpTable {
    /// Cached raw pointer into the bit vector's backing storage, used by
    /// `is_valid` to skip the `Arc` dereference on the hot path.
    table_ptr: *const u8,
    /// Length of the table in bits.
    len: usize,
    /// The shared bit vector. Holding the `Arc` keeps the allocation alive,
    /// which is what makes `table_ptr` safe to read.
    table: Arc<BitVec<u8>>,
}

// SAFETY: `table_ptr` points into the allocation owned by `table`, which is
// immutable and lives as long as `self`, so sharing across threads is sound.
unsafe impl Send for JumpTable {}
unsafe impl Sync for JumpTable {}

impl PartialEq for JumpTable {
    fn eq(&self, other: &Self) -> bool {
        // `table_ptr` and `len` are derived from `table`, so comparing the
        // bit vectors alone is enough.
        self.table.eq(&other.table)
    }
}

impl Hash for JumpTable {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.table.hash(state);
    }
}

impl PartialOrd for JumpTable {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for JumpTable {
    fn cmp(&self, other: &Self) -> Ordering {
        self.table.cmp(&other.table)
    }
}

#[cfg(feature = "serde")]
impl serde::Serialize for JumpTable {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.table.serialize(serializer)
    }
}

#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for JumpTable {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let bitvec = BitVec::deserialize(deserializer)?;
        Ok(Self::new(bitvec))
    }
}

impl Debug for JumpTable {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("JumpTable")
            .field("map", &hex::encode(self.table.as_raw_slice()))
            .finish()
    }
}

impl Default for JumpTable {
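    /// Returns the shared empty table.
    ///
    /// Illustrative sketch (added for exposition): every call clones the
    /// same lazily initialized instance, so defaults are cheap and all
    /// compare equal:
    ///
    /// ```ignore
    /// assert_eq!(JumpTable::default(), JumpTable::default());
    /// ```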
    #[inline]
    fn default() -> Self {
        // Lazily create one shared empty table per process; later calls just
        // clone the cached `Arc`-backed instance.
        static DEFAULT: OnceBox<JumpTable> = OnceBox::new();
        DEFAULT
            .get_or_init(|| Self::new(BitVec::default()).into())
            .clone()
    }
}

impl JumpTable {
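    /// Creates a new jump table backed by the given bit vector.
    ///
    /// Illustrative sketch (added for exposition; import paths elided, see
    /// the unit tests below for a compiled example):
    ///
    /// ```ignore
    /// let mut bits: BitVec<u8> = BitVec::repeat(false, 2);
    /// bits.set(0, true);
    /// let table = JumpTable::new(bits);
    /// assert!(table.is_valid(0));
    /// assert!(!table.is_valid(1));
    /// ```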
    pub fn new(jumps: BitVec<u8>) -> Self {
        // Pin the storage behind an `Arc`, then cache the raw pointer and
        // bit length so `is_valid` can read without the indirection.
        let table = Arc::new(jumps);
        let table_ptr = table.as_raw_slice().as_ptr();
        let len = table.len();

        Self {
            table,
            table_ptr,
            len,
        }
    }

    /// Gets the raw byte representation of the table.
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        self.table.as_raw_slice()
    }

    /// Returns the length of the table in bits.
    #[inline]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the table has no bits.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

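    /// Builds a jump table from raw bytes, keeping only the first `bit_len`
    /// bits.
    ///
    /// # Panics
    ///
    /// Panics if `slice` holds fewer than `bit_len` bits.
    ///
    /// Illustrative sketch (added for exposition): `0x03` sets bits 0 and 1,
    /// and the table is then cut down to three bits:
    ///
    /// ```ignore
    /// let table = JumpTable::from_slice(&[0x03], 3);
    /// assert_eq!(table.len(), 3);
    /// assert!(table.is_valid(1));
    /// assert!(!table.is_valid(2));
    /// ```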
    #[inline]
    pub fn from_slice(slice: &[u8], bit_len: usize) -> Self {
        const BYTE_LEN: usize = 8;
        assert!(
            slice.len() * BYTE_LEN >= bit_len,
            "slice bit length {} is less than bit_len {}",
            slice.len() * BYTE_LEN,
            bit_len
        );
        let mut bitvec = BitVec::from_slice(slice);
        // SAFETY: the assertion above guarantees the backing buffer holds at
        // least `bit_len` bits.
        unsafe { bitvec.set_len(bit_len) };
        Self::new(bitvec)
    }

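    /// Returns `true` if `pc` is a valid jump destination.
    ///
    /// The table packs eight positions per byte, LSB first: position `pc`
    /// lives at byte `pc >> 3`, bit `pc & 7`. Illustrative sketch (added for
    /// exposition; `0x0D` = `0b0000_1101`, so bits 0, 2 and 3 are set):
    ///
    /// ```ignore
    /// let table = JumpTable::from_slice(&[0x0D], 8);
    /// assert!(table.is_valid(0));
    /// assert!(!table.is_valid(1));
    /// assert!(table.is_valid(3));
    /// ```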
    #[inline]
    pub fn is_valid(&self, pc: usize) -> bool {
        // SAFETY: the `pc < self.len` check guarantees byte `pc >> 3` lies
        // within the allocation behind `table_ptr`, and the `Arc` held in
        // `self.table` keeps that allocation alive.
        pc < self.len && unsafe { *self.table_ptr.add(pc >> 3) & (1 << (pc & 7)) != 0 }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    #[should_panic(expected = "slice bit length 8 is less than bit_len 10")]
    fn test_jump_table_from_slice_panic() {
        let slice = &[0x00];
        let _ = JumpTable::from_slice(slice, 10);
    }

    #[test]
    fn test_jump_table_from_slice() {
        let slice = &[0x00];
        let jumptable = JumpTable::from_slice(slice, 3);
        assert_eq!(jumptable.len, 3);
    }
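
    // Added sketch (not part of the original suite): exercises `new` and the
    // raw-byte round trip. Bit 4 lands in byte 0 (`0x10`) and bit 9 in
    // byte 1 (`0x02`), per the LSB-first layout `is_valid` relies on.
    #[test]
    fn test_new_roundtrip_sketch() {
        let mut bits: BitVec<u8> = BitVec::repeat(false, 16);
        bits.set(4, true);
        bits.set(9, true);
        let table = JumpTable::new(bits);
        assert_eq!(table.len(), 16);
        assert!(!table.is_empty());
        assert_eq!(table.as_slice(), &[0x10, 0x02]);
        assert!(table.is_valid(4));
        assert!(table.is_valid(9));
        assert!(!table.is_valid(5));
    }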

    #[test]
    fn test_is_valid() {
        // 0x0D = 0b0000_1101 -> bits 0, 2, 3 set; 0x06 = 0b0000_0110 ->
        // bits 9, 10 set (bit 8 is the low bit of the second byte).
        let jump_table = JumpTable::from_slice(&[0x0D, 0x06], 13);

        assert_eq!(jump_table.len, 13);

        assert!(jump_table.is_valid(0));
        assert!(!jump_table.is_valid(1));
        assert!(jump_table.is_valid(2));
        assert!(jump_table.is_valid(3));
        assert!(!jump_table.is_valid(4));
        assert!(!jump_table.is_valid(5));
        assert!(!jump_table.is_valid(6));
        assert!(!jump_table.is_valid(7));
        assert!(!jump_table.is_valid(8));
        assert!(jump_table.is_valid(9));
        assert!(jump_table.is_valid(10));
        assert!(!jump_table.is_valid(11));
        assert!(!jump_table.is_valid(12));
    }
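
    // Added sketch (not part of the original suite): cross-checks the
    // pointer-based fast path against plain `BitVec` indexing, which acts
    // as the reference implementation here.
    #[test]
    fn test_is_valid_matches_bitvec_sketch() {
        let bytes = [0xA5, 0x3C, 0x01];
        let table = JumpTable::from_slice(&bytes, 20);
        let reference = BitVec::<u8>::from_slice(&bytes);
        for pc in 0..24 {
            let expected = pc < 20 && reference[pc];
            assert_eq!(table.is_valid(pc), expected, "mismatch at pc={pc}");
        }
    }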
}

#[cfg(test)]
mod bench_is_valid {
    use super::*;
    use std::time::Instant;

    const ITERATIONS: usize = 1_000_000;
    const TEST_SIZE: usize = 10_000;

    fn create_test_table() -> BitVec<u8> {
        let mut bitvec = BitVec::from_vec(vec![0u8; TEST_SIZE.div_ceil(8)]);
        bitvec.resize(TEST_SIZE, false);
        // Mark every third position as a valid destination.
        for i in (0..TEST_SIZE).step_by(3) {
            bitvec.set(i, true);
        }
        bitvec
    }

    /// Baseline variant that goes through the `Arc` on every lookup, used to
    /// measure what the cached-pointer field buys us.
    #[derive(Clone)]
    pub(super) struct JumpTableWithArcDeref(pub Arc<BitVec<u8>>);

    impl JumpTableWithArcDeref {
        #[inline]
        pub(super) fn is_valid(&self, pc: usize) -> bool {
            // SAFETY: `pc` is bounds-checked against the vector length.
            pc < self.0.len() && unsafe { *self.0.get_unchecked(pc) }
        }
    }

    fn benchmark_implementation<F>(name: &str, table: &F, test_fn: impl Fn(&F, usize) -> bool)
    where
        F: Clone,
    {
        // Warm up caches and the branch predictor before timing.
        for i in 0..10_000 {
            std::hint::black_box(test_fn(table, i % TEST_SIZE));
        }

        // Timed loop: tally hits so the calls cannot be optimized away.
        let start = Instant::now();
        let mut count = 0;

        for i in 0..ITERATIONS {
            if test_fn(table, i % TEST_SIZE) {
                count += 1;
            }
        }

        let duration = start.elapsed();
        let ns_per_op = duration.as_nanos() as f64 / ITERATIONS as f64;
        let ops_per_sec = ITERATIONS as f64 / duration.as_secs_f64();

        println!("{name} Performance:");
        println!("  Time per op: {ns_per_op:.2} ns");
        println!("  Ops per sec: {ops_per_sec:.0}");
        println!("  True count: {count}");
        println!();

        std::hint::black_box(count);
    }

    #[test]
    fn bench_is_valid() {
        println!("JumpTable is_valid() Benchmark Comparison");
        println!("=========================================");

        let bitvec = create_test_table();

        // Cached-pointer implementation under test.
        let cached_table = JumpTable::new(bitvec.clone());
        benchmark_implementation("JumpTable (Cached Pointer)", &cached_table, |table, pc| {
            table.is_valid(pc)
        });

        // Arc-dereferencing baseline.
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));
        benchmark_implementation("JumpTableWithArcDeref (Arc)", &arc_table, |table, pc| {
            table.is_valid(pc)
        });

        println!("Benchmark completed successfully!");
    }

    #[test]
    fn bench_different_access_patterns() {
        let bitvec = create_test_table();
        let cached_table = JumpTable::new(bitvec.clone());
        let arc_table = JumpTableWithArcDeref(Arc::new(bitvec));

        println!("Access Pattern Comparison");
        println!("========================");

        // Sequential access: consecutive program counters.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid(i % TEST_SIZE));
        }
        let cached_sequential = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid(i % TEST_SIZE));
        }
        let arc_sequential = start.elapsed();

        // Strided access: a stride of 17 approximates random jumps.
        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(cached_table.is_valid((i * 17) % TEST_SIZE));
        }
        let cached_random = start.elapsed();

        let start = Instant::now();
        for i in 0..ITERATIONS {
            std::hint::black_box(arc_table.is_valid((i * 17) % TEST_SIZE));
        }
        let arc_random = start.elapsed();

        println!("Sequential Access:");
        println!(
            "  Cached: {:.2} ns/op",
            cached_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Arc: {:.2} ns/op",
            arc_sequential.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Speedup: {:.1}x",
            arc_sequential.as_nanos() as f64 / cached_sequential.as_nanos() as f64
        );

        println!();
        println!("Random Access:");
        println!(
            "  Cached: {:.2} ns/op",
            cached_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Arc: {:.2} ns/op",
            arc_random.as_nanos() as f64 / ITERATIONS as f64
        );
        println!(
            "  Speedup: {:.1}x",
            arc_random.as_nanos() as f64 / cached_random.as_nanos() as f64
        );
    }
}
335}