use crate::InstructionResult;
use core::{fmt, ptr};
use primitives::U256;
use std::vec::Vec;

use super::StackTr;
/// EVM interpreter stack limit (maximum number of words on the stack).
pub const STACK_LIMIT: usize = 1024;
10
/// EVM stack of 256-bit words, limited to [`STACK_LIMIT`] entries.
///
/// NOTE(review): the unsafe fast paths below (`dup`, `push_slice_`) rely on the
/// backing `Vec` having the full `STACK_LIMIT` capacity pre-allocated (see the
/// `debug_assert!`s on capacity); construct via `new`, not `invalid`, for use.
#[derive(Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
pub struct Stack {
    /// The underlying stack data; index 0 is the bottom, last element is the top.
    data: Vec<U256>,
}
18
19impl fmt::Display for Stack {
20 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
21 f.write_str("[")?;
22 for (i, x) in self.data.iter().enumerate() {
23 if i > 0 {
24 f.write_str(", ")?;
25 }
26 write!(f, "{x}")?;
27 }
28 f.write_str("]")
29 }
30}
31
impl Default for Stack {
    #[inline]
    fn default() -> Self {
        // Delegates to `new` so the default stack carries the full
        // pre-allocated `STACK_LIMIT` capacity the unsafe paths assume.
        Self::new()
    }
}
38
39impl Clone for Stack {
40 fn clone(&self) -> Self {
41 let mut new_stack = Self::new();
45 new_stack.data.extend_from_slice(&self.data);
46 new_stack
47 }
48}
49
50impl StackTr for Stack {
51 #[inline]
52 fn len(&self) -> usize {
53 self.len()
54 }
55
56 #[inline]
57 fn data(&self) -> &[U256] {
58 &self.data
59 }
60
61 #[inline]
62 fn clear(&mut self) {
63 self.data.clear();
64 }
65
66 #[inline]
67 fn popn<const N: usize>(&mut self) -> Option<[U256; N]> {
68 if self.len() < N {
69 return None;
70 }
71 Some(unsafe { self.popn::<N>() })
73 }
74
75 #[inline]
76 fn popn_top<const POPN: usize>(&mut self) -> Option<([U256; POPN], &mut U256)> {
77 if self.len() < POPN + 1 {
78 return None;
79 }
80 Some(unsafe { self.popn_top::<POPN>() })
82 }
83
84 #[inline]
85 fn exchange(&mut self, n: usize, m: usize) -> bool {
86 self.exchange(n, m)
87 }
88
89 #[inline]
90 fn dup(&mut self, n: usize) -> bool {
91 self.dup(n)
92 }
93
94 #[inline]
95 fn push(&mut self, value: U256) -> bool {
96 self.push(value)
97 }
98
99 #[inline]
100 fn push_slice(&mut self, slice: &[u8]) -> bool {
101 self.push_slice_(slice)
102 }
103}
104
impl Stack {
    /// Instantiates a new stack with the full [`STACK_LIMIT`] capacity
    /// pre-allocated, so pushes up to the limit never reallocate.
    #[inline]
    pub fn new() -> Self {
        Self {
            data: Vec::with_capacity(STACK_LIMIT),
        }
    }

    /// Creates an empty stack with zero capacity and no allocation.
    ///
    /// NOTE(review): the unsafe writers (`dup`, `push_slice_`) assume the full
    /// `STACK_LIMIT` capacity is present (see their capacity `debug_assert!`s),
    /// so a stack produced here must not be used for actual execution.
    #[inline]
    pub fn invalid() -> Self {
        Self { data: Vec::new() }
    }

    /// Returns the number of values currently on the stack.
    #[inline]
    pub fn len(&self) -> usize {
        self.data.len()
    }

    /// Returns whether the stack is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.data.is_empty()
    }

    /// Returns a reference to the underlying data buffer (bottom to top).
    #[inline]
    pub fn data(&self) -> &Vec<U256> {
        &self.data
    }

    /// Returns a mutable reference to the underlying data buffer.
    ///
    /// NOTE(review): shrinking this `Vec`'s capacity below `STACK_LIMIT`
    /// would break the invariant the unsafe paths rely on.
    #[inline]
    pub fn data_mut(&mut self) -> &mut Vec<U256> {
        &mut self.data
    }

    /// Consumes the stack, returning the underlying data.
    #[inline]
    pub fn into_data(self) -> Vec<U256> {
        self.data
    }

    /// Removes and returns the topmost value, or
    /// `InstructionResult::StackUnderflow` if the stack is empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn pop(&mut self) -> Result<U256, InstructionResult> {
        self.data.pop().ok_or(InstructionResult::StackUnderflow)
    }

    /// Removes and returns the topmost value without an emptiness check.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn pop_unsafe(&mut self) -> U256 {
        // `assume!` is a crate macro (defined elsewhere); presumably a
        // debug-checked optimizer hint — TODO(review) confirm its definition.
        assume!(!self.data.is_empty());
        self.data.pop().unwrap_unchecked()
    }

    /// Returns a mutable reference to the topmost value without a check.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack is not empty.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn top_unsafe(&mut self) -> &mut U256 {
        assume!(!self.data.is_empty());
        self.data.last_mut().unwrap_unchecked()
    }

    /// Pops `N` values without length checks; element `[0]` of the result is
    /// the former top of the stack (values popped in order).
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `N` values.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn<const N: usize>(&mut self) -> [U256; N] {
        assume!(self.data.len() >= N);
        core::array::from_fn(|_| unsafe { self.pop_unsafe() })
    }

    /// Pops `POPN` values and additionally returns a mutable reference to the
    /// new top, without length checks.
    ///
    /// # Safety
    ///
    /// The caller must ensure the stack holds at least `POPN + 1` values.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub unsafe fn popn_top<const POPN: usize>(&mut self) -> ([U256; POPN], &mut U256) {
        let result = self.popn::<POPN>();
        let top = self.top_unsafe();
        (result, top)
    }

    /// Pushes a value onto the stack; returns `false` on overflow
    /// (stack already at `STACK_LIMIT`), in which case the stack is unchanged.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn push(&mut self, value: U256) -> bool {
        // Capacity invariant from `new`; a `Vec::push` here never reallocates.
        debug_assert!(self.data.capacity() >= STACK_LIMIT);
        if self.data.len() == STACK_LIMIT {
            return false;
        }
        self.data.push(value);
        true
    }

    /// Returns the value `no_from_top` entries below the top (0 = top) without
    /// removing it, or `StackUnderflow` if the stack is too shallow.
    #[inline]
    pub fn peek(&self, no_from_top: usize) -> Result<U256, InstructionResult> {
        if self.data.len() > no_from_top {
            Ok(self.data[self.data.len() - no_from_top - 1])
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }

    /// Duplicates the `n`th value from the top onto the top of the stack
    /// (`n = 1` duplicates the top itself). Returns `false` on underflow or
    /// overflow. `n` must be non-zero.
    #[inline]
    #[must_use]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn dup(&mut self, n: usize) -> bool {
        assume!(n > 0, "attempted to dup 0");
        let len = self.data.len();
        if len < n || len + 1 > STACK_LIMIT {
            false
        } else {
            // SAFETY: `len + 1 <= STACK_LIMIT <= capacity` (capacity invariant
            // from `new`), so writing one word at index `len` stays in bounds,
            // and `ptr.sub(n)` is in-bounds since `n <= len`.
            unsafe {
                let ptr = self.data.as_mut_ptr().add(len);
                ptr::copy_nonoverlapping(ptr.sub(n), ptr, 1);
                self.data.set_len(len + 1);
            }
            true
        }
    }

    /// Swaps the top of the stack with the value `n` entries below it;
    /// equivalent to `exchange(0, n)`. Returns `false` on underflow.
    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn swap(&mut self, n: usize) -> bool {
        self.exchange(0, n)
    }

    /// Exchanges the values at depths `n` and `n + m` from the top
    /// (depth 0 = top). Returns `false` if the stack is too shallow.
    /// `m` must be non-zero so the two pointers never alias.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)]
    pub fn exchange(&mut self, n: usize, m: usize) -> bool {
        assume!(m > 0, "overlapping exchange");
        let len = self.data.len();
        let n_m_index = n + m;
        if n_m_index >= len {
            return false;
        }
        // SAFETY: `n < n_m_index < len`, so both `top.sub(n)` and
        // `top.sub(n_m_index)` point at live elements, and `m > 0` guarantees
        // they do not overlap (required by `swap_nonoverlapping`).
        unsafe {
            let top = self.data.as_mut_ptr().add(len - 1);
            core::ptr::swap_nonoverlapping(top.sub(n), top.sub(n_m_index), 1);
        }
        true
    }

    /// Pushes a big-endian byte slice onto the stack as one or more words,
    /// zero-padding the final partial word. Returns `StackOverflow` if the
    /// result would exceed `STACK_LIMIT`.
    #[inline]
    pub fn push_slice(&mut self, slice: &[u8]) -> Result<(), InstructionResult> {
        if self.push_slice_(slice) {
            Ok(())
        } else {
            Err(InstructionResult::StackOverflow)
        }
    }

    /// Implementation of [`Self::push_slice`]; returns `false` on overflow.
    ///
    /// Writes directly into the spare capacity as 64-bit limbs, least
    /// significant limb first (each 8-byte group decoded big-endian).
    /// NOTE(review): this assumes `U256` is laid out as 4 consecutive `u64`
    /// limbs in that order — implied by the `cast::<u64>()` writes; confirm
    /// against the `primitives::U256` representation.
    #[inline]
    fn push_slice_(&mut self, slice: &[u8]) -> bool {
        if slice.is_empty() {
            return true;
        }

        // Number of 32-byte words needed, rounding the last partial word up.
        let n_words = slice.len().div_ceil(32);
        let new_len = self.data.len() + n_words;
        if new_len > STACK_LIMIT {
            return false;
        }

        // Capacity invariant from `new`: the spare capacity can hold `n_words`.
        debug_assert!(self.data.capacity() >= new_len);

        // SAFETY: `new_len <= STACK_LIMIT <= capacity`, so all limb writes
        // below land inside the allocation; every limb of the `n_words` new
        // words is initialized before use (data limbs, then zero padding).
        unsafe {
            let dst = self.data.as_mut_ptr().add(self.data.len()).cast::<u64>();
            self.data.set_len(new_len);

            // `i` counts u64 limbs written (4 limbs per word).
            let mut i = 0;

            // Full 32-byte words first; `remainder()` does not consume `words`.
            let words = slice.chunks_exact(32);
            let partial_last_word = words.remainder();
            for word in words {
                // `rchunks_exact` walks from the end: least significant limb first.
                for l in word.rchunks_exact(8) {
                    dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                    i += 1;
                }
            }

            if partial_last_word.is_empty() {
                return true;
            }

            // Full 8-byte limbs of the final partial word.
            let limbs = partial_last_word.rchunks_exact(8);
            let partial_last_limb = limbs.remainder();
            for l in limbs {
                dst.add(i).write(u64::from_be_bytes(l.try_into().unwrap()));
                i += 1;
            }

            // Final partial limb, left-padded with zeros to 8 bytes.
            if !partial_last_limb.is_empty() {
                let mut tmp = [0u8; 8];
                tmp[8 - partial_last_limb.len()..].copy_from_slice(partial_last_limb);
                dst.add(i).write(u64::from_be_bytes(tmp));
                i += 1;
            }

            debug_assert_eq!(i.div_ceil(4), n_words, "wrote too much");

            // Zero the remaining (most significant) limbs of the last word.
            let m = i % 4;
            if m != 0 {
                dst.add(i).write_bytes(0, 4 - m);
            }
        }

        true
    }

    /// Overwrites the value `no_from_top` entries below the top (0 = top),
    /// or returns `StackUnderflow` if the stack is too shallow.
    #[inline]
    pub fn set(&mut self, no_from_top: usize, val: U256) -> Result<(), InstructionResult> {
        if self.data.len() > no_from_top {
            let len = self.data.len();
            self.data[len - no_from_top - 1] = val;
            Ok(())
        } else {
            Err(InstructionResult::StackUnderflow)
        }
    }
}
393
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for Stack {
    /// Deserializes a stack from a sequence of words, rejecting inputs longer
    /// than `STACK_LIMIT` and restoring the full capacity invariant.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let mut data = Vec::<U256>::deserialize(deserializer)?;
        let len = data.len();
        if len > STACK_LIMIT {
            return Err(serde::de::Error::custom(std::format!(
                "stack size exceeds limit: {} > {}",
                len,
                STACK_LIMIT
            )));
        }
        // Top up the allocation so the unsafe fast paths' capacity
        // assumption (`capacity >= STACK_LIMIT`) holds for this stack too.
        data.reserve(STACK_LIMIT - len);
        Ok(Self { data })
    }
}
412
#[cfg(test)]
mod tests {
    use super::*;

    // Runs `f` against a stack whose spare capacity has been poisoned with
    // `U256::MAX`, so tests surface any read of stale/uninitialized words
    // (e.g. missing zero-padding in `push_slice_`).
    fn run(f: impl FnOnce(&mut Stack)) {
        let mut stack = Stack::new();
        // SAFETY: `new` allocates exactly STACK_LIMIT capacity; the length is
        // restored to 0 before the closure runs, so no poisoned value is ever
        // observed through the safe API. Order of these three calls matters.
        unsafe {
            stack.data.set_len(STACK_LIMIT);
            stack.data.fill(U256::MAX);
            stack.data.set_len(0);
        }
        f(&mut stack);
    }

    #[test]
    fn push_slices() {
        // Empty slice pushes nothing.
        run(|stack| {
            stack.push_slice(b"").unwrap();
            assert!(stack.data.is_empty());
        });

        // One byte becomes one zero-padded word.
        run(|stack| {
            stack.push_slice(&[42]).unwrap();
            assert_eq!(stack.data, [U256::from(42)]);
        });

        // 16 bytes: partial word with full 8-byte limbs, no partial limb.
        let n = 0x1111_2222_3333_4444_5555_6666_7777_8888_u128;
        run(|stack| {
            stack.push_slice(&n.to_be_bytes()).unwrap();
            assert_eq!(stack.data, [U256::from(n)]);
        });

        // Two full 32-byte words.
        run(|stack| {
            let b = [U256::from(n).to_be_bytes::<32>(); 2].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::from(n); 2]);
        });

        // Full word followed by a single trailing byte (partial limb path).
        run(|stack| {
            let b = [&[0; 32][..], &[42u8]].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(42)]);
        });

        // Full word followed by a 16-byte partial word.
        run(|stack| {
            let b = [&[0; 32][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::from(n)]);
        });

        // Two full zero words followed by a partial word.
        run(|stack| {
            let b = [&[0; 64][..], &n.to_be_bytes()].concat();
            stack.push_slice(&b).unwrap();
            assert_eq!(stack.data, [U256::ZERO, U256::ZERO, U256::from(n)]);
        });
    }

    #[test]
    fn stack_clone() {
        // Cloning an empty stack preserves the STACK_LIMIT capacity.
        let empty_stack = Stack::new();
        let cloned_empty = empty_stack.clone();
        assert_eq!(empty_stack, cloned_empty);
        assert_eq!(cloned_empty.len(), 0);
        assert_eq!(cloned_empty.data().capacity(), STACK_LIMIT);

        // Cloning a partially filled stack copies contents and capacity.
        let mut partial_stack = Stack::new();
        for i in 0..10 {
            assert!(partial_stack.push(U256::from(i)));
        }
        let mut cloned_partial = partial_stack.clone();
        assert_eq!(partial_stack, cloned_partial);
        assert_eq!(cloned_partial.len(), 10);
        assert_eq!(cloned_partial.data().capacity(), STACK_LIMIT);

        // Clones are independent: pushing to one does not affect the other.
        assert!(cloned_partial.push(U256::from(100)));
        assert_ne!(partial_stack, cloned_partial);
        assert_eq!(partial_stack.len(), 10);
        assert_eq!(cloned_partial.len(), 11);

        // Cloning a full stack also preserves contents and capacity.
        let mut full_stack = Stack::new();
        for i in 0..STACK_LIMIT {
            assert!(full_stack.push(U256::from(i)));
        }
        let mut cloned_full = full_stack.clone();
        assert_eq!(full_stack, cloned_full);
        assert_eq!(cloned_full.len(), STACK_LIMIT);
        assert_eq!(cloned_full.data().capacity(), STACK_LIMIT);

        // Both the original and the clone reject pushes past the limit.
        assert!(!full_stack.push(U256::from(100)));
        assert!(!cloned_full.push(U256::from(100)));
    }
}
513}