fuel_vm/interpreter/memory.rs

#![cfg(feature = "alloc")]

use super::{
    Interpreter,
    internal::inc_pc,
};
use crate::{
    constraints::reg_key::*,
    consts::*,
    error::SimpleResult,
};

use fuel_asm::{
    Imm12,
    Imm24,
    PanicReason,
    RegId,
};
use fuel_types::{
    Word,
    fmt_truncated_hex,
};

use core::{
    fmt,
    ops::Range,
};

#[cfg(any(test, feature = "test-helpers"))]
use core::ops::{
    Index,
    IndexMut,
    RangeFrom,
    RangeTo,
};

use crate::error::{
    IoResult,
    RuntimeError,
};
use alloc::vec::Vec;
use fuel_storage::{
    Mappable,
    StorageRead,
    StorageReadError,
};

#[cfg(test)]
mod tests;

#[cfg(test)]
mod impl_tests;

#[allow(non_snake_case)]
#[cfg(test)]
mod allocation_tests;

#[cfg(test)]
mod stack_tests;

/// The trait for the memory.
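/// Implemented automatically (via the blanket impl below) for any type that
/// can hand out shared and mutable references to a [`MemoryInstance`].
///
/// A minimal usage sketch; the import path is an assumption and the example
/// is not compiled as a doctest:
///
/// ```ignore
/// use fuel_vm::interpreter::{Memory, MemoryInstance};
///
/// // Generic code can accept any `Memory` and reach the underlying instance.
/// fn stack_len<M: Memory>(mem: &M) -> usize {
///     mem.as_ref().stack_raw().len() // `stack_raw` requires `test-helpers`
/// }
///
/// let mem = MemoryInstance::new();
/// assert_eq!(stack_len(&mem), 0);
/// ```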
pub trait Memory: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

impl<M> Memory for M where M: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

/// The memory of the VM, represented as stack, heap, and storage preload area.
#[derive(Clone, Eq)]
pub struct MemoryInstance {
    /// Stack. Grows upwards.
    stack: Vec<u8>,
    /// Heap. Grows downwards from MEM_SIZE.
    heap: Vec<u8>,
    /// Lowest allowed heap address, i.e. hp register value.
    /// This is needed since we can allocate extra heap for performance reasons.
    hp: usize,
}

impl Default for MemoryInstance {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for MemoryInstance {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Memory {{ stack: ")?;
        fmt_truncated_hex::<16>(&self.stack, f)?;
        write!(f, ", heap: ")?;
        let off = self.hp.saturating_sub(self.heap_offset());
        fmt_truncated_hex::<16>(&self.heap[off..], f)?;
        write!(f, ", hp: {} }}", self.hp)
    }
}

impl PartialEq for MemoryInstance {
    /// Equality comparison of the accessible memory.
    #[allow(clippy::arithmetic_side_effects)] // Safety: hp is kept valid everywhere
    fn eq(&self, other: &Self) -> bool {
        self.stack == other.stack && self.hp == other.hp && {
            let self_hs = self.hp - self.heap_offset();
            let other_hs = other.hp - other.heap_offset();
            self.heap[self_hs..] == other.heap[other_hs..]
        }
    }
}

impl AsRef<MemoryInstance> for MemoryInstance {
    fn as_ref(&self) -> &MemoryInstance {
        self
    }
}
impl AsMut<MemoryInstance> for MemoryInstance {
    fn as_mut(&mut self) -> &mut MemoryInstance {
        self
    }
}

impl MemoryInstance {
    /// Create a new VM memory.
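    ///
    /// The fresh instance has an empty stack and `hp` at the very top of the
    /// address space, so no byte is accessible yet. A behavioral sketch
    /// (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mem = MemoryInstance::new();
    /// assert!(mem.read(0u64, 1u64).is_err()); // nothing is accessible yet
    /// ```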
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            heap: Vec::new(),
            hp: MEM_SIZE,
        }
    }

    /// Resets memory to initial state, keeping the original allocations.
    pub fn reset(&mut self) {
        self.stack.truncate(0);
        self.hp = MEM_SIZE;
    }

    /// Offset of the heap section
    fn heap_offset(&self) -> usize {
        MEM_SIZE.saturating_sub(self.heap.len())
    }

    /// Grows the stack to be at least `new_sp` bytes.
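    ///
    /// Newly exposed bytes are zero-initialized. A hedged sketch of the
    /// expected behavior (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut mem = MemoryInstance::new();
    /// mem.grow_stack(64)?;
    /// assert_eq!(mem.read(0u64, 64u64)?, &[0u8; 64][..]);
    /// assert!(mem.grow_stack(VM_MAX_RAM + 1).is_err()); // beyond VM RAM
    /// ```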
    pub fn grow_stack(&mut self, new_sp: Word) -> Result<(), PanicReason> {
        if new_sp > VM_MAX_RAM {
            return Err(PanicReason::MemoryOverflow);
        }
        #[allow(clippy::cast_possible_truncation)] // Safety: VM_MAX_RAM fits in usize
        let new_sp = new_sp as usize;

        if new_sp > self.stack.len() {
            if new_sp > self.hp {
                return Err(PanicReason::MemoryGrowthOverlap)
            }

            self.stack.resize(new_sp, 0);
        }
        Ok(())
    }

    /// Grows the heap by `amount` bytes. Updates the hp register.
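    ///
    /// The register pair is threaded through so the cached `hp` field and the
    /// register file cannot diverge. Illustrative call shape only; obtaining
    /// `sp_reg`/`hp_reg` is left out because those types come from the
    /// register-key machinery:
    ///
    /// ```ignore
    /// // On success, `*hp_reg` (and the cached `hp`) drop by `amount`.
    /// memory.grow_heap_by(sp_reg, hp_reg, 32)?;
    /// ```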
    pub fn grow_heap_by(
        &mut self,
        sp_reg: Reg<SP>,
        mut hp_reg: RegMut<HP>,
        amount: Word,
    ) -> Result<(), PanicReason> {
        debug_assert_eq!(
            self.hp as Word, *hp_reg,
            "HP register changed without memory update"
        );

        let amount = usize::try_from(amount).map_err(|_| PanicReason::MemoryOverflow)?;
        let new_hp = self
            .hp
            .checked_sub(amount)
            .ok_or(PanicReason::MemoryOverflow)?;

        if (new_hp as Word) < *sp_reg {
            return Err(PanicReason::MemoryGrowthOverlap)
        }

        #[allow(clippy::arithmetic_side_effects)] // Safety: new_hp is in the heap
        let new_len = MEM_SIZE - new_hp;

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in the heap
        if self.heap.len() >= new_len {
            // No need to reallocate, but we need to zero the new space
            // in case it was used before a memory reset.
            let start = new_hp - self.heap_offset();
            let end = self.hp - self.heap_offset();
            self.heap[start..end].fill(0);
        } else {
            // Need to clear dirty memory before expanding it. An example:
            // Heap vector: [dirty, dirty, dirty, 0, 0, 0]
            //                                   /|\
            //                                    |
            //                                   HP
            //
            // If we copied from [0, old_len), we would copy the dirty memory
            // as well, ending up with:
            // Heap vector: [0, 0, dirty, dirty, dirty, 0, 0, 0]
            //              /|\
            //               |
            //              HP
            //
            // So we either need to clear the memory before copying,
            // or clear the dirty parts after copying.
            // Clearing before copying is the more readable solution.
            let end = self.hp.checked_sub(self.heap_offset());
            if let Some(end) = end {
                self.heap[..end].fill(0);
            }

            // Reallocation is needed.
            // To reduce frequent reallocations, allocate at least 256 bytes at once.
            // After that, double the allocation every time.
            let cap = new_len.next_power_of_two().clamp(256, MEM_SIZE);
            let old_len = self.heap.len();
            let prefix_zeroes = cap - old_len;
            self.heap.resize(cap, 0);
            self.heap.copy_within(..old_len, prefix_zeroes);
            self.heap[..prefix_zeroes].fill(0);
        }

        self.hp = new_hp;
        *hp_reg = new_hp as Word;

        // If the heap now extends into the region the stack occupied, truncate the stack
        self.stack.truncate(new_hp);

        Ok(())
    }

    /// Verify that the memory range is accessible and return it as a range.
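    ///
    /// Only the stack below `sp` and the heap at or above `hp` are accessible;
    /// ranges falling in the gap between them are rejected. Sketch (not
    /// compiled as a doctest):
    ///
    /// ```ignore
    /// let mut mem = MemoryInstance::new();
    /// mem.grow_stack(16)?;
    /// assert!(mem.verify(0u64, 16u64).is_ok());  // entirely inside the stack
    /// assert!(mem.verify(0u64, 17u64).is_err()); // crosses into the gap
    /// ```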
    pub fn verify<A: ToAddr, B: ToAddr>(
        &self,
        addr: A,
        count: B,
    ) -> Result<MemoryRange, PanicReason> {
        let start = addr.to_addr()?;
        let len = count.to_addr()?;
        let end = start.saturating_add(len);
        if end > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }

        if end <= self.stack.len() || start >= self.hp {
            Ok(MemoryRange(start..end))
        } else {
            Err(PanicReason::UninitalizedMemoryAccess)
        }
    }

    /// Verify a constant-sized memory range.
    pub fn verify_const<A: ToAddr, const C: usize>(
        &self,
        addr: A,
    ) -> Result<MemoryRange, PanicReason> {
        self.verify(addr, C)
    }

    /// Returns a reference to memory for reading, if possible.
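    ///
    /// The returned slice never crosses the stack/heap boundary, because
    /// `verify` only accepts ranges lying wholly in one region. Sketch
    /// (not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut mem = MemoryInstance::new();
    /// mem.grow_stack(8)?;
    /// mem.write_bytes_noownerchecks(0u64, [1u8, 2, 3, 4])?;
    /// assert_eq!(mem.read(0u64, 4u64)?, &[1, 2, 3, 4]);
    /// ```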
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn read<A: ToAddr, C: ToAddr>(
        &self,
        addr: A,
        count: C,
    ) -> Result<&[u8], PanicReason> {
        let range = self.verify(addr, count)?;

        if range.end() <= self.stack.len() {
            Ok(&self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Reads a constant-sized byte array from memory, if possible.
    pub fn read_bytes<A: ToAddr, const C: usize>(
        &self,
        at: A,
    ) -> Result<[u8; C], PanicReason> {
        let mut result = [0; C];
        result.copy_from_slice(self.read(at, C)?);
        Ok(result)
    }

    /// Gets write access to memory, if possible.
    /// Doesn't perform any ownership checks.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn write_noownerchecks<A: ToAddr, B: ToAddr>(
        &mut self,
        addr: A,
        len: B,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        if range.end() <= self.stack.len() {
            Ok(&mut self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&mut self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Writes a constant-sized byte array to memory, if possible.
    /// Doesn't perform any ownership checks.
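    ///
    /// Round-trip sketch with its constant-sized read counterpart (not
    /// compiled as a doctest):
    ///
    /// ```ignore
    /// let mut mem = MemoryInstance::new();
    /// mem.grow_stack(8)?;
    /// mem.write_bytes_noownerchecks(0u64, 42u64.to_be_bytes())?;
    /// assert_eq!(mem.read_bytes::<_, 8>(0u64)?, 42u64.to_be_bytes());
    /// ```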
    pub fn write_bytes_noownerchecks<A: ToAddr, const C: usize>(
        &mut self,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write_noownerchecks(addr, C)?.copy_from_slice(&data);
        Ok(())
    }

    /// Checks that memory is writable and returns a mutable slice to it.
    pub fn write<A: ToAddr, C: ToAddr>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        len: C,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        owner.verify_ownership(&range)?;
        self.write_noownerchecks(range.start(), range.len())
    }

    /// Writes a constant-sized byte array to memory, checking for ownership.
    pub fn write_bytes<A: ToAddr, const C: usize>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write(owner, addr, data.len())?.copy_from_slice(&data);
        Ok(())
    }

    /// Copies the memory from `src` to `dst`, verifying ownership.
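    ///
    /// Overlapping source and destination ranges are rejected outright rather
    /// than given `memmove` semantics. Sketch; constructing the
    /// `OwnershipRegisters` is omitted since its fields are crate-private:
    ///
    /// ```ignore
    /// // Copying [0, 8) over [4, 12) overlaps, so the copy is rejected
    /// // with `MemoryWriteOverlap`:
    /// assert!(mem.memcopy(4, 0, 8, owner).is_err());
    /// ```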
    #[inline]
    #[track_caller]
    pub fn memcopy(
        &mut self,
        dst: Word,
        src: Word,
        length: Word,
        owner: OwnershipRegisters,
    ) -> Result<(), PanicReason> {
        let dst_range = self.verify(dst, length)?;
        let src_range = self.verify(src, length)?;

        if dst_range.start() <= src_range.start() && src_range.start() < dst_range.end()
            || src_range.start() <= dst_range.start()
                && dst_range.start() < src_range.end()
            || dst_range.start() < src_range.end() && src_range.end() <= dst_range.end()
            || src_range.start() < dst_range.end() && dst_range.end() <= src_range.end()
        {
            return Err(PanicReason::MemoryWriteOverlap)
        }

        owner.verify_ownership(&dst_range)?;

        if src_range.end() <= self.stack.len() {
            if dst_range.end() <= self.stack.len() {
                self.stack
                    .copy_within(src_range.usizes(), dst_range.start());
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_end = dst_range.end() - self.heap_offset();

                let src_array = &self.stack[src_range.usizes()];
                let dst_array = &mut self.heap[dst_start..dst_end];
                dst_array.copy_from_slice(src_array);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else if src_range.start() >= self.heap_offset() {
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_start = src_range.start() - self.heap_offset();
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_end = src_range.end() - self.heap_offset();

            if dst_range.end() <= self.stack.len() {
                let src_array = &self.heap[src_start..src_end];

                let dst_array = &mut self.stack[dst_range.usizes()];
                dst_array.copy_from_slice(src_array);
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();

                self.heap.copy_within(src_start..src_end, dst_start);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else {
            unreachable!("Range was verified to be valid")
        }

        Ok(())
    }

    /// Memory access to the raw stack buffer.
    /// Note that for efficiency reasons this might not match the sp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn stack_raw(&self) -> &[u8] {
        &self.stack
    }

    /// Memory access to the raw heap buffer.
    /// Note that for efficiency reasons this might not match the hp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn heap_raw(&self) -> &[u8] {
        &self.heap
    }

    /// Returns a `MemoryRollbackData` that can be used to achieve the state of the
    /// `desired_memory_state` instance.
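    ///
    /// Together with [`Self::rollback`] this forms a snapshot/restore pair.
    /// Sketch of the intended round trip (not compiled as a doctest):
    ///
    /// ```ignore
    /// let old_state = mem.clone();
    /// // ... execution mutates `mem` ...
    /// if let Some(data) = mem.collect_rollback_data(&old_state) {
    ///     mem.rollback(&data);
    ///     assert_eq!(mem, old_state);
    /// }
    /// ```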
    pub fn collect_rollback_data(
        &self,
        desired_memory_state: &MemoryInstance,
    ) -> Option<MemoryRollbackData> {
        if self == desired_memory_state {
            return None
        }

        let sp = desired_memory_state.stack.len();
        let hp = desired_memory_state.hp;

        assert!(
            hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );

        let stack_changes =
            get_changes(&self.stack[..sp], &desired_memory_state.stack[..sp], 0);

        let heap_start = hp
            .checked_sub(self.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let heap = &self.heap[heap_start..];
        let desired_heap_start = hp
            .checked_sub(desired_memory_state.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let desired_heap = &desired_memory_state.heap[desired_heap_start..];

        let heap_changes = get_changes(heap, desired_heap, hp);

        Some(MemoryRollbackData {
            sp,
            hp,
            stack_changes,
            heap_changes,
        })
    }

    /// Rolls back the memory changes, returning the memory to the old state.
    pub fn rollback(&mut self, data: &MemoryRollbackData) {
        self.stack.resize(data.sp, 0);
        assert!(
            data.hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );
        self.hp = data.hp;

        for change in &data.stack_changes {
            self.stack[change.global_start
                ..change.global_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }

        let offset = self.heap_offset();
        for change in &data.heap_changes {
            let local_start = change
                .global_start
                .checked_sub(offset)
                .expect("Invalid offset");
            self.heap[local_start..local_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }
    }
}

fn get_changes(
    latest_array: &[u8],
    desired_array: &[u8],
    offset: usize,
) -> Vec<MemorySliceChange> {
    let mut changes = Vec::new();
    let mut range = None;
    for (i, (old, new)) in latest_array.iter().zip(desired_array.iter()).enumerate() {
        if old != new {
            range = match range {
                None => Some((i, 1usize)),
                Some((start, count)) => Some((start, count.saturating_add(1))),
            };
        } else if let Some((start, count)) = range.take() {
            changes.push(MemorySliceChange {
                global_start: offset.saturating_add(start),
                data: desired_array[start..start.saturating_add(count)].to_vec(),
            });
        }
    }
    if let Some((start, count)) = range.take() {
        changes.push(MemorySliceChange {
            global_start: offset.saturating_add(start),
            data: desired_array[start..start.saturating_add(count)].to_vec(),
        });
    }
    changes
}

#[derive(Debug, Clone)]
struct MemorySliceChange {
    global_start: usize,
    data: Vec<u8>,
}

/// The container for the data used to roll back memory changes.
#[derive(Debug, Clone)]
pub struct MemoryRollbackData {
    /// Desired stack pointer.
    sp: usize,
    /// Desired heap pointer. It can't be less than the current one.
    hp: usize,
    /// Changes to the stack to achieve the desired state of the stack.
    stack_changes: Vec<MemorySliceChange>,
    /// Changes to the heap to achieve the desired state of the heap.
    heap_changes: Vec<MemorySliceChange>,
}

#[cfg(feature = "test-helpers")]
impl From<Vec<u8>> for MemoryInstance {
    fn from(stack: Vec<u8>) -> Self {
        Self {
            stack,
            ..Self::new()
        }
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<Range<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: Range<usize>) -> &Self::Output {
        self.read(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeFrom<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
        &self[index.start..MEM_SIZE]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeTo<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeTo<usize>) -> &Self::Output {
        &self[0..index.end]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl IndexMut<Range<usize>> for MemoryInstance {
    fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
        self.write_noownerchecks(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

/// Used to handle `Word` to `usize` conversions for memory addresses,
/// as well as checking that the resulting value is within the VM RAM boundaries.
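///
/// Conversion sketch (not compiled as a doctest):
///
/// ```ignore
/// assert_eq!(16u64.to_addr(), Ok(16usize));
/// assert!(Word::MAX.to_addr().is_err()); // exceeds MEM_SIZE
/// ```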
pub trait ToAddr {
    /// Converts a value to `usize` used for memory addresses.
    /// Returns `Err` with `MemoryOverflow` if the resulting value doesn't fit in the VM
    /// memory. This can be used for both addresses and offsets.
    fn to_addr(self) -> Result<usize, PanicReason>;
}

impl ToAddr for usize {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }
        Ok(self)
    }
}

impl ToAddr for Word {
    fn to_addr(self) -> Result<usize, PanicReason> {
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}

#[cfg(feature = "test-helpers")]
/// Implemented for `i32` to allow integer literals. Panics on negative values.
impl ToAddr for i32 {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self < 0 {
            panic!("Negative memory address");
        }
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}

/// A range of memory. No guarantees are made about validity of access.
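///
/// Construction itself never fails; validity is only checked when the range
/// is used against a `MemoryInstance`. Sketch (not compiled as a doctest):
///
/// ```ignore
/// let r = MemoryRange::new(8, 8);
/// assert_eq!((r.start(), r.end(), r.len()), (8, 16, 8));
/// let (a, b) = r.split_at_offset(4);
/// assert_eq!((a.usizes(), b.usizes()), (8..12, 12..16));
/// ```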
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryRange(Range<usize>);

impl MemoryRange {
    /// Create a new memory range. Cannot panic, but the range may be invalid.
    pub const fn new(start: usize, len: usize) -> Self {
        Self(start..start.saturating_add(len))
    }

    /// Start of the range.
    pub fn start(&self) -> usize {
        self.0.start
    }

    /// End of the range. One past the last byte.
    pub fn end(&self) -> usize {
        self.0.end
    }

    /// Is the range empty?
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Length of the range.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns the range as a `usize` range.
    pub fn usizes(&self) -> Range<usize> {
        self.0.clone()
    }

    /// Returns the range as a `Word` range.
    pub fn words(&self) -> Range<Word> {
        self.0.start as Word..self.0.end as Word
    }

    /// Splits the range at the given relative offset. Panics if offset > range length.
    pub fn split_at_offset(self, at: usize) -> (Self, Self) {
        let mid = self.0.start.saturating_add(at);
        assert!(mid <= self.0.end);
        (Self(self.0.start..mid), Self(mid..self.0.end))
    }
}

impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
where
    M: Memory,
{
    /// Return the registers used to determine ownership.
    pub(crate) fn ownership_registers(&self) -> OwnershipRegisters {
        OwnershipRegisters::new(self)
    }

    pub(crate) fn stack_pointer_overflow<F>(&mut self, f: F, v: Word) -> SimpleResult<()>
    where
        F: FnOnce(Word, Word) -> (Word, bool),
    {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            _,
        ) = split_registers(&mut self.registers);
        stack_pointer_overflow(
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            f,
            v,
            self.memory.as_mut(),
        )
    }

    pub(crate) fn push_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            program_regs,
        ) = split_registers(&mut self.registers);
        push_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &program_regs,
            segment,
            bitmask,
        )
    }

    pub(crate) fn pop_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            mut program_regs,
        ) = split_registers(&mut self.registers);
        pop_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &mut program_regs,
            segment,
            bitmask,
        )
    }

    /// Expand the heap by `amount` bytes.
    pub fn allocate(&mut self, amount: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, .. }, _) = split_registers(&mut self.registers);
        self.memory.as_mut().grow_heap_by(sp.as_ref(), hp, amount)?;
        Ok(())
    }

    pub(crate) fn malloc(&mut self, a: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, pc, .. }, _) =
            split_registers(&mut self.registers);
        malloc(hp, sp.as_ref(), pc, a, self.memory.as_mut())
    }

    pub(crate) fn memclear(&mut self, a: Word, b: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memclear(self.memory.as_mut(), owner, self.registers.pc_mut(), a, b)
    }

    pub(crate) fn memcopy(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memcopy(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    pub(crate) fn memeq(
        &mut self,
        ra: RegId,
        b: Word,
        c: Word,
        d: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        memeq(self.memory.as_mut(), result, pc, b, c, d)
    }
}

/// Update the stack pointer, checking for validity first.
pub(crate) fn try_update_stack_pointer(
    mut sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    new_sp: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    if new_sp < *ssp {
        Err(PanicReason::MemoryOverflow.into())
    } else if new_sp > *hp {
        Err(PanicReason::MemoryGrowthOverlap.into())
    } else {
        *sp = new_sp;
        memory.grow_stack(new_sp)?;
        Ok(())
    }
}

pub(crate) fn stack_pointer_overflow<F>(
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    f: F,
    v: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()>
where
    F: FnOnce(Word, Word) -> (Word, bool),
{
    let (new_sp, overflow) = f(*sp, v);

    if overflow {
        return Err(PanicReason::MemoryOverflow.into())
    }

    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
    inc_pc(pc);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn push_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let write_size = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let write_at = *sp;
    // If this would overflow, the stack pointer update below will fail
    let new_sp = write_at.saturating_add(write_size);
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Write the registers to the stack
    let mut it = memory
        .write_noownerchecks(write_at, write_size)?
        .chunks_exact_mut(WORD_SIZE);
    for (i, reg) in program_regs.segment(segment).iter().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let item = it
                .next()
                .expect("Memory range mismatched with register count");
            item.copy_from_slice(&reg.to_be_bytes());
        }
    }

    inc_pc(pc);
    Ok(())
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn pop_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &mut ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let size_in_stack = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let new_sp = sp
        .checked_sub(size_in_stack)
        .ok_or(PanicReason::MemoryOverflow)?;
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Restore registers from the stack
    let mut it = memory.read(new_sp, size_in_stack)?.chunks_exact(WORD_SIZE);
    for (i, reg) in program_regs.segment_mut(segment).iter_mut().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let mut buf = [0u8; WORD_SIZE];
            buf.copy_from_slice(it.next().expect("Count mismatch"));
            *reg = Word::from_be_bytes(buf);
        }
    }

    inc_pc(pc);
    Ok(())
}

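// Generates the `store_<t>`/`load_<t>` opcode handlers for each integer width.
// Conceptually, `store_load!(u8)` produces methods shaped like this hedged
// sketch (not the literal macro expansion):
//
//     fn store_u8(&mut self, dst_addr, value, offset) -> SimpleResult<()> {
//         let addr = dst_addr + offset * size_of::<u8>(); // checked in reality
//         self.memory.write_bytes(owner, addr, (value as u8).to_be_bytes())
//     }
//     fn load_u8(&mut self, result, src_addr, offset) -> SimpleResult<()> {
//         let addr = src_addr + offset * size_of::<u8>(); // checked in reality
//         *result = u8::from_be_bytes(self.memory.read_bytes(addr)?) as u64;
//         Ok(())
//     }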
macro_rules! store_load {
    ($t:ident) => { paste::paste! {
        impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
        where
            M: Memory,
        {
            pub(crate) fn [< store_ $t >](
                &mut self,
                dst_addr: Word,
                value: Word,
                offset: Imm12,
            ) -> SimpleResult<()> {
                let owner = self.ownership_registers();
                let (SystemRegisters { pc, .. }, _) = split_registers(&mut self.registers);

                let offset = u64::from(offset)
                    .checked_mul(core::mem::size_of::<$t>() as u64)
                    .expect("u12 * size_of cannot overflow a Word");
                let addr = dst_addr.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;

                #[allow(clippy::cast_possible_truncation)] // We truncate here
                let value = value as $t;

                self.memory.as_mut().write_bytes(owner, addr, value.to_be_bytes())?;

                inc_pc(pc);
                Ok(())
            }

            pub(crate) fn [< load_ $t >](
                &mut self,
                result: RegId,
                src_addr: Word,
                offset: Imm12,
            ) -> SimpleResult<()> {
                let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
                let result = &mut w[WriteRegKey::try_from(result)?];

                let offset = u64::from(offset)
                    .checked_mul(core::mem::size_of::<$t>() as u64)
                    .expect("u12 * size_of cannot overflow a Word");
                let addr = src_addr.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;
                *result = $t::from_be_bytes(self.memory.as_ref().read_bytes(addr)?) as u64;

                inc_pc(pc);
                Ok(())
            }
        }
    }};
}

store_load!(u8);
store_load!(u16);
store_load!(u32);
store_load!(u64);

pub(crate) fn malloc(
    hp: RegMut<HP>,
    sp: Reg<SP>,
    pc: RegMut<PC>,
    amount: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    memory.grow_heap_by(sp, hp, amount)?;
    inc_pc(pc);
    Ok(())
}

pub(crate) fn memclear(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
) -> SimpleResult<()> {
    memory.write(owner, a, b)?.fill(0);
    inc_pc(pc);
    Ok(())
}

pub(crate) fn memcopy(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    dst: Word,
    src: Word,
    length: Word,
) -> SimpleResult<()> {
    memory.memcopy(dst, src, length, owner)?;

    inc_pc(pc);
    Ok(())
}

pub(crate) fn memeq(
    memory: &mut MemoryInstance,
    result: &mut Word,
    pc: RegMut<PC>,
    b: Word,
    c: Word,
    d: Word,
) -> SimpleResult<()> {
    *result = (memory.read(b, d)? == memory.read(c, d)?) as Word;
    inc_pc(pc);
    Ok(())
}

#[derive(Debug, Clone, Copy)]
pub struct OwnershipRegisters {
    pub(crate) sp: u64,
    pub(crate) ssp: u64,
    pub(crate) hp: u64,
    /// Previous heap pointer, used for external contexts.
    /// Otherwise, it's just the memory size.
    pub(crate) prev_hp: u64,
}

impl OwnershipRegisters {
    pub(crate) fn new<M, S, Tx, Ecal, V>(vm: &Interpreter<M, S, Tx, Ecal, V>) -> Self {
        let prev_hp = vm
            .frames
            .last()
            .map(|frame| frame.registers()[RegId::HP])
            .unwrap_or(VM_MAX_RAM);

        OwnershipRegisters {
            sp: vm.registers[RegId::SP],
            ssp: vm.registers[RegId::SSP],
            hp: vm.registers[RegId::HP],
            prev_hp,
        }
    }

    /// Create an instance that only allows stack writes.
    pub(crate) fn only_allow_stack_write(sp: u64, ssp: u64, hp: u64) -> Self {
        debug_assert!(sp <= VM_MAX_RAM);
        debug_assert!(ssp <= VM_MAX_RAM);
        debug_assert!(hp <= VM_MAX_RAM);
        debug_assert!(ssp <= sp);
        debug_assert!(sp <= hp);
        OwnershipRegisters {
            sp,
            ssp,
            hp,
            prev_hp: hp,
        }
    }

    /// Allows all writes; the whole memory is stack-allocated.
    #[cfg(test)]
    pub(crate) fn test_full_stack() -> Self {
        OwnershipRegisters {
            sp: VM_MAX_RAM,
            ssp: 0,
            hp: VM_MAX_RAM,
            prev_hp: VM_MAX_RAM,
        }
    }

    pub(crate) fn verify_ownership(
        &self,
        range: &MemoryRange,
    ) -> Result<(), PanicReason> {
        if self.has_ownership_range(&range.words()) {
            Ok(())
        } else {
            Err(PanicReason::MemoryOwnership)
        }
    }

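    /// Returns true iff the whole range lies in memory owned by the current
    /// context: either the writable stack area (`ssp..sp`) or the current
    /// call frame's heap allocation (`hp..prev_hp`).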
    pub fn has_ownership_range(&self, range: &Range<Word>) -> bool {
        self.has_ownership_stack(range) || self.has_ownership_heap(range)
    }

    /// An empty range is owned iff `range.start` is owned
    pub(crate) fn has_ownership_stack(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.ssp {
            return true
        }

        if !(self.ssp..self.sp).contains(&range.start) {
            return false
        }

        if range.end > VM_MAX_RAM {
            return false
        }

        (self.ssp..=self.sp).contains(&range.end)
    }

    /// An empty range is owned iff `range.start` is owned
    pub(crate) fn has_ownership_heap(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.hp {
            return true
        }

        if range.start < self.hp {
            return false
        }

        self.hp != self.prev_hp && range.end <= self.prev_hp
    }
}

/// Attempts to copy from storage to memory, filling the destination with zero bytes
/// when reading beyond the source boundaries. Performs overflow and memory range
/// checks, but no ownership checks. Note that if `src_offset` is larger than
/// `src_len`, the whole range will be zero-filled.
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy_from_storage_zero_fill<M, S>(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    storage: &S,
    dst_addr: Word,
    dst_len: Word,
    src_id: &M::Key,
    src_offset: u64,
    src_len: usize,
    not_found_error: PanicReason,
) -> IoResult<(), S::Error>
where
    M: Mappable,
    S: StorageRead<M>,
{
    let write_buffer = memory.write(owner, dst_addr, dst_len)?;
    let mut empty_offset = 0;

    if src_offset < src_len as Word {
        let src_offset =
            u32::try_from(src_offset).map_err(|_| PanicReason::MemoryOverflow)?;

        let src_read_length = src_len.saturating_sub(src_offset as usize);
        let src_read_length = src_read_length.min(write_buffer.len());

        let (src_read_buffer, _) = write_buffer.split_at_mut(src_read_length);
        let read_result = storage
            .read_zerofill(src_id, src_offset as usize, src_read_buffer)
            .map_err(RuntimeError::Storage)?;
        match read_result {
            Ok(_) => {
                empty_offset = src_read_length;
            }
            Err(StorageReadError::KeyNotFound) => {
                return Err(not_found_error.into());
            }
            Err(StorageReadError::OutOfBounds) => {
                empty_offset = 0; // zero-fill the whole range
            }
        }
    }

    write_buffer[empty_offset..].fill(0);

    Ok(())
}