fuel_vm/interpreter/
memory.rs

#![cfg(feature = "alloc")]

use super::{
    Interpreter,
    internal::inc_pc,
};
use crate::{
    constraints::reg_key::*,
    consts::*,
    error::SimpleResult,
};

use fuel_asm::{
    Imm12,
    Imm24,
    PanicReason,
    RegId,
};
use fuel_types::{
    Word,
    fmt_truncated_hex,
};

use core::{
    fmt,
    ops::Range,
};

#[cfg(any(test, feature = "test-helpers"))]
use core::ops::{
    Index,
    IndexMut,
    RangeFrom,
    RangeTo,
};

use crate::error::{
    IoResult,
    RuntimeError,
};
use alloc::vec::Vec;
use fuel_storage::{
    Mappable,
    StorageRead,
};

#[cfg(test)]
mod tests;

#[cfg(test)]
mod impl_tests;

#[allow(non_snake_case)]
#[cfg(test)]
mod allocation_tests;

#[cfg(test)]
mod stack_tests;

/// The trait for the memory.
pub trait Memory: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

impl<M> Memory for M where M: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}
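
// A test-only sketch (not part of the original source): thanks to the blanket
// impl above, anything that can view and mutate a `MemoryInstance`, including
// `MemoryInstance` itself, can be used where `M: Memory` is required.
#[cfg(test)]
mod memory_trait_example {
    use super::*;

    // A generic helper bounded by `Memory`, just to exercise the blanket impl.
    fn stack_len<M: Memory>(memory: &M) -> usize {
        memory.as_ref().stack_raw().len()
    }

    #[test]
    fn memory_instance_satisfies_memory() {
        let mut memory = MemoryInstance::new();
        memory.grow_stack(32).expect("within limits");
        assert_eq!(stack_len(&memory), 32);
    }
}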

/// The memory of the VM, represented as stack and heap.
#[derive(Clone, Eq)]
pub struct MemoryInstance {
    /// Stack. Grows upwards.
    stack: Vec<u8>,
    /// Heap. Grows downwards from MEM_SIZE.
    heap: Vec<u8>,
    /// Lowest allowed heap address, i.e. hp register value.
    /// This is needed since we can allocate extra heap for performance reasons.
    hp: usize,
}

impl Default for MemoryInstance {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for MemoryInstance {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Memory {{ stack: ")?;
        fmt_truncated_hex::<16>(&self.stack, f)?;
        write!(f, ", heap: ")?;
        let off = self.hp.saturating_sub(self.heap_offset());
        fmt_truncated_hex::<16>(&self.heap[off..], f)?;
        write!(f, ", hp: {} }}", self.hp)
    }
}

impl PartialEq for MemoryInstance {
    /// Equality comparison of the accessible memory.
    #[allow(clippy::arithmetic_side_effects)] // Safety: hp is kept valid everywhere
    fn eq(&self, other: &Self) -> bool {
        self.stack == other.stack && self.hp == other.hp && {
            let self_hs = self.hp - self.heap_offset();
            let other_hs = other.hp - other.heap_offset();
            self.heap[self_hs..] == other.heap[other_hs..]
        }
    }
}

impl AsRef<MemoryInstance> for MemoryInstance {
    fn as_ref(&self) -> &MemoryInstance {
        self
    }
}
impl AsMut<MemoryInstance> for MemoryInstance {
    fn as_mut(&mut self) -> &mut MemoryInstance {
        self
    }
}

impl MemoryInstance {
    /// Create a new VM memory.
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            heap: Vec::new(),
            hp: MEM_SIZE,
        }
    }

    /// Resets memory to initial state, keeping the original allocations.
    pub fn reset(&mut self) {
        self.stack.truncate(0);
        self.hp = MEM_SIZE;
    }

    /// Offset of the heap section
    fn heap_offset(&self) -> usize {
        MEM_SIZE.saturating_sub(self.heap.len())
    }

    /// Grows the stack to be at least `new_sp` bytes.
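    ///
    /// # Example
    ///
    /// A minimal sketch (assuming the `fuel_vm::interpreter` re-export of
    /// `MemoryInstance`); growing reserves zeroed stack space without touching
    /// any registers:
    ///
    /// ```
    /// use fuel_vm::interpreter::MemoryInstance;
    ///
    /// let mut memory = MemoryInstance::new();
    /// // Growing within VM_MAX_RAM succeeds and zero-fills the new space.
    /// assert!(memory.grow_stack(1024).is_ok());
    /// // Growing past VM_MAX_RAM is rejected.
    /// assert!(memory.grow_stack(u64::MAX).is_err());
    /// ```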
    pub fn grow_stack(&mut self, new_sp: Word) -> Result<(), PanicReason> {
        if new_sp > VM_MAX_RAM {
            return Err(PanicReason::MemoryOverflow);
        }
        #[allow(clippy::cast_possible_truncation)] // Safety: VM_MAX_RAM is usize
        let new_sp = new_sp as usize;

        if new_sp > self.stack.len() {
            if new_sp > self.hp {
                return Err(PanicReason::MemoryGrowthOverlap)
            }

            self.stack.resize(new_sp, 0);
        }
        Ok(())
    }

    /// Grows the heap by `amount` bytes. Updates hp register.
    pub fn grow_heap_by(
        &mut self,
        sp_reg: Reg<SP>,
        mut hp_reg: RegMut<HP>,
        amount: Word,
    ) -> Result<(), PanicReason> {
        debug_assert_eq!(
            self.hp as Word, *hp_reg,
            "HP register changed without memory update"
        );

        let amount = usize::try_from(amount).map_err(|_| PanicReason::MemoryOverflow)?;
        let new_hp = self
            .hp
            .checked_sub(amount)
            .ok_or(PanicReason::MemoryOverflow)?;

        if (new_hp as Word) < *sp_reg {
            return Err(PanicReason::MemoryGrowthOverlap)
        }

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        let new_len = MEM_SIZE - new_hp;

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        if self.heap.len() >= new_len {
            // No need to reallocate, but we need to zero the new space
            // in case it was used before a memory reset.
            let start = new_hp - self.heap_offset();
            let end = self.hp - self.heap_offset();
            self.heap[start..end].fill(0);
        } else {
            // Need to clear dirty memory before expanding it. An example:
            // Heap vector: [dirty, dirty, dirty, 0, 0, 0]
            //                                   /|\
            //                                    |
            //                                   HP
            //
            // If we copy from [0, old_len), it means we copy the dirty memory as well.
            // Ending up with:
            // Heap vector: [0, 0, dirty, dirty, dirty, 0, 0, 0]
            //              /|\
            //               |
            //              HP
            //
            // So we either need to clear the memory before copying,
            // or clear the dirty parts after copying.
            // Clearing before copying looks like the more readable solution.
            let end = self.hp.checked_sub(self.heap_offset());
            if let Some(end) = end {
                self.heap[..end].fill(0);
            }

            // Reallocation is needed.
            // To reduce frequent reallocations, allocate at least 256 bytes at once.
            // After that, double the allocation every time.
            let cap = new_len.next_power_of_two().clamp(256, MEM_SIZE);
            let old_len = self.heap.len();
            let prefix_zeroes = cap - old_len;
            self.heap.resize(cap, 0);
            self.heap.copy_within(..old_len, prefix_zeroes);
            self.heap[..prefix_zeroes].fill(0);
        }

        self.hp = new_hp;
        *hp_reg = new_hp as Word;

        // If the heap enters a region where the stack has been, truncate the stack
        self.stack.truncate(new_hp);

        Ok(())
    }

    /// Verify that the memory range is accessible and return it as a range.
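    ///
    /// # Example
    ///
    /// A sketch of the accessibility rule (assuming the `fuel_vm::interpreter`
    /// re-export): a range is accessible iff it lies entirely within the stack
    /// (below `sp`) or entirely within the heap (at or above `hp`):
    ///
    /// ```
    /// use fuel_vm::interpreter::MemoryInstance;
    ///
    /// let mut memory = MemoryInstance::new();
    /// memory.grow_stack(64).expect("within limits");
    /// assert!(memory.verify(0usize, 64usize).is_ok());
    /// // The uninitialized gap between stack and heap is not accessible.
    /// assert!(memory.verify(64usize, 1usize).is_err());
    /// ```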
    pub fn verify<A: ToAddr, B: ToAddr>(
        &self,
        addr: A,
        count: B,
    ) -> Result<MemoryRange, PanicReason> {
        let start = addr.to_addr()?;
        let len = count.to_addr()?;
        let end = start.saturating_add(len);
        if end > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }

        if end <= self.stack.len() || start >= self.hp {
            Ok(MemoryRange(start..end))
        } else {
            Err(PanicReason::UninitalizedMemoryAccess)
        }
    }

    /// Verify a constant-sized memory range.
    pub fn verify_const<A: ToAddr, const C: usize>(
        &self,
        addr: A,
    ) -> Result<MemoryRange, PanicReason> {
        self.verify(addr, C)
    }

    /// Returns a reference to memory for reading, if possible.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn read<A: ToAddr, C: ToAddr>(
        &self,
        addr: A,
        count: C,
    ) -> Result<&[u8], PanicReason> {
        let range = self.verify(addr, count)?;

        if range.end() <= self.stack.len() {
            Ok(&self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Reads a constant-sized byte array from memory, if possible.
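    ///
    /// # Example
    ///
    /// A round-trip sketch (assuming the `fuel_vm::interpreter` re-export);
    /// the array length selects the const generic `C`:
    ///
    /// ```
    /// use fuel_vm::interpreter::MemoryInstance;
    ///
    /// let mut memory = MemoryInstance::new();
    /// memory.grow_stack(8).expect("within limits");
    /// memory
    ///     .write_bytes_noownerchecks(0usize, 42u64.to_be_bytes())
    ///     .expect("in stack");
    /// let bytes: [u8; 8] = memory.read_bytes(0usize).expect("readable");
    /// assert_eq!(u64::from_be_bytes(bytes), 42);
    /// ```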
    pub fn read_bytes<A: ToAddr, const C: usize>(
        &self,
        at: A,
    ) -> Result<[u8; C], PanicReason> {
        let mut result = [0; C];
        result.copy_from_slice(self.read(at, C)?);
        Ok(result)
    }

    /// Gets write access to memory, if possible.
    /// Doesn't perform any ownership checks.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn write_noownerchecks<A: ToAddr, B: ToAddr>(
        &mut self,
        addr: A,
        len: B,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        if range.end() <= self.stack.len() {
            Ok(&mut self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&mut self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Writes a constant-sized byte array to memory, if possible.
    /// Doesn't perform any ownership checks.
    pub fn write_bytes_noownerchecks<A: ToAddr, const C: usize>(
        &mut self,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write_noownerchecks(addr, C)?.copy_from_slice(&data);
        Ok(())
    }

    /// Checks that memory is writable and returns a mutable slice to it.
    pub fn write<A: ToAddr, C: ToAddr>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        len: C,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        owner.verify_ownership(&range)?;
        self.write_noownerchecks(range.start(), range.len())
    }

    /// Writes a constant-sized byte array to memory, checking for ownership.
    pub fn write_bytes<A: ToAddr, const C: usize>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write(owner, addr, data.len())?.copy_from_slice(&data);
        Ok(())
    }

    /// Copies the memory from `src` to `dst` verifying ownership.
    #[inline]
    #[track_caller]
    pub fn memcopy(
        &mut self,
        dst: Word,
        src: Word,
        length: Word,
        owner: OwnershipRegisters,
    ) -> Result<(), PanicReason> {
        let dst_range = self.verify(dst, length)?;
        let src_range = self.verify(src, length)?;

        if dst_range.start() <= src_range.start() && src_range.start() < dst_range.end()
            || src_range.start() <= dst_range.start()
                && dst_range.start() < src_range.end()
            || dst_range.start() < src_range.end() && src_range.end() <= dst_range.end()
            || src_range.start() < dst_range.end() && dst_range.end() <= src_range.end()
        {
            return Err(PanicReason::MemoryWriteOverlap)
        }

        owner.verify_ownership(&dst_range)?;

        if src_range.end() <= self.stack.len() {
            if dst_range.end() <= self.stack.len() {
                self.stack
                    .copy_within(src_range.usizes(), dst_range.start());
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_end = dst_range.end() - self.heap_offset();

                let src_array = &self.stack[src_range.usizes()];
                let dst_array = &mut self.heap[dst_start..dst_end];
                dst_array.copy_from_slice(src_array);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else if src_range.start() >= self.heap_offset() {
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_start = src_range.start() - self.heap_offset();
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_end = src_range.end() - self.heap_offset();

            if dst_range.end() <= self.stack.len() {
                let src_array = &self.heap[src_start..src_end];

                let dst_array = &mut self.stack[dst_range.usizes()];
                dst_array.copy_from_slice(src_array);
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();

                self.heap.copy_within(src_start..src_end, dst_start);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else {
            unreachable!("Range was verified to be valid")
        }

        Ok(())
    }

    /// Memory access to the raw stack buffer.
    /// Note that for efficiency reasons this might not match the sp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn stack_raw(&self) -> &[u8] {
        &self.stack
    }

    /// Memory access to the raw heap buffer.
    /// Note that for efficiency reasons this might not match the hp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn heap_raw(&self) -> &[u8] {
        &self.heap
    }

    /// Returns a `MemoryRollbackData` that can be used to achieve the state of the
    /// `desired_memory_state` instance.
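    ///
    /// # Example
    ///
    /// A snapshot/rollback round-trip sketch (assuming the
    /// `fuel_vm::interpreter` re-export):
    ///
    /// ```
    /// use fuel_vm::interpreter::MemoryInstance;
    ///
    /// let mut memory = MemoryInstance::new();
    /// memory.grow_stack(8).expect("within limits");
    /// memory.write_bytes_noownerchecks(0usize, [1u8; 8]).expect("in stack");
    /// let snapshot = memory.clone();
    ///
    /// memory.write_bytes_noownerchecks(0usize, [2u8; 8]).expect("in stack");
    /// let rollback = memory
    ///     .collect_rollback_data(&snapshot)
    ///     .expect("memories differ");
    /// memory.rollback(&rollback);
    /// assert_eq!(memory, snapshot);
    /// ```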
    pub fn collect_rollback_data(
        &self,
        desired_memory_state: &MemoryInstance,
    ) -> Option<MemoryRollbackData> {
        if self == desired_memory_state {
            return None
        }

        let sp = desired_memory_state.stack.len();
        let hp = desired_memory_state.hp;

        assert!(
            hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );

        let stack_changes =
            get_changes(&self.stack[..sp], &desired_memory_state.stack[..sp], 0);

        let heap_start = hp
            .checked_sub(self.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let heap = &self.heap[heap_start..];
        let desired_heap_start = hp
            .checked_sub(desired_memory_state.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let desired_heap = &desired_memory_state.heap[desired_heap_start..];

        let heap_changes = get_changes(heap, desired_heap, hp);

        Some(MemoryRollbackData {
            sp,
            hp,
            stack_changes,
            heap_changes,
        })
    }

    /// Rolls back the memory changes, returning the memory to the old state.
    pub fn rollback(&mut self, data: &MemoryRollbackData) {
        self.stack.resize(data.sp, 0);
        assert!(
            data.hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );
        self.hp = data.hp;

        for change in &data.stack_changes {
            self.stack[change.global_start
                ..change.global_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }

        let offset = self.heap_offset();
        for change in &data.heap_changes {
            let local_start = change
                .global_start
                .checked_sub(offset)
                .expect("Invalid offset");
            self.heap[local_start..local_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }
    }
}
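
// Test-only usage sketches for the growth and copy APIs above. These assume
// the `Reg::new`/`RegMut::new` constructors from `reg_key` and the test-only
// `OwnershipRegisters::test_full_stack` helper defined later in this file.
#[cfg(test)]
mod usage_examples {
    use super::*;

    #[test]
    fn heap_growth_moves_hp_down() {
        let mut memory = MemoryInstance::new();
        let sp: Word = 0;
        let mut hp: Word = MEM_SIZE as Word;
        memory
            .grow_heap_by(Reg::new(&sp), RegMut::new(&mut hp), 100)
            .expect("heap fits");
        assert_eq!(hp, MEM_SIZE as Word - 100);
        // The freshly allocated heap region is zeroed and readable.
        assert_eq!(memory.read(hp, 100u64).expect("readable"), &[0u8; 100][..]);
    }

    #[test]
    fn overlapping_memcopy_is_rejected() {
        let mut memory = MemoryInstance::new();
        memory.grow_stack(128).expect("within limits");
        let owner = OwnershipRegisters::test_full_stack();
        // Disjoint ranges copy fine.
        memory.memcopy(64, 0, 32, owner).expect("disjoint copy");
        // Overlapping source and destination must fail.
        assert!(memory.memcopy(8, 0, 32, owner).is_err());
    }

    #[test]
    fn reset_restores_equality_with_fresh_memory() {
        let mut memory = MemoryInstance::new();
        let sp: Word = 0;
        let mut hp: Word = MEM_SIZE as Word;
        memory
            .grow_heap_by(Reg::new(&sp), RegMut::new(&mut hp), 100)
            .expect("heap fits");
        memory.reset();
        // `PartialEq` only inspects the accessible region, so the retained
        // (but now inaccessible) heap allocation does not matter.
        assert_eq!(memory, MemoryInstance::new());
    }
}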

fn get_changes(
    latest_array: &[u8],
    desired_array: &[u8],
    offset: usize,
) -> Vec<MemorySliceChange> {
    let mut changes = Vec::new();
    let mut range = None;
    for (i, (old, new)) in latest_array.iter().zip(desired_array.iter()).enumerate() {
        if old != new {
            range = match range {
                None => Some((i, 1usize)),
                Some((start, count)) => Some((start, count.saturating_add(1))),
            };
        } else if let Some((start, count)) = range.take() {
            changes.push(MemorySliceChange {
                global_start: offset.saturating_add(start),
                data: desired_array[start..start.saturating_add(count)].to_vec(),
            });
        }
    }
    if let Some((start, count)) = range.take() {
        changes.push(MemorySliceChange {
            global_start: offset.saturating_add(start),
            data: desired_array[start..start.saturating_add(count)].to_vec(),
        });
    }
    changes
}
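
// A test-only sketch of the diff extraction above: each contiguous run of
// differing bytes becomes one `MemorySliceChange`, with `global_start`
// shifted by the provided offset.
#[cfg(test)]
mod get_changes_example {
    use super::*;

    #[test]
    fn contiguous_runs_become_single_changes() {
        let latest = [0u8, 1, 1, 0, 1];
        let desired = [0u8, 0, 0, 0, 0];
        let changes = get_changes(&latest, &desired, 10);
        assert_eq!(changes.len(), 2);
        // Bytes 1..3 differ as one run...
        assert_eq!(changes[0].global_start, 11);
        assert_eq!(changes[0].data, vec![0u8, 0]);
        // ...and byte 4 differs on its own.
        assert_eq!(changes[1].global_start, 14);
        assert_eq!(changes[1].data, vec![0u8]);
    }
}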

#[derive(Debug, Clone)]
struct MemorySliceChange {
    global_start: usize,
    data: Vec<u8>,
}

/// The container for the data used to roll back memory changes.
#[derive(Debug, Clone)]
pub struct MemoryRollbackData {
    /// Desired stack pointer.
    sp: usize,
    /// Desired heap pointer. It can't be less than the current one.
    hp: usize,
    /// Changes to the stack to achieve the desired state of the stack.
    stack_changes: Vec<MemorySliceChange>,
    /// Changes to the heap to achieve the desired state of the heap.
    heap_changes: Vec<MemorySliceChange>,
}

#[cfg(feature = "test-helpers")]
impl From<Vec<u8>> for MemoryInstance {
    fn from(stack: Vec<u8>) -> Self {
        Self {
            stack,
            ..Self::new()
        }
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<Range<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: Range<usize>) -> &Self::Output {
        self.read(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeFrom<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
        &self[index.start..MEM_SIZE]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeTo<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeTo<usize>) -> &Self::Output {
        &self[0..index.end]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl IndexMut<Range<usize>> for MemoryInstance {
    fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
        self.write_noownerchecks(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

/// Used to handle `Word` to `usize` conversions for memory addresses,
/// as well as checking that the resulting value is within the VM RAM boundaries.
pub trait ToAddr {
    /// Converts a value to `usize` used for memory addresses.
    /// Returns `Err` with `MemoryOverflow` if the resulting value doesn't fit in the
    /// VM memory. This can be used for both addresses and offsets.
    fn to_addr(self) -> Result<usize, PanicReason>;
}

impl ToAddr for usize {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }
        Ok(self)
    }
}

impl ToAddr for Word {
    fn to_addr(self) -> Result<usize, PanicReason> {
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}

#[cfg(feature = "test-helpers")]
/// Implemented for `i32` to allow integer literals. Panics on negative values.
impl ToAddr for i32 {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self < 0 {
            panic!("Negative memory address");
        }
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}
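
// A test-only sketch of the `ToAddr` conversions: in-range values convert to
// `usize`, and anything beyond `MEM_SIZE` is rejected with `MemoryOverflow`.
#[cfg(test)]
mod to_addr_example {
    use super::*;

    #[test]
    fn word_and_usize_conversions() {
        assert_eq!(4096u64.to_addr(), Ok(4096usize));
        // The end-exclusive upper bound itself is still allowed.
        assert_eq!(MEM_SIZE.to_addr(), Ok(MEM_SIZE));
        assert!(Word::MAX.to_addr().is_err());
    }
}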

/// A range of memory. No guarantees are made about validity of access.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryRange(Range<usize>);

impl MemoryRange {
    /// Create a new memory range. Cannot panic, but the range may be invalid.
    pub const fn new(start: usize, len: usize) -> Self {
        Self(start..start.saturating_add(len))
    }

    /// Start of the range.
    pub fn start(&self) -> usize {
        self.0.start
    }

    /// End of the range. One past the last byte.
    pub fn end(&self) -> usize {
        self.0.end
    }

    /// Is the range empty?
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Length of the range.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns the range as a `usize` range.
    pub fn usizes(&self) -> Range<usize> {
        self.0.clone()
    }

    /// Returns the range as a `Word` range.
    pub fn words(&self) -> Range<Word> {
        self.0.start as Word..self.0.end as Word
    }
    /// Splits the range at the given relative offset. Panics if `at` exceeds
    /// the range length.
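    ///
    /// # Example
    ///
    /// A small sketch (assuming the `fuel_vm::interpreter` re-export of
    /// `MemoryRange`):
    ///
    /// ```
    /// use fuel_vm::interpreter::MemoryRange;
    ///
    /// let range = MemoryRange::new(100, 8);
    /// let (a, b) = range.split_at_offset(3);
    /// assert_eq!(a.usizes(), 100..103);
    /// assert_eq!(b.usizes(), 103..108);
    /// ```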
    pub fn split_at_offset(self, at: usize) -> (Self, Self) {
        let mid = self.0.start.saturating_add(at);
        assert!(mid <= self.0.end);
        (Self(self.0.start..mid), Self(mid..self.0.end))
    }
}

impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
where
    M: Memory,
{
    /// Return the registers used to determine ownership.
    pub(crate) fn ownership_registers(&self) -> OwnershipRegisters {
        OwnershipRegisters::new(self)
    }

    pub(crate) fn stack_pointer_overflow<F>(&mut self, f: F, v: Word) -> SimpleResult<()>
    where
        F: FnOnce(Word, Word) -> (Word, bool),
    {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            _,
        ) = split_registers(&mut self.registers);
        stack_pointer_overflow(
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            f,
            v,
            self.memory.as_mut(),
        )
    }

    pub(crate) fn push_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            program_regs,
        ) = split_registers(&mut self.registers);
        push_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &program_regs,
            segment,
            bitmask,
        )
    }

    pub(crate) fn pop_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            mut program_regs,
        ) = split_registers(&mut self.registers);
        pop_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &mut program_regs,
            segment,
            bitmask,
        )
    }

    /// Expand heap by `amount` bytes.
    pub fn allocate(&mut self, amount: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, .. }, _) = split_registers(&mut self.registers);
        self.memory.as_mut().grow_heap_by(sp.as_ref(), hp, amount)?;
        Ok(())
    }

    pub(crate) fn malloc(&mut self, a: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, pc, .. }, _) =
            split_registers(&mut self.registers);
        malloc(hp, sp.as_ref(), pc, a, self.memory.as_mut())
    }

    pub(crate) fn memclear(&mut self, a: Word, b: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memclear(self.memory.as_mut(), owner, self.registers.pc_mut(), a, b)
    }

    pub(crate) fn memcopy(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memcopy(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    pub(crate) fn memeq(
        &mut self,
        ra: RegId,
        b: Word,
        c: Word,
        d: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        memeq(self.memory.as_mut(), result, pc, b, c, d)
    }
}

/// Update stack pointer, checking for validity first.
pub(crate) fn try_update_stack_pointer(
    mut sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    new_sp: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    if new_sp < *ssp {
        Err(PanicReason::MemoryOverflow.into())
    } else if new_sp > *hp {
        Err(PanicReason::MemoryGrowthOverlap.into())
    } else {
        *sp = new_sp;
        memory.grow_stack(new_sp)?;
        Ok(())
    }
}

pub(crate) fn stack_pointer_overflow<F>(
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    f: F,
    v: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()>
where
    F: FnOnce(Word, Word) -> (Word, bool),
{
    let (new_sp, overflow) = f(*sp, v);

    if overflow {
        return Err(PanicReason::MemoryOverflow.into())
    }

    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
    Ok(inc_pc(pc)?)
}
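
// A test-only sketch of `stack_pointer_overflow`, which applies an arithmetic
// operation to `sp` and grows the stack accordingly. It assumes the
// `Reg::new`/`RegMut::new` constructors from `reg_key`.
#[cfg(test)]
mod stack_pointer_overflow_example {
    use super::*;

    #[test]
    fn sp_update_grows_stack_and_advances_pc() {
        let mut memory = MemoryInstance::new();
        let mut sp: Word = 0;
        let ssp: Word = 0;
        let hp: Word = VM_MAX_RAM;
        let mut pc: Word = 0;
        stack_pointer_overflow(
            RegMut::new(&mut sp),
            Reg::new(&ssp),
            Reg::new(&hp),
            RegMut::new(&mut pc),
            Word::overflowing_add,
            64,
            &mut memory,
        )
        .expect("valid growth");
        assert_eq!(sp, 64);
        // The program counter advances by one instruction on success.
        assert_eq!(pc, 4);
    }
}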

#[allow(clippy::too_many_arguments)]
pub(crate) fn push_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let write_size = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let write_at = *sp;
    // If this would overflow, the stack pointer update below will fail
    let new_sp = write_at.saturating_add(write_size);
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Write the registers to the stack
    let mut it = memory
        .write_noownerchecks(write_at, write_size)?
        .chunks_exact_mut(WORD_SIZE);
    for (i, reg) in program_regs.segment(segment).iter().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let item = it
                .next()
                .expect("Memory range mismatched with register count");
            item.copy_from_slice(&reg.to_be_bytes());
        }
    }

    Ok(inc_pc(pc)?)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn pop_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &mut ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let size_in_stack = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let new_sp = sp
        .checked_sub(size_in_stack)
        .ok_or(PanicReason::MemoryOverflow)?;
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Restore registers from the stack
    let mut it = memory.read(new_sp, size_in_stack)?.chunks_exact(WORD_SIZE);
    for (i, reg) in program_regs.segment_mut(segment).iter_mut().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let mut buf = [0u8; WORD_SIZE];
            buf.copy_from_slice(it.next().expect("Count mismatch"));
            *reg = Word::from_be_bytes(buf);
        }
    }

    Ok(inc_pc(pc)?)
}

macro_rules! store_load {
    ($t:ident) => { paste::paste! {
        impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
        where
            M: Memory,
        {
            pub(crate) fn [< store_ $t >](
                &mut self,
                dst_addr: Word,
                value: Word,
                offset: Imm12,
            ) -> SimpleResult<()> {
                let owner = self.ownership_registers();
                let (SystemRegisters { pc, .. }, _) = split_registers(&mut self.registers);

                let offset = u64::from(offset)
                    .checked_mul(core::mem::size_of::<$t>() as u64)
                    .expect("u12 * size_of cannot overflow a Word");
                let addr = dst_addr.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;

                #[allow(clippy::cast_possible_truncation)] // We truncate here
                let value = value as $t;

                self.memory.as_mut().write_bytes(owner, addr, value.to_be_bytes())?;

                Ok(inc_pc(pc)?)
            }

            pub(crate) fn [< load_ $t >](
                &mut self,
                result: RegId,
                src_addr: Word,
                offset: Imm12,
            ) -> SimpleResult<()> {
                let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
                let result = &mut w[WriteRegKey::try_from(result)?];

                let offset = u64::from(offset)
                    .checked_mul(core::mem::size_of::<$t>() as u64)
                    .expect("u12 * size_of cannot overflow a Word");
                let addr = src_addr.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;
                *result = $t::from_be_bytes(self.memory.as_ref().read_bytes(addr)?) as u64;

                Ok(inc_pc(pc)?)
            }
        }
    }};
}

store_load!(u8);
store_load!(u16);
store_load!(u32);
store_load!(u64);

pub(crate) fn malloc(
    hp: RegMut<HP>,
    sp: Reg<SP>,
    pc: RegMut<PC>,
    amount: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    memory.grow_heap_by(sp, hp, amount)?;
    Ok(inc_pc(pc)?)
}

pub(crate) fn memclear(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
) -> SimpleResult<()> {
    memory.write(owner, a, b)?.fill(0);
    Ok(inc_pc(pc)?)
}

pub(crate) fn memcopy(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    dst: Word,
    src: Word,
    length: Word,
) -> SimpleResult<()> {
    memory.memcopy(dst, src, length, owner)?;

    Ok(inc_pc(pc)?)
}

pub(crate) fn memeq(
    memory: &mut MemoryInstance,
    result: &mut Word,
    pc: RegMut<PC>,
    b: Word,
    c: Word,
    d: Word,
) -> SimpleResult<()> {
    *result = (memory.read(b, d)? == memory.read(c, d)?) as Word;
    Ok(inc_pc(pc)?)
}
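
// A test-only sketch of `memeq`: the result register is set to 1 iff the two
// regions compare equal. Assumes the `RegMut::new` constructor from `reg_key`.
#[cfg(test)]
mod memeq_example {
    use super::*;

    #[test]
    fn equal_regions_set_result_to_one() {
        let mut memory = MemoryInstance::new();
        memory.grow_stack(64).expect("within limits");
        memory
            .write_bytes_noownerchecks(0usize, [7u8; 8])
            .expect("in stack");
        memory
            .write_bytes_noownerchecks(32usize, [7u8; 8])
            .expect("in stack");
        let mut result: Word = 0;
        let mut pc: Word = 0;
        memeq(&mut memory, &mut result, RegMut::new(&mut pc), 0, 32, 8)
            .expect("readable ranges");
        assert_eq!(result, 1);
    }
}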

#[derive(Debug, Clone, Copy)]
pub struct OwnershipRegisters {
    pub(crate) sp: u64,
    pub(crate) ssp: u64,
    pub(crate) hp: u64,
    /// Previous heap pointer, used for external contexts.
    /// Otherwise, it's just memory size.
    pub(crate) prev_hp: u64,
}

impl OwnershipRegisters {
    pub(crate) fn new<M, S, Tx, Ecal, V>(vm: &Interpreter<M, S, Tx, Ecal, V>) -> Self {
        let prev_hp = vm
            .frames
            .last()
            .map(|frame| frame.registers()[RegId::HP])
            .unwrap_or(VM_MAX_RAM);

        OwnershipRegisters {
            sp: vm.registers[RegId::SP],
            ssp: vm.registers[RegId::SSP],
            hp: vm.registers[RegId::HP],
            prev_hp,
        }
    }

    /// Create an instance that only allows stack writes.
    pub(crate) fn only_allow_stack_write(sp: u64, ssp: u64, hp: u64) -> Self {
        debug_assert!(sp <= VM_MAX_RAM);
        debug_assert!(ssp <= VM_MAX_RAM);
        debug_assert!(hp <= VM_MAX_RAM);
        debug_assert!(ssp <= sp);
        debug_assert!(sp <= hp);
        OwnershipRegisters {
            sp,
            ssp,
            hp,
            prev_hp: hp,
        }
    }
    /// Allows all writes; the whole memory is stack-allocated.
    #[cfg(test)]
    pub(crate) fn test_full_stack() -> Self {
        OwnershipRegisters {
            sp: VM_MAX_RAM,
            ssp: 0,
            hp: VM_MAX_RAM,
            prev_hp: VM_MAX_RAM,
        }
    }

    pub(crate) fn verify_ownership(
        &self,
        range: &MemoryRange,
    ) -> Result<(), PanicReason> {
        if self.has_ownership_range(&range.words()) {
            Ok(())
        } else {
            Err(PanicReason::MemoryOwnership)
        }
    }

    pub fn has_ownership_range(&self, range: &Range<Word>) -> bool {
        self.has_ownership_stack(range) || self.has_ownership_heap(range)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_stack(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.ssp {
            return true
        }

        if !(self.ssp..self.sp).contains(&range.start) {
            return false
        }

        if range.end > VM_MAX_RAM {
            return false
        }

        (self.ssp..=self.sp).contains(&range.end)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_heap(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.hp {
            return true
        }

        if range.start < self.hp {
            return false
        }

        self.hp != self.prev_hp && range.end <= self.prev_hp
    }
}
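
// A test-only sketch of the ownership rules above: a context owns the
// writable stack region `[ssp, sp)` and its own heap allocations
// `[hp, prev_hp)`, but not the unallocated gap in between.
#[cfg(test)]
mod ownership_example {
    use super::*;

    #[test]
    fn stack_and_heap_ownership() {
        let owner = OwnershipRegisters {
            sp: 1000,
            ssp: 100,
            hp: 2000,
            prev_hp: 3000,
        };
        assert!(owner.has_ownership_range(&(100..1000))); // writable stack
        assert!(!owner.has_ownership_range(&(0..100))); // below ssp
        assert!(owner.has_ownership_range(&(2000..3000))); // own heap allocations
        assert!(!owner.has_ownership_range(&(1000..2000))); // unallocated gap
    }
}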

/// Attempts to copy from storage to memory, zero-filling any bytes beyond the
/// source slice boundaries. Performs overflow and memory range checks, but no
/// ownership checks. Note that if `src_offset` is larger than `src_len`, the
/// whole range will be zero-filled.
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy_from_storage_zero_fill<M, S>(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    storage: &S,
    dst_addr: Word,
    dst_len: Word,
    src_id: &M::Key,
    src_offset: u64,
    src_len: usize,
    not_found_error: PanicReason,
) -> IoResult<(), S::Error>
where
    M: Mappable,
    S: StorageRead<M>,
{
    let write_buffer = memory.write(owner, dst_addr, dst_len)?;
    let mut empty_offset = 0;

    if src_offset < src_len as Word {
        let src_offset =
            u32::try_from(src_offset).map_err(|_| PanicReason::MemoryOverflow)?;

        let src_read_length = src_len.saturating_sub(src_offset as usize);
        let src_read_length = src_read_length.min(write_buffer.len());

        let (src_read_buffer, _) = write_buffer.split_at_mut(src_read_length);
        let found = storage
            .read(src_id, src_offset as usize, src_read_buffer)
            .map_err(RuntimeError::Storage)?;
        if !found {
            return Err(not_found_error.into());
        }

        empty_offset = src_read_length;
    }

    write_buffer[empty_offset..].fill(0);

    Ok(())
}