fuel_vm/interpreter/memory.rs

#![cfg(feature = "alloc")]

use super::{
    internal::inc_pc,
    Interpreter,
};
use crate::{
    constraints::reg_key::*,
    consts::*,
    error::SimpleResult,
};

use fuel_asm::{
    Imm12,
    Imm24,
    PanicReason,
    RegId,
};
use fuel_types::{
    fmt_truncated_hex,
    Word,
};

use core::{
    fmt,
    ops::Range,
};

#[cfg(any(test, feature = "test-helpers"))]
use core::ops::{
    Index,
    IndexMut,
    RangeFrom,
    RangeTo,
};

use crate::error::{
    IoResult,
    RuntimeError,
};
use alloc::vec::Vec;
use fuel_storage::{
    Mappable,
    StorageRead,
};

#[cfg(test)]
mod tests;

#[cfg(test)]
mod impl_tests;

#[allow(non_snake_case)]
#[cfg(test)]
mod allocation_tests;

#[cfg(test)]
mod stack_tests;

/// The trait for the VM memory.
pub trait Memory: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

impl<M> Memory for M where M: AsRef<MemoryInstance> + AsMut<MemoryInstance> {}

/// The memory of the VM, represented as stack and heap.
#[derive(Clone, Eq)]
pub struct MemoryInstance {
    /// Stack. Grows upwards.
    stack: Vec<u8>,
    /// Heap. Grows downwards from MEM_SIZE.
    heap: Vec<u8>,
    /// Lowest allowed heap address, i.e. hp register value.
    /// This is needed since we can allocate extra heap for performance reasons.
    hp: usize,
}

impl Default for MemoryInstance {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for MemoryInstance {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Memory {{ stack: ")?;
        fmt_truncated_hex::<16>(&self.stack, f)?;
        write!(f, ", heap: ")?;
        let off = self.hp.saturating_sub(self.heap_offset());
        fmt_truncated_hex::<16>(&self.heap[off..], f)?;
        write!(f, ", hp: {} }}", self.hp)
    }
}

impl PartialEq for MemoryInstance {
    /// Equality comparison of the accessible memory.
    #[allow(clippy::arithmetic_side_effects)] // Safety: hp is kept valid everywhere
    fn eq(&self, other: &Self) -> bool {
        self.stack == other.stack && self.hp == other.hp && {
            let self_hs = self.hp - self.heap_offset();
            let other_hs = other.hp - other.heap_offset();
            self.heap[self_hs..] == other.heap[other_hs..]
        }
    }
}

impl AsRef<MemoryInstance> for MemoryInstance {
    fn as_ref(&self) -> &MemoryInstance {
        self
    }
}
impl AsMut<MemoryInstance> for MemoryInstance {
    fn as_mut(&mut self) -> &mut MemoryInstance {
        self
    }
}

impl MemoryInstance {
    /// Create a new VM memory.
    pub fn new() -> Self {
        Self {
            stack: Vec::new(),
            heap: Vec::new(),
            hp: MEM_SIZE,
        }
    }

    /// Resets memory to initial state, keeping the original allocations.
    pub fn reset(&mut self) {
        self.stack.truncate(0);
        self.hp = MEM_SIZE;
    }

    /// Offset of the heap section
    fn heap_offset(&self) -> usize {
        MEM_SIZE.saturating_sub(self.heap.len())
    }

    /// Grows the stack to be at least `new_sp` bytes.
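    ///
    /// A minimal sketch of the growth and bounds behavior (illustrative,
    /// not a compiled doc-test):
    ///
    /// ```ignore
    /// let mut memory = MemoryInstance::new();
    /// memory.grow_stack(1024)?; // stack is now at least 1024 zeroed bytes
    /// assert!(memory.grow_stack(VM_MAX_RAM + 1).is_err()); // beyond VM RAM
    /// ```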
    pub fn grow_stack(&mut self, new_sp: Word) -> Result<(), PanicReason> {
        if new_sp > VM_MAX_RAM {
            return Err(PanicReason::MemoryOverflow);
        }
        #[allow(clippy::cast_possible_truncation)] // Safety: VM_MAX_RAM is usize
        let new_sp = new_sp as usize;

        if new_sp > self.stack.len() {
            if new_sp > self.hp {
                return Err(PanicReason::MemoryGrowthOverlap)
            }

            self.stack.resize(new_sp, 0);
        }
        Ok(())
    }

    /// Grows the heap by `amount` bytes. Updates hp register.
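    ///
    /// For instance (an illustrative sketch, assuming `sp` and `hp` come from
    /// a freshly split register file where `hp == MEM_SIZE`):
    ///
    /// ```ignore
    /// // Growing by 32 bytes moves hp down: MEM_SIZE -> MEM_SIZE - 32.
    /// memory.grow_heap_by(sp.as_ref(), hp, 32)?;
    /// // Growing past the stack pointer fails with MemoryGrowthOverlap.
    /// ```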
    pub fn grow_heap_by(
        &mut self,
        sp_reg: Reg<SP>,
        mut hp_reg: RegMut<HP>,
        amount: Word,
    ) -> Result<(), PanicReason> {
        debug_assert_eq!(
            self.hp as Word, *hp_reg,
            "HP register changed without memory update"
        );

        let amount = usize::try_from(amount).map_err(|_| PanicReason::MemoryOverflow)?;
        let new_hp = self
            .hp
            .checked_sub(amount)
            .ok_or(PanicReason::MemoryOverflow)?;

        if (new_hp as Word) < *sp_reg {
            return Err(PanicReason::MemoryGrowthOverlap)
        }

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        let new_len = MEM_SIZE - new_hp;

        #[allow(clippy::arithmetic_side_effects)] // Safety: self.hp is in heap
        if self.heap.len() >= new_len {
            // No need to reallocate, but we need to zero the new space
            // in case it was used before a memory reset.
            let start = new_hp - self.heap_offset();
            let end = self.hp - self.heap_offset();
            self.heap[start..end].fill(0);
        } else {
            // Reallocation is needed.

            // We need to clear dirty memory before expanding it. An example:
            // Heap vector: [dirty, dirty, dirty, 0, 0, 0]
            //                                   /|\
            //                                    |
            //                                   HP
            //
            // If we copy from [0, old_len), we copy the dirty memory as well,
            // ending up with:
            // Heap vector: [0, 0, dirty, dirty, dirty, 0, 0, 0]
            //              /|\
            //               |
            //              HP
            //
            // So either we need to clear the memory before copying,
            // or clear the dirty parts after copying.
            // Clearing before copying is the more readable solution.
            let end = self.hp.saturating_sub(self.heap_offset());
            self.heap[..end].fill(0);

            // To reduce frequent reallocations, allocate at least 256 bytes at once.
            // After that, double the allocation every time.
            let cap = new_len.next_power_of_two().clamp(256, MEM_SIZE);
            let old_len = self.heap.len();
            let prefix_zeroes = cap - old_len;
            self.heap.resize(cap, 0);
            self.heap.copy_within(..old_len, prefix_zeroes);
            self.heap[..prefix_zeroes].fill(0);
        }

        self.hp = new_hp;
        *hp_reg = new_hp as Word;

        // If the heap enters a region previously used by the stack, truncate the stack
        self.stack.truncate(new_hp);

        Ok(())
    }

    /// Verify that the memory range is accessible and return it as a range.
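    ///
    /// For example (an illustrative sketch; addresses assume a stack already
    /// grown to 1024 bytes):
    ///
    /// ```ignore
    /// // The first kilobyte of the stack is accessible...
    /// assert!(memory.verify(0u64, 1024u64).is_ok());
    /// // ...but the uninitialized gap between sp and hp is not.
    /// assert!(memory.verify(2048u64, 8u64).is_err());
    /// ```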
    pub fn verify<A: ToAddr, B: ToAddr>(
        &self,
        addr: A,
        count: B,
    ) -> Result<MemoryRange, PanicReason> {
        let start = addr.to_addr()?;
        let len = count.to_addr()?;
        let end = start.saturating_add(len);
        if end > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }

        if end <= self.stack.len() || start >= self.hp {
            Ok(MemoryRange(start..end))
        } else {
            Err(PanicReason::UninitalizedMemoryAccess)
        }
    }

    /// Verify a constant-sized memory range.
    pub fn verify_const<A: ToAddr, const C: usize>(
        &self,
        addr: A,
    ) -> Result<MemoryRange, PanicReason> {
        self.verify(addr, C)
    }

    /// Returns a reference to memory for reading, if possible.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn read<A: ToAddr, C: ToAddr>(
        &self,
        addr: A,
        count: C,
    ) -> Result<&[u8], PanicReason> {
        let range = self.verify(addr, count)?;

        if range.end() <= self.stack.len() {
            Ok(&self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Reads a constant-sized byte array from memory, if possible.
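    ///
    /// A minimal usage sketch (illustrative; the const parameter fixes the
    /// read length at compile time):
    ///
    /// ```ignore
    /// let word_bytes: [u8; 8] = memory.read_bytes(addr)?;
    /// let value = Word::from_be_bytes(word_bytes);
    /// ```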
    pub fn read_bytes<A: ToAddr, const C: usize>(
        &self,
        at: A,
    ) -> Result<[u8; C], PanicReason> {
        let mut result = [0; C];
        result.copy_from_slice(self.read(at, C)?);
        Ok(result)
    }

    /// Gets write access to memory, if possible.
    /// Doesn't perform any ownership checks.
    #[allow(clippy::arithmetic_side_effects)] // Safety: subtractions are checked
    pub fn write_noownerchecks<A: ToAddr, B: ToAddr>(
        &mut self,
        addr: A,
        len: B,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        if range.end() <= self.stack.len() {
            Ok(&mut self.stack[range.usizes()])
        } else if range.start() >= self.heap_offset() {
            let start = range.start() - self.heap_offset();
            let end = range.end() - self.heap_offset();
            Ok(&mut self.heap[start..end])
        } else {
            unreachable!("Range was verified to be valid")
        }
    }

    /// Writes a constant-sized byte array to memory, if possible.
    /// Doesn't perform any ownership checks.
    pub fn write_bytes_noownerchecks<A: ToAddr, const C: usize>(
        &mut self,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write_noownerchecks(addr, C)?.copy_from_slice(&data);
        Ok(())
    }

    /// Checks that memory is writable and returns a mutable slice to it.
    pub fn write<A: ToAddr, C: ToAddr>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        len: C,
    ) -> Result<&mut [u8], PanicReason> {
        let range = self.verify(addr, len)?;
        owner.verify_ownership(&range)?;
        self.write_noownerchecks(range.start(), range.len())
    }

    /// Writes a constant-sized byte array to memory, checking for ownership.
    pub fn write_bytes<A: ToAddr, const C: usize>(
        &mut self,
        owner: OwnershipRegisters,
        addr: A,
        data: [u8; C],
    ) -> Result<(), PanicReason> {
        self.write(owner, addr, data.len())?.copy_from_slice(&data);
        Ok(())
    }

    /// Copies the memory from `src` to `dst` verifying ownership.
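    ///
    /// Overlapping source and destination ranges are rejected with
    /// `MemoryWriteOverlap`. An illustrative sketch:
    ///
    /// ```ignore
    /// memory.memcopy(dst, src, 32, owner)?;                       // disjoint: ok
    /// assert!(memory.memcopy(src + 8, src, 32, owner).is_err()); // overlap
    /// ```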
    #[inline]
    #[track_caller]
    pub fn memcopy(
        &mut self,
        dst: Word,
        src: Word,
        length: Word,
        owner: OwnershipRegisters,
    ) -> Result<(), PanicReason> {
        let dst_range = self.verify(dst, length)?;
        let src_range = self.verify(src, length)?;

        if dst_range.start() <= src_range.start() && src_range.start() < dst_range.end()
            || src_range.start() <= dst_range.start()
                && dst_range.start() < src_range.end()
            || dst_range.start() < src_range.end() && src_range.end() <= dst_range.end()
            || src_range.start() < dst_range.end() && dst_range.end() <= src_range.end()
        {
            return Err(PanicReason::MemoryWriteOverlap)
        }

        owner.verify_ownership(&dst_range)?;

        if src_range.end() <= self.stack.len() {
            if dst_range.end() <= self.stack.len() {
                self.stack
                    .copy_within(src_range.usizes(), dst_range.start());
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_end = dst_range.end() - self.heap_offset();

                let src_array = &self.stack[src_range.usizes()];
                let dst_array = &mut self.heap[dst_start..dst_end];
                dst_array.copy_from_slice(src_array);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else if src_range.start() >= self.heap_offset() {
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_start = src_range.start() - self.heap_offset();
            #[allow(clippy::arithmetic_side_effects)]
            // Safety: subtractions are checked above
            let src_end = src_range.end() - self.heap_offset();

            if dst_range.end() <= self.stack.len() {
                let src_array = &self.heap[src_start..src_end];

                let dst_array = &mut self.stack[dst_range.usizes()];
                dst_array.copy_from_slice(src_array);
            } else if dst_range.start() >= self.heap_offset() {
                #[allow(clippy::arithmetic_side_effects)]
                // Safety: subtractions are checked above
                let dst_start = dst_range.start() - self.heap_offset();

                self.heap.copy_within(src_start..src_end, dst_start);
            } else {
                unreachable!("Range was verified to be valid")
            }
        } else {
            unreachable!("Range was verified to be valid")
        }

        Ok(())
    }

    /// Memory access to the raw stack buffer.
    /// Note that for efficiency reasons this might not match sp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn stack_raw(&self) -> &[u8] {
        &self.stack
    }

    /// Memory access to the raw heap buffer.
    /// Note that for efficiency reasons this might not match hp value.
    #[cfg(any(test, feature = "test-helpers"))]
    pub fn heap_raw(&self) -> &[u8] {
        &self.heap
    }

    /// Returns a `MemoryRollbackData` that can be used to achieve the state of the
    /// `desired_memory_state` instance.
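    ///
    /// The intended round-trip (an illustrative sketch; `snapshot` stands for
    /// an earlier clone of the memory):
    ///
    /// ```ignore
    /// if let Some(data) = current.collect_rollback_data(&snapshot) {
    ///     current.rollback(&data);
    ///     assert_eq!(current, snapshot);
    /// }
    /// ```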
    pub fn collect_rollback_data(
        &self,
        desired_memory_state: &MemoryInstance,
    ) -> Option<MemoryRollbackData> {
        if self == desired_memory_state {
            return None
        }

        let sp = desired_memory_state.stack.len();
        let hp = desired_memory_state.hp;

        assert!(
            hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );

        let stack_changes =
            get_changes(&self.stack[..sp], &desired_memory_state.stack[..sp], 0);

        let heap_start = hp
            .checked_sub(self.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let heap = &self.heap[heap_start..];
        let desired_heap_start = hp
            .checked_sub(desired_memory_state.heap_offset())
            .expect("Memory is invalid, hp is out of bounds");
        let desired_heap = &desired_memory_state.heap[desired_heap_start..];

        let heap_changes = get_changes(heap, desired_heap, hp);

        Some(MemoryRollbackData {
            sp,
            hp,
            stack_changes,
            heap_changes,
        })
    }

    /// Rolls back the memory changes, returning the memory to the old state.
    pub fn rollback(&mut self, data: &MemoryRollbackData) {
        self.stack.resize(data.sp, 0);
        assert!(
            data.hp >= self.hp,
            "We only allow shrinking of the heap during rollback"
        );
        self.hp = data.hp;

        for change in &data.stack_changes {
            self.stack[change.global_start
                ..change.global_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }

        let offset = self.heap_offset();
        for change in &data.heap_changes {
            let local_start = change
                .global_start
                .checked_sub(offset)
                .expect("Invalid offset");
            self.heap[local_start..local_start.saturating_add(change.data.len())]
                .copy_from_slice(&change.data);
        }
    }
}

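/// Computes the byte ranges that differ between `latest_array` and
/// `desired_array`, as runs of consecutive changed bytes. `offset` shifts the
/// reported start positions into global address space.
///
/// For example (an illustrative sketch): diffing `[1, 2, 3]` against
/// `[1, 9, 9]` yields a single change with `global_start = offset + 1` and
/// `data = [9, 9]`.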
fn get_changes(
    latest_array: &[u8],
    desired_array: &[u8],
    offset: usize,
) -> Vec<MemorySliceChange> {
    let mut changes = Vec::new();
    let mut range = None;
    for (i, (old, new)) in latest_array.iter().zip(desired_array.iter()).enumerate() {
        if old != new {
            range = match range {
                None => Some((i, 1usize)),
                Some((start, count)) => Some((start, count.saturating_add(1))),
            };
        } else if let Some((start, count)) = range.take() {
            changes.push(MemorySliceChange {
                global_start: offset.saturating_add(start),
                data: desired_array[start..start.saturating_add(count)].to_vec(),
            });
        }
    }
    if let Some((start, count)) = range.take() {
        changes.push(MemorySliceChange {
            global_start: offset.saturating_add(start),
            data: desired_array[start..start.saturating_add(count)].to_vec(),
        });
    }
    changes
}

#[derive(Debug, Clone)]
struct MemorySliceChange {
    global_start: usize,
    data: Vec<u8>,
}

/// The container for the data used to roll back memory changes.
#[derive(Debug, Clone)]
pub struct MemoryRollbackData {
    /// Desired stack pointer.
    sp: usize,
    /// Desired heap pointer. Desired heap pointer can't be less than the current one.
    hp: usize,
    /// Changes to the stack to achieve the desired state of the stack.
    stack_changes: Vec<MemorySliceChange>,
    /// Changes to the heap to achieve the desired state of the heap.
    heap_changes: Vec<MemorySliceChange>,
}

#[cfg(feature = "test-helpers")]
impl From<Vec<u8>> for MemoryInstance {
    fn from(stack: Vec<u8>) -> Self {
        Self {
            stack,
            ..Self::new()
        }
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<Range<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: Range<usize>) -> &Self::Output {
        self.read(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeFrom<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeFrom<usize>) -> &Self::Output {
        &self[index.start..MEM_SIZE]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl Index<RangeTo<usize>> for MemoryInstance {
    type Output = [u8];

    fn index(&self, index: RangeTo<usize>) -> &Self::Output {
        &self[0..index.end]
    }
}

#[cfg(any(test, feature = "test-helpers"))]
impl IndexMut<Range<usize>> for MemoryInstance {
    fn index_mut(&mut self, index: Range<usize>) -> &mut Self::Output {
        self.write_noownerchecks(index.start, index.len())
            .expect("Memory range out of bounds")
    }
}

/// Used to handle `Word` to `usize` conversions for memory addresses,
/// as well as checking that the resulting value is within the VM ram boundaries.
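///
/// For example (an illustrative sketch):
///
/// ```ignore
/// assert_eq!(1024u64.to_addr()?, 1024usize);
/// assert!(Word::MAX.to_addr().is_err()); // larger than MEM_SIZE
/// ```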
pub trait ToAddr {
    /// Converts a value to `usize` used for memory addresses.
    /// Returns `Err` with `MemoryOverflow` if the resulting value doesn't fit in the VM
    /// memory. This can be used for both addresses and offsets.
    fn to_addr(self) -> Result<usize, PanicReason>;
}

impl ToAddr for usize {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self > MEM_SIZE {
            return Err(PanicReason::MemoryOverflow)
        }
        Ok(self)
    }
}

impl ToAddr for Word {
    fn to_addr(self) -> Result<usize, PanicReason> {
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}

#[cfg(feature = "test-helpers")]
/// Implemented for `i32` to allow integer literals. Panics on negative values.
impl ToAddr for i32 {
    fn to_addr(self) -> Result<usize, PanicReason> {
        if self < 0 {
            panic!("Negative memory address");
        }
        let value = usize::try_from(self).map_err(|_| PanicReason::MemoryOverflow)?;
        value.to_addr()
    }
}

/// A range of memory. No guarantees are made about validity of access.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MemoryRange(Range<usize>);

impl MemoryRange {
    /// Create a new memory range. Cannot panic, but the range may be invalid.
    pub const fn new(start: usize, len: usize) -> Self {
        Self(start..start.saturating_add(len))
    }

    /// Start of the range.
    pub fn start(&self) -> usize {
        self.0.start
    }

    /// End of the range. One past the last byte.
    pub fn end(&self) -> usize {
        self.0.end
    }

    /// Is the range empty?
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Length of the range.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// Returns the range as a `usize` range.
    pub fn usizes(&self) -> Range<usize> {
        self.0.clone()
    }

    /// Returns the range as a `Word` range.
    pub fn words(&self) -> Range<Word> {
        self.0.start as Word..self.0.end as Word
    }

    /// Splits range at given relative offset. Panics if offset > range length.
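    ///
    /// For example (an illustrative sketch): splitting `4..10` at offset `2`
    /// yields `(4..6, 6..10)`.
    ///
    /// ```ignore
    /// let (head, tail) = MemoryRange::new(4, 6).split_at_offset(2);
    /// assert_eq!((head.usizes(), tail.usizes()), (4..6, 6..10));
    /// ```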
    pub fn split_at_offset(self, at: usize) -> (Self, Self) {
        let mid = self.0.start.saturating_add(at);
        assert!(mid <= self.0.end);
        (Self(self.0.start..mid), Self(mid..self.0.end))
    }
}

impl<M, S, Tx, Ecal, V> Interpreter<M, S, Tx, Ecal, V>
where
    M: Memory,
{
    /// Return the registers used to determine ownership.
    pub(crate) fn ownership_registers(&self) -> OwnershipRegisters {
        OwnershipRegisters::new(self)
    }

    pub(crate) fn stack_pointer_overflow<F>(&mut self, f: F, v: Word) -> SimpleResult<()>
    where
        F: FnOnce(Word, Word) -> (Word, bool),
    {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            _,
        ) = split_registers(&mut self.registers);
        stack_pointer_overflow(
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            f,
            v,
            self.memory.as_mut(),
        )
    }

    pub(crate) fn push_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            program_regs,
        ) = split_registers(&mut self.registers);
        push_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &program_regs,
            segment,
            bitmask,
        )
    }

    pub(crate) fn pop_selected_registers(
        &mut self,
        segment: ProgramRegistersSegment,
        bitmask: Imm24,
    ) -> SimpleResult<()> {
        let (
            SystemRegisters {
                sp, ssp, hp, pc, ..
            },
            mut program_regs,
        ) = split_registers(&mut self.registers);
        pop_selected_registers(
            self.memory.as_mut(),
            sp,
            ssp.as_ref(),
            hp.as_ref(),
            pc,
            &mut program_regs,
            segment,
            bitmask,
        )
    }

    pub(crate) fn load_byte(&mut self, ra: RegId, b: Word, c: Word) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_byte(self.memory.as_ref(), pc, result, b, c)
    }

    pub(crate) fn load_word(&mut self, ra: RegId, b: Word, c: Imm12) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        load_word(self.memory.as_ref(), pc, result, b, c)
    }

    pub(crate) fn store_byte(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_byte(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    pub(crate) fn store_word(&mut self, a: Word, b: Word, c: Imm12) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        store_word(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    /// Expand heap by `amount` bytes.
    pub fn allocate(&mut self, amount: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, .. }, _) = split_registers(&mut self.registers);
        self.memory.as_mut().grow_heap_by(sp.as_ref(), hp, amount)?;
        Ok(())
    }

    pub(crate) fn malloc(&mut self, a: Word) -> SimpleResult<()> {
        let (SystemRegisters { hp, sp, pc, .. }, _) =
            split_registers(&mut self.registers);
        malloc(hp, sp.as_ref(), pc, a, self.memory.as_mut())
    }

    pub(crate) fn memclear(&mut self, a: Word, b: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memclear(self.memory.as_mut(), owner, self.registers.pc_mut(), a, b)
    }

    pub(crate) fn memcopy(&mut self, a: Word, b: Word, c: Word) -> SimpleResult<()> {
        let owner = self.ownership_registers();
        memcopy(
            self.memory.as_mut(),
            owner,
            self.registers.pc_mut(),
            a,
            b,
            c,
        )
    }

    pub(crate) fn memeq(
        &mut self,
        ra: RegId,
        b: Word,
        c: Word,
        d: Word,
    ) -> SimpleResult<()> {
        let (SystemRegisters { pc, .. }, mut w) = split_registers(&mut self.registers);
        let result = &mut w[WriteRegKey::try_from(ra)?];
        memeq(self.memory.as_mut(), result, pc, b, c, d)
    }
}

/// Update stack pointer, checking for validity first.
pub(crate) fn try_update_stack_pointer(
    mut sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    new_sp: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    if new_sp < *ssp {
        Err(PanicReason::MemoryOverflow.into())
    } else if new_sp > *hp {
        Err(PanicReason::MemoryGrowthOverlap.into())
    } else {
        *sp = new_sp;
        memory.grow_stack(new_sp)?;
        Ok(())
    }
}

pub(crate) fn stack_pointer_overflow<F>(
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    f: F,
    v: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()>
where
    F: FnOnce(Word, Word) -> (Word, bool),
{
    let (new_sp, overflow) = f(*sp, v);

    if overflow {
        return Err(PanicReason::MemoryOverflow.into())
    }

    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;
    Ok(inc_pc(pc)?)
}

#[allow(clippy::too_many_arguments)]
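/// Pushes the masked registers of the selected segment onto the stack, one
/// big-endian word each, lowest register first.
///
/// For example (an illustrative sketch): a `bitmask` of `0b101` selects the
/// first and third registers of the segment, so the stack grows by
/// `2 * WORD_SIZE` bytes and `sp` advances accordingly.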
pub(crate) fn push_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the new stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let write_size = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let write_at = *sp;
    // If this would overflow, the stack pointer update below will fail
    let new_sp = write_at.saturating_add(write_size);
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Write the registers to the stack
    let mut it = memory
        .write_noownerchecks(write_at, write_size)?
        .chunks_exact_mut(WORD_SIZE);
    for (i, reg) in program_regs.segment(segment).iter().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let item = it
                .next()
                .expect("Memory range mismatched with register count");
            item.copy_from_slice(&reg.to_be_bytes());
        }
    }

    Ok(inc_pc(pc)?)
}

#[allow(clippy::too_many_arguments)]
pub(crate) fn pop_selected_registers(
    memory: &mut MemoryInstance,
    sp: RegMut<SP>,
    ssp: Reg<SSP>,
    hp: Reg<HP>,
    pc: RegMut<PC>,
    program_regs: &mut ProgramRegisters,
    segment: ProgramRegistersSegment,
    bitmask: Imm24,
) -> SimpleResult<()> {
    let bitmask = bitmask.to_u32();

    // First update the stack pointer, as that's the only error condition
    let count: u64 = bitmask.count_ones().into();
    let size_in_stack = count
        .checked_mul(WORD_SIZE as u64)
        .expect("Bitmask size times 8 can never overflow");
    let new_sp = sp
        .checked_sub(size_in_stack)
        .ok_or(PanicReason::MemoryOverflow)?;
    try_update_stack_pointer(sp, ssp, hp, new_sp, memory)?;

    // Restore registers from the stack
    let mut it = memory.read(new_sp, size_in_stack)?.chunks_exact(WORD_SIZE);
    for (i, reg) in program_regs.segment_mut(segment).iter_mut().enumerate() {
        if (bitmask & (1 << i)) != 0 {
            let mut buf = [0u8; WORD_SIZE];
            buf.copy_from_slice(it.next().expect("Count mismatch"));
            *reg = Word::from_be_bytes(buf);
        }
    }

    Ok(inc_pc(pc)?)
}

pub(crate) fn load_byte(
    memory: &MemoryInstance,
    pc: RegMut<PC>,
    result: &mut Word,
    b: Word,
    c: Word,
) -> SimpleResult<()> {
    let [b] = memory.read_bytes(b.saturating_add(c))?;
    *result = b as Word;
    Ok(inc_pc(pc)?)
}

pub(crate) fn load_word(
    memory: &MemoryInstance,
    pc: RegMut<PC>,
    result: &mut Word,
    b: Word,
    c: Imm12,
) -> SimpleResult<()> {
    let offset = u64::from(c)
        .checked_mul(WORD_SIZE as u64)
        .expect("u12 * 8 cannot overflow a Word");
    let addr = b.checked_add(offset).ok_or(PanicReason::MemoryOverflow)?;
    *result = Word::from_be_bytes(memory.read_bytes(addr)?);
    Ok(inc_pc(pc)?)
}

#[allow(clippy::cast_possible_truncation)]
pub(crate) fn store_byte(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
    c: Word,
) -> SimpleResult<()> {
    memory.write_bytes(owner, a.saturating_add(c), [b as u8])?;
    Ok(inc_pc(pc)?)
}

pub(crate) fn store_word(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
    c: Imm12,
) -> SimpleResult<()> {
    #[allow(clippy::arithmetic_side_effects)]
    let offset = u64::from(c)
        .checked_mul(WORD_SIZE as u64)
        .expect("12-bits number multiplied by 8 cannot overflow a Word");
    let addr = a.saturating_add(offset);
    memory.write_bytes(owner, addr, b.to_be_bytes())?;
    Ok(inc_pc(pc)?)
}

pub(crate) fn malloc(
    hp: RegMut<HP>,
    sp: Reg<SP>,
    pc: RegMut<PC>,
    amount: Word,
    memory: &mut MemoryInstance,
) -> SimpleResult<()> {
    memory.grow_heap_by(sp, hp, amount)?;
    Ok(inc_pc(pc)?)
}

pub(crate) fn memclear(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    a: Word,
    b: Word,
) -> SimpleResult<()> {
    memory.write(owner, a, b)?.fill(0);
    Ok(inc_pc(pc)?)
}

pub(crate) fn memcopy(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    pc: RegMut<PC>,
    dst: Word,
    src: Word,
    length: Word,
) -> SimpleResult<()> {
    memory.memcopy(dst, src, length, owner)?;

    Ok(inc_pc(pc)?)
}

pub(crate) fn memeq(
    memory: &mut MemoryInstance,
    result: &mut Word,
    pc: RegMut<PC>,
    b: Word,
    c: Word,
    d: Word,
) -> SimpleResult<()> {
    *result = (memory.read(b, d)? == memory.read(c, d)?) as Word;
    Ok(inc_pc(pc)?)
}

#[derive(Debug, Clone, Copy)]
pub struct OwnershipRegisters {
    pub(crate) sp: u64,
    pub(crate) ssp: u64,
    pub(crate) hp: u64,
    /// Previous heap pointer, used for external contexts.
    /// Otherwise, it's just memory size.
    pub(crate) prev_hp: u64,
}

impl OwnershipRegisters {
    pub(crate) fn new<M, S, Tx, Ecal, V>(vm: &Interpreter<M, S, Tx, Ecal, V>) -> Self {
        let prev_hp = vm
            .frames
            .last()
            .map(|frame| frame.registers()[RegId::HP])
            .unwrap_or(VM_MAX_RAM);

        OwnershipRegisters {
            sp: vm.registers[RegId::SP],
            ssp: vm.registers[RegId::SSP],
            hp: vm.registers[RegId::HP],
            prev_hp,
        }
    }

    /// Create an instance that only allows stack writes.
    pub(crate) fn only_allow_stack_write(sp: u64, ssp: u64, hp: u64) -> Self {
        debug_assert!(sp <= VM_MAX_RAM);
        debug_assert!(ssp <= VM_MAX_RAM);
        debug_assert!(hp <= VM_MAX_RAM);
        debug_assert!(ssp <= sp);
        debug_assert!(sp <= hp);
        OwnershipRegisters {
            sp,
            ssp,
            hp,
            prev_hp: hp,
        }
    }

    /// Allows all writes; the whole memory is considered stack-allocated.
    #[cfg(test)]
    pub(crate) fn test_full_stack() -> Self {
        OwnershipRegisters {
            sp: VM_MAX_RAM,
            ssp: 0,
            hp: VM_MAX_RAM,
            prev_hp: VM_MAX_RAM,
        }
    }

    pub(crate) fn verify_ownership(
        &self,
        range: &MemoryRange,
    ) -> Result<(), PanicReason> {
        if self.has_ownership_range(&range.words()) {
            Ok(())
        } else {
            Err(PanicReason::MemoryOwnership)
        }
    }

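    /// Returns true if the whole range is owned by the current context,
    /// i.e. it lies within the writable stack area (`ssp..sp`) or within the
    /// current call frame's heap area.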
    pub fn has_ownership_range(&self, range: &Range<Word>) -> bool {
        self.has_ownership_stack(range) || self.has_ownership_heap(range)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_stack(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.ssp {
            return true
        }

        if !(self.ssp..self.sp).contains(&range.start) {
            return false
        }

        if range.end > VM_MAX_RAM {
            return false
        }

        (self.ssp..=self.sp).contains(&range.end)
    }

    /// Empty range is owned iff the range.start is owned
    pub(crate) fn has_ownership_heap(&self, range: &Range<Word>) -> bool {
        if range.is_empty() && range.start == self.hp {
            return true
        }

        if range.start < self.hp {
            return false
        }

        self.hp != self.prev_hp && range.end <= self.prev_hp
    }
}

/// Attempts to copy from storage to memory, filling with zero bytes when
/// reading past the end of the source slice. Performs overflow, memory range,
/// and ownership checks on the destination.
/// Note that if `src_offset` is larger than `src.len()`, the whole range will be
/// zero-filled.
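///
/// For example (an illustrative sketch): with a 4-byte source `[1, 2, 3, 4]`,
/// `src_offset = 2`, and `dst_len = 4`, the destination receives `[3, 4, 0, 0]`.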
#[allow(clippy::too_many_arguments)]
pub(crate) fn copy_from_storage_zero_fill<M, S>(
    memory: &mut MemoryInstance,
    owner: OwnershipRegisters,
    storage: &S,
    dst_addr: Word,
    dst_len: Word,
    src_id: &M::Key,
    src_offset: u64,
    src_len: usize,
    not_found_error: PanicReason,
) -> IoResult<(), S::Error>
where
    M: Mappable,
    S: StorageRead<M>,
{
    let write_buffer = memory.write(owner, dst_addr, dst_len)?;
    let mut empty_offset = 0;

    if src_offset < src_len as Word {
        let src_offset =
            u32::try_from(src_offset).map_err(|_| PanicReason::MemoryOverflow)?;

        let src_read_length = src_len.saturating_sub(src_offset as usize);
        let src_read_length = src_read_length.min(write_buffer.len());

        let (src_read_buffer, _) = write_buffer.split_at_mut(src_read_length);
        let found = storage
            .read(src_id, src_offset as usize, src_read_buffer)
            .map_err(RuntimeError::Storage)?;
        if !found {
            return Err(not_found_error.into());
        }

        empty_offset = src_read_length;
    }

    write_buffer[empty_offset..].fill(0);

    Ok(())
}