casper_wasmi/memory/
mod.rs

1use crate::{
2    memory_units::{Bytes, Pages, RoundUpTo},
3    value::LittleEndianConvert,
4    Error,
5};
6use alloc::{rc::Rc, string::ToString, vec::Vec};
7use casper_wasm::elements::ResizableLimits;
8use core::{
9    cell::{Cell, Ref, RefCell, RefMut},
10    cmp,
11    fmt,
12    ops::Range,
13};
14
// The linear memory is backed by one of two `ByteBuf` implementations,
// selected at compile time:
//
// - an mmap-based buffer when the `virtual_memory` feature is enabled on a
//   64-bit target,
// - a `Vec`-backed buffer otherwise.
#[cfg(all(feature = "virtual_memory", target_pointer_width = "64"))]
#[path = "mmap_bytebuf.rs"]
mod bytebuf;

#[cfg(not(all(feature = "virtual_memory", target_pointer_width = "64")))]
#[path = "vec_bytebuf.rs"]
mod bytebuf;

use self::bytebuf::ByteBuf;
24
/// Size of a page of [linear memory][`MemoryInstance`] - 64KiB.
///
/// The size of a memory is always an integer multiple of a page size.
///
/// [`MemoryInstance`]: struct.MemoryInstance.html
pub const LINEAR_MEMORY_PAGE_SIZE: Bytes = Bytes(65536);
31
/// Reference to a memory (See [`MemoryInstance`] for details).
///
/// This reference has a reference-counting semantics: cloning it is cheap and
/// all clones point to the same underlying memory instance.
///
/// [`MemoryInstance`]: struct.MemoryInstance.html
#[derive(Clone, Debug)]
pub struct MemoryRef(Rc<MemoryInstance>);
40
41impl ::core::ops::Deref for MemoryRef {
42    type Target = MemoryInstance;
43    fn deref(&self) -> &MemoryInstance {
44        &self.0
45    }
46}
47
/// Runtime representation of a linear memory (or `memory` for short).
///
/// A memory is a contiguous, mutable array of raw bytes. Wasm code can load and store values
/// from/to a linear memory at any byte address.
/// A trap occurs if an access is not within the bounds of the current memory size.
///
/// A memory is created with an initial size but can be grown dynamically.
/// The growth can be limited by specifying maximum size.
/// The size of a memory is always an integer multiple of a [page size][`LINEAR_MEMORY_PAGE_SIZE`] - 64KiB.
///
/// At the moment, wasm doesn't provide any way to shrink the memory.
///
/// [`LINEAR_MEMORY_PAGE_SIZE`]: constant.LINEAR_MEMORY_PAGE_SIZE.html
pub struct MemoryInstance {
    /// Memory limits.
    limits: ResizableLimits,
    /// Linear memory buffer with lazy allocation.
    buffer: RefCell<ByteBuf>,
    /// Number of pages this memory was created with.
    initial: Pages,
    /// Cached byte length, written by `grow`. Note that `current_size()`
    /// derives the size from the buffer length instead of reading this cell.
    current_size: Cell<usize>,
    /// Optional upper bound, in pages, that `grow` may not exceed.
    maximum: Option<Pages>,
}
70
impl fmt::Debug for MemoryInstance {
    /// Debug output reports the buffer length instead of the buffer contents;
    /// the `current_size` cache cell is not included.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MemoryInstance")
            .field("limits", &self.limits)
            .field("buffer.len", &self.buffer.borrow().len())
            .field("maximum", &self.maximum)
            .field("initial", &self.initial)
            .finish()
    }
}
81
/// A byte region that has already been validated against a buffer's bounds.
struct CheckedRegion {
    offset: usize,
    size: usize,
}

impl CheckedRegion {
    /// Half-open byte range `[offset, offset + size)` covered by this region.
    fn range(&self) -> Range<usize> {
        let start = self.offset;
        start..start + self.size
    }

    /// Returns `true` when the two regions share at least one byte.
    fn intersects(&self, other: &Self) -> bool {
        let overlap_start = cmp::max(self.offset, other.offset);
        let overlap_end = cmp::min(self.offset + self.size, other.offset + other.size);
        overlap_start < overlap_end
    }
}
99
100impl MemoryInstance {
    /// Allocate a memory instance.
    ///
    /// The memory allocated with initial number of pages specified by `initial`.
    /// Minimal possible value for `initial` is 0 and maximum possible is `65536`.
    /// (Since maximum addressable memory is 2<sup>32</sup> = 4GiB = 65536 * [64KiB][`LINEAR_MEMORY_PAGE_SIZE`]).
    ///
    /// It is possible to limit maximum number of pages this memory instance can have by specifying
    /// `maximum`. If not specified, this memory instance would be able to allocate up to 4GiB.
    ///
    /// Allocated memory is always zeroed.
    ///
    /// # Errors
    ///
    /// Returns `Err` if:
    ///
    /// - `initial` is greater than `maximum`
    /// - either `initial` or `maximum` is greater than `65536`.
    ///
    /// [`LINEAR_MEMORY_PAGE_SIZE`]: constant.LINEAR_MEMORY_PAGE_SIZE.html
    pub fn alloc(initial: Pages, maximum: Option<Pages>) -> Result<MemoryRef, Error> {
        {
            // Run the same limit validation the wasm validator applies, so
            // memories created through the host API obey identical rules.
            let initial_u32: u32 = initial.0.try_into().map_err(|_| {
                Error::Memory(format!("initial ({}) can't be coerced to u32", initial.0))
            })?;
            let maximum_u32: Option<u32> = maximum
                .map(|maximum_pages| {
                    maximum_pages.0.try_into().map_err(|_| {
                        Error::Memory(format!(
                            "maximum ({}) can't be coerced to u32",
                            maximum_pages.0
                        ))
                    })
                })
                .transpose()?;
            validation::validate_memory(initial_u32, maximum_u32).map_err(Error::Memory)?;
        }

        let memory = MemoryInstance::new(initial, maximum)?;
        Ok(MemoryRef(Rc::new(memory)))
    }
141
142    /// Create new linear memory instance.
143    fn new(initial: Pages, maximum: Option<Pages>) -> Result<Self, Error> {
144        let limits = ResizableLimits::new(initial.0 as u32, maximum.map(|p| p.0 as u32));
145
146        let initial_size: Bytes = initial.into();
147        Ok(MemoryInstance {
148            limits,
149            buffer: RefCell::new(ByteBuf::new(initial_size.0).map_err(Error::Memory)?),
150            initial,
151            current_size: Cell::new(initial_size.0),
152            maximum,
153        })
154    }
155
    /// Return linear memory limits as declared/validated for this instance.
    pub(crate) fn limits(&self) -> &ResizableLimits {
        &self.limits
    }

    /// Returns number of pages this `MemoryInstance` was created with.
    pub fn initial(&self) -> Pages {
        self.initial
    }

    /// Returns maximum amount of pages this `MemoryInstance` can grow to.
    ///
    /// Returns `None` if there is no limit set.
    /// Maximum memory size cannot exceed `65536` pages or 4GiB.
    pub fn maximum(&self) -> Option<Pages> {
        self.maximum
    }
173
    /// Returns current linear memory size.
    ///
    /// The size is derived from the backing buffer length (rounded up to whole
    /// pages), not from the cached `current_size` cell.
    ///
    /// Maximum memory size cannot exceed `65536` pages or 4GiB.
    ///
    /// # Example
    ///
    /// To convert number of pages to number of bytes you can use the following code:
    ///
    /// ```rust
    /// use casper_wasmi::MemoryInstance;
    /// use casper_wasmi::memory_units::*;
    ///
    /// let memory = MemoryInstance::alloc(Pages(1), None).unwrap();
    /// let byte_size: Bytes = memory.current_size().into();
    /// assert_eq!(
    ///     byte_size,
    ///     Bytes(65536),
    /// );
    /// ```
    pub fn current_size(&self) -> Pages {
        Bytes(self.buffer.borrow().len()).round_up_to()
    }
196
197    /// Get value from memory at given offset.
198    pub fn get_value<T: LittleEndianConvert>(&self, offset: u32) -> Result<T, Error> {
199        let mut bytes = <<T as LittleEndianConvert>::Bytes as Default>::default();
200        self.get_into(offset, bytes.as_mut())?;
201        let value = T::from_le_bytes(bytes);
202        Ok(value)
203    }
204
205    /// Copy data from memory at given offset.
206    ///
207    /// This will allocate vector for you.
208    /// If you can provide a mutable slice you can use [`get_into`].
209    ///
210    /// [`get_into`]: #method.get_into
211    #[deprecated(since = "0.10.0", note = "use get_into or get_value method instead")]
212    pub fn get(&self, offset: u32, size: usize) -> Result<Vec<u8>, Error> {
213        let mut buffer = self.buffer.borrow_mut();
214        let region = self.checked_region(&mut buffer, offset as usize, size)?;
215
216        Ok(buffer.as_slice_mut()[region.range()].to_vec())
217    }
218
219    /// Copy data from given offset in the memory into `target` slice.
220    ///
221    /// # Errors
222    ///
223    /// Returns `Err` if the specified region is out of bounds.
224    pub fn get_into(&self, offset: u32, target: &mut [u8]) -> Result<(), Error> {
225        let mut buffer = self.buffer.borrow_mut();
226        let region = self.checked_region(&mut buffer, offset as usize, target.len())?;
227
228        target.copy_from_slice(&buffer.as_slice_mut()[region.range()]);
229
230        Ok(())
231    }
232
233    /// Copy data in the memory at given offset.
234    pub fn set(&self, offset: u32, value: &[u8]) -> Result<(), Error> {
235        let mut buffer = self.buffer.borrow_mut();
236        let range = self
237            .checked_region(&mut buffer, offset as usize, value.len())?
238            .range();
239
240        buffer.as_slice_mut()[range].copy_from_slice(value);
241
242        Ok(())
243    }
244
245    /// Copy value in the memory at given offset.
246    pub fn set_value<T: LittleEndianConvert>(&self, offset: u32, value: T) -> Result<(), Error> {
247        let bytes = T::into_le_bytes(value);
248        self.set(offset, bytes.as_ref())?;
249        Ok(())
250    }
251
    /// Increases the size of the linear memory by given number of pages.
    /// Returns previous memory size if succeeds.
    ///
    /// # Errors
    ///
    /// Returns `Err` if attempted to allocate more memory than permitted by the limit.
    pub fn grow(&self, additional: Pages) -> Result<Pages, Error> {
        let size_before_grow: Pages = self.current_size();

        // Growing by zero pages is a no-op and always succeeds.
        if additional == Pages(0) {
            return Ok(size_before_grow);
        }
        // A single request can never exceed the whole 4GiB address space.
        if additional > Pages(65536) {
            return Err(Error::Memory(
                "Trying to grow memory by more than 65536 pages".to_string(),
            ));
        }

        let new_size: Pages = size_before_grow + additional;
        // When no explicit maximum is set, fall back to the spec-wide page limit.
        let maximum = self
            .maximum
            .unwrap_or(Pages(validation::LINEAR_MEMORY_MAX_PAGES as usize));
        if new_size > maximum {
            return Err(Error::Memory(format!(
                "Trying to grow memory by {} pages when already have {}",
                additional.0, size_before_grow.0,
            )));
        }

        let new_buffer_length: Bytes = new_size.into();
        // Reallocate the backing buffer to the new byte length.
        // NOTE(review): assumes `ByteBuf::realloc` zero-fills the added tail,
        // matching the "allocated memory is always zeroed" contract — confirm
        // in the bytebuf implementations.
        self.buffer
            .borrow_mut()
            .realloc(new_buffer_length.0)
            .map_err(Error::Memory)?;

        self.current_size.set(new_buffer_length.0);

        Ok(size_before_grow)
    }
291
292    fn checked_region(
293        &self,
294        buffer: &mut ByteBuf,
295        offset: usize,
296        size: usize,
297    ) -> Result<CheckedRegion, Error> {
298        let end = offset.checked_add(size).ok_or_else(|| {
299            Error::Memory(format!(
300                "trying to access memory block of size {} from offset {}",
301                size, offset
302            ))
303        })?;
304
305        if end > buffer.len() {
306            return Err(Error::Memory(format!(
307                "trying to access region [{}..{}] in memory [0..{}]",
308                offset,
309                end,
310                buffer.len()
311            )));
312        }
313
314        Ok(CheckedRegion { offset, size })
315    }
316
    /// Validate two byte regions against the buffer bounds.
    ///
    /// Both overflow checks run before either bounds check, so the error
    /// reported for a given pair of arguments is deterministic. Keep this
    /// ordering: it is observable through the returned error messages.
    fn checked_region_pair(
        &self,
        buffer: &mut ByteBuf,
        offset1: usize,
        size1: usize,
        offset2: usize,
        size2: usize,
    ) -> Result<(CheckedRegion, CheckedRegion), Error> {
        // Reject `offset1 + size1` arithmetic overflow.
        let end1 = offset1.checked_add(size1).ok_or_else(|| {
            Error::Memory(format!(
                "trying to access memory block of size {} from offset {}",
                size1, offset1
            ))
        })?;

        // Reject `offset2 + size2` arithmetic overflow.
        let end2 = offset2.checked_add(size2).ok_or_else(|| {
            Error::Memory(format!(
                "trying to access memory block of size {} from offset {}",
                size2, offset2
            ))
        })?;

        if end1 > buffer.len() {
            return Err(Error::Memory(format!(
                "trying to access region [{}..{}] in memory [0..{}]",
                offset1,
                end1,
                buffer.len()
            )));
        }

        if end2 > buffer.len() {
            return Err(Error::Memory(format!(
                "trying to access region [{}..{}] in memory [0..{}]",
                offset2,
                end2,
                buffer.len()
            )));
        }

        Ok((
            CheckedRegion {
                offset: offset1,
                size: size1,
            },
            CheckedRegion {
                offset: offset2,
                size: size2,
            },
        ))
    }
368
369    /// Copy contents of one memory region to another.
370    ///
371    /// Semantically equivalent to `memmove`.
372    ///
373    /// # Errors
374    ///
375    /// Returns `Err` if either of specified regions is out of bounds.
376    pub fn copy(&self, src_offset: usize, dst_offset: usize, len: usize) -> Result<(), Error> {
377        let mut buffer = self.buffer.borrow_mut();
378
379        let (read_region, write_region) =
380            self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
381
382        unsafe {
383            ::core::ptr::copy(
384                buffer.as_slice()[read_region.range()].as_ptr(),
385                buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
386                len,
387            )
388        }
389
390        Ok(())
391    }
392
393    /// Copy contents of one memory region to another (non-overlapping version).
394    ///
395    /// Semantically equivalent to `memcpy`.
396    /// but returns Error if source overlaping with destination.
397    ///
398    /// # Errors
399    ///
400    /// Returns `Err` if:
401    ///
402    /// - either of specified regions is out of bounds,
403    /// - these regions overlaps.
404    pub fn copy_nonoverlapping(
405        &self,
406        src_offset: usize,
407        dst_offset: usize,
408        len: usize,
409    ) -> Result<(), Error> {
410        let mut buffer = self.buffer.borrow_mut();
411
412        let (read_region, write_region) =
413            self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
414
415        if read_region.intersects(&write_region) {
416            return Err(Error::Memory(
417                "non-overlapping copy is used for overlapping regions".to_string(),
418            ));
419        }
420
421        unsafe {
422            ::core::ptr::copy_nonoverlapping(
423                buffer.as_slice()[read_region.range()].as_ptr(),
424                buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
425                len,
426            )
427        }
428
429        Ok(())
430    }
431
    /// Copy memory between two (possibly distinct) memory instances.
    ///
    /// If the same memory instance passed as `src` and `dst` then usual `copy` will be used.
    pub fn transfer(
        src: &MemoryRef,
        src_offset: usize,
        dst: &MemoryRef,
        dst_offset: usize,
        len: usize,
    ) -> Result<(), Error> {
        if Rc::ptr_eq(&src.0, &dst.0) {
            // `transfer` is invoked with the same source and destination. Let's assume that
            // regions may overlap and use `copy`.
            return src.copy(src_offset, dst_offset, len);
        }

        // Because memory references point to different memory instances, it is safe to `borrow_mut`
        // both buffers at once (modulo `with_direct_access_mut`).
        let mut src_buffer = src.buffer.borrow_mut();
        let mut dst_buffer = dst.buffer.borrow_mut();

        // Validate both regions before touching either buffer, so a failed
        // transfer leaves both memories unchanged.
        let src_range = src
            .checked_region(&mut src_buffer, src_offset, len)?
            .range();
        let dst_range = dst
            .checked_region(&mut dst_buffer, dst_offset, len)?
            .range();

        dst_buffer.as_slice_mut()[dst_range].copy_from_slice(&src_buffer.as_slice()[src_range]);

        Ok(())
    }
464
465    /// Fill the memory region with the specified value.
466    ///
467    /// Semantically equivalent to `memset`.
468    ///
469    /// # Errors
470    ///
471    /// Returns `Err` if the specified region is out of bounds.
472    pub fn clear(&self, offset: usize, new_val: u8, len: usize) -> Result<(), Error> {
473        let mut buffer = self.buffer.borrow_mut();
474
475        let range = self.checked_region(&mut buffer, offset, len)?.range();
476
477        for val in &mut buffer.as_slice_mut()[range] {
478            *val = new_val
479        }
480        Ok(())
481    }
482
    /// Fill the specified memory region with zeroes.
    ///
    /// Equivalent to [`clear`] with a fill value of `0`.
    ///
    /// # Errors
    ///
    /// Returns `Err` if the specified region is out of bounds.
    ///
    /// [`clear`]: #method.clear
    pub fn zero(&self, offset: usize, len: usize) -> Result<(), Error> {
        self.clear(offset, 0, len)
    }
491
    /// Set every byte in the entire linear memory to 0, preserving its size.
    ///
    /// Might be useful for some optimization shenanigans.
    /// Delegates to the backing `ByteBuf` implementation.
    pub fn erase(&self) -> Result<(), Error> {
        self.buffer.borrow_mut().erase().map_err(Error::Memory)
    }
498
    /// Provides direct access to the underlying memory buffer.
    ///
    /// # Panics
    ///
    /// Any call that requires write access to memory (such as [`set`], [`clear`], etc) made within
    /// the closure will panic.
    ///
    /// [`set`]: #method.set
    /// [`clear`]: #method.clear
    pub fn with_direct_access<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
        // Shared borrow: concurrent writers will hit a `RefCell` panic.
        let buf = self.buffer.borrow();
        f(buf.as_slice())
    }
512
513    /// Provides direct mutable access to the underlying memory buffer.
514    ///
515    /// # Panics
516    ///
517    /// Any calls that requires either read or write access to memory (such as [`get`], [`set`], [`copy`], etc) made
518    /// within the closure will panic. Proceed with caution.
519    ///
520    /// [`get`]: #method.get
521    /// [`set`]: #method.set
522    /// [`copy`]: #method.copy
523    pub fn with_direct_access_mut<R, F: FnOnce(&mut [u8]) -> R>(&self, f: F) -> R {
524        let mut buf = self.buffer.borrow_mut();
525        f(buf.as_slice_mut())
526    }
527
    /// Provides direct access to the underlying memory buffer.
    ///
    /// # Panics
    ///
    /// Any call that requires write access to memory (such as [`set`], [`clear`], etc) made while
    /// the returned value is alive will panic.
    ///
    /// [`set`]: #method.set
    /// [`clear`]: #method.clear
    pub fn direct_access(&self) -> impl AsRef<[u8]> + '_ {
        // Wrapper keeping the `RefCell` read guard alive for as long as the
        // caller holds the returned value.
        struct Buffer<'a>(Ref<'a, ByteBuf>);
        impl<'a> AsRef<[u8]> for Buffer<'a> {
            fn as_ref(&self) -> &[u8] {
                self.0.as_slice()
            }
        }

        Buffer(self.buffer.borrow())
    }
547
548    /// Provides direct mutable access to the underlying memory buffer.
549    ///
550    /// # Panics
551    ///
552    /// Any call that requires either read or write access to memory (such as [`get`], [`set`],
553    /// [`copy`], etc) made while the returned value is alive will panic. Proceed with caution.
554    ///
555    /// [`get`]: #method.get
556    /// [`set`]: #method.set
557    /// [`copy`]: #method.copy
558    pub fn direct_access_mut(&self) -> impl AsMut<[u8]> + '_ {
559        struct Buffer<'a>(RefMut<'a, ByteBuf>);
560        impl<'a> AsMut<[u8]> for Buffer<'a> {
561            fn as_mut(&mut self) -> &mut [u8] {
562                self.0.as_slice_mut()
563            }
564        }
565
566        Buffer(self.buffer.borrow_mut())
567    }
568}
569
#[cfg(test)]
mod tests {

    use super::{MemoryInstance, MemoryRef, LINEAR_MEMORY_PAGE_SIZE};
    use crate::{memory_units::Pages, Error};
    use alloc::rc::Rc;

    /// Table-driven check of `alloc` limit validation:
    /// (initial pages, optional maximum, expected success).
    #[test]
    fn alloc() {
        let mut fixtures = vec![
            (0, None, true),
            (0, Some(0), true),
            (1, None, true),
            (1, Some(1), true),
            (0, Some(1), true),
            (1, Some(0), false),
        ];

        // Full 4GiB allocations only fit the address space on 64-bit hosts.
        #[cfg(target_pointer_width = "64")]
        fixtures.extend(&[
            (65536, Some(65536), true),
            (65536, Some(0), false),
            (65536, None, true),
        ]);

        for (index, &(initial, maybe_max, expected_ok)) in fixtures.iter().enumerate() {
            let initial: Pages = Pages(initial);
            let maximum: Option<Pages> = maybe_max.map(Pages);
            let result = MemoryInstance::alloc(initial, maximum);
            if result.is_ok() != expected_ok {
                panic!(
                    "unexpected error at {}, initial={:?}, max={:?}, expected={}, result={:?}",
                    index, initial, maybe_max, expected_ok, result,
                );
            }
        }
    }

    /// `LINEAR_MEMORY_PAGE_SIZE` must agree with the `memory_units` page size.
    #[test]
    fn ensure_page_size() {
        use crate::memory_units::ByteSize;
        assert_eq!(LINEAR_MEMORY_PAGE_SIZE, Pages::BYTE_SIZE);
    }

    /// Build a one-page memory whose leading bytes are `initial_content`.
    fn create_memory(initial_content: &[u8]) -> MemoryInstance {
        let mem = MemoryInstance::new(Pages(1), Some(Pages(1))).unwrap();
        mem.set(0, initial_content)
            .expect("Successful initialize the memory");
        mem
    }

    /// Read `size` bytes at `offset` into a fresh vector, panicking on failure.
    fn get_into_vec(mem: &MemoryInstance, offset: u32, size: usize) -> Vec<u8> {
        let mut buffer = vec![0x00; size];
        mem.get_into(offset, &mut buffer[..])
            .unwrap_or_else(|error| {
                panic!(
                    "failed to retrieve data from linear memory at offset {} with size {}: {}",
                    offset, size, error
                )
            });
        buffer
    }

    // `copy` must behave like `memmove`: dst overlapping after src.
    #[test]
    fn copy_overlaps_1() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        mem.copy(0, 4, 6).expect("Successfully copy the elements");
        let result = get_into_vec(&mem, 0, 10);
        assert_eq!(result, &[0, 1, 2, 3, 0, 1, 2, 3, 4, 5]);
    }

    // `copy` must behave like `memmove`: dst overlapping before src.
    #[test]
    fn copy_overlaps_2() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        mem.copy(4, 0, 6).expect("Successfully copy the elements");
        let result = get_into_vec(&mem, 0, 10);
        assert_eq!(result, &[4, 5, 6, 7, 8, 9, 6, 7, 8, 9]);
    }

    // Disjoint regions copy successfully.
    #[test]
    fn copy_nonoverlapping() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        mem.copy_nonoverlapping(0, 10, 10)
            .expect("Successfully copy the elements");
        let result = get_into_vec(&mem, 10, 10);
        assert_eq!(result, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
    }

    // Overlapping regions must be rejected (dst after src).
    #[test]
    fn copy_nonoverlapping_overlaps_1() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        let result = mem.copy_nonoverlapping(0, 4, 6);
        match result {
            Err(Error::Memory(_)) => {}
            _ => panic!("Expected Error::Memory(_) result, but got {:?}", result),
        }
    }

    // Overlapping regions must be rejected (dst before src).
    #[test]
    fn copy_nonoverlapping_overlaps_2() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        let result = mem.copy_nonoverlapping(4, 0, 6);
        match result {
            Err(Error::Memory(_)) => {}
            _ => panic!("Expected Error::Memory(_), but got {:?}", result),
        }
    }

    // Transfer between two distinct memories copies only into `dst`.
    #[test]
    fn transfer_works() {
        let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
        let dst = MemoryRef(Rc::new(create_memory(&[
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        ])));

        MemoryInstance::transfer(&src, 4, &dst, 0, 3).unwrap();

        assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        assert_eq!(
            get_into_vec(&dst, 0, 10),
            &[4, 5, 6, 13, 14, 15, 16, 17, 18, 19]
        );
    }

    // Transfer with src == dst must fall back to the overlap-safe `copy`.
    #[test]
    fn transfer_still_works_with_same_memory() {
        let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));

        MemoryInstance::transfer(&src, 4, &src, 0, 3).unwrap();

        assert_eq!(get_into_vec(&src, 0, 10), &[4, 5, 6, 3, 4, 5, 6, 7, 8, 9]);
    }

    // Out-of-bounds transfer on the same memory errors and leaves it untouched.
    #[test]
    fn transfer_oob_with_same_memory_errors() {
        let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
        assert!(MemoryInstance::transfer(&src, 65535, &src, 0, 3).is_err());

        // Check that memories content left untouched
        assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
    }

    // Out-of-bounds transfer between memories errors and leaves both untouched.
    #[test]
    fn transfer_oob_errors() {
        let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
        let dst = MemoryRef(Rc::new(create_memory(&[
            10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        ])));

        assert!(MemoryInstance::transfer(&src, 65535, &dst, 0, 3).is_err());

        // Check that memories content left untouched
        assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        assert_eq!(
            get_into_vec(&dst, 0, 10),
            &[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
        );
    }

    // `clear` fills the region with the requested byte.
    #[test]
    fn clear() {
        let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        mem.clear(0, 0x4A, 10)
            .expect("To successfully clear the memory");
        let result = get_into_vec(&mem, 0, 10);
        assert_eq!(result, &[0x4A; 10]);
    }

    // `get_into` reads from an arbitrary interior offset.
    #[test]
    fn get_into() {
        let mem = MemoryInstance::new(Pages(1), None).unwrap();
        mem.set(6, &[13, 17, 129])
            .expect("memory set should not fail");

        let mut data = [0u8; 2];
        mem.get_into(7, &mut data[..])
            .expect("get_into should not fail");

        assert_eq!(data, [17, 129]);
    }

    // Direct-access closures see and mutate the full one-page buffer.
    #[test]
    fn zero_copy() {
        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
        mem.set(100, &[0]).expect("memory set should not fail");
        mem.with_direct_access_mut(|buf| {
            assert_eq!(
                buf.len(),
                65536,
                "the buffer length is expected to be 1 page long"
            );
            buf[..10].copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        });
        mem.with_direct_access(|buf| {
            assert_eq!(
                buf.len(),
                65536,
                "the buffer length is expected to be 1 page long"
            );
            assert_eq!(&buf[..10], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        });
    }

    // Writing through the memory while a direct-access borrow is live must
    // trip the `RefCell` borrow check.
    #[should_panic]
    #[test]
    fn zero_copy_panics_on_nested_access() {
        let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
        let mem_inner = mem.clone();
        mem.with_direct_access(move |_| {
            let _ = mem_inner.set(0, &[11, 12, 13]);
        });
    }
}