ic_stable_structures/
memory_manager.rs

1//! A module for simulating multiple memories within a single memory.
2//!
3//! The typical way for a canister to have multiple stable structures is by dividing the memory into
4//! distinct ranges, dedicating each range to a stable structure. This approach has two problems:
5//!
//! 1. The developer needs to specify, in advance, an upper bound on the memory of each stable structure.
7//! 2. It wastes the canister's memory allocation. For example, if a canister creates two stable
8//!    structures A and B, and gives each one of them a 1GiB region of memory, then writing to B will
9//!    require growing > 1GiB of memory just to be able to write to it.
10//!
11//! The [`MemoryManager`] in this module solves both of these problems. It simulates having
12//! multiple memories, each being able to grow without bound. That way, a developer doesn't need to
//! put an upper bound on how much the stable structures can grow, and the canister's memory allocation
14//! becomes less wasteful.
15//!
16//! Example Usage:
17//!
18//! ```
19//! use ic_stable_structures::{DefaultMemoryImpl, Memory};
20//! use ic_stable_structures::memory_manager::{MemoryManager, MemoryId};
21//!
22//! let mem_mgr = MemoryManager::init(DefaultMemoryImpl::default());
23//!
24//! // Create different memories, each with a unique ID.
25//! let memory_0 = mem_mgr.get(MemoryId::new(0));
26//! let memory_1 = mem_mgr.get(MemoryId::new(1));
27//!
28//! // Each memory can be used independently.
29//! memory_0.grow(1);
30//! memory_0.write(0, &[1, 2, 3]);
31//!
32//! memory_1.grow(1);
33//! memory_1.write(0, &[4, 5, 6]);
34//!
35//! let mut bytes = vec![0; 3];
36//! memory_0.read(0, &mut bytes);
37//! assert_eq!(bytes, vec![1, 2, 3]);
38//!
39//! let mut bytes = vec![0; 3];
40//! memory_1.read(0, &mut bytes);
41//! assert_eq!(bytes, vec![4, 5, 6]);
42//! ```
43use crate::{
44    read_struct, read_to_vec,
45    types::{Address, Bytes},
46    write, write_struct, Memory, WASM_PAGE_SIZE,
47};
48use std::cell::{Cell, RefCell};
49use std::rc::Rc;
50
51const MAGIC: &[u8; 3] = b"MGR";
52const LAYOUT_VERSION: u8 = 1;
53
54// The maximum number of memories that can be created.
55const MAX_NUM_MEMORIES: u8 = 255;
56
57// The maximum number of buckets the memory manager can handle.
58// With a bucket size of 128 pages this can support up to 256GiB of memory.
59const MAX_NUM_BUCKETS: u64 = 32768;
60
61const BUCKET_SIZE_IN_PAGES: u64 = 128;
62
63// A value used internally to indicate that a bucket is unallocated.
64const UNALLOCATED_BUCKET_MARKER: u8 = MAX_NUM_MEMORIES;
65
66// The offset where buckets are in memory.
67const BUCKETS_OFFSET_IN_PAGES: u64 = 1;
68const BUCKETS_OFFSET_IN_BYTES: u64 = BUCKETS_OFFSET_IN_PAGES * WASM_PAGE_SIZE;
69
70// Reserved bytes in the header for future extensions.
71const HEADER_RESERVED_BYTES: usize = 32;
72
73/// A memory manager simulates multiple memories within a single memory.
74///
75/// The memory manager can return up to 255 unique instances of [`VirtualMemory`], and each can be
76/// used independently and can grow up to the bounds of the underlying memory.
77///
78/// By default, the memory manager divides the memory into "buckets" of 128 pages. Each
79/// [`VirtualMemory`] is internally represented as a list of buckets. Buckets of different memories
80/// can be interleaved, but the [`VirtualMemory`] interface gives the illusion of a continuous
81/// address space.
82///
83/// Because a [`VirtualMemory`] is a list of buckets, this implies that internally it grows one
84/// bucket at a time.
85///
86/// The first page of the memory is reserved for the memory manager's own state. The layout for
87/// this state is as follows:
88///
89/// # V1 layout
90///
91/// ```text
92/// -------------------------------------------------- <- Address 0
93/// Magic "MGR"                           ↕ 3 bytes
94/// --------------------------------------------------
95/// Layout version                        ↕ 1 byte
96/// --------------------------------------------------
97/// Number of allocated buckets           ↕ 2 bytes
98/// --------------------------------------------------
99/// Bucket size (in pages) = N            ↕ 2 bytes
100/// --------------------------------------------------
101/// Reserved space                        ↕ 32 bytes
102/// --------------------------------------------------
103/// Size of memory 0 (in pages)           ↕ 8 bytes
104/// --------------------------------------------------
105/// Size of memory 1 (in pages)           ↕ 8 bytes
106/// --------------------------------------------------
107/// ...
108/// --------------------------------------------------
109/// Size of memory 254 (in pages)         ↕ 8 bytes
110/// -------------------------------------------------- <- Bucket allocations
111/// Bucket 1                              ↕ 1 byte        (1 byte indicating which memory owns it)
112/// --------------------------------------------------
113/// Bucket 2                              ↕ 1 byte
114/// --------------------------------------------------
115/// ...
116/// --------------------------------------------------
117/// Bucket `MAX_NUM_BUCKETS`              ↕ 1 byte
118/// --------------------------------------------------
119/// Unallocated space                     ↕ 30'688 bytes
120/// -------------------------------------------------- <- Buckets (Page 1)
121/// Bucket 1                              ↕ N pages
122/// -------------------------------------------------- <- Page N + 1
123/// Bucket 2                              ↕ N pages
124/// --------------------------------------------------
125/// ...
126/// -------------------------------------------------- <- Page ((MAX_NUM_BUCKETS - 1) * N + 1)
127/// Bucket MAX_NUM_BUCKETS                ↕ N pages
128/// ```
pub struct MemoryManager<M: Memory> {
    // Shared mutable state. The `Rc<RefCell<..>>` lets every `VirtualMemory`
    // handle returned by `get` refer to the same underlying manager.
    inner: Rc<RefCell<MemoryManagerInner<M>>>,
}
132
133impl<M: Memory> MemoryManager<M> {
134    /// Initializes a `MemoryManager` with the given memory.
135    pub fn init(memory: M) -> Self {
136        Self::init_with_bucket_size(memory, BUCKET_SIZE_IN_PAGES as u16)
137    }
138
139    /// Initializes a `MemoryManager` with the given memory and bucket size in pages.
140    pub fn init_with_bucket_size(memory: M, bucket_size_in_pages: u16) -> Self {
141        Self {
142            inner: Rc::new(RefCell::new(MemoryManagerInner::init(
143                memory,
144                bucket_size_in_pages,
145            ))),
146        }
147    }
148
149    /// Returns the memory associated with the given ID.
150    pub fn get(&self, id: MemoryId) -> VirtualMemory<M> {
151        VirtualMemory {
152            id,
153            memory_manager: self.inner.clone(),
154            cache: BucketCache::new(),
155        }
156    }
157
158    /// Returns the underlying memory.
159    ///
160    /// # Returns
161    /// - The underlying memory, if there is exactly one strong reference to the memory manager.  Please see [`Rc::try_unwrap`](https://doc.rust-lang.org/std/rc/struct.Rc.html#method.try_unwrap) for more details.
162    /// - None otherwise.
163    pub fn into_memory(self) -> Option<M> {
164        Rc::into_inner(self.inner).map(|inner| inner.into_inner().into_memory())
165    }
166}
167
// The header persisted at address 0 of the underlying memory. The packed
// `repr(C)` layout must match the "V1 layout" documented on [`MemoryManager`]
// byte for byte.
#[repr(C, packed)]
struct Header {
    // Must equal `MAGIC` ("MGR") for a valid header.
    magic: [u8; 3],

    // Must equal `LAYOUT_VERSION`.
    version: u8,

    /// The number of buckets allocated by the memory manager.
    num_allocated_buckets: u16,

    /// The size of a bucket in Wasm pages.
    bucket_size_in_pages: u16,

    /// Reserved bytes for future extensions
    _reserved: [u8; HEADER_RESERVED_BYTES],

    /// The size of each individual memory that can be created by the memory manager.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],
}
186
187impl Header {
188    fn size() -> Bytes {
189        Bytes::new(core::mem::size_of::<Self>() as u64)
190    }
191}
192
/// A single simulated memory handed out by a [`MemoryManager`].
#[derive(Clone)]
pub struct VirtualMemory<M: Memory> {
    // The ID identifying this memory within the manager.
    id: MemoryId,
    // Shared handle to the manager that owns this memory's buckets.
    memory_manager: Rc<RefCell<MemoryManagerInner<M>>>,
    // Per-handle cache of the most recently touched bucket, used to skip the
    // bucket lookup on repeated accesses to the same bucket.
    cache: BucketCache,
}
199
200impl<M: Memory> Memory for VirtualMemory<M> {
201    fn size(&self) -> u64 {
202        self.memory_manager.borrow().memory_size(self.id)
203    }
204
205    fn grow(&self, pages: u64) -> i64 {
206        self.memory_manager.borrow_mut().grow(self.id, pages)
207    }
208
209    fn read(&self, offset: u64, dst: &mut [u8]) {
210        self.memory_manager
211            .borrow()
212            .read(self.id, offset, dst, &self.cache)
213    }
214
215    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
216        self.memory_manager
217            .borrow()
218            .read_unsafe(self.id, offset, dst, count, &self.cache)
219    }
220
221    fn write(&self, offset: u64, src: &[u8]) {
222        self.memory_manager
223            .borrow()
224            .write(self.id, offset, src, &self.cache)
225    }
226}
227
/// The state shared between a [`MemoryManager`] and all the [`VirtualMemory`]
/// handles it has produced. Mirrors the persisted `Header` plus the derived
/// bucket-ownership map.
#[derive(Clone)]
struct MemoryManagerInner<M: Memory> {
    memory: M,

    /// The number of buckets that have been allocated.
    allocated_buckets: u16,

    /// The size of a bucket in Wasm pages.
    bucket_size_in_pages: u16,

    /// An array storing the size (in pages) of each of the managed memories.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],

    /// A map mapping each managed memory to the bucket ids that are allocated to it.
    memory_buckets: Vec<Vec<BucketId>>,
}
243
impl<M: Memory> MemoryManagerInner<M> {
    /// Initializes the state from `memory`: loads a previously persisted
    /// memory manager if one is detected, otherwise creates a fresh one.
    fn init(memory: M, bucket_size_in_pages: u16) -> Self {
        if memory.size() == 0 {
            // Memory is empty. Create a new map.
            return Self::new(memory, bucket_size_in_pages);
        }

        // Check if the magic in the memory corresponds to this object.
        let mut dst = [0; 3];
        memory.read(0, &mut dst);
        if &dst != MAGIC {
            // No memory manager found. Create a new instance.
            MemoryManagerInner::new(memory, bucket_size_in_pages)
        } else {
            // The memory already contains a memory manager. Load it.
            // Note: in this branch the persisted bucket size from the header
            // is used and the `bucket_size_in_pages` argument is ignored.
            MemoryManagerInner::load(memory)
        }
    }

    /// Creates a fresh memory manager and persists its (empty) state:
    /// the header and an all-unallocated bucket table.
    fn new(memory: M, bucket_size_in_pages: u16) -> Self {
        let mem_mgr = Self {
            memory,
            allocated_buckets: 0,
            memory_sizes_in_pages: [0; MAX_NUM_MEMORIES as usize],
            memory_buckets: vec![vec![]; MAX_NUM_MEMORIES as usize],
            bucket_size_in_pages,
        };

        // Persist the freshly-initialized header at address 0.
        mem_mgr.save_header();

        // Mark all the buckets as unallocated.
        write(
            &mem_mgr.memory,
            bucket_allocations_address(BucketId(0)).get(),
            &[UNALLOCATED_BUCKET_MARKER; MAX_NUM_BUCKETS as usize],
        );

        mem_mgr
    }

    /// Loads a previously persisted memory manager from `memory`.
    ///
    /// # Panics
    /// Panics if the magic bytes or the layout version do not match.
    fn load(memory: M) -> Self {
        // Read the header from memory.
        let header: Header = read_struct(Address::from(0), &memory);
        assert_eq!(&header.magic, MAGIC, "Bad magic.");
        assert_eq!(header.version, LAYOUT_VERSION, "Unsupported version.");

        // Read the bucket-allocation table: one owner byte per bucket.
        let mut buckets = vec![];
        read_to_vec(
            &memory,
            bucket_allocations_address(BucketId(0)),
            &mut buckets,
            MAX_NUM_BUCKETS as usize,
        );

        // Rebuild the per-memory bucket lists. Iterating buckets in
        // increasing index order keeps each list sorted by virtual position,
        // which the indexing in `for_each_bucket` relies on.
        let mut memory_buckets = vec![vec![]; MAX_NUM_MEMORIES as usize];
        for (bucket_idx, memory) in buckets.into_iter().enumerate() {
            if memory != UNALLOCATED_BUCKET_MARKER {
                memory_buckets[memory as usize].push(BucketId(bucket_idx as u16));
            }
        }

        Self {
            memory,
            allocated_buckets: header.num_allocated_buckets,
            bucket_size_in_pages: header.bucket_size_in_pages,
            memory_sizes_in_pages: header.memory_sizes_in_pages,
            memory_buckets,
        }
    }

    /// Persists the in-memory header fields to address 0.
    fn save_header(&self) {
        let header = Header {
            magic: *MAGIC,
            version: LAYOUT_VERSION,
            num_allocated_buckets: self.allocated_buckets,
            bucket_size_in_pages: self.bucket_size_in_pages,
            _reserved: [0; HEADER_RESERVED_BYTES],
            memory_sizes_in_pages: self.memory_sizes_in_pages,
        };

        write_struct(&header, Address::from(0), &self.memory);
    }

    /// Returns the size of a memory (in pages).
    fn memory_size(&self, id: MemoryId) -> u64 {
        self.memory_sizes_in_pages[id.0 as usize]
    }

    /// Grows the memory with the given id by the given number of pages.
    ///
    /// Returns the previous size in pages on success, or -1 if the grow
    /// would exceed `MAX_NUM_BUCKETS` buckets in total.
    fn grow(&mut self, id: MemoryId, pages: u64) -> i64 {
        // Compute how many additional buckets are needed.
        let old_size = self.memory_size(id);
        let new_size = old_size + pages;
        let current_buckets = self.num_buckets_needed(old_size);
        let required_buckets = self.num_buckets_needed(new_size);
        let new_buckets_needed = required_buckets - current_buckets;

        if new_buckets_needed + self.allocated_buckets as u64 > MAX_NUM_BUCKETS {
            // Exceeded the memory that can be managed.
            return -1;
        }

        let memory_bucket = &mut self.memory_buckets[id.0 as usize];
        // Allocate new buckets as needed.
        memory_bucket.reserve(new_buckets_needed as usize);
        for _ in 0..new_buckets_needed {
            // Buckets are handed out sequentially: `allocated_buckets` is
            // always the id of the next free bucket.
            let new_bucket_id = BucketId(self.allocated_buckets);
            memory_bucket.push(new_bucket_id);

            // Write in stable store that this bucket belongs to the memory with the provided `id`.
            write(
                &self.memory,
                bucket_allocations_address(new_bucket_id).get(),
                &[id.0],
            );

            self.allocated_buckets += 1;
        }

        // Grow the underlying memory if necessary.
        let pages_needed = BUCKETS_OFFSET_IN_PAGES
            + self.bucket_size_in_pages as u64 * self.allocated_buckets as u64;
        if pages_needed > self.memory.size() {
            let additional_pages_needed = pages_needed - self.memory.size();
            let prev_pages = self.memory.grow(additional_pages_needed);
            if prev_pages == -1 {
                // The underlying memory refused to grow. The bucket ownership
                // bytes written above have already been persisted, but the
                // header still reflects the old sizes.
                panic!("{id:?}: grow failed");
            }
        }

        // Update the memory with the new size.
        self.memory_sizes_in_pages[id.0 as usize] = new_size;

        // Update the header and return the old size.
        self.save_header();
        old_size as i64
    }

    /// Writes `src` to the memory `id` starting at byte `offset`.
    ///
    /// # Panics
    /// Panics if the write extends past the end of the memory.
    fn write(&self, id: MemoryId, offset: u64, src: &[u8], bucket_cache: &BucketCache) {
        // Fast path: the whole segment lies inside the most recently touched
        // bucket, so it maps to a single contiguous real segment.
        // NOTE(review): the cache is consulted before the bounds check below,
        // so an access past `memory_size(id)` that still lands inside a
        // cached bucket will not panic — confirm this is intended.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: src.len().into(),
        }) {
            self.memory.write(real_address.get(), src);
            return;
        }

        if (offset + src.len() as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: write out of bounds");
        }

        // Slow path: split the write at bucket boundaries and write each
        // piece to its bucket's real location.
        let mut bytes_written = 0;
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: src.len().into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.write(
                    address.get(),
                    &src[bytes_written as usize..(bytes_written + length.get()) as usize],
                );

                bytes_written += length.get();
            },
        );
    }

    /// Reads `dst.len()` bytes from memory `id` starting at `offset`.
    #[inline]
    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8], bucket_cache: &BucketCache) {
        // SAFETY: this is trivially safe because dst has dst.len() space.
        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len(), bucket_cache) }
    }

    /// # Safety
    ///
    /// Callers must guarantee that
    ///   * it is valid to write `count` number of bytes starting from `dst`,
    ///   * `dst..dst + count` does not overlap with `self`.
    unsafe fn read_unsafe(
        &self,
        id: MemoryId,
        offset: u64,
        dst: *mut u8,
        count: usize,
        bucket_cache: &BucketCache,
    ) {
        // First try to find the virtual segment in the cache.
        // NOTE(review): as in `write`, a cache hit bypasses the bounds check
        // below — confirm this is intended.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: count.into(),
        }) {
            self.memory.read_unsafe(real_address.get(), dst, count);
            return;
        }

        if (offset + count as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: read out of bounds");
        }

        // Slow path: split the read at bucket boundaries and read each piece
        // from its bucket's real location.
        let mut bytes_read = Bytes::new(0);
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: count.into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.read_unsafe(
                    address.get(),
                    // The cast to usize is safe because `bytes_read` and `length` are bounded by
                    // usize `count`.
                    dst.add(bytes_read.get() as usize),
                    length.get() as usize,
                );

                bytes_read += length;
            },
        )
    }

    /// Maps a segment of virtual memory to segments of real memory.
    ///
    /// `func` is invoked with real memory segments of real memory that `virtual_segment` is mapped
    /// to.
    ///
    /// A segment in virtual memory can map to multiple segments of real memory. Here's an example:
    ///
    /// Virtual Memory
    /// ```text
    /// --------------------------------------------------------
    ///          (A) ---------- SEGMENT ---------- (B)
    /// --------------------------------------------------------
    /// ↑               ↑               ↑               ↑
    /// Bucket 0        Bucket 1        Bucket 2        Bucket 3
    /// ```
    ///
    /// The [`VirtualMemory`] is internally divided into fixed-size buckets. In the memory's virtual
    /// address space, all these buckets are consecutive, but in real memory this may not be the case.
    ///
    /// A virtual segment would first be split at the bucket boundaries. The example virtual segment
    /// above would be split into the following segments:
    ///
    ///    (A, end of bucket 0)
    ///    (start of bucket 1, end of bucket 1)
    ///    (start of bucket 2, B)
    ///
    /// Each of the segments above can then be translated into the real address space by looking up
    /// the underlying buckets' addresses in real memory.
    fn for_each_bucket(
        &self,
        MemoryId(id): MemoryId,
        virtual_segment: VirtualSegment,
        bucket_cache: &BucketCache,
        mut func: impl FnMut(RealSegment),
    ) {
        // Get the buckets allocated to the given memory id.
        let buckets = self.memory_buckets[id as usize].as_slice();
        let bucket_size_in_bytes = self.bucket_size_in_bytes().get();

        let virtual_offset = virtual_segment.address.get();

        let mut length = virtual_segment.length.get();
        let mut bucket_idx = (virtual_offset / bucket_size_in_bytes) as usize;
        // The start offset where we start reading from in a bucket. In the first iteration the
        // value is calculated from `virtual_offset`, in later iterations, it's always 0.
        let mut start_offset_in_bucket = virtual_offset % bucket_size_in_bytes;

        while length > 0 {
            // Panics if the segment extends past the memory's allocated
            // buckets; callers are expected to have bounds-checked already.
            let bucket_address =
                self.bucket_address(buckets.get(bucket_idx).expect("bucket idx out of bounds"));

            let bucket_start = bucket_idx as u64 * bucket_size_in_bytes;
            let segment_len = (bucket_size_in_bytes - start_offset_in_bucket).min(length);

            // Cache this bucket.
            bucket_cache.store(
                VirtualSegment {
                    address: bucket_start.into(),
                    length: self.bucket_size_in_bytes(),
                },
                bucket_address,
            );

            func(RealSegment {
                address: bucket_address + start_offset_in_bucket.into(),
                length: segment_len.into(),
            });

            length -= segment_len;
            bucket_idx += 1;
            start_offset_in_bucket = 0;
        }
    }

    /// Returns the size of a bucket in bytes.
    fn bucket_size_in_bytes(&self) -> Bytes {
        Bytes::from(self.bucket_size_in_pages as u64 * WASM_PAGE_SIZE)
    }

    /// Returns the number of buckets needed to accommodate the given number of pages.
    fn num_buckets_needed(&self, num_pages: u64) -> u64 {
        // Ceiling division.
        num_pages.div_ceil(self.bucket_size_in_pages as u64)
    }

    /// Returns the underlying memory.
    pub fn into_memory(self) -> M {
        self.memory
    }

    /// Returns the real address where the given bucket's data begins.
    #[inline]
    fn bucket_address(&self, id: &BucketId) -> Address {
        Address::from(BUCKETS_OFFSET_IN_BYTES) + self.bucket_size_in_bytes() * Bytes::from(id.0)
    }
}
562
/// A contiguous range of bytes in a [`VirtualMemory`]'s address space.
#[derive(Copy, Clone)]
struct VirtualSegment {
    /// Start of the segment (virtual address).
    address: Address,
    /// Length of the segment in bytes.
    length: Bytes,
}
568
569impl VirtualSegment {
570    fn contains_segment(&self, other: &VirtualSegment) -> bool {
571        self.address <= other.address && other.address + other.length <= self.address + self.length
572    }
573}
574
/// A contiguous range of bytes in the underlying (real) memory.
struct RealSegment {
    // Start of the segment in real memory.
    address: Address,
    // Length of the segment in bytes.
    length: Bytes,
}
579
/// The ID of a memory managed by a [`MemoryManager`]. Valid IDs are 0..=254;
/// 255 is reserved as the internal unallocated-bucket marker.
#[derive(Clone, Copy, Ord, Eq, PartialEq, PartialOrd, Debug)]
pub struct MemoryId(u8);
582
583impl MemoryId {
584    pub const fn new(id: u8) -> Self {
585        // Any ID can be used except the special value that's used internally to
586        // mark a bucket as unallocated.
587        assert!(id != UNALLOCATED_BUCKET_MARKER);
588
589        Self(id)
590    }
591}
592
// Referring to a bucket.
// A `u16` suffices since bucket indices are bounded by `MAX_NUM_BUCKETS` (32768).
#[derive(Clone, Copy, Debug, PartialEq)]
struct BucketId(u16);
596
597fn bucket_allocations_address(id: BucketId) -> Address {
598    Address::from(0) + Header::size() + Bytes::from(id.0)
599}
600
/// Cache which stores the last touched bucket and the corresponding real address.
///
/// If a segment from this bucket is accessed, we can return the real address faster.
#[derive(Clone)]
struct BucketCache {
    // The most recently touched bucket, as a segment of virtual address space.
    bucket: Cell<VirtualSegment>,
    /// The real address that corresponds to bucket.address
    real_address: Cell<Address>,
}
610
611impl BucketCache {
612    #[inline]
613    fn new() -> Self {
614        BucketCache {
615            bucket: Cell::new(VirtualSegment {
616                address: Address::from(0),
617                length: Bytes::new(0),
618            }),
619            real_address: Cell::new(Address::from(0)),
620        }
621    }
622}
623
624impl BucketCache {
625    /// Returns the real address corresponding to `virtual_segment.address` if `virtual_segment`
626    /// is fully contained within the cached bucket, otherwise `None`.
627    #[inline]
628    fn get(&self, virtual_segment: VirtualSegment) -> Option<Address> {
629        let cached_bucket = self.bucket.get();
630
631        cached_bucket
632            .contains_segment(&virtual_segment)
633            .then(|| self.real_address.get() + (virtual_segment.address - cached_bucket.address))
634    }
635
636    /// Stores the mapping of a bucket to a real address.
637    #[inline]
638    fn store(&self, bucket: VirtualSegment, real_address: Address) {
639        self.bucket.set(bucket);
640        self.real_address.set(real_address);
641    }
642}
643
644#[cfg(test)]
645mod test {
646    use super::*;
647    use crate::safe_write;
648    use proptest::prelude::*;
649
650    const MAX_MEMORY_IN_PAGES: u64 = MAX_NUM_BUCKETS * BUCKET_SIZE_IN_PAGES;
651
652    fn make_memory() -> Rc<RefCell<Vec<u8>>> {
653        Rc::new(RefCell::new(Vec::new()))
654    }
655
656    #[test]
657    fn can_get_memory() {
658        let mem_mgr = MemoryManager::init(make_memory());
659        let memory = mem_mgr.get(MemoryId(0));
660        assert_eq!(memory.size(), 0);
661    }
662
663    #[test]
664    fn can_allocate_and_use_memory() {
665        let mem_mgr = MemoryManager::init(make_memory());
666        let memory = mem_mgr.get(MemoryId(0));
667        assert_eq!(memory.grow(1), 0);
668        assert_eq!(memory.size(), 1);
669
670        memory.write(0, &[1, 2, 3]);
671
672        let mut bytes = vec![0; 3];
673        memory.read(0, &mut bytes);
674        assert_eq!(bytes, vec![1, 2, 3]);
675
676        assert_eq!(mem_mgr.inner.borrow().memory_buckets[0], vec![BucketId(0)]);
677
678        assert!(mem_mgr.inner.borrow().memory_buckets[1..]
679            .iter()
680            .all(|x| x.is_empty()));
681    }
682
683    #[test]
684    fn can_allocate_and_use_multiple_memories() {
685        let mem = make_memory();
686        let mem_mgr = MemoryManager::init(mem.clone());
687        let memory_0 = mem_mgr.get(MemoryId(0));
688        let memory_1 = mem_mgr.get(MemoryId(1));
689
690        assert_eq!(memory_0.grow(1), 0);
691        assert_eq!(memory_1.grow(1), 0);
692
693        assert_eq!(memory_0.size(), 1);
694        assert_eq!(memory_1.size(), 1);
695
696        assert_eq!(
697            &mem_mgr.inner.borrow().memory_buckets[..2],
698            &[vec![BucketId(0)], vec![BucketId(1)],]
699        );
700
701        assert!(mem_mgr.inner.borrow().memory_buckets[2..]
702            .iter()
703            .all(|x| x.is_empty()));
704
705        memory_0.write(0, &[1, 2, 3]);
706        memory_0.write(0, &[1, 2, 3]);
707        memory_1.write(0, &[4, 5, 6]);
708
709        let mut bytes = vec![0; 3];
710        memory_0.read(0, &mut bytes);
711        assert_eq!(bytes, vec![1, 2, 3]);
712
713        let mut bytes = vec![0; 3];
714        memory_1.read(0, &mut bytes);
715        assert_eq!(bytes, vec![4, 5, 6]);
716
717        // + 1 is for the header.
718        assert_eq!(mem.size(), 2 * BUCKET_SIZE_IN_PAGES + 1);
719    }
720
721    #[test]
722    fn can_be_reinitialized_from_memory() {
723        let mem = make_memory();
724        let mem_mgr = MemoryManager::init(mem.clone());
725        let memory_0 = mem_mgr.get(MemoryId(0));
726        let memory_1 = mem_mgr.get(MemoryId(1));
727
728        assert_eq!(memory_0.grow(1), 0);
729        assert_eq!(memory_1.grow(1), 0);
730
731        memory_0.write(0, &[1, 2, 3]);
732        memory_1.write(0, &[4, 5, 6]);
733
734        let mem_mgr = MemoryManager::init(mem);
735        let memory_0 = mem_mgr.get(MemoryId(0));
736        let memory_1 = mem_mgr.get(MemoryId(1));
737
738        let mut bytes = vec![0; 3];
739        memory_0.read(0, &mut bytes);
740        assert_eq!(bytes, vec![1, 2, 3]);
741
742        memory_1.read(0, &mut bytes);
743        assert_eq!(bytes, vec![4, 5, 6]);
744    }
745
746    #[test]
747    fn growing_same_memory_multiple_times_doesnt_increase_underlying_allocation() {
748        let mem = make_memory();
749        let mem_mgr = MemoryManager::init(mem.clone());
750        let memory_0 = mem_mgr.get(MemoryId(0));
751
752        // Grow the memory by 1 page. This should increase the underlying allocation
753        // by `BUCKET_SIZE_IN_PAGES` pages.
754        assert_eq!(memory_0.grow(1), 0);
755        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
756
757        // Grow the memory again. This should NOT increase the underlying allocation.
758        assert_eq!(memory_0.grow(1), 1);
759        assert_eq!(memory_0.size(), 2);
760        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
761
762        // Grow the memory up to the BUCKET_SIZE_IN_PAGES. This should NOT increase the underlying
763        // allocation.
764        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES - 2), 2);
765        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES);
766        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
767
768        // Grow the memory by one more page. This should increase the underlying allocation.
769        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
770        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES + 1);
771        assert_eq!(mem.size(), 1 + 2 * BUCKET_SIZE_IN_PAGES);
772    }
773
774    #[test]
775    fn does_not_grow_memory_unnecessarily() {
776        let mem = make_memory();
777        let initial_size = BUCKET_SIZE_IN_PAGES * 2;
778
779        // Grow the memory manually before passing it into the memory manager.
780        mem.grow(initial_size);
781
782        let mem_mgr = MemoryManager::init(mem.clone());
783        let memory_0 = mem_mgr.get(MemoryId(0));
784
785        // Grow the memory by 1 page.
786        assert_eq!(memory_0.grow(1), 0);
787        assert_eq!(mem.size(), initial_size);
788
789        // Grow the memory by BUCKET_SIZE_IN_PAGES more pages, which will cause the underlying
790        // allocation to increase.
791        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 1);
792        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES * 2);
793    }
794
795    #[test]
796    fn growing_beyond_capacity_fails() {
797        let mem = make_memory();
798        let mem_mgr = MemoryManager::init(mem);
799        let memory_0 = mem_mgr.get(MemoryId(0));
800
801        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES + 1), -1);
802
803        // Try to grow the memory by MAX_MEMORY_IN_PAGES + 1.
804        assert_eq!(memory_0.grow(1), 0); // should succeed
805        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES), -1); // should fail.
806    }
807
808    #[test]
809    fn can_write_across_bucket_boundaries() {
810        let mem = make_memory();
811        let mem_mgr = MemoryManager::init(mem);
812        let memory_0 = mem_mgr.get(MemoryId(0));
813
814        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES + 1), 0);
815
816        memory_0.write(
817            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
818            &[1, 2, 3],
819        );
820
821        let mut bytes = vec![0; 3];
822        memory_0.read(
823            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
824            &mut bytes,
825        );
826        assert_eq!(bytes, vec![1, 2, 3]);
827    }
828
829    #[test]
830    fn can_write_across_bucket_boundaries_with_interleaving_memories() {
831        let mem = make_memory();
832        let mem_mgr = MemoryManager::init(mem);
833        let memory_0 = mem_mgr.get(MemoryId(0));
834        let memory_1 = mem_mgr.get(MemoryId(1));
835
836        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 0);
837        assert_eq!(memory_1.grow(1), 0);
838        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
839
840        memory_0.write(
841            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
842            &[1, 2, 3],
843        );
844        memory_1.write(0, &[4, 5, 6]);
845
846        let mut bytes = vec![0; 3];
847        memory_0.read(WASM_PAGE_SIZE * BUCKET_SIZE_IN_PAGES - 1, &mut bytes);
848        assert_eq!(bytes, vec![1, 2, 3]);
849
850        let mut bytes = vec![0; 3];
851        memory_1.read(0, &mut bytes);
852        assert_eq!(bytes, vec![4, 5, 6]);
853    }
854
855    #[test]
856    #[should_panic]
857    fn reading_out_of_bounds_should_panic() {
858        let mem = make_memory();
859        let mem_mgr = MemoryManager::init(mem);
860        let memory_0 = mem_mgr.get(MemoryId(0));
861        let memory_1 = mem_mgr.get(MemoryId(1));
862
863        assert_eq!(memory_0.grow(1), 0);
864        assert_eq!(memory_1.grow(1), 0);
865
866        let mut bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
867        memory_0.read(0, &mut bytes);
868    }
869
870    #[test]
871    #[should_panic]
872    fn writing_out_of_bounds_should_panic() {
873        let mem = make_memory();
874        let mem_mgr = MemoryManager::init(mem);
875        let memory_0 = mem_mgr.get(MemoryId(0));
876        let memory_1 = mem_mgr.get(MemoryId(1));
877
878        assert_eq!(memory_0.grow(1), 0);
879        assert_eq!(memory_1.grow(1), 0);
880
881        let bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
882        memory_0.write(0, &bytes);
883    }
884
885    #[test]
886    fn reading_zero_bytes_from_empty_memory_should_not_panic() {
887        let mem = make_memory();
888        let mem_mgr = MemoryManager::init(mem);
889        let memory_0 = mem_mgr.get(MemoryId(0));
890
891        assert_eq!(memory_0.size(), 0);
892        let mut bytes = vec![];
893        memory_0.read(0, &mut bytes);
894    }
895
896    #[test]
897    fn writing_zero_bytes_to_empty_memory_should_not_panic() {
898        let mem = make_memory();
899        let mem_mgr = MemoryManager::init(mem);
900        let memory_0 = mem_mgr.get(MemoryId(0));
901
902        assert_eq!(memory_0.size(), 0);
903        memory_0.write(0, &[]);
904    }
905
    // Property test: write a random blob at a random offset into a random number
    // of virtual memories, then check the blob reads back identically through
    // each of the three read paths (`read`, `read_to_vec`, `read_unsafe`).
    #[test]
    fn write_and_read_random_bytes() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init_with_bucket_size(mem, 1); // very small bucket size.

        // One virtual memory per possible memory id.
        let memories: Vec<_> = (0..MAX_NUM_MEMORIES)
            .map(|id| mem_mgr.get(MemoryId(id)))
            .collect();

        proptest!(|(
            num_memories in 0..255usize,
            data in proptest::collection::vec(0..u8::MAX, 0..2*WASM_PAGE_SIZE as usize),
            offset in 0..10*WASM_PAGE_SIZE
        )| {
            for memory in memories.iter().take(num_memories) {
                // Write a random blob into the memory, growing the memory as it needs to.
                write(memory, offset, &data);

                {
                    // Verify the blob can be read back using read.
                    let mut bytes = vec![0; data.len()];
                    memory.read(offset, &mut bytes);
                    assert_eq!(bytes, data);
                }

                {
                    // Verify the blob can be read back using read_to_vec.
                    let mut bytes = vec![];
                    read_to_vec(memory, offset.into(), &mut bytes, data.len());
                    assert_eq!(bytes, data);
                }

                {
                    // Verify the blob can be read back using read_unsafe.
                    let mut bytes = vec![0; data.len()];
                    // SAFETY: `bytes` is an initialized buffer of exactly
                    // `data.len()` bytes, so the pointer is valid for
                    // `data.len()` writes.
                    unsafe {
                        memory.read_unsafe(offset, bytes.as_mut_ptr(), data.len());
                    }
                    assert_eq!(bytes, data);
                }
            }
        });
    }
949
950    #[test]
951    fn init_with_non_default_bucket_size() {
952        // Choose a bucket size that's different from the default bucket size.
953        let bucket_size = 256;
954        assert_ne!(bucket_size, BUCKET_SIZE_IN_PAGES as u16);
955
956        // Initialize the memory manager.
957        let mem = make_memory();
958        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), bucket_size);
959
960        // Do some writes.
961        let memory_0 = mem_mgr.get(MemoryId(0));
962        let memory_1 = mem_mgr.get(MemoryId(1));
963        memory_0.grow(300);
964        memory_1.grow(100);
965        memory_0.write(0, &[1; 1000]);
966        memory_1.write(0, &[2; 1000]);
967
968        // Reinitializes the memory manager using the `init` method, without specifying
969        // the bucket size.
970        let mem_mgr = MemoryManager::init(mem);
971
972        // Assert the bucket size is correct.
973        assert_eq!(mem_mgr.inner.borrow().bucket_size_in_pages, bucket_size);
974
975        // Assert that the data written is correct.
976        let memory_0 = mem_mgr.get(MemoryId(0));
977        let memory_1 = mem_mgr.get(MemoryId(1));
978
979        assert_eq!(memory_0.size(), 300);
980        assert_eq!(memory_1.size(), 100);
981
982        let mut buf = vec![0; 1000];
983        memory_0.read(0, &mut buf);
984        assert_eq!(buf, vec![1; 1000]);
985
986        memory_1.read(0, &mut buf);
987        assert_eq!(buf, vec![2; 1000]);
988    }
989
    // Make a few reads and writes and compare the results against golden files that should stay
    // stable between commits.
    // The operations were chosen so that they exercise most of the implementation.
    #[test]
    fn stability() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), 1); // very small bucket size.

        // A repeating 0..=255 byte pattern slightly longer than two Wasm pages,
        // so every write below crosses page (and, with 1-page buckets, bucket)
        // boundaries.
        let data: Vec<_> = (0u8..=255u8)
            .cycle()
            .take((WASM_PAGE_SIZE * 2 + 901) as usize)
            .collect();

        const MEMORIES: u8 = 3;
        let mut memory_id = 0;
        // Round-robin over the memory ids. The increment happens before the
        // lookup, so the sequence of ids is 1, 2, 0, 1, 2, 0, ...
        let mut next_memory = || {
            memory_id += 1;
            memory_id %= MEMORIES;
            mem_mgr.get(MemoryId(memory_id))
        };

        // There are 5 operations
        for _ in 0..MEMORIES * 5 {
            safe_write(&next_memory(), 0, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE / 2, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE - 1, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE + 1, data.as_slice()).unwrap();
            safe_write(
                &next_memory(),
                2 * WASM_PAGE_SIZE + WASM_PAGE_SIZE / 2,
                data.as_slice(),
            )
            .unwrap();
        }

        // The underlying memory after all writes must match the golden file
        // byte for byte.
        let expected_write = include_bytes!("memory_manager/stability_write.golden");
        assert!(expected_write.as_slice() == mem.borrow().as_slice());

        let mut read = vec![0; 4 * WASM_PAGE_SIZE as usize];

        // There are 4 operations
        for _ in 0..MEMORIES * 4 {
            next_memory().read(0, &mut read[0..WASM_PAGE_SIZE as usize / 2]);
            next_memory().read(
                WASM_PAGE_SIZE / 2 - 150,
                &mut read[WASM_PAGE_SIZE as usize / 2..WASM_PAGE_SIZE as usize],
            );
            next_memory().read(
                WASM_PAGE_SIZE - 473,
                &mut read[WASM_PAGE_SIZE as usize..WASM_PAGE_SIZE as usize * 2],
            );
            next_memory().read(WASM_PAGE_SIZE * 2, &mut read[WASM_PAGE_SIZE as usize * 2..]);
        }

        // The concatenated read results must also match their golden file.
        let expected_read = include_bytes!("memory_manager/stability_read.golden");
        assert!(expected_read.as_slice() == read.as_slice());
    }
1047
1048    #[test]
1049    fn bucket_cache() {
1050        let bucket_cache = BucketCache::new();
1051
1052        // No match, nothing has been stored.
1053        assert_eq!(
1054            bucket_cache.get(VirtualSegment {
1055                address: Address::from(0),
1056                length: Bytes::from(1u64)
1057            }),
1058            None
1059        );
1060
1061        bucket_cache.store(
1062            VirtualSegment {
1063                address: Address::from(0),
1064                length: Bytes::from(335u64),
1065            },
1066            Address::from(983),
1067        );
1068
1069        // Match at the beginning
1070        assert_eq!(
1071            bucket_cache.get(VirtualSegment {
1072                address: Address::from(1),
1073                length: Bytes::from(2u64)
1074            }),
1075            Some(Address::from(984))
1076        );
1077
1078        // Match at the end
1079        assert_eq!(
1080            bucket_cache.get(VirtualSegment {
1081                address: Address::from(334),
1082                length: Bytes::from(1u64)
1083            }),
1084            Some(Address::from(1317))
1085        );
1086
1087        // Match entire segment
1088        assert_eq!(
1089            bucket_cache.get(VirtualSegment {
1090                address: Address::from(0),
1091                length: Bytes::from(335u64),
1092            }),
1093            Some(Address::from(983))
1094        );
1095
1096        // No match - outside cached segment
1097        assert_eq!(
1098            bucket_cache.get(VirtualSegment {
1099                address: Address::from(1),
1100                length: Bytes::from(335u64)
1101            }),
1102            None
1103        );
1104    }
1105}