ic_stable_structures/
memory_manager.rs

1//! A module for simulating multiple memories within a single memory.
2//!
3//! The typical way for a canister to have multiple stable structures is by dividing the memory into
4//! distinct ranges, dedicating each range to a stable structure. This approach has two problems:
5//!
//! 1. The developer needs to specify, in advance, an upper bound on the memory of each stable structure.
7//! 2. It wastes the canister's memory allocation. For example, if a canister creates two stable
8//!    structures A and B, and gives each one of them a 1GiB region of memory, then writing to B will
9//!    require growing > 1GiB of memory just to be able to write to it.
10//!
11//! The [`MemoryManager`] in this module solves both of these problems. It simulates having
12//! multiple memories, each being able to grow without bound. That way, a developer doesn't need to
//! put an upper bound on how much the stable structures can grow, and the canister's memory allocation
14//! becomes less wasteful.
15//!
16//! Example Usage:
17//!
18//! ```
19//! use ic_stable_structures::{DefaultMemoryImpl, Memory};
20//! use ic_stable_structures::memory_manager::{MemoryManager, MemoryId};
21//!
22//! let mem_mgr = MemoryManager::init(DefaultMemoryImpl::default());
23//!
24//! // Create different memories, each with a unique ID.
25//! let memory_0 = mem_mgr.get(MemoryId::new(0));
26//! let memory_1 = mem_mgr.get(MemoryId::new(1));
27//!
28//! // Each memory can be used independently.
29//! memory_0.grow(1);
30//! memory_0.write(0, &[1, 2, 3]);
31//!
32//! memory_1.grow(1);
33//! memory_1.write(0, &[4, 5, 6]);
34//!
35//! let mut bytes = vec![0; 3];
36//! memory_0.read(0, &mut bytes);
37//! assert_eq!(bytes, vec![1, 2, 3]);
38//!
39//! let mut bytes = vec![0; 3];
40//! memory_1.read(0, &mut bytes);
41//! assert_eq!(bytes, vec![4, 5, 6]);
42//! ```
43use crate::{
44    read_struct, read_to_vec,
45    types::{Address, Bytes},
46    write, write_struct, Memory, WASM_PAGE_SIZE,
47};
48use std::cell::{Cell, RefCell};
49use std::rc::Rc;
50
// Magic bytes persisted at address 0 to identify a memory manager.
const MAGIC: &[u8; 3] = b"MGR";
const LAYOUT_VERSION: u8 = 1;

// The maximum number of memories that can be created.
// IDs 0..=254 are usable; 255 is reserved as the unallocated-bucket marker below.
const MAX_NUM_MEMORIES: u8 = 255;

// The maximum number of buckets the memory manager can handle.
// With a bucket size of 128 pages this can support up to 256GiB of memory.
const MAX_NUM_BUCKETS: u64 = 32768;

// The default bucket size, in Wasm pages.
const BUCKET_SIZE_IN_PAGES: u64 = 128;

// A value used internally to indicate that a bucket is unallocated.
const UNALLOCATED_BUCKET_MARKER: u8 = MAX_NUM_MEMORIES;

// The offset where buckets are in memory.
// The first page is reserved for the memory manager's header and the
// bucket-allocation table.
const BUCKETS_OFFSET_IN_PAGES: u64 = 1;
const BUCKETS_OFFSET_IN_BYTES: u64 = BUCKETS_OFFSET_IN_PAGES * WASM_PAGE_SIZE;

// Reserved bytes in the header for future extensions.
const HEADER_RESERVED_BYTES: usize = 32;
72
73/// A memory manager simulates multiple memories within a single memory.
74///
75/// The memory manager can return up to 255 unique instances of [`VirtualMemory`], and each can be
76/// used independently and can grow up to the bounds of the underlying memory.
77///
78/// By default, the memory manager divides the memory into "buckets" of 128 pages. Each
79/// [`VirtualMemory`] is internally represented as a list of buckets. Buckets of different memories
80/// can be interleaved, but the [`VirtualMemory`] interface gives the illusion of a continuous
81/// address space.
82///
83/// Because a [`VirtualMemory`] is a list of buckets, this implies that internally it grows one
84/// bucket at a time.
85///
86/// The first page of the memory is reserved for the memory manager's own state. The layout for
87/// this state is as follows:
88///
89/// # V1 layout
90///
91/// ```text
92/// -------------------------------------------------- <- Address 0
93/// Magic "MGR"                           ↕ 3 bytes
94/// --------------------------------------------------
95/// Layout version                        ↕ 1 byte
96/// --------------------------------------------------
97/// Number of allocated buckets           ↕ 2 bytes
98/// --------------------------------------------------
99/// Bucket size (in pages) = N            ↕ 2 bytes
100/// --------------------------------------------------
101/// Reserved space                        ↕ 32 bytes
102/// --------------------------------------------------
103/// Size of memory 0 (in pages)           ↕ 8 bytes
104/// --------------------------------------------------
105/// Size of memory 1 (in pages)           ↕ 8 bytes
106/// --------------------------------------------------
107/// ...
108/// --------------------------------------------------
109/// Size of memory 254 (in pages)         ↕ 8 bytes
110/// -------------------------------------------------- <- Bucket allocations
111/// Bucket 1                              ↕ 1 byte        (1 byte indicating which memory owns it)
112/// --------------------------------------------------
113/// Bucket 2                              ↕ 1 byte
114/// --------------------------------------------------
115/// ...
116/// --------------------------------------------------
117/// Bucket `MAX_NUM_BUCKETS`              ↕ 1 byte
118/// --------------------------------------------------
119/// Unallocated space                     ↕ 30'688 bytes
120/// -------------------------------------------------- <- Buckets (Page 1)
121/// Bucket 1                              ↕ N pages
122/// -------------------------------------------------- <- Page N + 1
123/// Bucket 2                              ↕ N pages
124/// --------------------------------------------------
125/// ...
126/// -------------------------------------------------- <- Page ((MAX_NUM_BUCKETS - 1) * N + 1)
127/// Bucket MAX_NUM_BUCKETS                ↕ N pages
128/// ```
129///
130/// # Current Limitations and Future Improvements
131///
132/// - Buckets are never deallocated once assigned to a memory ID, even when the memory becomes empty
133/// - Clearing data structures (BTreeMap, Vec) does not reclaim underlying bucket allocations
134/// - Long-running canisters may accumulate unused buckets, increasing storage costs
135/// - Future versions may consider automatic bucket reclamation and memory compaction features
pub struct MemoryManager<M: Memory> {
    // Shared, interior-mutable state: each `VirtualMemory` handed out by
    // `get` holds a clone of this `Rc` and borrows it on every operation.
    inner: Rc<RefCell<MemoryManagerInner<M>>>,
}
139
140impl<M: Memory> MemoryManager<M> {
141    /// Initializes a `MemoryManager` with the given memory.
142    pub fn init(memory: M) -> Self {
143        Self::init_with_bucket_size(memory, BUCKET_SIZE_IN_PAGES as u16)
144    }
145
146    /// Initializes a `MemoryManager` with the given memory and bucket size in pages.
147    pub fn init_with_bucket_size(memory: M, bucket_size_in_pages: u16) -> Self {
148        Self {
149            inner: Rc::new(RefCell::new(MemoryManagerInner::init(
150                memory,
151                bucket_size_in_pages,
152            ))),
153        }
154    }
155
156    /// Returns the memory associated with the given ID.
157    pub fn get(&self, id: MemoryId) -> VirtualMemory<M> {
158        VirtualMemory {
159            id,
160            memory_manager: self.inner.clone(),
161            cache: BucketCache::new(),
162        }
163    }
164
165    /// Returns the underlying memory.
166    ///
167    /// # Returns
168    /// - The underlying memory, if there is exactly one strong reference to the memory manager.  Please see [`Rc::try_unwrap`](https://doc.rust-lang.org/std/rc/struct.Rc.html#method.try_unwrap) for more details.
169    /// - None otherwise.
170    pub fn into_memory(self) -> Option<M> {
171        Rc::into_inner(self.inner).map(|inner| inner.into_inner().into_memory())
172    }
173}
174
// The header persisted at address 0 of the underlying memory.
// `repr(C, packed)` keeps the declared field order and removes padding so the
// in-memory representation matches the V1 on-disk layout documented on
// `MemoryManager`.
#[repr(C, packed)]
struct Header {
    magic: [u8; 3],

    version: u8,

    /// The number of buckets allocated by the memory manager.
    num_allocated_buckets: u16,

    /// The size of a bucket in Wasm pages.
    bucket_size_in_pages: u16,

    /// Reserved bytes for future extensions
    _reserved: [u8; HEADER_RESERVED_BYTES],

    /// The size of each individual memory that can be created by the memory manager.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],
}
193
194impl Header {
195    fn size() -> Bytes {
196        Bytes::new(core::mem::size_of::<Self>() as u64)
197    }
198}
199
/// A single simulated memory handed out by `MemoryManager::get`.
///
/// Cloning a `VirtualMemory` yields another handle to the same memory ID;
/// clones share the manager, and each clone carries its own copy of the
/// single-entry bucket cache from the point of cloning onward.
#[derive(Clone)]
pub struct VirtualMemory<M: Memory> {
    // The ID this virtual memory is registered under in the manager.
    id: MemoryId,
    // Shared handle to the manager that owns the real memory.
    memory_manager: Rc<RefCell<MemoryManagerInner<M>>>,
    // Caches the most recently touched bucket to speed up repeated accesses.
    cache: BucketCache,
}
206
impl<M: Memory> Memory for VirtualMemory<M> {
    // Returns the size of this virtual memory, in Wasm pages.
    fn size(&self) -> u64 {
        self.memory_manager.borrow().memory_size(self.id)
    }

    // Grows this virtual memory by `pages`, returning the previous size in
    // pages, or -1 if the manager's total bucket capacity would be exceeded.
    fn grow(&self, pages: u64) -> i64 {
        self.memory_manager.borrow_mut().grow(self.id, pages)
    }

    // Delegates to the manager, passing this handle's bucket cache.
    fn read(&self, offset: u64, dst: &mut [u8]) {
        self.memory_manager
            .borrow()
            .read(self.id, offset, dst, &self.cache)
    }

    // Delegates to the manager; the caller upholds the `read_unsafe` contract
    // (valid `dst..dst + count`, no overlap with `self`).
    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        self.memory_manager
            .borrow()
            .read_unsafe(self.id, offset, dst, count, &self.cache)
    }

    // Delegates to the manager, passing this handle's bucket cache.
    fn write(&self, offset: u64, src: &[u8]) {
        self.memory_manager
            .borrow()
            .write(self.id, offset, src, &self.cache)
    }
}
234
/// The state of the memory manager, kept in sync with the header persisted at
/// the start of the underlying memory (see `save_header` / `load`).
#[derive(Clone)]
struct MemoryManagerInner<M: Memory> {
    memory: M,

    /// The number of buckets that have been allocated.
    allocated_buckets: u16,

    /// The size of a bucket, in Wasm pages.
    bucket_size_in_pages: u16,

    /// An array storing the size (in pages) of each of the managed memories.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],

    /// A map mapping each managed memory to the bucket ids that are allocated to it.
    /// Indexed by memory ID; each list is in virtual-address order.
    memory_buckets: Vec<Vec<BucketId>>,
}
250
impl<M: Memory> MemoryManagerInner<M> {
    /// Loads an existing memory manager from `memory` if one is detected via
    /// the magic bytes, otherwise creates a fresh one.
    fn init(memory: M, bucket_size_in_pages: u16) -> Self {
        if memory.size() == 0 {
            // Memory is empty. Create a new map.
            return Self::new(memory, bucket_size_in_pages);
        }

        // Check if the magic in the memory corresponds to this object.
        let mut dst = [0; 3];
        memory.read(0, &mut dst);
        if &dst != MAGIC {
            // No memory manager found. Create a new instance.
            MemoryManagerInner::new(memory, bucket_size_in_pages)
        } else {
            // The memory already contains a memory manager. Load it.
            // NOTE(review): in this branch the persisted bucket size is used
            // and the `bucket_size_in_pages` argument is ignored.
            MemoryManagerInner::load(memory)
        }
    }

    /// Creates a fresh memory manager: persists an empty header and marks
    /// every bucket in the allocation table as unallocated.
    fn new(memory: M, bucket_size_in_pages: u16) -> Self {
        let mem_mgr = Self {
            memory,
            allocated_buckets: 0,
            memory_sizes_in_pages: [0; MAX_NUM_MEMORIES as usize],
            memory_buckets: vec![vec![]; MAX_NUM_MEMORIES as usize],
            bucket_size_in_pages,
        };

        mem_mgr.save_header();

        // Mark all the buckets as unallocated.
        write(
            &mem_mgr.memory,
            bucket_allocations_address(BucketId(0)).get(),
            &[UNALLOCATED_BUCKET_MARKER; MAX_NUM_BUCKETS as usize],
        );

        mem_mgr
    }

    /// Reconstructs the manager's state from a memory that already contains a
    /// persisted header and bucket-allocation table.
    fn load(memory: M) -> Self {
        // Read the header from memory.
        let header: Header = read_struct(Address::from(0), &memory);
        assert_eq!(&header.magic, MAGIC, "Bad magic.");
        assert_eq!(header.version, LAYOUT_VERSION, "Unsupported version.");

        // Read the bucket-allocation table: one owner byte per bucket.
        let mut buckets = vec![];
        read_to_vec(
            &memory,
            bucket_allocations_address(BucketId(0)),
            &mut buckets,
            MAX_NUM_BUCKETS as usize,
        );

        // Rebuild the per-memory bucket lists. Iterating in bucket-index
        // order preserves each memory's virtual-address ordering.
        let mut memory_buckets = vec![vec![]; MAX_NUM_MEMORIES as usize];
        for (bucket_idx, memory) in buckets.into_iter().enumerate() {
            if memory != UNALLOCATED_BUCKET_MARKER {
                memory_buckets[memory as usize].push(BucketId(bucket_idx as u16));
            }
        }

        Self {
            memory,
            allocated_buckets: header.num_allocated_buckets,
            bucket_size_in_pages: header.bucket_size_in_pages,
            memory_sizes_in_pages: header.memory_sizes_in_pages,
            memory_buckets,
        }
    }

    /// Persists the header (magic, version, bucket count, bucket size and all
    /// memory sizes) to address 0 of the underlying memory.
    fn save_header(&self) {
        let header = Header {
            magic: *MAGIC,
            version: LAYOUT_VERSION,
            num_allocated_buckets: self.allocated_buckets,
            bucket_size_in_pages: self.bucket_size_in_pages,
            _reserved: [0; HEADER_RESERVED_BYTES],
            memory_sizes_in_pages: self.memory_sizes_in_pages,
        };

        write_struct(&header, Address::from(0), &self.memory);
    }

    /// Returns the size of a memory (in pages).
    fn memory_size(&self, id: MemoryId) -> u64 {
        self.memory_sizes_in_pages[id.0 as usize]
    }

    /// Grows the memory with the given id by the given number of pages.
    ///
    /// Returns the previous size in pages on success, or -1 if the total
    /// number of allocated buckets would exceed `MAX_NUM_BUCKETS`.
    fn grow(&mut self, id: MemoryId, pages: u64) -> i64 {
        // Compute how many additional buckets are needed.
        let old_size = self.memory_size(id);
        let new_size = old_size + pages;
        let current_buckets = self.num_buckets_needed(old_size);
        let required_buckets = self.num_buckets_needed(new_size);
        let new_buckets_needed = required_buckets - current_buckets;

        if new_buckets_needed + self.allocated_buckets as u64 > MAX_NUM_BUCKETS {
            // Exceeded the memory that can be managed.
            return -1;
        }

        let memory_bucket = &mut self.memory_buckets[id.0 as usize];
        // Allocate new buckets as needed.
        memory_bucket.reserve(new_buckets_needed as usize);
        for _ in 0..new_buckets_needed {
            // Buckets are handed out sequentially; `allocated_buckets` doubles
            // as the ID of the next fresh bucket.
            let new_bucket_id = BucketId(self.allocated_buckets);
            memory_bucket.push(new_bucket_id);

            // Write in stable store that this bucket belongs to the memory with the provided `id`.
            write(
                &self.memory,
                bucket_allocations_address(new_bucket_id).get(),
                &[id.0],
            );

            self.allocated_buckets += 1;
        }

        // Grow the underlying memory if necessary.
        let pages_needed = BUCKETS_OFFSET_IN_PAGES
            + self.bucket_size_in_pages as u64 * self.allocated_buckets as u64;
        if pages_needed > self.memory.size() {
            let additional_pages_needed = pages_needed - self.memory.size();
            let prev_pages = self.memory.grow(additional_pages_needed);
            if prev_pages == -1 {
                panic!("{id:?}: grow failed");
            }
        }

        // Update the memory with the new size.
        self.memory_sizes_in_pages[id.0 as usize] = new_size;

        // Update the header and return the old size.
        self.save_header();
        old_size as i64
    }

    /// Writes `src` to the given memory at virtual address `offset`.
    ///
    /// Panics if the write extends beyond the memory's current size.
    fn write(&self, id: MemoryId, offset: u64, src: &[u8], bucket_cache: &BucketCache) {
        // Fast path: the whole segment lies within the cached bucket.
        // NOTE(review): a cache hit skips the bounds check below, since the
        // cache stores whole buckets regardless of `memory_size` — confirm
        // this is the intended trade-off.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: src.len().into(),
        }) {
            self.memory.write(real_address.get(), src);
            return;
        }

        if (offset + src.len() as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: write out of bounds");
        }

        // Slow path: split the write at bucket boundaries and copy the
        // matching slice of `src` into each real segment.
        let mut bytes_written = 0;
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: src.len().into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.write(
                    address.get(),
                    &src[bytes_written as usize..(bytes_written + length.get()) as usize],
                );

                bytes_written += length.get();
            },
        );
    }

    /// Reads `dst.len()` bytes from the given memory at `offset` into `dst`.
    #[inline]
    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8], bucket_cache: &BucketCache) {
        // SAFETY: this is trivially safe because dst has dst.len() space.
        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len(), bucket_cache) }
    }

    /// Reads `count` bytes from the given memory at `offset` into `dst`.
    ///
    /// # Safety
    ///
    /// Callers must guarantee that
    ///   * it is valid to write `count` number of bytes starting from `dst`,
    ///   * `dst..dst + count` does not overlap with `self`.
    unsafe fn read_unsafe(
        &self,
        id: MemoryId,
        offset: u64,
        dst: *mut u8,
        count: usize,
        bucket_cache: &BucketCache,
    ) {
        // First try to find the virtual segment in the cache.
        // NOTE(review): as with `write`, a cache hit bypasses the bounds
        // check below.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: count.into(),
        }) {
            self.memory.read_unsafe(real_address.get(), dst, count);
            return;
        }

        if (offset + count as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: read out of bounds");
        }

        // Slow path: split the read at bucket boundaries, advancing `dst` by
        // the number of bytes already read.
        let mut bytes_read = Bytes::new(0);
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: count.into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.read_unsafe(
                    address.get(),
                    // The cast to usize is safe because `bytes_read` and `length` are bounded by
                    // usize `count`.
                    dst.add(bytes_read.get() as usize),
                    length.get() as usize,
                );

                bytes_read += length;
            },
        )
    }

    /// Maps a segment of virtual memory to segments of real memory.
    ///
    /// `func` is invoked with real memory segments of real memory that `virtual_segment` is mapped
    /// to.
    ///
    /// A segment in virtual memory can map to multiple segments of real memory. Here's an example:
    ///
    /// Virtual Memory
    /// ```text
    /// --------------------------------------------------------
    ///          (A) ---------- SEGMENT ---------- (B)
    /// --------------------------------------------------------
    /// ↑               ↑               ↑               ↑
    /// Bucket 0        Bucket 1        Bucket 2        Bucket 3
    /// ```
    ///
    /// The [`VirtualMemory`] is internally divided into fixed-size buckets. In the memory's virtual
    /// address space, all these buckets are consecutive, but in real memory this may not be the case.
    ///
    /// A virtual segment would first be split at the bucket boundaries. The example virtual segment
    /// above would be split into the following segments:
    ///
    ///    (A, end of bucket 0)
    ///    (start of bucket 1, end of bucket 1)
    ///    (start of bucket 2, B)
    ///
    /// Each of the segments above can then be translated into the real address space by looking up
    /// the underlying buckets' addresses in real memory.
    fn for_each_bucket(
        &self,
        MemoryId(id): MemoryId,
        virtual_segment: VirtualSegment,
        bucket_cache: &BucketCache,
        mut func: impl FnMut(RealSegment),
    ) {
        // Get the buckets allocated to the given memory id.
        let buckets = self.memory_buckets[id as usize].as_slice();
        let bucket_size_in_bytes = self.bucket_size_in_bytes().get();

        let virtual_offset = virtual_segment.address.get();

        let mut length = virtual_segment.length.get();
        let mut bucket_idx = (virtual_offset / bucket_size_in_bytes) as usize;
        // The start offset where we start reading from in a bucket. In the first iteration the
        // value is calculated from `virtual_offset`, in later iterations, it's always 0.
        let mut start_offset_in_bucket = virtual_offset % bucket_size_in_bytes;

        while length > 0 {
            let bucket_address =
                self.bucket_address(buckets.get(bucket_idx).expect("bucket idx out of bounds"));

            let bucket_start = bucket_idx as u64 * bucket_size_in_bytes;
            // The part of this bucket covered by the segment: from the start
            // offset up to the end of the bucket, capped by the remainder.
            let segment_len = (bucket_size_in_bytes - start_offset_in_bucket).min(length);

            // Cache this bucket.
            bucket_cache.store(
                VirtualSegment {
                    address: bucket_start.into(),
                    length: self.bucket_size_in_bytes(),
                },
                bucket_address,
            );

            func(RealSegment {
                address: bucket_address + start_offset_in_bucket.into(),
                length: segment_len.into(),
            });

            length -= segment_len;
            bucket_idx += 1;
            start_offset_in_bucket = 0;
        }
    }

    /// Returns the size of a bucket in bytes.
    fn bucket_size_in_bytes(&self) -> Bytes {
        Bytes::from(self.bucket_size_in_pages as u64 * WASM_PAGE_SIZE)
    }

    /// Returns the number of buckets needed to accommodate the given number of pages.
    fn num_buckets_needed(&self, num_pages: u64) -> u64 {
        // Ceiling division.
        num_pages.div_ceil(self.bucket_size_in_pages as u64)
    }

    /// Returns the underlying memory.
    pub fn into_memory(self) -> M {
        self.memory
    }

    /// Returns the real address where the given bucket's data begins.
    /// Bucket data starts after the reserved first page, laid out by ID.
    #[inline]
    fn bucket_address(&self, id: &BucketId) -> Address {
        Address::from(BUCKETS_OFFSET_IN_BYTES) + self.bucket_size_in_bytes() * Bytes::from(id.0)
    }
}
569
/// A contiguous segment in a memory's virtual address space.
#[derive(Copy, Clone)]
struct VirtualSegment {
    address: Address,
    length: Bytes,
}
575
576impl VirtualSegment {
577    fn contains_segment(&self, other: &VirtualSegment) -> bool {
578        self.address <= other.address && other.address + other.length <= self.address + self.length
579    }
580}
581
/// A contiguous segment in the underlying (real) memory.
struct RealSegment {
    address: Address,
    length: Bytes,
}
586
/// Identifies one of the virtual memories managed by a `MemoryManager`.
/// Valid IDs are 0..=254; 255 is reserved (see `MemoryId::new`).
#[derive(Clone, Copy, Ord, Eq, PartialEq, PartialOrd, Debug)]
pub struct MemoryId(u8);
589
590impl MemoryId {
591    pub const fn new(id: u8) -> Self {
592        // Any ID can be used except the special value that's used internally to
593        // mark a bucket as unallocated.
594        assert!(id != UNALLOCATED_BUCKET_MARKER);
595
596        Self(id)
597    }
598}
599
// Referring to a bucket.
// IDs are assigned sequentially from 0 and also determine the bucket's
// position in real memory (see `bucket_address`).
#[derive(Clone, Copy, Debug, PartialEq)]
struct BucketId(u16);
603
604fn bucket_allocations_address(id: BucketId) -> Address {
605    Address::from(0) + Header::size() + Bytes::from(id.0)
606}
607
/// Cache which stores the last touched bucket and the corresponding real address.
///
/// If a segment from this bucket is accessed, we can return the real address faster.
#[derive(Clone)]
struct BucketCache {
    // The virtual segment spanning the cached bucket (address and length in
    // the virtual address space).
    bucket: Cell<VirtualSegment>,
    /// The real address that corresponds to bucket.address
    real_address: Cell<Address>,
}
617
618impl BucketCache {
619    #[inline]
620    fn new() -> Self {
621        BucketCache {
622            bucket: Cell::new(VirtualSegment {
623                address: Address::from(0),
624                length: Bytes::new(0),
625            }),
626            real_address: Cell::new(Address::from(0)),
627        }
628    }
629}
630
631impl BucketCache {
632    /// Returns the real address corresponding to `virtual_segment.address` if `virtual_segment`
633    /// is fully contained within the cached bucket, otherwise `None`.
634    #[inline]
635    fn get(&self, virtual_segment: VirtualSegment) -> Option<Address> {
636        let cached_bucket = self.bucket.get();
637
638        cached_bucket
639            .contains_segment(&virtual_segment)
640            .then(|| self.real_address.get() + (virtual_segment.address - cached_bucket.address))
641    }
642
643    /// Stores the mapping of a bucket to a real address.
644    #[inline]
645    fn store(&self, bucket: VirtualSegment, real_address: Address) {
646        self.bucket.set(bucket);
647        self.real_address.set(real_address);
648    }
649}
650
651#[cfg(test)]
652mod test {
653    use super::*;
654    use crate::safe_write;
655    use proptest::prelude::*;
656
657    const MAX_MEMORY_IN_PAGES: u64 = MAX_NUM_BUCKETS * BUCKET_SIZE_IN_PAGES;
658
659    fn make_memory() -> Rc<RefCell<Vec<u8>>> {
660        Rc::new(RefCell::new(Vec::new()))
661    }
662
663    #[test]
664    fn can_get_memory() {
665        let mem_mgr = MemoryManager::init(make_memory());
666        let memory = mem_mgr.get(MemoryId(0));
667        assert_eq!(memory.size(), 0);
668    }
669
670    #[test]
671    fn can_allocate_and_use_memory() {
672        let mem_mgr = MemoryManager::init(make_memory());
673        let memory = mem_mgr.get(MemoryId(0));
674        assert_eq!(memory.grow(1), 0);
675        assert_eq!(memory.size(), 1);
676
677        memory.write(0, &[1, 2, 3]);
678
679        let mut bytes = vec![0; 3];
680        memory.read(0, &mut bytes);
681        assert_eq!(bytes, vec![1, 2, 3]);
682
683        assert_eq!(mem_mgr.inner.borrow().memory_buckets[0], vec![BucketId(0)]);
684
685        assert!(mem_mgr.inner.borrow().memory_buckets[1..]
686            .iter()
687            .all(|x| x.is_empty()));
688    }
689
690    #[test]
691    fn can_allocate_and_use_multiple_memories() {
692        let mem = make_memory();
693        let mem_mgr = MemoryManager::init(mem.clone());
694        let memory_0 = mem_mgr.get(MemoryId(0));
695        let memory_1 = mem_mgr.get(MemoryId(1));
696
697        assert_eq!(memory_0.grow(1), 0);
698        assert_eq!(memory_1.grow(1), 0);
699
700        assert_eq!(memory_0.size(), 1);
701        assert_eq!(memory_1.size(), 1);
702
703        assert_eq!(
704            &mem_mgr.inner.borrow().memory_buckets[..2],
705            &[vec![BucketId(0)], vec![BucketId(1)],]
706        );
707
708        assert!(mem_mgr.inner.borrow().memory_buckets[2..]
709            .iter()
710            .all(|x| x.is_empty()));
711
712        memory_0.write(0, &[1, 2, 3]);
713        memory_0.write(0, &[1, 2, 3]);
714        memory_1.write(0, &[4, 5, 6]);
715
716        let mut bytes = vec![0; 3];
717        memory_0.read(0, &mut bytes);
718        assert_eq!(bytes, vec![1, 2, 3]);
719
720        let mut bytes = vec![0; 3];
721        memory_1.read(0, &mut bytes);
722        assert_eq!(bytes, vec![4, 5, 6]);
723
724        // + 1 is for the header.
725        assert_eq!(mem.size(), 2 * BUCKET_SIZE_IN_PAGES + 1);
726    }
727
728    #[test]
729    fn can_be_reinitialized_from_memory() {
730        let mem = make_memory();
731        let mem_mgr = MemoryManager::init(mem.clone());
732        let memory_0 = mem_mgr.get(MemoryId(0));
733        let memory_1 = mem_mgr.get(MemoryId(1));
734
735        assert_eq!(memory_0.grow(1), 0);
736        assert_eq!(memory_1.grow(1), 0);
737
738        memory_0.write(0, &[1, 2, 3]);
739        memory_1.write(0, &[4, 5, 6]);
740
741        let mem_mgr = MemoryManager::init(mem);
742        let memory_0 = mem_mgr.get(MemoryId(0));
743        let memory_1 = mem_mgr.get(MemoryId(1));
744
745        let mut bytes = vec![0; 3];
746        memory_0.read(0, &mut bytes);
747        assert_eq!(bytes, vec![1, 2, 3]);
748
749        memory_1.read(0, &mut bytes);
750        assert_eq!(bytes, vec![4, 5, 6]);
751    }
752
753    #[test]
754    fn growing_same_memory_multiple_times_doesnt_increase_underlying_allocation() {
755        let mem = make_memory();
756        let mem_mgr = MemoryManager::init(mem.clone());
757        let memory_0 = mem_mgr.get(MemoryId(0));
758
759        // Grow the memory by 1 page. This should increase the underlying allocation
760        // by `BUCKET_SIZE_IN_PAGES` pages.
761        assert_eq!(memory_0.grow(1), 0);
762        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
763
764        // Grow the memory again. This should NOT increase the underlying allocation.
765        assert_eq!(memory_0.grow(1), 1);
766        assert_eq!(memory_0.size(), 2);
767        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
768
769        // Grow the memory up to the BUCKET_SIZE_IN_PAGES. This should NOT increase the underlying
770        // allocation.
771        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES - 2), 2);
772        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES);
773        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);
774
775        // Grow the memory by one more page. This should increase the underlying allocation.
776        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
777        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES + 1);
778        assert_eq!(mem.size(), 1 + 2 * BUCKET_SIZE_IN_PAGES);
779    }
780
781    #[test]
782    fn does_not_grow_memory_unnecessarily() {
783        let mem = make_memory();
784        let initial_size = BUCKET_SIZE_IN_PAGES * 2;
785
786        // Grow the memory manually before passing it into the memory manager.
787        mem.grow(initial_size);
788
789        let mem_mgr = MemoryManager::init(mem.clone());
790        let memory_0 = mem_mgr.get(MemoryId(0));
791
792        // Grow the memory by 1 page.
793        assert_eq!(memory_0.grow(1), 0);
794        assert_eq!(mem.size(), initial_size);
795
796        // Grow the memory by BUCKET_SIZE_IN_PAGES more pages, which will cause the underlying
797        // allocation to increase.
798        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 1);
799        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES * 2);
800    }
801
802    #[test]
803    fn growing_beyond_capacity_fails() {
804        let mem = make_memory();
805        let mem_mgr = MemoryManager::init(mem);
806        let memory_0 = mem_mgr.get(MemoryId(0));
807
808        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES + 1), -1);
809
810        // Try to grow the memory by MAX_MEMORY_IN_PAGES + 1.
811        assert_eq!(memory_0.grow(1), 0); // should succeed
812        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES), -1); // should fail.
813    }
814
815    #[test]
816    fn can_write_across_bucket_boundaries() {
817        let mem = make_memory();
818        let mem_mgr = MemoryManager::init(mem);
819        let memory_0 = mem_mgr.get(MemoryId(0));
820
821        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES + 1), 0);
822
823        memory_0.write(
824            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
825            &[1, 2, 3],
826        );
827
828        let mut bytes = vec![0; 3];
829        memory_0.read(
830            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
831            &mut bytes,
832        );
833        assert_eq!(bytes, vec![1, 2, 3]);
834    }
835
836    #[test]
837    fn can_write_across_bucket_boundaries_with_interleaving_memories() {
838        let mem = make_memory();
839        let mem_mgr = MemoryManager::init(mem);
840        let memory_0 = mem_mgr.get(MemoryId(0));
841        let memory_1 = mem_mgr.get(MemoryId(1));
842
843        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 0);
844        assert_eq!(memory_1.grow(1), 0);
845        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
846
847        memory_0.write(
848            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
849            &[1, 2, 3],
850        );
851        memory_1.write(0, &[4, 5, 6]);
852
853        let mut bytes = vec![0; 3];
854        memory_0.read(WASM_PAGE_SIZE * BUCKET_SIZE_IN_PAGES - 1, &mut bytes);
855        assert_eq!(bytes, vec![1, 2, 3]);
856
857        let mut bytes = vec![0; 3];
858        memory_1.read(0, &mut bytes);
859        assert_eq!(bytes, vec![4, 5, 6]);
860    }
861
862    #[test]
863    #[should_panic]
864    fn reading_out_of_bounds_should_panic() {
865        let mem = make_memory();
866        let mem_mgr = MemoryManager::init(mem);
867        let memory_0 = mem_mgr.get(MemoryId(0));
868        let memory_1 = mem_mgr.get(MemoryId(1));
869
870        assert_eq!(memory_0.grow(1), 0);
871        assert_eq!(memory_1.grow(1), 0);
872
873        let mut bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
874        memory_0.read(0, &mut bytes);
875    }
876
877    #[test]
878    #[should_panic]
879    fn writing_out_of_bounds_should_panic() {
880        let mem = make_memory();
881        let mem_mgr = MemoryManager::init(mem);
882        let memory_0 = mem_mgr.get(MemoryId(0));
883        let memory_1 = mem_mgr.get(MemoryId(1));
884
885        assert_eq!(memory_0.grow(1), 0);
886        assert_eq!(memory_1.grow(1), 0);
887
888        let bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
889        memory_0.write(0, &bytes);
890    }
891
892    #[test]
893    fn reading_zero_bytes_from_empty_memory_should_not_panic() {
894        let mem = make_memory();
895        let mem_mgr = MemoryManager::init(mem);
896        let memory_0 = mem_mgr.get(MemoryId(0));
897
898        assert_eq!(memory_0.size(), 0);
899        let mut bytes = vec![];
900        memory_0.read(0, &mut bytes);
901    }
902
903    #[test]
904    fn writing_zero_bytes_to_empty_memory_should_not_panic() {
905        let mem = make_memory();
906        let mem_mgr = MemoryManager::init(mem);
907        let memory_0 = mem_mgr.get(MemoryId(0));
908
909        assert_eq!(memory_0.size(), 0);
910        memory_0.write(0, &[]);
911    }
912
    #[test]
    fn write_and_read_random_bytes() {
        let mem = make_memory();
        // Use the smallest possible bucket size so that blobs routinely span
        // multiple buckets.
        let mem_mgr = MemoryManager::init_with_bucket_size(mem, 1); // very small bucket size.

        // One virtual memory for every possible memory id.
        let memories: Vec<_> = (0..MAX_NUM_MEMORIES)
            .map(|id| mem_mgr.get(MemoryId(id)))
            .collect();

        proptest!(|(
            num_memories in 0..255usize,
            // NOTE(review): `0..u8::MAX` is a half-open range, so the byte 255 is never
            // generated — presumably unintentional, though harmless for this test.
            data in proptest::collection::vec(0..u8::MAX, 0..2*WASM_PAGE_SIZE as usize),
            offset in 0..10*WASM_PAGE_SIZE
        )| {
            for memory in memories.iter().take(num_memories) {
                // Write a random blob into the memory, growing the memory as it needs to.
                write(memory, offset, &data);

                {
                    // Verify the blob can be read back using read.
                    let mut bytes = vec![0; data.len()];
                    memory.read(offset, &mut bytes);
                    assert_eq!(bytes, data);
                }

                {
                    // Verify the blob can be read back using read_to_vec.
                    let mut bytes = vec![];
                    read_to_vec(memory, offset.into(), &mut bytes, data.len());
                    assert_eq!(bytes, data);
                }

                {
                    // Verify the blob can be read back using read_unsafe.
                    let mut bytes = vec![0; data.len()];
                    unsafe {
                        memory.read_unsafe(offset, bytes.as_mut_ptr(), data.len());
                    }
                    assert_eq!(bytes, data);
                }
            }
        });
    }
956
957    #[test]
958    fn init_with_non_default_bucket_size() {
959        // Choose a bucket size that's different from the default bucket size.
960        let bucket_size = 256;
961        assert_ne!(bucket_size, BUCKET_SIZE_IN_PAGES as u16);
962
963        // Initialize the memory manager.
964        let mem = make_memory();
965        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), bucket_size);
966
967        // Do some writes.
968        let memory_0 = mem_mgr.get(MemoryId(0));
969        let memory_1 = mem_mgr.get(MemoryId(1));
970        memory_0.grow(300);
971        memory_1.grow(100);
972        memory_0.write(0, &[1; 1000]);
973        memory_1.write(0, &[2; 1000]);
974
975        // Reinitializes the memory manager using the `init` method, without specifying
976        // the bucket size.
977        let mem_mgr = MemoryManager::init(mem);
978
979        // Assert the bucket size is correct.
980        assert_eq!(mem_mgr.inner.borrow().bucket_size_in_pages, bucket_size);
981
982        // Assert that the data written is correct.
983        let memory_0 = mem_mgr.get(MemoryId(0));
984        let memory_1 = mem_mgr.get(MemoryId(1));
985
986        assert_eq!(memory_0.size(), 300);
987        assert_eq!(memory_1.size(), 100);
988
989        let mut buf = vec![0; 1000];
990        memory_0.read(0, &mut buf);
991        assert_eq!(buf, vec![1; 1000]);
992
993        memory_1.read(0, &mut buf);
994        assert_eq!(buf, vec![2; 1000]);
995    }
996
    // Make a few reads and writes and compare the results against golden files that should stay
    // stable between commits.
    // The operations were chosen so that they exercise most of the implementation.
    #[test]
    fn stability() {
        let mem = make_memory();
        // Bucket size of 1 page forces frequent bucket crossings.
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), 1); // very small bucket size.

        // Deterministic blob: bytes 0..=255 repeated, a bit over two Wasm pages long.
        let data: Vec<_> = (0u8..=255u8)
            .cycle()
            .take((WASM_PAGE_SIZE * 2 + 901) as usize)
            .collect();

        const MEMORIES: u8 = 3;
        // Rotates through memory ids 1, 2, 0, 1, 2, ... — note the increment happens
        // BEFORE `get`, so the first memory returned is id 1, not id 0.
        let mut memory_id = 0;
        let mut next_memory = || {
            memory_id += 1;
            memory_id %= MEMORIES;
            mem_mgr.get(MemoryId(memory_id))
        };

        // There are 5 operations. 5 is coprime with MEMORIES (3), so over the
        // MEMORIES * 5 iterations every (memory, offset) combination is exercised.
        for _ in 0..MEMORIES * 5 {
            safe_write(&next_memory(), 0, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE / 2, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE - 1, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE + 1, data.as_slice()).unwrap();
            safe_write(
                &next_memory(),
                2 * WASM_PAGE_SIZE + WASM_PAGE_SIZE / 2,
                data.as_slice(),
            )
            .unwrap();
        }

        // The raw underlying memory must match the golden file byte-for-byte.
        let expected_write = include_bytes!("memory_manager/stability_write.golden");
        assert!(expected_write.as_slice() == mem.borrow().as_slice());

        // Each read below fills a distinct slice of this buffer.
        let mut read = vec![0; 4 * WASM_PAGE_SIZE as usize];

        // There are 4 operations
        for _ in 0..MEMORIES * 4 {
            next_memory().read(0, &mut read[0..WASM_PAGE_SIZE as usize / 2]);
            next_memory().read(
                WASM_PAGE_SIZE / 2 - 150,
                &mut read[WASM_PAGE_SIZE as usize / 2..WASM_PAGE_SIZE as usize],
            );
            next_memory().read(
                WASM_PAGE_SIZE - 473,
                &mut read[WASM_PAGE_SIZE as usize..WASM_PAGE_SIZE as usize * 2],
            );
            next_memory().read(WASM_PAGE_SIZE * 2, &mut read[WASM_PAGE_SIZE as usize * 2..]);
        }

        // The bytes read back must also match their golden file byte-for-byte.
        let expected_read = include_bytes!("memory_manager/stability_read.golden");
        assert!(expected_read.as_slice() == read.as_slice());
    }
1054
1055    #[test]
1056    fn bucket_cache() {
1057        let bucket_cache = BucketCache::new();
1058
1059        // No match, nothing has been stored.
1060        assert_eq!(
1061            bucket_cache.get(VirtualSegment {
1062                address: Address::from(0),
1063                length: Bytes::from(1u64)
1064            }),
1065            None
1066        );
1067
1068        bucket_cache.store(
1069            VirtualSegment {
1070                address: Address::from(0),
1071                length: Bytes::from(335u64),
1072            },
1073            Address::from(983),
1074        );
1075
1076        // Match at the beginning
1077        assert_eq!(
1078            bucket_cache.get(VirtualSegment {
1079                address: Address::from(1),
1080                length: Bytes::from(2u64)
1081            }),
1082            Some(Address::from(984))
1083        );
1084
1085        // Match at the end
1086        assert_eq!(
1087            bucket_cache.get(VirtualSegment {
1088                address: Address::from(334),
1089                length: Bytes::from(1u64)
1090            }),
1091            Some(Address::from(1317))
1092        );
1093
1094        // Match entire segment
1095        assert_eq!(
1096            bucket_cache.get(VirtualSegment {
1097                address: Address::from(0),
1098                length: Bytes::from(335u64),
1099            }),
1100            Some(Address::from(983))
1101        );
1102
1103        // No match - outside cached segment
1104        assert_eq!(
1105            bucket_cache.get(VirtualSegment {
1106                address: Address::from(1),
1107                length: Bytes::from(335u64)
1108            }),
1109            None
1110        );
1111    }
1112}