use crate::{
    read_struct, read_to_vec,
    types::{Address, Bytes},
    write, write_struct, Memory, WASM_PAGE_SIZE,
};
use std::cell::{Cell, RefCell};
use std::rc::Rc;

const MAGIC: &[u8; 3] = b"MGR"; // Memory manager.
const LAYOUT_VERSION: u8 = 1;

// The maximum number of virtual memories the memory manager can handle.
const MAX_NUM_MEMORIES: u8 = 255;

// The maximum number of buckets the memory manager can handle.
const MAX_NUM_BUCKETS: u64 = 32768;

// The default size of a bucket, in WASM pages.
const BUCKET_SIZE_IN_PAGES: u64 = 128;

// A marker stored in the bucket allocation table to indicate that a bucket
// is not allocated to any memory.
const UNALLOCATED_BUCKET_MARKER: u8 = MAX_NUM_MEMORIES;

// The offset, in pages, where the buckets begin.
const BUCKETS_OFFSET_IN_PAGES: u64 = 1;
const BUCKETS_OFFSET_IN_BYTES: u64 = BUCKETS_OFFSET_IN_PAGES * WASM_PAGE_SIZE;

// Bytes reserved in the header for future extensions.
const HEADER_RESERVED_BYTES: usize = 32;

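/// The memory manager simulates multiple memories within a single underlying
/// memory by dividing it into fixed-size buckets and assigning buckets to
/// virtual memories on demand.
///
/// An illustrative sketch of typical usage (`DefaultMemoryImpl` is assumed to
/// be a `Memory` implementation exposed by the surrounding crate):
///
/// ```ignore
/// let mem_mgr = MemoryManager::init(DefaultMemoryImpl::default());
///
/// // Multiple virtual memories, each with its own address space.
/// let memory_0 = mem_mgr.get(MemoryId::new(0));
/// let memory_1 = mem_mgr.get(MemoryId::new(1));
///
/// memory_0.grow(1);
/// memory_0.write(0, &[1, 2, 3]);
///
/// let mut bytes = [0; 3];
/// memory_0.read(0, &mut bytes);
/// assert_eq!(bytes, [1, 2, 3]);
/// ```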
pub struct MemoryManager<M: Memory> {
    inner: Rc<RefCell<MemoryManagerInner<M>>>,
}

impl<M: Memory> MemoryManager<M> {
    /// Initializes a `MemoryManager` with the given memory, using the default
    /// bucket size.
    pub fn init(memory: M) -> Self {
        Self::init_with_bucket_size(memory, BUCKET_SIZE_IN_PAGES as u16)
    }

    /// Initializes a `MemoryManager` with the given memory and bucket size,
    /// in pages.
    pub fn init_with_bucket_size(memory: M, bucket_size_in_pages: u16) -> Self {
        Self {
            inner: Rc::new(RefCell::new(MemoryManagerInner::init(
                memory,
                bucket_size_in_pages,
            ))),
        }
    }

    /// Returns the virtual memory associated with the given `id`.
    pub fn get(&self, id: MemoryId) -> VirtualMemory<M> {
        VirtualMemory {
            id,
            memory_manager: self.inner.clone(),
            cache: BucketCache::new(),
        }
    }

    /// Returns the underlying memory, or `None` if there are outstanding
    /// references to the manager.
    pub fn into_memory(self) -> Option<M> {
        Rc::into_inner(self.inner).map(|inner| inner.into_inner().into_memory())
    }
}

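// A sketch of the layout implied by `Header` and the constants above. The
// header sits at address 0, followed by the bucket allocation table; the
// buckets themselves begin at page `BUCKETS_OFFSET_IN_PAGES`:
//
// -------------------------------------------------- <- Address 0
// Magic "MGR"                            3 bytes
// Layout version                         1 byte
// Number of allocated buckets            2 bytes
// Bucket size, in pages                  2 bytes
// Reserved space                         32 bytes
// Size of memory 0, in pages             8 bytes
// ...
// Size of memory 254, in pages           8 bytes
// -------------------------------------------------- <- `Header::size()`
// Bucket allocation table                MAX_NUM_BUCKETS bytes
// -------------------------------------------------- <- Page 1
// Bucket 0
// Bucket 1
// ...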
#[repr(C, packed)]
struct Header {
    magic: [u8; 3],

    version: u8,

    // The number of buckets allocated by the memory manager.
    num_allocated_buckets: u16,

    // The size of a bucket, in WASM pages.
    bucket_size_in_pages: u16,

    // Reserved bytes for future extensions.
    _reserved: [u8; HEADER_RESERVED_BYTES],

    // The size of each of the virtual memories, in WASM pages.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],
}

impl Header {
    fn size() -> Bytes {
        Bytes::new(core::mem::size_of::<Self>() as u64)
    }
}

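/// A handle to a virtual memory managed by a `MemoryManager`. All reads and
/// writes are translated to the buckets the manager has assigned to `id`.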
#[derive(Clone)]
pub struct VirtualMemory<M: Memory> {
    id: MemoryId,
    memory_manager: Rc<RefCell<MemoryManagerInner<M>>>,
    cache: BucketCache,
}

impl<M: Memory> Memory for VirtualMemory<M> {
    fn size(&self) -> u64 {
        self.memory_manager.borrow().memory_size(self.id)
    }

    fn grow(&self, pages: u64) -> i64 {
        self.memory_manager.borrow_mut().grow(self.id, pages)
    }

    fn read(&self, offset: u64, dst: &mut [u8]) {
        self.memory_manager
            .borrow()
            .read(self.id, offset, dst, &self.cache)
    }

    unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
        self.memory_manager
            .borrow()
            .read_unsafe(self.id, offset, dst, count, &self.cache)
    }

    fn write(&self, offset: u64, src: &[u8]) {
        self.memory_manager
            .borrow()
            .write(self.id, offset, src, &self.cache)
    }
}

#[derive(Clone)]
struct MemoryManagerInner<M: Memory> {
    memory: M,

    // The number of buckets that have been allocated.
    allocated_buckets: u16,

    bucket_size_in_pages: u16,

    // The size, in pages, of each of the managed memories.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],

    // For each managed memory, the buckets (in order) that hold its data.
    memory_buckets: Vec<Vec<BucketId>>,
}

impl<M: Memory> MemoryManagerInner<M> {
    fn init(memory: M, bucket_size_in_pages: u16) -> Self {
        if memory.size() == 0 {
            // The memory is empty. Create a new instance.
            return Self::new(memory, bucket_size_in_pages);
        }

        // Check whether the memory contains a memory manager by inspecting the magic.
        let mut dst = [0; 3];
        memory.read(0, &mut dst);
        if &dst != MAGIC {
            // No memory manager found. Create a new instance.
            MemoryManagerInner::new(memory, bucket_size_in_pages)
        } else {
            // The memory already contains a memory manager. Load it.
            MemoryManagerInner::load(memory)
        }
    }

    fn new(memory: M, bucket_size_in_pages: u16) -> Self {
        let mem_mgr = Self {
            memory,
            allocated_buckets: 0,
            memory_sizes_in_pages: [0; MAX_NUM_MEMORIES as usize],
            memory_buckets: vec![vec![]; MAX_NUM_MEMORIES as usize],
            bucket_size_in_pages,
        };

        mem_mgr.save_header();

        // Mark all the buckets as unallocated.
        write(
            &mem_mgr.memory,
            bucket_allocations_address(BucketId(0)).get(),
            &[UNALLOCATED_BUCKET_MARKER; MAX_NUM_BUCKETS as usize],
        );

        mem_mgr
    }

    fn load(memory: M) -> Self {
        // Read and validate the header.
        let header: Header = read_struct(Address::from(0), &memory);
        assert_eq!(&header.magic, MAGIC, "Bad magic.");
        assert_eq!(header.version, LAYOUT_VERSION, "Unsupported version.");

        // Rebuild the per-memory bucket lists from the bucket allocation table.
        let mut buckets = vec![];
        read_to_vec(
            &memory,
            bucket_allocations_address(BucketId(0)),
            &mut buckets,
            MAX_NUM_BUCKETS as usize,
        );

        let mut memory_buckets = vec![vec![]; MAX_NUM_MEMORIES as usize];
        for (bucket_idx, memory) in buckets.into_iter().enumerate() {
            if memory != UNALLOCATED_BUCKET_MARKER {
                memory_buckets[memory as usize].push(BucketId(bucket_idx as u16));
            }
        }

        Self {
            memory,
            allocated_buckets: header.num_allocated_buckets,
            bucket_size_in_pages: header.bucket_size_in_pages,
            memory_sizes_in_pages: header.memory_sizes_in_pages,
            memory_buckets,
        }
    }

    fn save_header(&self) {
        let header = Header {
            magic: *MAGIC,
            version: LAYOUT_VERSION,
            num_allocated_buckets: self.allocated_buckets,
            bucket_size_in_pages: self.bucket_size_in_pages,
            _reserved: [0; HEADER_RESERVED_BYTES],
            memory_sizes_in_pages: self.memory_sizes_in_pages,
        };

        write_struct(&header, Address::from(0), &self.memory);
    }

    fn memory_size(&self, id: MemoryId) -> u64 {
        self.memory_sizes_in_pages[id.0 as usize]
    }

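    /// Grows the memory with the given `id` by `pages` pages, allocating new
    /// buckets (and growing the underlying memory) as needed. Returns the
    /// previous size in pages on success, or -1 if the bucket limit would be
    /// exceeded.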
    fn grow(&mut self, id: MemoryId, pages: u64) -> i64 {
        // Compute how many additional buckets are needed.
        let old_size = self.memory_size(id);
        let new_size = old_size + pages;
        let current_buckets = self.num_buckets_needed(old_size);
        let required_buckets = self.num_buckets_needed(new_size);
        let new_buckets_needed = required_buckets - current_buckets;

        if new_buckets_needed + self.allocated_buckets as u64 > MAX_NUM_BUCKETS {
            // Exceeded the maximum number of buckets.
            return -1;
        }

        // Allocate the new buckets.
        let memory_bucket = &mut self.memory_buckets[id.0 as usize];
        memory_bucket.reserve(new_buckets_needed as usize);
        for _ in 0..new_buckets_needed {
            let new_bucket_id = BucketId(self.allocated_buckets);
            memory_bucket.push(new_bucket_id);

            // Record in the bucket allocation table that this bucket belongs
            // to the memory with the provided `id`.
            write(
                &self.memory,
                bucket_allocations_address(new_bucket_id).get(),
                &[id.0],
            );

            self.allocated_buckets += 1;
        }

        // Grow the underlying memory if necessary.
        let pages_needed = BUCKETS_OFFSET_IN_PAGES
            + self.bucket_size_in_pages as u64 * self.allocated_buckets as u64;
        if pages_needed > self.memory.size() {
            let additional_pages_needed = pages_needed - self.memory.size();
            let prev_pages = self.memory.grow(additional_pages_needed);
            if prev_pages == -1 {
                panic!("{id:?}: grow failed");
            }
        }

        // Update the memory's size, persist the header, and return the old size.
        self.memory_sizes_in_pages[id.0 as usize] = new_size;

        self.save_header();
        old_size as i64
    }

    fn write(&self, id: MemoryId, offset: u64, src: &[u8], bucket_cache: &BucketCache) {
        // If the entire segment lies within the cached bucket, write it directly.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: src.len().into(),
        }) {
            self.memory.write(real_address.get(), src);
            return;
        }

        if (offset + src.len() as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: write out of bounds");
        }

        let mut bytes_written = 0;
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: src.len().into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.write(
                    address.get(),
                    &src[bytes_written as usize..(bytes_written + length.get()) as usize],
                );

                bytes_written += length.get();
            },
        );
    }

    #[inline]
    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8], bucket_cache: &BucketCache) {
        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len(), bucket_cache) }
    }

    /// # Safety
    ///
    /// Callers must guarantee that `dst` is valid for writes of `count` bytes.
    unsafe fn read_unsafe(
        &self,
        id: MemoryId,
        offset: u64,
        dst: *mut u8,
        count: usize,
        bucket_cache: &BucketCache,
    ) {
        // If the entire segment lies within the cached bucket, read it directly.
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: count.into(),
        }) {
            self.memory.read_unsafe(real_address.get(), dst, count);
            return;
        }

        if (offset + count as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: read out of bounds");
        }

        let mut bytes_read = Bytes::new(0);
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: count.into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.read_unsafe(
                    address.get(),
                    dst.add(bytes_read.get() as usize),
                    length.get() as usize,
                );

                bytes_read += length;
            },
        )
    }

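    /// Maps a segment of virtual memory to segments of real memory, invoking
    /// `func` on each real segment in order.
    ///
    /// A virtual segment can span several buckets, and those buckets need not
    /// be contiguous in real memory, so one virtual segment may map to many
    /// real segments. For example (illustrative numbers only): with a bucket
    /// size of 1000 bytes, a virtual segment at offset 950 with length 100
    /// covers the last 50 bytes of the memory's first bucket and the first 50
    /// bytes of its second bucket, wherever those buckets live in real memory.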
    fn for_each_bucket(
        &self,
        MemoryId(id): MemoryId,
        virtual_segment: VirtualSegment,
        bucket_cache: &BucketCache,
        mut func: impl FnMut(RealSegment),
    ) {
        let buckets = self.memory_buckets[id as usize].as_slice();
        let bucket_size_in_bytes = self.bucket_size_in_bytes().get();

        let virtual_offset = virtual_segment.address.get();

        let mut length = virtual_segment.length.get();
        let mut bucket_idx = (virtual_offset / bucket_size_in_bytes) as usize;
        // The offset within the first bucket; zero for all subsequent buckets.
        let mut start_offset_in_bucket = virtual_offset % bucket_size_in_bytes;

        while length > 0 {
            let bucket_address =
                self.bucket_address(buckets.get(bucket_idx).expect("bucket idx out of bounds"));

            let bucket_start = bucket_idx as u64 * bucket_size_in_bytes;
            let segment_len = (bucket_size_in_bytes - start_offset_in_bucket).min(length);

            // Cache this bucket so that subsequent accesses within it are fast.
            bucket_cache.store(
                VirtualSegment {
                    address: bucket_start.into(),
                    length: self.bucket_size_in_bytes(),
                },
                bucket_address,
            );

            func(RealSegment {
                address: bucket_address + start_offset_in_bucket.into(),
                length: segment_len.into(),
            });

            length -= segment_len;
            bucket_idx += 1;
            start_offset_in_bucket = 0;
        }
    }

    fn bucket_size_in_bytes(&self) -> Bytes {
        Bytes::from(self.bucket_size_in_pages as u64 * WASM_PAGE_SIZE)
    }

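    /// Returns the number of buckets needed to store the given number of
    /// pages (i.e. `num_pages / bucket_size`, rounded up).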
    fn num_buckets_needed(&self, num_pages: u64) -> u64 {
        num_pages.div_ceil(self.bucket_size_in_pages as u64)
    }

    pub fn into_memory(self) -> M {
        self.memory
    }

    #[inline]
    fn bucket_address(&self, id: &BucketId) -> Address {
        Address::from(BUCKETS_OFFSET_IN_BYTES) + self.bucket_size_in_bytes() * Bytes::from(id.0)
    }
}

#[derive(Copy, Clone)]
struct VirtualSegment {
    address: Address,
    length: Bytes,
}

impl VirtualSegment {
    fn contains_segment(&self, other: &VirtualSegment) -> bool {
        self.address <= other.address && other.address + other.length <= self.address + self.length
    }
}

struct RealSegment {
    address: Address,
    length: Bytes,
}

#[derive(Clone, Copy, Ord, Eq, PartialEq, PartialOrd, Debug)]
pub struct MemoryId(u8);

impl MemoryId {
    pub const fn new(id: u8) -> Self {
        // Any ID can be used except the special value reserved internally for
        // marking a bucket as unallocated.
        assert!(id != UNALLOCATED_BUCKET_MARKER);

        Self(id)
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct BucketId(u16);

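// Returns the address of the bucket allocation table entry for the given
// bucket. The table lives immediately after the header and holds one byte per
// bucket: the ID of the memory that owns it, or `UNALLOCATED_BUCKET_MARKER`.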
fn bucket_allocations_address(id: BucketId) -> Address {
    Address::from(0) + Header::size() + Bytes::from(id.0)
}

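/// Caches the real address of the most recently accessed bucket so that
/// repeated reads and writes within the same bucket skip the
/// virtual-to-real translation.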
#[derive(Clone)]
struct BucketCache {
    // The virtual segment spanning the cached bucket.
    bucket: Cell<VirtualSegment>,
    // The real address corresponding to `bucket.address`.
    real_address: Cell<Address>,
}

impl BucketCache {
    #[inline]
    fn new() -> Self {
        BucketCache {
            bucket: Cell::new(VirtualSegment {
                address: Address::from(0),
                length: Bytes::new(0),
            }),
            real_address: Cell::new(Address::from(0)),
        }
    }

    /// Returns the real address corresponding to `virtual_segment.address` if
    /// the segment lies entirely within the cached bucket, `None` otherwise.
    #[inline]
    fn get(&self, virtual_segment: VirtualSegment) -> Option<Address> {
        let cached_bucket = self.bucket.get();

        cached_bucket
            .contains_segment(&virtual_segment)
            .then(|| self.real_address.get() + (virtual_segment.address - cached_bucket.address))
    }

    /// Stores the mapping from a virtual segment to a real address.
    #[inline]
    fn store(&self, bucket: VirtualSegment, real_address: Address) {
        self.bucket.set(bucket);
        self.real_address.set(real_address);
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::safe_write;
    use proptest::prelude::*;

    const MAX_MEMORY_IN_PAGES: u64 = MAX_NUM_BUCKETS * BUCKET_SIZE_IN_PAGES;

    fn make_memory() -> Rc<RefCell<Vec<u8>>> {
        Rc::new(RefCell::new(Vec::new()))
    }

    #[test]
    fn can_get_memory() {
        let mem_mgr = MemoryManager::init(make_memory());
        let memory = mem_mgr.get(MemoryId(0));
        assert_eq!(memory.size(), 0);
    }

    #[test]
    fn can_allocate_and_use_memory() {
        let mem_mgr = MemoryManager::init(make_memory());
        let memory = mem_mgr.get(MemoryId(0));
        assert_eq!(memory.grow(1), 0);
        assert_eq!(memory.size(), 1);

        memory.write(0, &[1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        assert_eq!(mem_mgr.inner.borrow().memory_buckets[0], vec![BucketId(0)]);

        assert!(mem_mgr.inner.borrow().memory_buckets[1..]
            .iter()
            .all(|x| x.is_empty()));
    }

    #[test]
    fn can_allocate_and_use_multiple_memories() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        assert_eq!(memory_0.size(), 1);
        assert_eq!(memory_1.size(), 1);

        assert_eq!(
            &mem_mgr.inner.borrow().memory_buckets[..2],
            &[vec![BucketId(0)], vec![BucketId(1)]]
        );

        assert!(mem_mgr.inner.borrow().memory_buckets[2..]
            .iter()
            .all(|x| x.is_empty()));

        memory_0.write(0, &[1, 2, 3]);
        memory_1.write(0, &[4, 5, 6]);

        let mut bytes = vec![0; 3];
        memory_0.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);

        // One bucket per memory, plus the page holding the header.
        assert_eq!(mem.size(), 2 * BUCKET_SIZE_IN_PAGES + 1);
    }

    #[test]
    fn can_be_reinitialized_from_memory() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        memory_0.write(0, &[1, 2, 3]);
        memory_1.write(0, &[4, 5, 6]);

        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        let mut bytes = vec![0; 3];
        memory_0.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);
    }

    #[test]
    fn growing_same_memory_multiple_times_doesnt_increase_underlying_allocation() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));

        // Growing by one page allocates one bucket (plus the header page).
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        // Growing again within the same bucket doesn't increase the underlying allocation.
        assert_eq!(memory_0.grow(1), 1);
        assert_eq!(memory_0.size(), 2);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        // Growing to exactly fill the bucket still doesn't increase the allocation.
        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES - 2), 2);
        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        // Growing by one more page requires a second bucket.
        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES + 1);
        assert_eq!(mem.size(), 1 + 2 * BUCKET_SIZE_IN_PAGES);
    }

    #[test]
    fn does_not_grow_memory_unnecessarily() {
        let mem = make_memory();
        let initial_size = BUCKET_SIZE_IN_PAGES * 2;

        // Grow the underlying memory before initializing the memory manager.
        mem.grow(initial_size);

        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));

        // The first bucket fits in the pre-allocated pages.
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(mem.size(), initial_size);

        // Growing beyond the pre-allocated pages increases the underlying allocation.
        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 1);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES * 2);
    }

    #[test]
    fn growing_beyond_capacity_fails() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES + 1), -1);

        // Growing to MAX_MEMORY_IN_PAGES + 1 pages in two steps also fails.
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES), -1);
    }

    #[test]
    fn can_write_across_bucket_boundaries() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES + 1), 0);

        memory_0.write(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &[1, 2, 3],
        );

        let mut bytes = vec![0; 3];
        memory_0.read(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &mut bytes,
        );
        assert_eq!(bytes, vec![1, 2, 3]);
    }

    #[test]
    fn can_write_across_bucket_boundaries_with_interleaving_memories() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 0);
        assert_eq!(memory_1.grow(1), 0);
        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);

        memory_0.write(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &[1, 2, 3],
        );
        memory_1.write(0, &[4, 5, 6]);

        let mut bytes = vec![0; 3];
        memory_0.read(WASM_PAGE_SIZE * BUCKET_SIZE_IN_PAGES - 1, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);
    }

    #[test]
    #[should_panic]
    fn reading_out_of_bounds_should_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        let mut bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
        memory_0.read(0, &mut bytes);
    }

    #[test]
    #[should_panic]
    fn writing_out_of_bounds_should_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        let bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
        memory_0.write(0, &bytes);
    }

    #[test]
    fn reading_zero_bytes_from_empty_memory_should_not_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.size(), 0);
        let mut bytes = vec![];
        memory_0.read(0, &mut bytes);
    }

    #[test]
    fn writing_zero_bytes_to_empty_memory_should_not_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.size(), 0);
        memory_0.write(0, &[]);
    }

    #[test]
    fn write_and_read_random_bytes() {
        let mem = make_memory();
        // Use a bucket size of 1 page so that accesses frequently cross buckets.
        let mem_mgr = MemoryManager::init_with_bucket_size(mem, 1);

        let memories: Vec<_> = (0..MAX_NUM_MEMORIES)
            .map(|id| mem_mgr.get(MemoryId(id)))
            .collect();

        proptest!(|(
            num_memories in 0..255usize,
            data in proptest::collection::vec(0..u8::MAX, 0..2*WASM_PAGE_SIZE as usize),
            offset in 0..10*WASM_PAGE_SIZE
        )| {
            for memory in memories.iter().take(num_memories) {
                // Write a random blob, growing the memory as needed.
                write(memory, offset, &data);

                // Verify the blob can be read back via `read`.
                {
                    let mut bytes = vec![0; data.len()];
                    memory.read(offset, &mut bytes);
                    assert_eq!(bytes, data);
                }

                // Verify the blob can be read back via `read_to_vec`.
                {
                    let mut bytes = vec![];
                    read_to_vec(memory, offset.into(), &mut bytes, data.len());
                    assert_eq!(bytes, data);
                }

                // Verify the blob can be read back via `read_unsafe`.
                {
                    let mut bytes = vec![0; data.len()];
                    unsafe {
                        memory.read_unsafe(offset, bytes.as_mut_ptr(), data.len());
                    }
                    assert_eq!(bytes, data);
                }
            }
        });
    }

    #[test]
    fn init_with_non_default_bucket_size() {
        // Choose a bucket size that's different from the default.
        let bucket_size = 256;
        assert_ne!(bucket_size, BUCKET_SIZE_IN_PAGES as u16);

        // Initialize the memory manager with the custom bucket size.
        let mem = make_memory();
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), bucket_size);

        // Do some writes.
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));
        memory_0.grow(300);
        memory_1.grow(100);
        memory_0.write(0, &[1; 1000]);
        memory_1.write(0, &[2; 1000]);

        // Reinitialize the memory manager. It should pick up the bucket size
        // that was used at initialization.
        let mem_mgr = MemoryManager::init(mem);

        assert_eq!(mem_mgr.inner.borrow().bucket_size_in_pages, bucket_size);

        // Assert that the data written earlier reads back correctly.
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.size(), 300);
        assert_eq!(memory_1.size(), 100);

        let mut buf = vec![0; 1000];
        memory_0.read(0, &mut buf);
        assert_eq!(buf, vec![1; 1000]);

        memory_1.read(0, &mut buf);
        assert_eq!(buf, vec![2; 1000]);
    }

    // Verifies that writes and reads through the memory manager produce
    // byte-for-byte stable results by comparing against golden files.
    #[test]
    fn stability() {
        let mem = make_memory();
        // A bucket size of 1 page makes the writes below cross many bucket boundaries.
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), 1);

        let data: Vec<_> = (0u8..=255u8)
            .cycle()
            .take((WASM_PAGE_SIZE * 2 + 901) as usize)
            .collect();

        const MEMORIES: u8 = 3;
        let mut memory_id = 0;
        let mut next_memory = || {
            memory_id += 1;
            memory_id %= MEMORIES;
            mem_mgr.get(MemoryId(memory_id))
        };

        // Write the data at a variety of offsets.
        for _ in 0..MEMORIES * 5 {
            safe_write(&next_memory(), 0, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE / 2, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE - 1, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE + 1, data.as_slice()).unwrap();
            safe_write(
                &next_memory(),
                2 * WASM_PAGE_SIZE + WASM_PAGE_SIZE / 2,
                data.as_slice(),
            )
            .unwrap();
        }

        let expected_write = include_bytes!("memory_manager/stability_write.golden");
        assert!(expected_write.as_slice() == mem.borrow().as_slice());

        let mut read = vec![0; 4 * WASM_PAGE_SIZE as usize];

        // Read the data back at a variety of offsets.
        for _ in 0..MEMORIES * 4 {
            next_memory().read(0, &mut read[0..WASM_PAGE_SIZE as usize / 2]);
            next_memory().read(
                WASM_PAGE_SIZE / 2 - 150,
                &mut read[WASM_PAGE_SIZE as usize / 2..WASM_PAGE_SIZE as usize],
            );
            next_memory().read(
                WASM_PAGE_SIZE - 473,
                &mut read[WASM_PAGE_SIZE as usize..WASM_PAGE_SIZE as usize * 2],
            );
            next_memory().read(WASM_PAGE_SIZE * 2, &mut read[WASM_PAGE_SIZE as usize * 2..]);
        }

        let expected_read = include_bytes!("memory_manager/stability_read.golden");
        assert!(expected_read.as_slice() == read.as_slice());
    }

    #[test]
    fn bucket_cache() {
        let bucket_cache = BucketCache::new();

        // No match on an empty cache.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(1u64)
            }),
            None
        );

        bucket_cache.store(
            VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(335u64),
            },
            Address::from(983),
        );

        // Match near the start of the cached segment.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(1),
                length: Bytes::from(2u64)
            }),
            Some(Address::from(984))
        );

        // Match at the end of the cached segment.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(334),
                length: Bytes::from(1u64)
            }),
            Some(Address::from(1317))
        );

        // Match on the entire cached segment.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(335u64),
            }),
            Some(Address::from(983))
        );

        // No match if the segment overruns the cached segment.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(1),
                length: Bytes::from(335u64)
            }),
            None
        );
    }
}