1use crate::{
44 read_struct, read_to_vec,
45 types::{Address, Bytes},
46 write, write_struct, Memory, WASM_PAGE_SIZE,
47};
48use std::cell::{Cell, RefCell};
49use std::rc::Rc;
50
/// Magic bytes identifying a memory that is managed by a `MemoryManager`.
const MAGIC: &[u8; 3] = b"MGR";
/// On-disk layout version; `load` rejects anything else.
const LAYOUT_VERSION: u8 = 1;

/// Maximum number of virtual memories; memory ids fit in a `u8`, and the
/// value 255 itself is reserved (see `UNALLOCATED_BUCKET_MARKER`).
const MAX_NUM_MEMORIES: u8 = 255;

/// Maximum number of buckets the manager can hand out across all memories.
const MAX_NUM_BUCKETS: u64 = 32768;

/// Default bucket size, in WASM pages (used by `MemoryManager::init`).
const BUCKET_SIZE_IN_PAGES: u64 = 128;

/// Marker stored in the bucket-allocation table for a bucket that is not
/// assigned to any memory. Equal to `MAX_NUM_MEMORIES`, which is why that
/// id is never a valid `MemoryId`.
const UNALLOCATED_BUCKET_MARKER: u8 = MAX_NUM_MEMORIES;

/// The buckets' data begins one page into the underlying memory; the first
/// page holds the header and the bucket-allocation table.
const BUCKETS_OFFSET_IN_PAGES: u64 = 1;
const BUCKETS_OFFSET_IN_BYTES: u64 = BUCKETS_OFFSET_IN_PAGES * WASM_PAGE_SIZE;

/// Bytes reserved in the header for future extensions.
const HEADER_RESERVED_BYTES: usize = 32;
72
/// Multiplexes up to `MAX_NUM_MEMORIES` virtual memories onto a single
/// underlying [`Memory`], handing out bucket-granular allocations.
///
/// Shared via `Rc<RefCell<..>>` so every `VirtualMemory` handle returned by
/// [`MemoryManager::get`] can reach the same inner state.
pub struct MemoryManager<M: Memory> {
    // Shared, mutable core; also cloned into each `VirtualMemory`.
    inner: Rc<RefCell<MemoryManagerInner<M>>>,
}
139
140impl<M: Memory> MemoryManager<M> {
141 pub fn init(memory: M) -> Self {
143 Self::init_with_bucket_size(memory, BUCKET_SIZE_IN_PAGES as u16)
144 }
145
146 pub fn init_with_bucket_size(memory: M, bucket_size_in_pages: u16) -> Self {
148 Self {
149 inner: Rc::new(RefCell::new(MemoryManagerInner::init(
150 memory,
151 bucket_size_in_pages,
152 ))),
153 }
154 }
155
156 pub fn get(&self, id: MemoryId) -> VirtualMemory<M> {
158 VirtualMemory {
159 id,
160 memory_manager: self.inner.clone(),
161 cache: BucketCache::new(),
162 }
163 }
164
165 pub fn into_memory(self) -> Option<M> {
171 Rc::into_inner(self.inner).map(|inner| inner.into_inner().into_memory())
172 }
173}
174
// Persisted at offset 0 of the underlying memory. `repr(C, packed)` fixes
// the byte layout so `read_struct`/`write_struct` round-trip it verbatim.
#[repr(C, packed)]
struct Header {
    // Must equal `MAGIC` for a managed memory.
    magic: [u8; 3],

    // Must equal `LAYOUT_VERSION`.
    version: u8,

    // How many buckets have been handed out so far.
    num_allocated_buckets: u16,

    // Bucket size (in WASM pages) this memory was initialized with.
    bucket_size_in_pages: u16,

    // Reserved for future layout extensions; written as zeros.
    _reserved: [u8; HEADER_RESERVED_BYTES],

    // Size, in pages, of each virtual memory.
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],
}
193
194impl Header {
195 fn size() -> Bytes {
196 Bytes::new(core::mem::size_of::<Self>() as u64)
197 }
198}
199
/// A single virtual memory carved out of the managed memory.
/// Implements [`Memory`], so it can be used anywhere a memory is expected.
#[derive(Clone)]
pub struct VirtualMemory<M: Memory> {
    // Which of the manager's memories this handle addresses.
    id: MemoryId,
    // Shared manager state (same `Rc` as `MemoryManager::inner`).
    memory_manager: Rc<RefCell<MemoryManagerInner<M>>>,
    // Per-handle single-bucket cache to skip the bucket lookup on
    // repeated accesses to the same bucket.
    cache: BucketCache,
}
206
207impl<M: Memory> Memory for VirtualMemory<M> {
208 fn size(&self) -> u64 {
209 self.memory_manager.borrow().memory_size(self.id)
210 }
211
212 fn grow(&self, pages: u64) -> i64 {
213 self.memory_manager.borrow_mut().grow(self.id, pages)
214 }
215
216 fn read(&self, offset: u64, dst: &mut [u8]) {
217 self.memory_manager
218 .borrow()
219 .read(self.id, offset, dst, &self.cache)
220 }
221
222 unsafe fn read_unsafe(&self, offset: u64, dst: *mut u8, count: usize) {
223 self.memory_manager
224 .borrow()
225 .read_unsafe(self.id, offset, dst, count, &self.cache)
226 }
227
228 fn write(&self, offset: u64, src: &[u8]) {
229 self.memory_manager
230 .borrow()
231 .write(self.id, offset, src, &self.cache)
232 }
233}
234
/// The manager's actual state; shared behind `Rc<RefCell<..>>`.
#[derive(Clone)]
struct MemoryManagerInner<M: Memory> {
    memory: M,

    // Number of buckets handed out so far (mirrored in the header).
    allocated_buckets: u16,

    // Bucket size in WASM pages (mirrored in the header).
    bucket_size_in_pages: u16,

    // Size, in pages, of each virtual memory (mirrored in the header).
    memory_sizes_in_pages: [u64; MAX_NUM_MEMORIES as usize],

    // For each memory id, the ordered list of buckets assigned to it.
    // Rebuilt from the on-disk bucket-allocation table by `load`.
    memory_buckets: Vec<Vec<BucketId>>,
}
250
impl<M: Memory> MemoryManagerInner<M> {
    /// Initializes from `memory`: loads existing state if the magic bytes
    /// are present, otherwise creates a fresh manager.
    fn init(memory: M, bucket_size_in_pages: u16) -> Self {
        if memory.size() == 0 {
            // Completely empty memory — nothing to load.
            return Self::new(memory, bucket_size_in_pages);
        }

        // Peek at the first 3 bytes to detect a previously saved manager.
        let mut dst = [0; 3];
        memory.read(0, &mut dst);
        if &dst != MAGIC {
            // Non-empty but not ours; start fresh (existing bytes will be
            // overwritten by the header / allocation table).
            MemoryManagerInner::new(memory, bucket_size_in_pages)
        } else {
            // `bucket_size_in_pages` is ignored here; the persisted value wins.
            MemoryManagerInner::load(memory)
        }
    }

    /// Creates a fresh manager and persists its initial state.
    fn new(memory: M, bucket_size_in_pages: u16) -> Self {
        let mem_mgr = Self {
            memory,
            allocated_buckets: 0,
            memory_sizes_in_pages: [0; MAX_NUM_MEMORIES as usize],
            memory_buckets: vec![vec![]; MAX_NUM_MEMORIES as usize],
            bucket_size_in_pages,
        };

        mem_mgr.save_header();

        // Mark every slot of the on-disk bucket-allocation table
        // (one byte per bucket, right after the header) as unallocated.
        write(
            &mem_mgr.memory,
            bucket_allocations_address(BucketId(0)).get(),
            &[UNALLOCATED_BUCKET_MARKER; MAX_NUM_BUCKETS as usize],
        );

        mem_mgr
    }

    /// Loads a previously saved manager from `memory`.
    ///
    /// Panics if the magic bytes or layout version don't match.
    fn load(memory: M) -> Self {
        let header: Header = read_struct(Address::from(0), &memory);
        assert_eq!(&header.magic, MAGIC, "Bad magic.");
        assert_eq!(header.version, LAYOUT_VERSION, "Unsupported version.");

        // Read the full bucket-allocation table: buckets[i] is the memory id
        // owning bucket i, or UNALLOCATED_BUCKET_MARKER.
        let mut buckets = vec![];
        read_to_vec(
            &memory,
            bucket_allocations_address(BucketId(0)),
            &mut buckets,
            MAX_NUM_BUCKETS as usize,
        );

        // Rebuild the per-memory bucket lists. Iterating in bucket order
        // keeps each list sorted by bucket index, matching allocation order.
        let mut memory_buckets = vec![vec![]; MAX_NUM_MEMORIES as usize];
        for (bucket_idx, memory) in buckets.into_iter().enumerate() {
            if memory != UNALLOCATED_BUCKET_MARKER {
                memory_buckets[memory as usize].push(BucketId(bucket_idx as u16));
            }
        }

        Self {
            memory,
            allocated_buckets: header.num_allocated_buckets,
            bucket_size_in_pages: header.bucket_size_in_pages,
            memory_sizes_in_pages: header.memory_sizes_in_pages,
            memory_buckets,
        }
    }

    /// Serializes the current state into the header at offset 0.
    fn save_header(&self) {
        let header = Header {
            magic: *MAGIC,
            version: LAYOUT_VERSION,
            num_allocated_buckets: self.allocated_buckets,
            bucket_size_in_pages: self.bucket_size_in_pages,
            _reserved: [0; HEADER_RESERVED_BYTES],
            memory_sizes_in_pages: self.memory_sizes_in_pages,
        };

        write_struct(&header, Address::from(0), &self.memory);
    }

    /// Size (in pages) of the virtual memory with the given id.
    fn memory_size(&self, id: MemoryId) -> u64 {
        self.memory_sizes_in_pages[id.0 as usize]
    }

    /// Grows memory `id` by `pages`, allocating new buckets as needed and
    /// growing the underlying memory if required.
    ///
    /// Returns the previous size in pages, or -1 if the bucket budget
    /// would be exceeded. Panics if the underlying memory fails to grow.
    fn grow(&mut self, id: MemoryId, pages: u64) -> i64 {
        let old_size = self.memory_size(id);
        // NOTE(review): `old_size + pages` can overflow u64 for adversarial
        // `pages` (panic in debug, wrap in release) — confirm callers bound
        // `pages`, or consider `checked_add` returning -1.
        let new_size = old_size + pages;
        let current_buckets = self.num_buckets_needed(old_size);
        let required_buckets = self.num_buckets_needed(new_size);
        let new_buckets_needed = required_buckets - current_buckets;

        if new_buckets_needed + self.allocated_buckets as u64 > MAX_NUM_BUCKETS {
            return -1;
        }

        // Assign the next `new_buckets_needed` bucket ids to this memory and
        // record each assignment in the on-disk allocation table.
        let memory_bucket = &mut self.memory_buckets[id.0 as usize];
        memory_bucket.reserve(new_buckets_needed as usize);
        for _ in 0..new_buckets_needed {
            let new_bucket_id = BucketId(self.allocated_buckets);
            memory_bucket.push(new_bucket_id);

            // One byte per bucket: the owning memory id.
            write(
                &self.memory,
                bucket_allocations_address(new_bucket_id).get(),
                &[id.0],
            );

            self.allocated_buckets += 1;
        }

        // Grow the underlying memory only if the allocated buckets no longer
        // fit (header page + bucket data).
        let pages_needed = BUCKETS_OFFSET_IN_PAGES
            + self.bucket_size_in_pages as u64 * self.allocated_buckets as u64;
        if pages_needed > self.memory.size() {
            let additional_pages_needed = pages_needed - self.memory.size();
            let prev_pages = self.memory.grow(additional_pages_needed);
            if prev_pages == -1 {
                panic!("{id:?}: grow failed");
            }
        }

        self.memory_sizes_in_pages[id.0 as usize] = new_size;

        // Persist the new sizes/bucket count.
        self.save_header();
        old_size as i64
    }

    /// Writes `src` at virtual `offset` of memory `id`.
    ///
    /// Fast path: if the cached bucket covers the whole write, translate the
    /// address once and write directly. Otherwise split across buckets.
    /// Panics on out-of-bounds writes.
    fn write(&self, id: MemoryId, offset: u64, src: &[u8], bucket_cache: &BucketCache) {
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: src.len().into(),
        }) {
            // Cache hit implies the segment is within an allocated bucket,
            // hence in bounds; skip the bounds check.
            self.memory.write(real_address.get(), src);
            return;
        }

        // NOTE(review): `offset + src.len()` could overflow u64 for
        // adversarial offsets — confirm upstream validation.
        if (offset + src.len() as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: write out of bounds");
        }

        // Slow path: visit each overlapping bucket and write the matching
        // slice of `src` into it.
        let mut bytes_written = 0;
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: src.len().into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.write(
                    address.get(),
                    &src[bytes_written as usize..(bytes_written + length.get()) as usize],
                );

                bytes_written += length.get();
            },
        );
    }

    /// Reads `dst.len()` bytes at virtual `offset` of memory `id`.
    #[inline]
    fn read(&self, id: MemoryId, offset: u64, dst: &mut [u8], bucket_cache: &BucketCache) {
        // SAFETY: `dst` is a valid, exclusive slice of exactly `dst.len()` bytes.
        unsafe { self.read_unsafe(id, offset, dst.as_mut_ptr(), dst.len(), bucket_cache) }
    }

    /// Reads `count` bytes at virtual `offset` of memory `id` into `dst`.
    ///
    /// # Safety
    /// `dst` must be valid for writes of `count` bytes.
    /// Panics on out-of-bounds reads.
    unsafe fn read_unsafe(
        &self,
        id: MemoryId,
        offset: u64,
        dst: *mut u8,
        count: usize,
        bucket_cache: &BucketCache,
    ) {
        // Fast path: the cached bucket covers the whole read (see `write`).
        if let Some(real_address) = bucket_cache.get(VirtualSegment {
            address: offset.into(),
            length: count.into(),
        }) {
            self.memory.read_unsafe(real_address.get(), dst, count);
            return;
        }

        if (offset + count as u64) > self.memory_size(id) * WASM_PAGE_SIZE {
            panic!("{id:?}: read out of bounds");
        }

        // Slow path: read bucket by bucket, advancing `dst` as we go.
        let mut bytes_read = Bytes::new(0);
        self.for_each_bucket(
            id,
            VirtualSegment {
                address: offset.into(),
                length: count.into(),
            },
            bucket_cache,
            |RealSegment { address, length }| {
                self.memory.read_unsafe(
                    address.get(),
                    dst.add(bytes_read.get() as usize),
                    length.get() as usize,
                );

                bytes_read += length;
            },
        )
    }

    /// Splits `virtual_segment` into per-bucket real segments and invokes
    /// `func` on each, in order. Also stores each visited bucket's mapping
    /// in `bucket_cache` so the next access to the same bucket is a hit.
    ///
    /// Panics (via `expect`) if the segment extends past the memory's buckets.
    fn for_each_bucket(
        &self,
        MemoryId(id): MemoryId,
        virtual_segment: VirtualSegment,
        bucket_cache: &BucketCache,
        mut func: impl FnMut(RealSegment),
    ) {
        let buckets = self.memory_buckets[id as usize].as_slice();
        let bucket_size_in_bytes = self.bucket_size_in_bytes().get();

        let virtual_offset = virtual_segment.address.get();

        let mut length = virtual_segment.length.get();
        // Index of the first bucket touched and the offset within it.
        let mut bucket_idx = (virtual_offset / bucket_size_in_bytes) as usize;
        let mut start_offset_in_bucket = virtual_offset % bucket_size_in_bytes;

        while length > 0 {
            let bucket_address =
                self.bucket_address(buckets.get(bucket_idx).expect("bucket idx out of bounds"));

            let bucket_start = bucket_idx as u64 * bucket_size_in_bytes;
            // Stay within the current bucket; the remainder spills into the next.
            let segment_len = (bucket_size_in_bytes - start_offset_in_bucket).min(length);

            // Cache the full bucket (not just the touched part) so any later
            // access within this bucket hits.
            bucket_cache.store(
                VirtualSegment {
                    address: bucket_start.into(),
                    length: self.bucket_size_in_bytes(),
                },
                bucket_address,
            );

            func(RealSegment {
                address: bucket_address + start_offset_in_bucket.into(),
                length: segment_len.into(),
            });

            length -= segment_len;
            bucket_idx += 1;
            // Only the first bucket has a nonzero in-bucket offset.
            start_offset_in_bucket = 0;
        }
    }

    /// Bucket size in bytes.
    fn bucket_size_in_bytes(&self) -> Bytes {
        Bytes::from(self.bucket_size_in_pages as u64 * WASM_PAGE_SIZE)
    }

    /// Number of buckets needed to hold `num_pages` pages (rounded up).
    fn num_buckets_needed(&self, num_pages: u64) -> u64 {
        num_pages.div_ceil(self.bucket_size_in_pages as u64)
    }

    /// Consumes self and returns the underlying memory.
    pub fn into_memory(self) -> M {
        self.memory
    }

    /// Real address where the given bucket's data starts.
    #[inline]
    fn bucket_address(&self, id: &BucketId) -> Address {
        Address::from(BUCKETS_OFFSET_IN_BYTES) + self.bucket_size_in_bytes() * Bytes::from(id.0)
    }
}
569
/// A contiguous range in a memory's *virtual* address space.
#[derive(Copy, Clone)]
struct VirtualSegment {
    address: Address,
    length: Bytes,
}
575
576impl VirtualSegment {
577 fn contains_segment(&self, other: &VirtualSegment) -> bool {
578 self.address <= other.address && other.address + other.length <= self.address + self.length
579 }
580}
581
/// A contiguous range in the *underlying* memory's address space,
/// i.e. a `VirtualSegment` after bucket translation.
struct RealSegment {
    address: Address,
    length: Bytes,
}
586
/// Identifier of a virtual memory (0..=254).
#[derive(Clone, Copy, Ord, Eq, PartialEq, PartialOrd, Debug)]
pub struct MemoryId(u8);

impl MemoryId {
    /// Creates a `MemoryId`.
    ///
    /// # Panics
    /// Panics if `id` is 255, which is reserved as the on-disk marker for
    /// an unallocated bucket (`UNALLOCATED_BUCKET_MARKER`).
    pub const fn new(id: u8) -> Self {
        assert!(id != UNALLOCATED_BUCKET_MARKER);

        Self(id)
    }
}
599
/// Index of a bucket in the underlying memory (0..MAX_NUM_BUCKETS).
#[derive(Clone, Copy, Debug, PartialEq)]
struct BucketId(u16);
603
604fn bucket_allocations_address(id: BucketId) -> Address {
605 Address::from(0) + Header::size() + Bytes::from(id.0)
606}
607
/// Single-entry cache mapping one virtual bucket to its real address.
/// Uses `Cell`s so lookups/stores work through `&self` from read paths.
#[derive(Clone)]
struct BucketCache {
    // The cached virtual bucket (address + length in virtual space).
    bucket: Cell<VirtualSegment>,
    // Real address in the underlying memory where that bucket starts.
    real_address: Cell<Address>,
}
617
618impl BucketCache {
619 #[inline]
620 fn new() -> Self {
621 BucketCache {
622 bucket: Cell::new(VirtualSegment {
623 address: Address::from(0),
624 length: Bytes::new(0),
625 }),
626 real_address: Cell::new(Address::from(0)),
627 }
628 }
629}
630
631impl BucketCache {
632 #[inline]
635 fn get(&self, virtual_segment: VirtualSegment) -> Option<Address> {
636 let cached_bucket = self.bucket.get();
637
638 cached_bucket
639 .contains_segment(&virtual_segment)
640 .then(|| self.real_address.get() + (virtual_segment.address - cached_bucket.address))
641 }
642
643 #[inline]
645 fn store(&self, bucket: VirtualSegment, real_address: Address) {
646 self.bucket.set(bucket);
647 self.real_address.set(real_address);
648 }
649}
650
#[cfg(test)]
mod test {
    use super::*;
    use crate::safe_write;
    use proptest::prelude::*;

    // Total virtual capacity (in pages) with the default bucket size.
    const MAX_MEMORY_IN_PAGES: u64 = MAX_NUM_BUCKETS * BUCKET_SIZE_IN_PAGES;

    // A fresh in-memory backing store for tests.
    fn make_memory() -> Rc<RefCell<Vec<u8>>> {
        Rc::new(RefCell::new(Vec::new()))
    }

    #[test]
    fn can_get_memory() {
        let mem_mgr = MemoryManager::init(make_memory());
        let memory = mem_mgr.get(MemoryId(0));
        assert_eq!(memory.size(), 0);
    }

    #[test]
    fn can_allocate_and_use_memory() {
        let mem_mgr = MemoryManager::init(make_memory());
        let memory = mem_mgr.get(MemoryId(0));
        assert_eq!(memory.grow(1), 0);
        assert_eq!(memory.size(), 1);

        memory.write(0, &[1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        // The first (and only) bucket went to memory 0.
        assert_eq!(mem_mgr.inner.borrow().memory_buckets[0], vec![BucketId(0)]);

        assert!(mem_mgr.inner.borrow().memory_buckets[1..]
            .iter()
            .all(|x| x.is_empty()));
    }

    #[test]
    fn can_allocate_and_use_multiple_memories() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        assert_eq!(memory_0.size(), 1);
        assert_eq!(memory_1.size(), 1);

        // Buckets are handed out in allocation order.
        assert_eq!(
            &mem_mgr.inner.borrow().memory_buckets[..2],
            &[vec![BucketId(0)], vec![BucketId(1)],]
        );

        assert!(mem_mgr.inner.borrow().memory_buckets[2..]
            .iter()
            .all(|x| x.is_empty()));

        memory_0.write(0, &[1, 2, 3]);
        memory_0.write(0, &[1, 2, 3]);
        memory_1.write(0, &[4, 5, 6]);

        let mut bytes = vec![0; 3];
        memory_0.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);

        // Underlying allocation: header page + one bucket per memory.
        assert_eq!(mem.size(), 2 * BUCKET_SIZE_IN_PAGES + 1);
    }

    #[test]
    fn can_be_reinitialized_from_memory() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        memory_0.write(0, &[1, 2, 3]);
        memory_1.write(0, &[4, 5, 6]);

        // A second init on the same memory must load the saved state.
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        let mut bytes = vec![0; 3];
        memory_0.read(0, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);
    }

    #[test]
    fn growing_same_memory_multiple_times_doesnt_increase_underlying_allocation() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));

        // One page needs a whole bucket (plus the header page).
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        // Growing within the bucket is free.
        assert_eq!(memory_0.grow(1), 1);
        assert_eq!(memory_0.size(), 2);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES - 2), 2);
        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES);

        // Crossing the bucket boundary allocates a second bucket.
        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);
        assert_eq!(memory_0.size(), BUCKET_SIZE_IN_PAGES + 1);
        assert_eq!(mem.size(), 1 + 2 * BUCKET_SIZE_IN_PAGES);
    }

    #[test]
    fn does_not_grow_memory_unnecessarily() {
        let mem = make_memory();
        let initial_size = BUCKET_SIZE_IN_PAGES * 2;

        // Pre-grow the underlying memory.
        mem.grow(initial_size);

        let mem_mgr = MemoryManager::init(mem.clone());
        let memory_0 = mem_mgr.get(MemoryId(0));

        // First bucket fits in the pre-grown space.
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(mem.size(), initial_size);

        // Second bucket no longer fits; underlying memory grows.
        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 1);
        assert_eq!(mem.size(), 1 + BUCKET_SIZE_IN_PAGES * 2);
    }

    #[test]
    fn growing_beyond_capacity_fails() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES + 1), -1);

        // Growing to exactly one bucket, then asking for the full capacity
        // on top of it, must also fail.
        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_0.grow(MAX_MEMORY_IN_PAGES), -1);
    }

    #[test]
    fn can_write_across_bucket_boundaries() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES + 1), 0);

        // Write straddles the first/second bucket boundary.
        memory_0.write(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &[1, 2, 3],
        );

        let mut bytes = vec![0; 3];
        memory_0.read(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &mut bytes,
        );
        assert_eq!(bytes, vec![1, 2, 3]);
    }

    #[test]
    fn can_write_across_bucket_boundaries_with_interleaving_memories() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        // memory_0's two buckets are NOT adjacent (memory_1 got one in between).
        assert_eq!(memory_0.grow(BUCKET_SIZE_IN_PAGES), 0);
        assert_eq!(memory_1.grow(1), 0);
        assert_eq!(memory_0.grow(1), BUCKET_SIZE_IN_PAGES as i64);

        memory_0.write(
            mem_mgr.inner.borrow().bucket_size_in_bytes().get() - 1,
            &[1, 2, 3],
        );
        memory_1.write(0, &[4, 5, 6]);

        let mut bytes = vec![0; 3];
        memory_0.read(WASM_PAGE_SIZE * BUCKET_SIZE_IN_PAGES - 1, &mut bytes);
        assert_eq!(bytes, vec![1, 2, 3]);

        let mut bytes = vec![0; 3];
        memory_1.read(0, &mut bytes);
        assert_eq!(bytes, vec![4, 5, 6]);
    }

    #[test]
    #[should_panic]
    fn reading_out_of_bounds_should_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        // One byte past memory_0's single page.
        let mut bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
        memory_0.read(0, &mut bytes);
    }

    #[test]
    #[should_panic]
    fn writing_out_of_bounds_should_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.grow(1), 0);
        assert_eq!(memory_1.grow(1), 0);

        // One byte past memory_0's single page.
        let bytes = vec![0; WASM_PAGE_SIZE as usize + 1];
        memory_0.write(0, &bytes);
    }

    #[test]
    fn reading_zero_bytes_from_empty_memory_should_not_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.size(), 0);
        let mut bytes = vec![];
        memory_0.read(0, &mut bytes);
    }

    #[test]
    fn writing_zero_bytes_to_empty_memory_should_not_panic() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init(mem);
        let memory_0 = mem_mgr.get(MemoryId(0));

        assert_eq!(memory_0.size(), 0);
        memory_0.write(0, &[]);
    }

    #[test]
    fn write_and_read_random_bytes() {
        let mem = make_memory();
        // Bucket size of 1 page maximizes bucket-boundary crossings.
        let mem_mgr = MemoryManager::init_with_bucket_size(mem, 1);
        let memories: Vec<_> = (0..MAX_NUM_MEMORIES)
            .map(|id| mem_mgr.get(MemoryId(id)))
            .collect();

        proptest!(|(
            num_memories in 0..255usize,
            data in proptest::collection::vec(0..u8::MAX, 0..2*WASM_PAGE_SIZE as usize),
            offset in 0..10*WASM_PAGE_SIZE
        )| {
            for memory in memories.iter().take(num_memories) {
                // Write (growing the memory as needed), then read back via
                // all three read paths.
                write(memory, offset, &data);

                {
                    let mut bytes = vec![0; data.len()];
                    memory.read(offset, &mut bytes);
                    assert_eq!(bytes, data);
                }

                {
                    let mut bytes = vec![];
                    read_to_vec(memory, offset.into(), &mut bytes, data.len());
                    assert_eq!(bytes, data);
                }

                {
                    let mut bytes = vec![0; data.len()];
                    unsafe {
                        memory.read_unsafe(offset, bytes.as_mut_ptr(), data.len());
                    }
                    assert_eq!(bytes, data);
                }
            }
        });
    }

    #[test]
    fn init_with_non_default_bucket_size() {
        // Deliberately not the default size.
        let bucket_size = 256;
        assert_ne!(bucket_size, BUCKET_SIZE_IN_PAGES as u16);

        let mem = make_memory();
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), bucket_size);

        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));
        memory_0.grow(300);
        memory_1.grow(100);
        memory_0.write(0, &[1; 1000]);
        memory_1.write(0, &[2; 1000]);

        // Reload with plain `init`; the persisted bucket size must win.
        let mem_mgr = MemoryManager::init(mem);

        assert_eq!(mem_mgr.inner.borrow().bucket_size_in_pages, bucket_size);

        let memory_0 = mem_mgr.get(MemoryId(0));
        let memory_1 = mem_mgr.get(MemoryId(1));

        assert_eq!(memory_0.size(), 300);
        assert_eq!(memory_1.size(), 100);

        let mut buf = vec![0; 1000];
        memory_0.read(0, &mut buf);
        assert_eq!(buf, vec![1; 1000]);

        memory_1.read(0, &mut buf);
        assert_eq!(buf, vec![2; 1000]);
    }

    // Golden test: the exact byte layout produced by interleaved writes/reads
    // must stay stable across releases.
    #[test]
    fn stability() {
        let mem = make_memory();
        let mem_mgr = MemoryManager::init_with_bucket_size(mem.clone(), 1);
        let data: Vec<_> = (0u8..=255u8)
            .cycle()
            .take((WASM_PAGE_SIZE * 2 + 901) as usize)
            .collect();

        const MEMORIES: u8 = 3;
        let mut memory_id = 0;
        let mut next_memory = || {
            memory_id += 1;
            memory_id %= MEMORIES;
            mem_mgr.get(MemoryId(memory_id))
        };

        // Interleave writes across memories at awkward offsets.
        for _ in 0..MEMORIES * 5 {
            safe_write(&next_memory(), 0, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE / 2, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE - 1, data.as_slice()).unwrap();
            safe_write(&next_memory(), WASM_PAGE_SIZE + 1, data.as_slice()).unwrap();
            safe_write(
                &next_memory(),
                2 * WASM_PAGE_SIZE + WASM_PAGE_SIZE / 2,
                data.as_slice(),
            )
            .unwrap();
        }

        let expected_write = include_bytes!("memory_manager/stability_write.golden");
        assert!(expected_write.as_slice() == mem.borrow().as_slice());

        let mut read = vec![0; 4 * WASM_PAGE_SIZE as usize];

        // Read back at awkward offsets as well.
        for _ in 0..MEMORIES * 4 {
            next_memory().read(0, &mut read[0..WASM_PAGE_SIZE as usize / 2]);
            next_memory().read(
                WASM_PAGE_SIZE / 2 - 150,
                &mut read[WASM_PAGE_SIZE as usize / 2..WASM_PAGE_SIZE as usize],
            );
            next_memory().read(
                WASM_PAGE_SIZE - 473,
                &mut read[WASM_PAGE_SIZE as usize..WASM_PAGE_SIZE as usize * 2],
            );
            next_memory().read(WASM_PAGE_SIZE * 2, &mut read[WASM_PAGE_SIZE as usize * 2..]);
        }

        let expected_read = include_bytes!("memory_manager/stability_read.golden");
        assert!(expected_read.as_slice() == read.as_slice());
    }

    #[test]
    fn bucket_cache() {
        let bucket_cache = BucketCache::new();

        // Fresh cache always misses.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(1u64)
            }),
            None
        );

        bucket_cache.store(
            VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(335u64),
            },
            Address::from(983),
        );

        // Hit inside the cached segment: offset is preserved.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(1),
                length: Bytes::from(2u64)
            }),
            Some(Address::from(984))
        );

        // Hit at the very last byte of the cached segment.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(334),
                length: Bytes::from(1u64)
            }),
            Some(Address::from(1317))
        );

        // Exact match hits.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(0),
                length: Bytes::from(335u64),
            }),
            Some(Address::from(983))
        );

        // One byte past the cached segment misses.
        assert_eq!(
            bucket_cache.get(VirtualSegment {
                address: Address::from(1),
                length: Bytes::from(335u64)
            }),
            None
        );
    }
}