use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::Result;
use crate::{
    Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError,
    GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile,
};
use std::sync::atomic::Ordering;
use std::sync::Arc;

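/// Represents a continuous region of guest physical memory.
///
/// Implementations describe where the region sits in the guest address
/// space (`start_addr`, `len`) and, optionally, how it is backed on the
/// host (`get_host_address`, `file_offset`, `get_slice`).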
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = GuestMemoryError> {
    /// Type used to track dirty pages within this region.
    type B: Bitmap;

    /// Returns the size of the region, in bytes.
    fn len(&self) -> GuestUsize;

    /// Returns the minimum (inclusive) guest address managed by the region.
    fn start_addr(&self) -> GuestAddress;

    /// Returns the maximum (inclusive) guest address managed by the region.
    fn last_addr(&self) -> GuestAddress {
        // Assumes a non-empty region that does not wrap past the end of the
        // guest address space, so `len() - 1` cannot underflow.
        self.start_addr().unchecked_add(self.len() - 1)
    }

    /// Borrows the associated `Bitmap` that tracks dirty pages in this region.
    fn bitmap(&self) -> BS<'_, Self::B>;

    /// Returns `Some(addr)` if the region-relative address is in range,
    /// `None` otherwise.
    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
        if self.address_in_range(addr) {
            Some(addr)
        } else {
            None
        }
    }

    /// Returns `true` if the given region-relative address is in range.
    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
        addr.raw_value() < self.len()
    }

    /// Returns `base + offset` if the result is within the region, `None`
    /// on arithmetic overflow or when the result falls out of range.
    fn checked_offset(
        &self,
        base: MemoryRegionAddress,
        offset: usize,
    ) -> Option<MemoryRegionAddress> {
        base.checked_add(offset as u64)
            .and_then(|addr| self.check_address(addr))
    }

    /// Converts an absolute guest address into a region-relative address,
    /// returning `None` if it falls outside this region.
    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
        addr.checked_offset_from(self.start_addr())
            .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
    }

    /// Returns the host virtual address corresponding to the region address.
    ///
    /// The default implementation reports that no host mapping is available;
    /// regions backed by host memory should override it.
    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Returns information about the file backing this region, if any.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns a `VolatileSlice` of `count` bytes starting at `offset`.
    ///
    /// The default implementation reports that no host mapping is available.
    #[allow(unused_variables)]
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> Result<VolatileSlice<BS<Self::B>>> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Returns a `VolatileSlice` covering the entire region.
    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
    }

    /// Returns `Some(true)` if the region is backed by hugetlbfs,
    /// `Some(false)` if it is known not to be, and `None` (the default)
    /// when the backing is unknown.
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}
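
// Example (sketch): region-relative address translation for a hypothetical
// `region` implementing `GuestMemoryRegion`, starting at 0x1000 with a
// length of 0x400 bytes:
//
//     assert_eq!(
//         region.to_region_addr(GuestAddress(0x1200)),
//         Some(MemoryRegionAddress(0x200))
//     );
//     // Offsets that run past the end of the region are rejected:
//     assert_eq!(region.checked_offset(MemoryRegionAddress(0x200), 0x200), None);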

/// Errors that can occur when building a [`GuestRegionCollection`].
#[derive(Debug, thiserror::Error)]
pub enum GuestRegionCollectionError {
    #[error("No memory region found")]
    NoMemoryRegion,
    #[error("Some of the memory regions intersect with each other")]
    MemoryRegionOverlap,
    #[error("The provided memory regions haven't been sorted")]
    UnsortedMemoryRegions,
}

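/// A collection of [`GuestMemoryRegion`] objects, sorted by start address
/// and guaranteed not to overlap.
///
/// Regions are stored behind [`Arc`], so cloning the collection is cheap,
/// and `insert_region`/`remove_region` return a new collection instead of
/// mutating in place.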
#[derive(Debug)]
pub struct GuestRegionCollection<R> {
    regions: Vec<Arc<R>>,
}

// Manual `Default` and `Clone` impls avoid the `R: Default` / `R: Clone`
// bounds that `#[derive]` would add; cloning only copies `Arc` handles.
impl<R> Default for GuestRegionCollection<R> {
    fn default() -> Self {
        Self {
            regions: Vec::new(),
        }
    }
}

impl<R> Clone for GuestRegionCollection<R> {
    fn clone(&self) -> Self {
        GuestRegionCollection {
            regions: self.regions.iter().map(Arc::clone).collect(),
        }
    }
}

impl<R: GuestMemoryRegion> GuestRegionCollection<R> {
    /// Creates an empty `GuestRegionCollection`.
    pub fn new() -> Self {
        Self::default()
    }

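    /// Creates a new collection from a vector of regions, wrapping each
    /// region in an [`Arc`].
    ///
    /// The regions must be sorted by start address and must not overlap;
    /// otherwise an error is returned. A minimal sketch (`MockRegion`
    /// stands in for any `GuestMemoryRegion` implementation; one is
    /// defined in the tests below):
    ///
    /// ```ignore
    /// let collection = GuestRegionCollection::from_regions(vec![
    ///     MockRegion { start: GuestAddress(0x0), len: 0x400 },
    ///     MockRegion { start: GuestAddress(0x1000), len: 0x400 },
    /// ])?;
    /// assert_eq!(collection.num_regions(), 2);
    /// ```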
    pub fn from_regions(
        regions: Vec<R>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        Self::from_arc_regions(regions.into_iter().map(Arc::new).collect())
    }

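    /// Creates a new collection from a vector of regions that have already
    /// been wrapped in [`Arc`], e.g. because they are shared with other
    /// structures.
    ///
    /// Returns an error if the vector is empty, unsorted, or contains
    /// overlapping regions.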
    pub fn from_arc_regions(
        regions: Vec<Arc<R>>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        if regions.is_empty() {
            return Err(GuestRegionCollectionError::NoMemoryRegion);
        }

        // Validate ordering and non-overlap over each adjacent pair.
        for window in regions.windows(2) {
            let prev = &window[0];
            let next = &window[1];

            if prev.start_addr() > next.start_addr() {
                return Err(GuestRegionCollectionError::UnsortedMemoryRegions);
            }

            if prev.last_addr() >= next.start_addr() {
                return Err(GuestRegionCollectionError::MemoryRegionOverlap);
            }
        }

        Ok(Self { regions })
    }

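    /// Returns a new collection with `region` added, keeping the regions
    /// sorted by start address; `self` is left untouched. Fails with
    /// `MemoryRegionOverlap` if the new region intersects an existing one.
    /// A sketch continuing the `from_regions` example above:
    ///
    /// ```ignore
    /// let bigger = collection.insert_region(Arc::new(MockRegion {
    ///     start: GuestAddress(0x2000),
    ///     len: 0x400,
    /// }))?;
    /// assert_eq!(bigger.num_regions(), 3);
    /// ```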
    pub fn insert_region(
        &self,
        region: Arc<R>,
    ) -> std::result::Result<GuestRegionCollection<R>, GuestRegionCollectionError> {
        let mut regions = self.regions.clone();
        regions.push(region);
        regions.sort_by_key(|x| x.start_addr());

        // Re-validate the sorted set; this rejects overlaps with existing regions.
        Self::from_arc_regions(regions)
    }

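    /// Returns a new collection with one region removed, along with the
    /// removed region; `self` is left untouched. The lookup succeeds only
    /// if `base` is the exact start address of a region and `size` matches
    /// its length; otherwise `NoMemoryRegion` is returned. Continuing the
    /// sketch:
    ///
    /// ```ignore
    /// let (smaller, removed) = bigger.remove_region(GuestAddress(0x2000), 0x400)?;
    /// assert_eq!(removed.start_addr(), GuestAddress(0x2000));
    /// assert_eq!(smaller.num_regions(), 2);
    /// ```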
    pub fn remove_region(
        &self,
        base: GuestAddress,
        size: GuestUsize,
    ) -> std::result::Result<(GuestRegionCollection<R>, Arc<R>), GuestRegionCollectionError> {
        if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
            if self.regions[region_index].len() == size {
                let mut regions = self.regions.clone();
                let region = regions.remove(region_index);
                return Ok((Self { regions }, region));
            }
        }

        Err(GuestRegionCollectionError::NoMemoryRegion)
    }
}

impl<R: GuestMemoryRegion> GuestMemory for GuestRegionCollection<R> {
    type R = R;

    fn num_regions(&self) -> usize {
        self.regions.len()
    }

    fn find_region(&self, addr: GuestAddress) -> Option<&R> {
        // Binary search on the sorted start addresses: an exact hit means
        // `addr` is the first byte of region `x`; on a miss, `addr` may
        // still fall inside the preceding region.
        let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
            Ok(x) => Some(x),
            Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
            _ => None,
        };
        index.map(|x| self.regions[x].as_ref())
    }

    fn iter(&self) -> impl Iterator<Item = &Self::R> {
        self.regions.iter().map(AsRef::as_ref)
    }
}

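/// Marker trait: implementing it for a region type opts into the blanket
/// `Bytes<MemoryRegionAddress>` implementation below, which routes every
/// access through [`GuestMemoryRegion::as_volatile_slice`].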
pub trait GuestMemoryRegionBytes: GuestMemoryRegion {}

impl<R: GuestMemoryRegionBytes> Bytes<MemoryRegionAddress> for R {
    type E = GuestMemoryError;

    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write(buf, maddr)
            .map_err(Into::into)
    }

    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read(buf, maddr)
            .map_err(Into::into)
    }

    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_volatile_from(addr.0 as usize, src, count)
            .map_err(Into::into)
    }

    fn read_exact_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_exact_volatile_from(addr.0 as usize, src, count)
            .map_err(Into::into)
    }

    fn write_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_volatile_to(addr.0 as usize, dst, count)
            .map_err(Into::into)
    }

    fn write_all_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_all_volatile_to(addr.0 as usize, dst, count)
            .map_err(Into::into)
    }

    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> Result<()> {
        self.as_volatile_slice().and_then(|s| {
            s.store(val, addr.raw_value() as usize, order)
                .map_err(Into::into)
        })
    }

    fn load<T: AtomicAccess>(&self, addr: MemoryRegionAddress, order: Ordering) -> Result<T> {
        self.as_volatile_slice()
            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
    }
}
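
// Example (sketch): `MockRegion` from the tests below implements
// `GuestMemoryRegionBytes` but keeps the default `get_slice`, so every
// `Bytes` access fails with `GuestMemoryError::HostAddressNotAvailable`:
//
//     let r = MockRegion { start: GuestAddress(0), len: 0x400 };
//     assert!(r.write(&[1, 2, 3], MemoryRegionAddress(0)).is_err());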

#[cfg(test)]
pub(crate) mod tests {
    use crate::region::{GuestMemoryRegionBytes, GuestRegionCollectionError};
    use crate::{
        Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize,
    };
    use matches::assert_matches;
    use std::sync::Arc;

    #[derive(Debug, PartialEq, Eq)]
    pub(crate) struct MockRegion {
        pub(crate) start: GuestAddress,
        pub(crate) len: GuestUsize,
    }

    impl GuestMemoryRegion for MockRegion {
        // `()` disables dirty-page tracking; `BS<'_, ()>` is just `()`.
        type B = ();

        fn len(&self) -> GuestUsize {
            self.len
        }

        fn start_addr(&self) -> GuestAddress {
            self.start
        }

        fn bitmap(&self) {}
    }

    impl GuestMemoryRegionBytes for MockRegion {}

    pub(crate) type Collection = GuestRegionCollection<MockRegion>;

    fn check_guest_memory_mmap(
        maybe_guest_mem: Result<Collection, GuestRegionCollectionError>,
        expected_regions_summary: &[(GuestAddress, u64)],
    ) {
        assert!(maybe_guest_mem.is_ok());

        let guest_mem = maybe_guest_mem.unwrap();
        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
        let maybe_last_mem_reg = expected_regions_summary.last();
        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
            // `last_addr` is inclusive, hence the `- 1`; the guard avoids
            // underflow when the end address wraps to 0.
            let mut last_addr = region_addr.unchecked_add(*region_size);
            if last_addr.raw_value() != 0 {
                last_addr = last_addr.unchecked_sub(1);
            }
            assert_eq!(guest_mem.last_addr(), last_addr);
        }
        for ((region_addr, region_size), mmap) in
            expected_regions_summary.iter().zip(guest_mem.iter())
        {
            assert_eq!(region_addr, &mmap.start);
            assert_eq!(region_size, &mmap.len);

            assert!(guest_mem.find_region(*region_addr).is_some());
        }
    }

    pub(crate) fn new_guest_memory_collection_from_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| MockRegion { start, len })
                .collect(),
        )
    }

    fn new_guest_memory_collection_from_arc_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_arc_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| Arc::new(MockRegion { start, len }))
                .collect(),
        )
    }

    #[test]
    fn test_no_memory_region() {
        let regions_summary = [];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
    }

    #[test]
    fn test_overlapping_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
    }

    #[test]
    fn test_unsorted_memory_regions() {
        let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
    }

    #[test]
    fn test_valid_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)];

        let guest_mem = Collection::new();
        assert_eq!(guest_mem.num_regions(), 0);

        check_guest_memory_mmap(
            new_guest_memory_collection_from_regions(&regions_summary),
            &regions_summary,
        );

        check_guest_memory_mmap(
            new_guest_memory_collection_from_arc_regions(&regions_summary),
            &regions_summary,
        );
    }

    #[test]
    fn test_mmap_insert_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let mut gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);

        let new_regions = [
            (GuestAddress(0x8000), 0x1000),
            (GuestAddress(0x4000), 0x1000),
            (GuestAddress(0xc000), 0x1000),
        ];

        for (start, len) in new_regions {
            gm = gm
                .insert_region(Arc::new(MockRegion { start, len }))
                .unwrap();
        }

        // Inserting a region that overlaps an existing one must fail.
        gm.insert_region(Arc::new(MockRegion {
            start: GuestAddress(0xc000),
            len: 0x1000,
        }))
        .unwrap_err();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 5);

        let regions = gm.iter().collect::<Vec<_>>();

        assert_eq!(regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(regions[1].start_addr(), GuestAddress(0x4000));
        assert_eq!(regions[2].start_addr(), GuestAddress(0x8000));
        assert_eq!(regions[3].start_addr(), GuestAddress(0xc000));
        assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_mmap_remove_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);

        // Removal fails unless both the base address and the size match exactly.
        gm.remove_region(GuestAddress(0), 128).unwrap_err();
        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 1);

        assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000));
        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_iter() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gm = new_guest_memory_collection_from_regions(&regions).unwrap();

        for region in gm.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in gm.iter() {
            iterated_regions.push((region.start_addr(), region.len()));
        }
        assert_eq!(regions, iterated_regions);

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        let mmap_regions = gm.iter().collect::<Vec<_>>();

        assert_eq!(mmap_regions[0].start, regions[0].0);
        assert_eq!(mmap_regions[1].start, regions[1].0);
    }

    #[test]
    fn test_address_in_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();

        assert!(guest_mem.address_in_range(GuestAddress(0x200)));
        assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
        assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
        assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
    }

    #[test]
    fn test_check_address() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();

        assert_eq!(
            guest_mem.check_address(GuestAddress(0x200)),
            Some(GuestAddress(0x200))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
        assert_eq!(
            guest_mem.check_address(GuestAddress(0xa00)),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
    }

    #[test]
    fn test_checked_offset() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);
        let guest_mem = new_guest_memory_collection_from_regions(&[
            (start_addr1, 0x400),
            (start_addr2, 0x400),
            (start_addr3, 0x400),
        ])
        .unwrap();

        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x200),
            Some(GuestAddress(0x200))
        );
        // Note: `GuestMemory::checked_offset` only validates the final
        // address, so it may jump across the gap between two regions.
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0xa00),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr2, 0x7ff),
            Some(GuestAddress(0xfff))
        );
        assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
        assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);

        // An offset landing exactly one past the end of a region is rejected.
        assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x400 - 1),
            Some(GuestAddress(0x400 - 1))
        );
    }
}
776}