vm_memory/region.rs

//! Module containing abstractions for dealing with contiguous regions of guest memory

use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::Result;
use crate::{
    Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError,
    GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile,
};
use std::sync::atomic::Ordering;
use std::sync::Arc;

/// Represents a contiguous region of guest physical memory.
///
/// Note that the [`Bytes`] super trait requirement can be satisfied by implementing
/// [`GuestMemoryRegionBytes`], which provides a default implementation of `Bytes`
/// for memory regions that are backed by physical RAM:
///
/// ```
/// use vm_memory::bitmap::BS;
/// use vm_memory::{GuestAddress, GuestMemoryRegion, GuestMemoryRegionBytes, GuestUsize};
///
/// struct MyRegion;
///
/// impl GuestMemoryRegion for MyRegion {
///     type B = ();
///     fn len(&self) -> GuestUsize {
///         todo!()
///     }
///     fn start_addr(&self) -> GuestAddress {
///         todo!()
///     }
///     fn bitmap(&self) {
///         todo!()
///     }
/// }
///
/// impl GuestMemoryRegionBytes for MyRegion {}
/// ```
#[allow(clippy::len_without_is_empty)]
pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = GuestMemoryError> {
    /// Type used for dirty memory tracking.
    type B: Bitmap;

    /// Returns the size of the region.
    fn len(&self) -> GuestUsize;

    /// Returns the minimum (inclusive) address managed by the region.
    fn start_addr(&self) -> GuestAddress;

    /// Returns the maximum (inclusive) address managed by the region.
    fn last_addr(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.start_addr().unchecked_add(self.len() - 1)
    }

    /// Borrow the associated `Bitmap` object.
    fn bitmap(&self) -> BS<'_, Self::B>;

    /// Returns the given address if it is within this region.
    fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
        if self.address_in_range(addr) {
            Some(addr)
        } else {
            None
        }
    }

    /// Returns `true` if the given address is within this region.
    fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
        addr.raw_value() < self.len()
    }

    /// Returns the address plus the offset if it is in this region.
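    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch of offsetting a region-relative address; the concrete region type,
    /// addresses and sizes below are illustrative, not prescriptive:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryRegion, GuestRegionMmap, MemoryRegionAddress};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x1000), 0x400, None)
    ///     .expect("Could not create guest memory region");
    ///
    /// // Base + offset still falls inside the 0x400-byte region.
    /// assert_eq!(
    ///     region.checked_offset(MemoryRegionAddress(0x100), 0x200),
    ///     Some(MemoryRegionAddress(0x300))
    /// );
    /// // Base + offset lands past the end of the region.
    /// assert_eq!(region.checked_offset(MemoryRegionAddress(0x100), 0x400), None);
    /// # }
    /// ```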
    fn checked_offset(
        &self,
        base: MemoryRegionAddress,
        offset: usize,
    ) -> Option<MemoryRegionAddress> {
        base.checked_add(offset as u64)
            .and_then(|addr| self.check_address(addr))
    }

    /// Tries to convert an absolute address to a relative address within this region.
    ///
    /// Returns `None` if `addr` is out of the bounds of this region.
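    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch; the region type, start address and size below are illustrative:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemoryRegion, GuestRegionMmap, MemoryRegionAddress};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x1000), 0x400, None)
    ///     .expect("Could not create guest memory region");
    ///
    /// // An absolute address inside the region maps to an offset from its start.
    /// assert_eq!(
    ///     region.to_region_addr(GuestAddress(0x1200)),
    ///     Some(MemoryRegionAddress(0x200))
    /// );
    /// // Addresses outside the region yield `None`.
    /// assert_eq!(region.to_region_addr(GuestAddress(0x2000)), None);
    /// # }
    /// ```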
    fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
        addr.checked_offset_from(self.start_addr())
            .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
    }

    /// Returns the host virtual address corresponding to the region address.
    ///
    /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
    /// have the capability to mmap guest address ranges into host virtual address space for
    /// direct access, so the corresponding host virtual address may be passed to other subsystems.
    ///
    /// # Note
    /// The underlying guest memory is not protected from memory aliasing, which breaks the
    /// Rust memory safety model. It's the caller's responsibility to ensure that there are no
    /// concurrent accesses to the underlying guest memory.
    fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Returns information regarding the file and offset backing this memory region.
    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
    /// `offset`.
    #[allow(unused_variables)]
    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> Result<VolatileSlice<BS<Self::B>>> {
        Err(GuestMemoryError::HostAddressNotAvailable)
    }

    /// Gets a slice of memory for the entire region that supports volatile access.
    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
    /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
    /// #
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
    ///     .expect("Could not create guest memory");
    /// let slice = region
    ///     .as_volatile_slice()
    ///     .expect("Could not get volatile slice");
    ///
    /// let v = 42u32;
    /// let r = slice
    ///     .get_ref::<u32>(0x200)
    ///     .expect("Could not get reference");
    /// r.store(v);
    /// assert_eq!(r.load(), v);
    /// # }
    /// ```
    fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
        self.get_slice(MemoryRegionAddress(0), self.len() as usize)
    }

    /// Returns whether the region is backed by `hugetlbfs`.
    ///
    /// Returns `Some(true)` if the region is backed by hugetlbfs; `None` means that no
    /// information is available.
    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// #   use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
    /// let addr = GuestAddress(0x1000);
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
    /// let r = mem.find_region(addr).unwrap();
    /// assert_eq!(r.is_hugetlbfs(), None);
    /// # }
    /// ```
    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}

/// Errors that can occur when dealing with [`GuestRegionCollection`]s.
#[derive(Debug, thiserror::Error)]
pub enum GuestRegionCollectionError {
    /// No memory region found.
    #[error("No memory region found")]
    NoMemoryRegion,
    /// Some of the memory regions intersect with each other.
    #[error("Some of the memory regions intersect with each other")]
    MemoryRegionOverlap,
    /// The provided memory regions haven't been sorted.
    #[error("The provided memory regions haven't been sorted")]
    UnsortedMemoryRegions,
}

/// [`GuestMemory`](trait.GuestMemory.html) implementation based on a homogeneous collection
/// of [`GuestMemoryRegion`] implementations.
///
/// Represents a sorted set of non-overlapping physical guest memory regions.
#[derive(Debug)]
pub struct GuestRegionCollection<R> {
    regions: Vec<Arc<R>>,
}

impl<R> Default for GuestRegionCollection<R> {
    fn default() -> Self {
        Self {
            regions: Vec::new(),
        }
    }
}

impl<R> Clone for GuestRegionCollection<R> {
    fn clone(&self) -> Self {
        GuestRegionCollection {
            regions: self.regions.iter().map(Arc::clone).collect(),
        }
    }
}

impl<R: GuestMemoryRegion> GuestRegionCollection<R> {
    /// Creates an empty `GuestRegionCollection` instance.
    pub fn new() -> Self {
        Self::default()
    }

    /// Creates a new [`GuestRegionCollection`] from a vector of regions.
    ///
    /// # Arguments
    ///
    /// * `regions` - The vector of regions.
    ///   The regions shouldn't overlap, and they should be sorted
    ///   by the starting address.
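    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch that builds a two-region collection; the `GuestRegionMmap` region
    /// type, addresses and sizes below are illustrative, not prescriptive:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestRegionCollection, GuestRegionMmap};
    /// let regions = vec![
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x1000, None)
    ///         .expect("Could not create region"),
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x10_0000), 0x1000, None)
    ///         .expect("Could not create region"),
    /// ];
    /// let collection = GuestRegionCollection::from_regions(regions)
    ///     .expect("Could not create collection");
    /// assert_eq!(collection.num_regions(), 2);
    /// # }
    /// ```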
    pub fn from_regions(
        mut regions: Vec<R>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        Self::from_arc_regions(regions.drain(..).map(Arc::new).collect())
    }

    /// Creates a new [`GuestRegionCollection`] from a vector of `Arc` regions.
    ///
    /// Similar to the constructor `from_regions()`, this returns a
    /// [`GuestRegionCollection`]. It exists to provide a way for consumers of this API to
    /// create a new [`GuestRegionCollection`] based on regions coming from an existing
    /// [`GuestRegionCollection`] instance.
    ///
    /// # Arguments
    ///
    /// * `regions` - The vector of `Arc` regions.
    ///   The regions shouldn't overlap and they should be sorted
    ///   by the starting address.
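    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch that shares one `Arc`-wrapped region between two collections; the
    /// region type, addresses and sizes below are illustrative:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::Arc;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestRegionCollection, GuestRegionMmap};
    /// let region = Arc::new(
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x1000, None)
    ///         .expect("Could not create region"),
    /// );
    /// // The same region can back more than one collection without being copied.
    /// let first = GuestRegionCollection::from_arc_regions(vec![region.clone()])
    ///     .expect("Could not create collection");
    /// let second = GuestRegionCollection::from_arc_regions(vec![region])
    ///     .expect("Could not create collection");
    /// assert_eq!(first.num_regions(), 1);
    /// assert_eq!(second.num_regions(), 1);
    /// # }
    /// ```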
    pub fn from_arc_regions(
        regions: Vec<Arc<R>>,
    ) -> std::result::Result<Self, GuestRegionCollectionError> {
        if regions.is_empty() {
            return Err(GuestRegionCollectionError::NoMemoryRegion);
        }

        for window in regions.windows(2) {
            let prev = &window[0];
            let next = &window[1];

            if prev.start_addr() > next.start_addr() {
                return Err(GuestRegionCollectionError::UnsortedMemoryRegions);
            }

            if prev.last_addr() >= next.start_addr() {
                return Err(GuestRegionCollectionError::MemoryRegionOverlap);
            }
        }

        Ok(Self { regions })
    }

    /// Insert a region into the [`GuestRegionCollection`] object and return a new
    /// `GuestRegionCollection`.
    ///
    /// # Arguments
    /// * `region`: the memory region to insert into the guest memory object.
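    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch; the region type, addresses and sizes below are illustrative:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::Arc;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestRegionCollection, GuestRegionMmap};
    /// let collection = GuestRegionCollection::from_regions(vec![
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x1000, None)
    ///         .expect("Could not create region"),
    /// ])
    /// .expect("Could not create collection");
    ///
    /// // `insert_region` does not mutate `collection`; it returns a new collection.
    /// let new_region = Arc::new(
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x10_0000), 0x1000, None)
    ///         .expect("Could not create region"),
    /// );
    /// let larger = collection
    ///     .insert_region(new_region)
    ///     .expect("Could not insert region");
    /// assert_eq!(collection.num_regions(), 1);
    /// assert_eq!(larger.num_regions(), 2);
    /// # }
    /// ```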
    pub fn insert_region(
        &self,
        region: Arc<R>,
    ) -> std::result::Result<GuestRegionCollection<R>, GuestRegionCollectionError> {
        let mut regions = self.regions.clone();
        regions.push(region);
        regions.sort_by_key(|x| x.start_addr());

        Self::from_arc_regions(regions)
    }

    /// Remove a region from the [`GuestRegionCollection`] object and return a new `GuestRegionCollection`
    /// on success, together with the removed region.
    ///
    /// # Arguments
    /// * `base`: base address of the region to be removed
    /// * `size`: size of the region to be removed
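    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch; the region type, addresses and sizes below are illustrative. Note
    /// that `base` and `size` must exactly match an existing region:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestRegionMmap};
    /// let collection = GuestRegionCollection::from_regions(vec![
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x1000, None)
    ///         .expect("Could not create region"),
    ///     GuestRegionMmap::<()>::from_range(GuestAddress(0x10_0000), 0x1000, None)
    ///         .expect("Could not create region"),
    /// ])
    /// .expect("Could not create collection");
    ///
    /// // A partial match on the size is rejected.
    /// assert!(collection.remove_region(GuestAddress(0x10_0000), 0x400).is_err());
    ///
    /// let (smaller, removed) = collection
    ///     .remove_region(GuestAddress(0x10_0000), 0x1000)
    ///     .expect("Could not remove region");
    /// assert_eq!(smaller.num_regions(), 1);
    /// assert_eq!(removed.start_addr(), GuestAddress(0x10_0000));
    /// # }
    /// ```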
    pub fn remove_region(
        &self,
        base: GuestAddress,
        size: GuestUsize,
    ) -> std::result::Result<(GuestRegionCollection<R>, Arc<R>), GuestRegionCollectionError> {
        if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) {
            if self.regions.get(region_index).unwrap().len() == size {
                let mut regions = self.regions.clone();
                let region = regions.remove(region_index);
                return Ok((Self { regions }, region));
            }
        }

        Err(GuestRegionCollectionError::NoMemoryRegion)
    }
}

impl<R: GuestMemoryRegion> GuestMemory for GuestRegionCollection<R> {
    type R = R;

    fn num_regions(&self) -> usize {
        self.regions.len()
    }

    fn find_region(&self, addr: GuestAddress) -> Option<&R> {
        let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) {
            Ok(x) => Some(x),
            // Within the closest region with starting address < addr
            Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1),
            _ => None,
        };
        index.map(|x| self.regions[x].as_ref())
    }

    fn iter(&self) -> impl Iterator<Item = &Self::R> {
        self.regions.iter().map(AsRef::as_ref)
    }
}

/// A marker trait that, if implemented on a type `R`, makes available a default
/// implementation of `Bytes<MemoryRegionAddress>` for `R`, based on the assumption
/// that the entire `GuestMemoryRegion` is just traditional memory without any
/// special access requirements.
pub trait GuestMemoryRegionBytes: GuestMemoryRegion {}

impl<R: GuestMemoryRegionBytes> Bytes<MemoryRegionAddress> for R {
    type E = GuestMemoryError;

    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #    .expect("Could not create guest memory");
    /// #
    /// let res = gm
    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
    ///     .expect("Could not write to guest memory");
    /// assert_eq!(5, res);
    /// # }
    /// ```
    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write(buf, maddr)
            .map_err(Into::into)
    }

    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
    /// #
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
    /// #    .expect("Could not create guest memory");
    /// #
    /// let buf = &mut [0u8; 16];
    /// let res = gm
    ///     .read(buf, GuestAddress(0x1200))
    ///     .expect("Could not read from guest memory");
    /// assert_eq!(16, res);
    /// # }
    /// ```
    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read(buf, maddr)
            .map_err(Into::into)
    }

    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .write_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()?
            .read_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_volatile_from(addr.0 as usize, src, count)
            .map_err(Into::into)
    }

    fn read_exact_volatile_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: ReadVolatile,
    {
        self.as_volatile_slice()?
            .read_exact_volatile_from(addr.0 as usize, src, count)
            .map_err(Into::into)
    }

    fn write_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<usize>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_volatile_to(addr.0 as usize, dst, count)
            .map_err(Into::into)
    }

    fn write_all_volatile_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<()>
    where
        F: WriteVolatile,
    {
        self.as_volatile_slice()?
            .write_all_volatile_to(addr.0 as usize, dst, count)
            .map_err(Into::into)
    }

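    /// # Examples
    /// * Store and load a value at a region offset (a minimal sketch, assuming the
    ///   `backend-mmap` feature; the offset and value below are illustrative).
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use std::sync::atomic::Ordering;
    /// # use vm_memory::{Bytes, GuestAddress, GuestRegionMmap, MemoryRegionAddress};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x1000), 0x400, None)
    ///     .expect("Could not create guest memory region");
    ///
    /// region
    ///     .store(42u32, MemoryRegionAddress(0x20), Ordering::Relaxed)
    ///     .expect("Could not store value");
    /// let val: u32 = region
    ///     .load(MemoryRegionAddress(0x20), Ordering::Relaxed)
    ///     .expect("Could not load value");
    /// assert_eq!(val, 42);
    /// # }
    /// ```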
    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> Result<()> {
        self.as_volatile_slice().and_then(|s| {
            s.store(val, addr.raw_value() as usize, order)
                .map_err(Into::into)
        })
    }

    fn load<T: AtomicAccess>(&self, addr: MemoryRegionAddress, order: Ordering) -> Result<T> {
        self.as_volatile_slice()
            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use crate::region::{GuestMemoryRegionBytes, GuestRegionCollectionError};
    use crate::{
        Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize,
    };
    use matches::assert_matches;
    use std::sync::Arc;

    #[derive(Debug, PartialEq, Eq)]
    pub(crate) struct MockRegion {
        pub(crate) start: GuestAddress,
        pub(crate) len: GuestUsize,
    }

    impl GuestMemoryRegion for MockRegion {
        type B = ();

        fn len(&self) -> GuestUsize {
            self.len
        }

        fn start_addr(&self) -> GuestAddress {
            self.start
        }

        fn bitmap(&self) {}
    }

    impl GuestMemoryRegionBytes for MockRegion {}

    pub(crate) type Collection = GuestRegionCollection<MockRegion>;

    fn check_guest_memory_mmap(
        maybe_guest_mem: Result<Collection, GuestRegionCollectionError>,
        expected_regions_summary: &[(GuestAddress, u64)],
    ) {
        assert!(maybe_guest_mem.is_ok());

        let guest_mem = maybe_guest_mem.unwrap();
        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
        let maybe_last_mem_reg = expected_regions_summary.last();
        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
            let mut last_addr = region_addr.unchecked_add(*region_size);
            if last_addr.raw_value() != 0 {
                last_addr = last_addr.unchecked_sub(1);
            }
            assert_eq!(guest_mem.last_addr(), last_addr);
        }
        for ((region_addr, region_size), mmap) in
            expected_regions_summary.iter().zip(guest_mem.iter())
        {
            assert_eq!(region_addr, &mmap.start);
            assert_eq!(region_size, &mmap.len);

            assert!(guest_mem.find_region(*region_addr).is_some());
        }
    }

    pub(crate) fn new_guest_memory_collection_from_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| MockRegion { start, len })
                .collect(),
        )
    }

    fn new_guest_memory_collection_from_arc_regions(
        regions_summary: &[(GuestAddress, u64)],
    ) -> Result<Collection, GuestRegionCollectionError> {
        Collection::from_arc_regions(
            regions_summary
                .iter()
                .map(|&(start, len)| Arc::new(MockRegion { start, len }))
                .collect(),
        )
    }

    #[test]
    fn test_no_memory_region() {
        let regions_summary = [];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::NoMemoryRegion
        );
    }

    #[test]
    fn test_overlapping_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::MemoryRegionOverlap
        );
    }

    #[test]
    fn test_unsorted_memory_regions() {
        let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)];

        assert_matches!(
            new_guest_memory_collection_from_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
        assert_matches!(
            new_guest_memory_collection_from_arc_regions(&regions_summary).unwrap_err(),
            GuestRegionCollectionError::UnsortedMemoryRegions
        );
    }

    #[test]
    fn test_valid_memory_regions() {
        let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)];

        let guest_mem = Collection::new();
        assert_eq!(guest_mem.num_regions(), 0);

        check_guest_memory_mmap(
            new_guest_memory_collection_from_regions(&regions_summary),
            &regions_summary,
        );

        check_guest_memory_mmap(
            new_guest_memory_collection_from_arc_regions(&regions_summary),
            &regions_summary,
        );
    }

    #[test]
    fn test_mmap_insert_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let mut gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);

        let new_regions = [
            (GuestAddress(0x8000), 0x1000),
            (GuestAddress(0x4000), 0x1000),
            (GuestAddress(0xc000), 0x1000),
        ];

        for (start, len) in new_regions {
            gm = gm
                .insert_region(Arc::new(MockRegion { start, len }))
                .unwrap();
        }

        gm.insert_region(Arc::new(MockRegion {
            start: GuestAddress(0xc000),
            len: 0x1000,
        }))
        .unwrap_err();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 5);

        let regions = gm.iter().collect::<Vec<_>>();

        assert_eq!(regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(regions[1].start_addr(), GuestAddress(0x4000));
        assert_eq!(regions[2].start_addr(), GuestAddress(0x8000));
        assert_eq!(regions[3].start_addr(), GuestAddress(0xc000));
        assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_mmap_remove_region() {
        let region_size = 0x1000;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mem_orig = new_guest_memory_collection_from_regions(&regions).unwrap();
        let gm = mem_orig.clone();
        assert_eq!(mem_orig.num_regions(), 2);

        gm.remove_region(GuestAddress(0), 128).unwrap_err();
        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 1);

        assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000));
        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_iter() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gm = new_guest_memory_collection_from_regions(&regions).unwrap();

        for region in gm.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in gm.iter() {
            iterated_regions.push((region.start_addr(), region.len()));
        }
        assert_eq!(regions, iterated_regions);

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        let mmap_regions = gm.iter().collect::<Vec<_>>();

        assert_eq!(mmap_regions[0].start, regions[0].0);
        assert_eq!(mmap_regions[1].start, regions[1].0);
    }

    #[test]
    fn test_address_in_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();

        assert!(guest_mem.address_in_range(GuestAddress(0x200)));
        assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
        assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
        assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
    }

    #[test]
    fn test_check_address() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)])
                .unwrap();

        assert_eq!(
            guest_mem.check_address(GuestAddress(0x200)),
            Some(GuestAddress(0x200))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
        assert_eq!(
            guest_mem.check_address(GuestAddress(0xa00)),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
    }

    #[test]
    fn test_checked_offset() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);
        let guest_mem = new_guest_memory_collection_from_regions(&[
            (start_addr1, 0x400),
            (start_addr2, 0x400),
            (start_addr3, 0x400),
        ])
        .unwrap();

        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x200),
            Some(GuestAddress(0x200))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0xa00),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr2, 0x7ff),
            Some(GuestAddress(0xfff))
        );
        assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
        assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);

        assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x400 - 1),
            Some(GuestAddress(0x400 - 1))
        );
    }
}