//! mmap-backed implementations of guest memory: [`GuestRegionMmap`] maps a single guest
//! memory region into the host process, and [`GuestMemoryMmap`] collects such regions into
//! a [`GuestRegionCollection`]. Unix (optionally Xen) and Windows backends are provided.

use std::borrow::Borrow;
use std::ops::Deref;
use std::result;
use std::sync::Arc;

use crate::address::Address;
use crate::bitmap::{Bitmap, BS};
use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress};
use crate::region::{
    GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionCollectionError,
};
use crate::volatile_memory::{VolatileMemory, VolatileSlice};

pub use crate::bitmap::NewBitmap;

#[cfg(all(not(feature = "xen"), target_family = "unix"))]
mod unix;

#[cfg(all(feature = "xen", target_family = "unix"))]
pub(crate) mod xen;

#[cfg(target_family = "windows")]
mod windows;

#[cfg(all(not(feature = "xen"), target_family = "unix"))]
pub use unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder};

#[cfg(all(feature = "xen", target_family = "unix"))]
pub use xen::{Error as MmapRegionError, MmapRange, MmapRegion, MmapXenFlags};

#[cfg(target_family = "windows")]
pub use std::io::Error as MmapRegionError;
#[cfg(target_family = "windows")]
pub use windows::MmapRegion;
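/// A [`GuestMemoryRegion`] backed by an [`MmapRegion`]: a continuous chunk of guest physical
/// memory, starting at `guest_base`, that is mapped into the address space of the current
/// process by an anonymous or file-backed mapping.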
#[derive(Debug)]
pub struct GuestRegionMmap<B = ()> {
    mapping: Arc<MmapRegion<B>>,
    guest_base: GuestAddress,
}

impl<B> Deref for GuestRegionMmap<B> {
    type Target = MmapRegion<B>;

    fn deref(&self) -> &MmapRegion<B> {
        self.mapping.as_ref()
    }
}

impl<B: Bitmap> GuestRegionMmap<B> {
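    /// Creates a new memory-mapped memory region for the guest's physical memory, placing
    /// `mapping` at `guest_base` in guest physical address space.
    ///
    /// Returns `None` if `guest_base` plus the size of the mapping overflows a `u64`.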
    pub fn new(mapping: MmapRegion<B>, guest_base: GuestAddress) -> Option<Self> {
        Self::with_arc(Arc::new(mapping), guest_base)
    }
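    /// Creates a new memory-mapped memory region from a shared (`Arc`) mapping, placing it
    /// at `guest_base` in guest physical address space. This allows the same host mapping
    /// to be shared with other consumers.
    ///
    /// Returns `None` if `guest_base` plus the size of the mapping overflows a `u64`.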
    pub fn with_arc(mapping: Arc<MmapRegion<B>>, guest_base: GuestAddress) -> Option<Self> {
        guest_base
            .0
            .checked_add(mapping.size() as u64)
            .map(|_| Self {
                mapping,
                guest_base,
            })
    }
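    /// Returns a shared (`Arc`) handle to the underlying [`MmapRegion`].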
    pub fn get_mmap(&self) -> Arc<MmapRegion<B>> {
        Arc::clone(&self.mapping)
    }
}

#[cfg(not(feature = "xen"))]
impl<B: NewBitmap> GuestRegionMmap<B> {
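    /// Creates a region of `size` bytes at guest address `addr`, backed by the given file
    /// and offset, or by an anonymous mapping if `file` is `None`.
    ///
    /// # Example
    ///
    /// A minimal sketch creating an anonymous 4 KiB region; the address and size are
    /// illustrative, and the import paths assume the crate's usual top-level re-exports.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestRegionMmap};
    /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x1000), 0x1000, None)
    ///     .expect("failed to create guest region");
    /// ```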
    pub fn from_range(
        addr: GuestAddress,
        size: usize,
        file: Option<FileOffset>,
    ) -> result::Result<Self, FromRangesError> {
        let region = if let Some(ref f_off) = file {
            MmapRegion::from_file(f_off.clone(), size)?
        } else {
            MmapRegion::new(size)?
        };

        Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion)
    }
}

#[cfg(feature = "xen")]
impl<B: NewBitmap> GuestRegionMmap<B> {
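    /// Creates a region of `size` bytes at guest address `addr`, backed by the given file
    /// and offset (or an anonymous mapping if `file` is `None`), using a unix-style
    /// [`MmapRange`] for the Xen flavour of [`MmapRegion`].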
    pub fn from_range(
        addr: GuestAddress,
        size: usize,
        file: Option<FileOffset>,
    ) -> result::Result<Self, FromRangesError> {
        let range = MmapRange::new_unix(size, file, addr);

        let region = MmapRegion::from_range(range)?;
        Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion)
    }
}

impl<B: Bitmap> GuestMemoryRegion for GuestRegionMmap<B> {
    type B = B;

    fn len(&self) -> GuestUsize {
        self.mapping.size() as GuestUsize
    }

    fn start_addr(&self) -> GuestAddress {
        self.guest_base
    }

    fn bitmap(&self) -> BS<'_, Self::B> {
        self.mapping.bitmap().slice_at(0)
    }
    fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
        // `check_address` has already validated that `addr` falls within the mapping, so the
        // resulting pointer stays inside the region and `wrapping_offset` is fine here.
        self.check_address(addr)
            .ok_or(guest_memory::Error::InvalidBackendAddress)
            .map(|addr| {
                self.mapping
                    .as_ptr()
                    .wrapping_offset(addr.raw_value() as isize)
            })
    }
    fn file_offset(&self) -> Option<&FileOffset> {
        self.mapping.file_offset()
    }

    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> guest_memory::Result<VolatileSlice<'_, BS<'_, B>>> {
        let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?;
        Ok(slice)
    }

    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        self.mapping.is_hugetlbfs()
    }
}

impl<B: Bitmap> GuestMemoryRegionBytes for GuestRegionMmap<B> {}
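/// The guest's physical memory, modelled as a [`GuestRegionCollection`] of mmap-backed
/// [`GuestRegionMmap`] regions.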
pub type GuestMemoryMmap<B = ()> = GuestRegionCollection<GuestRegionMmap<B>>;
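/// Errors that can occur when creating a [`GuestMemoryMmap`] or its regions from address
/// ranges.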
#[derive(Debug, thiserror::Error)]
pub enum FromRangesError {
    #[error("Error constructing guest region collection: {0}")]
    Collection(#[from] GuestRegionCollectionError),
    #[error("Error setting up raw memory for guest region: {0}")]
    MmapRegion(#[from] MmapRegionError),
    #[error("Combination of guest address and region length invalid (would overflow)")]
    InvalidGuestRegion,
}

impl<B: NewBitmap> GuestMemoryMmap<B> {
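    /// Creates a [`GuestMemoryMmap`] from a slice of `(guest address, size)` pairs, backing
    /// each region with an anonymous mapping.
    ///
    /// # Example
    ///
    /// A minimal sketch building a two-region layout; addresses and sizes are illustrative,
    /// and the import paths assume the crate's usual top-level re-exports.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemoryMmap};
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[
    ///     (GuestAddress(0x0), 0x1000),
    ///     (GuestAddress(0x10000), 0x1000),
    /// ])
    /// .expect("failed to create guest memory");
    /// ```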
    pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result<Self, FromRangesError> {
        Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None)))
    }
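    /// Creates a [`GuestMemoryMmap`] from `(guest address, size, optional file offset)`
    /// triples. Regions that carry a [`FileOffset`] are file-backed; the rest use anonymous
    /// mappings.
    ///
    /// # Example
    ///
    /// A minimal sketch with a single anonymous region; values are illustrative, and the
    /// import paths assume the crate's usual top-level re-exports.
    ///
    /// ```
    /// # use vm_memory::{FileOffset, GuestAddress, GuestMemoryMmap};
    /// let ranges: Vec<(GuestAddress, usize, Option<FileOffset>)> =
    ///     vec![(GuestAddress(0x0), 0x1000, None)];
    /// let gm = GuestMemoryMmap::<()>::from_ranges_with_files(ranges)
    ///     .expect("failed to create guest memory");
    /// ```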
    pub fn from_ranges_with_files<A, T>(ranges: T) -> result::Result<Self, FromRangesError>
    where
        A: Borrow<(GuestAddress, usize, Option<FileOffset>)>,
        T: IntoIterator<Item = A>,
    {
        Self::from_regions(
            ranges
                .into_iter()
                .map(|x| {
                    GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone())
                })
                .collect::<Result<Vec<_>, _>>()?,
        )
        .map_err(Into::into)
    }
}

#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]
    extern crate vmm_sys_util;

    use super::*;

    #[cfg(feature = "backend-bitmap")]
    use crate::bitmap::AtomicBitmap;
    use crate::{Bytes, GuestMemoryBackend, GuestMemoryError};

    use std::io::Write;
    #[cfg(feature = "rawfd")]
    use std::{fs::File, path::Path};
    use vmm_sys_util::tempfile::TempFile;

    use matches::assert_matches;

    type GuestRegionMmap = super::GuestRegionMmap<()>;
    type GuestMemoryMmap = super::GuestRegionCollection<GuestRegionMmap>;
    type MmapRegion = super::MmapRegion<()>;

    #[test]
    fn basic_map() {
        let m = MmapRegion::new(1024).unwrap();
        assert_eq!(1024, m.size());
    }

    #[test]
    fn slice_addr() {
        let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap();
        let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
        let guard = s.ptr_guard();
        assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) });
    }

    #[test]
    #[cfg(not(miri))]
    fn mapped_file_read() {
        let mut f = TempFile::new().unwrap().into_file();
        let sample_buf = &[1, 2, 3, 4, 5];
        assert!(f.write_all(sample_buf).is_ok());

        let file = Some(FileOffset::new(f, 0));
        let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap();
        let buf = &mut [0u8; 16];
        assert_eq!(
            mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(),
            sample_buf.len()
        );
        assert_eq!(buf[0..sample_buf.len()], sample_buf[..]);
    }

    #[test]
    fn test_to_region_addr() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
            let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
            let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
            assert!(r0.as_ptr() == r1.as_ptr());
            assert_eq!(addr0, MemoryRegionAddress(0));
            assert_eq!(addr1, MemoryRegionAddress(0x200));
        }
    }

    #[test]
    fn test_get_host_address() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
            let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
            let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
            assert_eq!(
                ptr0,
                guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
            );
            assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
        }
    }

    #[test]
    fn test_check_range() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);
        let guest_mem = GuestMemoryMmap::from_ranges(&[
            (start_addr1, 0x400),
            (start_addr2, 0x400),
            (start_addr3, 0x400),
        ])
        .unwrap();

        assert!(guest_mem.check_range(start_addr1, 0x0));
        assert!(guest_mem.check_range(start_addr1, 0x200));
        assert!(guest_mem.check_range(start_addr1, 0x400));
        assert!(!guest_mem.check_range(start_addr1, 0xa00));
        assert!(guest_mem.check_range(start_addr2, 0x7ff));
        assert!(guest_mem.check_range(start_addr2, 0x800));
        assert!(!guest_mem.check_range(start_addr2, 0x801));
        assert!(!guest_mem.check_range(start_addr2, 0xc00));
        assert!(!guest_mem.check_range(start_addr1, usize::MAX));
    }

    #[test]
    fn test_deref() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let start_addr = GuestAddress(0x0);
        let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let guest_mem_list = [guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
            let slice = guest_mem
                .find_region(GuestAddress(0))
                .unwrap()
                .as_volatile_slice()
                .unwrap();

            let buf = &mut [0, 0, 0, 0, 0];
            assert_eq!(slice.read(buf, 0).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }

    #[test]
    fn test_read_u64() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let bad_addr = GuestAddress(0x2001); // outside of both regions
        let bad_addr2 = GuestAddress(0x1ffc); // only 4 bytes left until the end of guest memory
        let max_addr = GuestAddress(0x2000);
        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = [gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let val1: u64 = 0xaa55_aa55_aa55_aa55;
            let val2: u64 = 0x55aa_55aa_55aa_55aa;
            assert_matches!(
                gm.write_obj(val1, bad_addr).unwrap_err(),
                GuestMemoryError::InvalidGuestAddress(addr) if addr == bad_addr
            );
            assert_matches!(
                gm.write_obj(val1, bad_addr2).unwrap_err(),
                GuestMemoryError::PartialBuffer { expected, completed }
                    if expected == size_of::<u64>()
                        && completed == max_addr.checked_offset_from(bad_addr2).unwrap() as usize
            );

            gm.write_obj(val1, GuestAddress(0x500)).unwrap();
            gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
            let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
            let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
            assert_eq!(val1, num1);
            assert_eq!(val2, num2);
        }
    }
    #[test]
    fn write_and_read() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let mut start_addr = GuestAddress(0x1000);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = [gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);

            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
            assert_eq!(buf, sample_buf);

            start_addr = GuestAddress(0x13ff);
            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
            assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
            assert_eq!(buf[0], sample_buf[0]);
            start_addr = GuestAddress(0x1000);
        }
    }

    #[test]
    #[cfg(feature = "rawfd")]
    #[cfg(not(miri))]
    fn read_to_and_write_from_mem() {
        use std::mem;

        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            GuestAddress(0x1000),
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = [gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let addr = GuestAddress(0x1010);
            let mut file = if cfg!(target_family = "unix") {
                File::open(Path::new("/dev/zero")).unwrap()
            } else {
                File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
            };
            gm.write_obj(!0u32, addr).unwrap();
            gm.read_exact_volatile_from(addr, &mut file, mem::size_of::<u32>())
                .unwrap();
            let value: u32 = gm.read_obj(addr).unwrap();
            if cfg!(target_family = "unix") {
                assert_eq!(value, 0);
            } else {
                assert_eq!(value, 0x0090_5a4d);
            }

            let mut sink = vec![0; mem::size_of::<u32>()];
            gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::<u32>())
                .unwrap();
            if cfg!(target_family = "unix") {
                assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
            } else {
                assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
            };
        }
    }
    #[test]
    fn test_access_cross_boundary() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = [gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];
            assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }

    #[test]
    fn test_retrieve_fd_backing_memory_region() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
    }

    #[test]
    #[cfg(target_family = "unix")]
    fn test_retrieve_offset_from_fd_backing_memory_region() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x1400).unwrap();
        let offset = 0x1000;

        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, offset)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
        assert_eq!(region.file_offset().unwrap().start(), offset);
    }

    #[test]
    fn test_guest_memory_mmap_get_slice() {
        let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap();

        let slice_addr = MemoryRegionAddress(0x100);
        let slice_size = 0x200;
        let slice = region.get_slice(slice_addr, slice_size).unwrap();
        assert_eq!(slice.len(), slice_size);

        let slice_addr = MemoryRegionAddress(0x200);
        let slice_size = 0x0;
        let slice = region.get_slice(slice_addr, slice_size).unwrap();
        assert!(slice.is_empty());

        let slice_addr = MemoryRegionAddress(0x300);
        let slice_size = 0x200;
        assert!(region.get_slice(slice_addr, slice_size).is_err());
    }
    #[test]
    fn test_guest_memory_mmap_as_volatile_slice() {
        let region_size = 0x400;
        let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap();

        // A slice covering the whole region.
        let slice = region.as_volatile_slice().unwrap();
        assert_eq!(slice.len(), region_size);

        // Write a value through the slice and read it back.
        let v = 0x1234_5678u32;
        let r = slice.get_ref::<u32>(0x200).unwrap();
        r.store(v);
        assert_eq!(r.load(), v);
    }

    #[test]
    fn test_guest_memory_get_slice() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();

        // Slices that fit within a single region.
        let slice_size = 0x200;
        let slice = guest_mem
            .get_slice(GuestAddress(0x100), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        let slice_size = 0x400;
        let slice = guest_mem
            .get_slice(GuestAddress(0x800), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        // A zero-length slice is empty but valid.
        assert!(guest_mem
            .get_slice(GuestAddress(0x900), 0)
            .unwrap()
            .is_empty());

        // Slices that run past a region or fall outside guest memory fail.
        assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
    }

    #[test]
    fn test_guest_memory_get_slices() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);
        let guest_mem = GuestMemoryMmap::from_ranges(&[
            (start_addr1, 0x400),
            (start_addr2, 0x400),
            (start_addr3, 0x400),
        ])
        .unwrap();

        // Ranges contained in a single region yield exactly one slice.
        let slice_size = 0x200;
        let mut slices = guest_mem.get_slices(GuestAddress(0x100), slice_size);
        let slice = slices.next().unwrap().unwrap();
        assert!(slices.next().is_none());
        assert_eq!(slice.len(), slice_size);

        let slice_size = 0x400;
        let mut slices = guest_mem.get_slices(GuestAddress(0x800), slice_size);
        let slice = slices.next().unwrap().unwrap();
        assert!(slices.next().is_none());
        assert_eq!(slice.len(), slice_size);

        // A zero-length range yields no slices.
        assert!(guest_mem
            .get_slices(GuestAddress(0x900), 0)
            .next()
            .is_none());

        // Ranges that leave guest memory produce an error item.
        let mut slices = guest_mem.get_slices(GuestAddress(0), 0x500);
        assert_eq!(slices.next().unwrap().unwrap().len(), 0x400);
        assert!(slices.next().unwrap().is_err());
        assert!(slices.next().is_none());
        let mut slices = guest_mem.get_slices(GuestAddress(0x600), 0x100);
        assert!(slices.next().unwrap().is_err());
        assert!(slices.next().is_none());
        let mut slices = guest_mem.get_slices(GuestAddress(0x1000), 0x100);
        assert!(slices.next().unwrap().is_err());
        assert!(slices.next().is_none());

        // A range spanning two adjacent regions is split into two slices.
        let mut slices = guest_mem.get_slices(GuestAddress(0xa00), 0x400);
        assert_eq!(slices.next().unwrap().unwrap().len(), 0x200);
        assert_eq!(slices.next().unwrap().unwrap().len(), 0x200);
        assert!(slices.next().is_none());
    }
    #[test]
    fn test_atomic_accesses() {
        let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap();

        crate::bytes::tests::check_atomic_accesses(
            region,
            MemoryRegionAddress(0),
            MemoryRegionAddress(0x1000),
        );
    }

    #[test]
    #[cfg(feature = "backend-bitmap")]
    fn test_dirty_tracking() {
        crate::bitmap::tests::test_guest_memory_and_region(|| {
            crate::GuestMemoryMmap::<AtomicBitmap>::from_ranges(&[(GuestAddress(0), 0x1_0000)])
                .unwrap()
        });
    }

    #[test]
    fn test_change_region_addr() {
        let addr1 = GuestAddress(0x1000);
        let addr2 = GuestAddress(0x2000);
        let gm = GuestMemoryMmap::from_ranges(&[(addr1, 0x1000)]).unwrap();

        assert!(gm.find_region(addr1).is_some());
        assert!(gm.find_region(addr2).is_none());

        let (gm, region) = gm.remove_region(addr1, 0x1000).unwrap();

        assert!(gm.find_region(addr1).is_none());
        assert!(gm.find_region(addr2).is_none());

        // Re-insert the same host mapping at a different guest address.
        let region = GuestRegionMmap::with_arc(region.get_mmap(), addr2).unwrap();

        let gm = gm.insert_region(Arc::new(region)).unwrap();

        assert!(gm.find_region(addr1).is_none());
        assert!(gm.find_region(addr2).is_some());
    }
}