dbs_address_space/memory/raw_region.rs
// Copyright (C) 2022 Alibaba Cloud. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

use std::io::{Read, Write};
use std::sync::atomic::Ordering;

use vm_memory::bitmap::{Bitmap, BS};
use vm_memory::mmap::NewBitmap;
use vm_memory::volatile_memory::compute_offset;
use vm_memory::{
    guest_memory, volatile_memory, Address, AtomicAccess, Bytes, FileOffset, GuestAddress,
    GuestMemoryRegion, GuestUsize, MemoryRegionAddress, VolatileSlice,
};

/// Guest memory region for virtio-fs DAX window.
#[derive(Debug)]
pub struct GuestRegionRaw<B = ()> {
    guest_base: GuestAddress,
    addr: *mut u8,
    size: usize,
    bitmap: B,
}
impl<B: NewBitmap> GuestRegionRaw<B> {
    /// Create a `GuestRegionRaw` object from a raw pointer.
    ///
    /// # Safety
    /// The caller must ensure that `[addr, addr + size)` is a valid memory range and
    /// that it remains valid for the lifetime of the returned object (effectively
    /// `'static`).
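    ///
    /// # Examples
    ///
    /// A minimal sketch: the buffer is leaked to satisfy the `'static` requirement.
    ///
    /// ```ignore
    /// use vm_memory::GuestAddress;
    ///
    /// let buf = Box::leak(Box::new([0u8; 0x1000]));
    /// let region = unsafe {
    ///     GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), buf.as_mut_ptr(), buf.len())
    /// };
    /// ```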
    pub unsafe fn new(guest_base: GuestAddress, addr: *mut u8, size: usize) -> Self {
        let bitmap = B::with_len(size);

        GuestRegionRaw {
            guest_base,
            addr,
            size,
            bitmap,
        }
    }
}

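// All `Bytes` accesses below delegate to a `VolatileSlice` covering the whole region
// (via `as_volatile_slice`), so accesses are bounds-checked and writes are tracked by
// the region's dirty bitmap.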
impl<B: Bitmap> Bytes<MemoryRegionAddress> for GuestRegionRaw<B> {
    type E = guest_memory::Error;

    fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .write(buf, maddr)
            .map_err(Into::into)
    }

    fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<usize> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .read(buf, maddr)
            .map_err(Into::into)
    }

    fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .write_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> guest_memory::Result<()> {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .read_slice(buf, maddr)
            .map_err(Into::into)
    }

    fn read_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> guest_memory::Result<usize>
    where
        F: Read,
    {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .read_from::<F>(maddr, src, count)
            .map_err(Into::into)
    }

    fn read_exact_from<F>(
        &self,
        addr: MemoryRegionAddress,
        src: &mut F,
        count: usize,
    ) -> guest_memory::Result<()>
    where
        F: Read,
    {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .read_exact_from::<F>(maddr, src, count)
            .map_err(Into::into)
    }

    fn write_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> guest_memory::Result<usize>
    where
        F: Write,
    {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .write_to::<F>(maddr, dst, count)
            .map_err(Into::into)
    }

    fn write_all_to<F>(
        &self,
        addr: MemoryRegionAddress,
        dst: &mut F,
        count: usize,
    ) -> guest_memory::Result<()>
    where
        F: Write,
    {
        let maddr = addr.raw_value() as usize;
        self.as_volatile_slice()
            .unwrap()
            .write_all_to::<F>(maddr, dst, count)
            .map_err(Into::into)
    }

    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> guest_memory::Result<()> {
        self.as_volatile_slice().and_then(|s| {
            s.store(val, addr.raw_value() as usize, order)
                .map_err(Into::into)
        })
    }

    fn load<T: AtomicAccess>(
        &self,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> guest_memory::Result<T> {
        self.as_volatile_slice()
            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
    }
}

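// A raw region borrows memory supplied by its creator (e.g. a virtio-fs DAX window
// mapped elsewhere); no file backing is managed here, so `file_offset()` is `None`.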
impl<B: Bitmap> GuestMemoryRegion for GuestRegionRaw<B> {
    type B = B;

    fn len(&self) -> GuestUsize {
        self.size as GuestUsize
    }

    fn start_addr(&self) -> GuestAddress {
        self.guest_base
    }

    fn bitmap(&self) -> &Self::B {
        &self.bitmap
    }

    fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> {
        // `wrapping_offset` is not `unsafe` because it never dereferences; the resulting
        // pointer is valid because `addr` has just been range-checked by `check_address`.
        self.check_address(addr)
            .ok_or(guest_memory::Error::InvalidBackendAddress)
            .map(|addr| self.addr.wrapping_offset(addr.raw_value() as isize))
    }

    fn file_offset(&self) -> Option<&FileOffset> {
        None
    }

    unsafe fn as_slice(&self) -> Option<&[u8]> {
        // This is safe because the caller of `new` guarantees that `[addr, addr + size)`
        // is a valid memory range, so this slice will not overflow. However, it is
        // possible to alias.
        Some(std::slice::from_raw_parts(self.addr, self.size))
    }

    unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
        // This is safe because the caller of `new` guarantees that `[addr, addr + size)`
        // is a valid memory range, so this slice will not overflow. However, it is
        // possible to alias.
        Some(std::slice::from_raw_parts_mut(self.addr, self.size))
    }

    fn get_slice(
        &self,
        offset: MemoryRegionAddress,
        count: usize,
    ) -> guest_memory::Result<VolatileSlice<BS<B>>> {
        let offset = offset.raw_value() as usize;
        let end = compute_offset(offset, count)?;
        if end > self.size {
            return Err(volatile_memory::Error::OutOfBounds { addr: end }.into());
        }

        // Safe because we checked that `offset + count` is within our range and we only
        // ever hand out volatile accessors.
        Ok(unsafe {
            VolatileSlice::with_bitmap(
                (self.addr as usize + offset) as *mut _,
                count,
                self.bitmap.slice_at(offset),
            )
        })
    }

    #[cfg(target_os = "linux")]
    fn is_hugetlbfs(&self) -> Option<bool> {
        None
    }
}

#[cfg(test)]
mod tests {
    extern crate vmm_sys_util;

    use super::*;
    use crate::{GuestMemoryHybrid, GuestRegionHybrid};
    use std::sync::Arc;
    use vm_memory::{GuestAddressSpace, GuestMemory, VolatileMemory};

    /*
    use crate::bitmap::tests::test_guest_memory_and_region;
    use crate::bitmap::AtomicBitmap;
    use crate::GuestAddressSpace;

    use std::fs::File;
    use std::mem;
    use std::path::Path;
    use vmm_sys_util::tempfile::TempFile;

    type GuestMemoryMmap = super::GuestMemoryMmap<()>;
    type GuestRegionMmap = super::GuestRegionMmap<()>;
    type MmapRegion = super::MmapRegion<()>;
    */

    #[test]
    fn test_region_raw_new() {
        let mut buf = [0u8; 1024];
        let m =
            unsafe { GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), &mut buf as *mut _, 1024) };

        assert_eq!(m.start_addr(), GuestAddress(0x10_0000));
        assert_eq!(m.len(), 1024);
    }

    /*
    fn check_guest_memory_mmap(
        maybe_guest_mem: Result<GuestMemoryMmap, Error>,
        expected_regions_summary: &[(GuestAddress, usize)],
    ) {
        assert!(maybe_guest_mem.is_ok());

        let guest_mem = maybe_guest_mem.unwrap();
        assert_eq!(guest_mem.num_regions(), expected_regions_summary.len());
        let maybe_last_mem_reg = expected_regions_summary.last();
        if let Some((region_addr, region_size)) = maybe_last_mem_reg {
            let mut last_addr = region_addr.unchecked_add(*region_size as u64);
            if last_addr.raw_value() != 0 {
                last_addr = last_addr.unchecked_sub(1);
            }
            assert_eq!(guest_mem.last_addr(), last_addr);
        }
        for ((region_addr, region_size), mmap) in expected_regions_summary
            .iter()
            .zip(guest_mem.regions.iter())
        {
            assert_eq!(region_addr, &mmap.guest_base);
            assert_eq!(region_size, &mmap.mapping.size());

            assert!(guest_mem.find_region(*region_addr).is_some());
        }
    }

    fn new_guest_memory_mmap(
        regions_summary: &[(GuestAddress, usize)],
    ) -> Result<GuestMemoryMmap, Error> {
        GuestMemoryMmap::from_ranges(regions_summary)
    }

    fn new_guest_memory_mmap_from_regions(
        regions_summary: &[(GuestAddress, usize)],
    ) -> Result<GuestMemoryMmap, Error> {
        GuestMemoryMmap::from_regions(
            regions_summary
                .iter()
                .map(|(region_addr, region_size)| {
                    GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
                        .unwrap()
                })
                .collect(),
        )
    }

    fn new_guest_memory_mmap_from_arc_regions(
        regions_summary: &[(GuestAddress, usize)],
    ) -> Result<GuestMemoryMmap, Error> {
        GuestMemoryMmap::from_arc_regions(
            regions_summary
                .iter()
                .map(|(region_addr, region_size)| {
                    Arc::new(
                        GuestRegionMmap::new(MmapRegion::new(*region_size).unwrap(), *region_addr)
                            .unwrap(),
                    )
                })
                .collect(),
        )
    }

    fn new_guest_memory_mmap_with_files(
        regions_summary: &[(GuestAddress, usize)],
    ) -> Result<GuestMemoryMmap, Error> {
        let regions: Vec<(GuestAddress, usize, Option<FileOffset>)> = regions_summary
            .iter()
            .map(|(region_addr, region_size)| {
                let f = TempFile::new().unwrap().into_file();
                f.set_len(*region_size as u64).unwrap();

                (*region_addr, *region_size, Some(FileOffset::new(f, 0)))
            })
            .collect();

        GuestMemoryMmap::from_ranges_with_files(&regions)
    }
    */

    #[test]
    fn slice_addr() {
        let mut buf = [0u8; 1024];
        let m =
            unsafe { GuestRegionRaw::<()>::new(GuestAddress(0x10_0000), &mut buf as *mut _, 1024) };

        let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap();
        assert_eq!(s.as_ptr(), &mut buf[2] as *mut _);
    }

    /*
    #[test]
    fn test_address_in_range() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.address_in_range(GuestAddress(0x200)));
            assert!(!guest_mem.address_in_range(GuestAddress(0x600)));
            assert!(guest_mem.address_in_range(GuestAddress(0xa00)));
            assert!(!guest_mem.address_in_range(GuestAddress(0xc00)));
        }
    }

    #[test]
    fn test_check_address() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert_eq!(
                guest_mem.check_address(GuestAddress(0x200)),
                Some(GuestAddress(0x200))
            );
            assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None);
            assert_eq!(
                guest_mem.check_address(GuestAddress(0xa00)),
                Some(GuestAddress(0xa00))
            );
            assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None);
        }
    }

    #[test]
    fn test_to_region_addr() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none());
            let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap();
            let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap();
            assert!(r0.as_ptr() == r1.as_ptr());
            assert_eq!(addr0, MemoryRegionAddress(0));
            assert_eq!(addr1, MemoryRegionAddress(0x200));
        }
    }

    #[test]
    fn test_get_host_address() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x400).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x400).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x800);
        let guest_mem =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x400, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x400, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err());
            let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap();
            let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap();
            assert_eq!(
                ptr0,
                guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr()
            );
            assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1);
        }
    }

    #[test]
    fn test_deref() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let start_addr = GuestAddress(0x0);
        let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let guest_mem_list = vec![guest_mem, guest_mem_backed_by_file];
        for guest_mem in guest_mem_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5);
            let slice = guest_mem
                .find_region(GuestAddress(0))
                .unwrap()
                .as_volatile_slice()
                .unwrap();

            let buf = &mut [0, 0, 0, 0, 0];
            assert_eq!(slice.read(buf, 0).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }

    #[test]
    fn test_read_u64() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let bad_addr = GuestAddress(0x2001);
        let bad_addr2 = GuestAddress(0x1ffc);
        let max_addr = GuestAddress(0x2000);

        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let val1: u64 = 0xaa55_aa55_aa55_aa55;
            let val2: u64 = 0x55aa_55aa_55aa_55aa;
            assert_eq!(
                format!("{:?}", gm.write_obj(val1, bad_addr).err().unwrap()),
                format!("InvalidGuestAddress({:?})", bad_addr)
            );
            assert_eq!(
                format!("{:?}", gm.write_obj(val1, bad_addr2).err().unwrap()),
                format!(
                    "PartialBuffer {{ expected: {:?}, completed: {:?} }}",
                    mem::size_of::<u64>(),
                    max_addr.checked_offset_from(bad_addr2).unwrap()
                )
            );

            gm.write_obj(val1, GuestAddress(0x500)).unwrap();
            gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap();
            let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap();
            let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap();
            assert_eq!(val1, num1);
            assert_eq!(val2, num2);
        }
    }

    #[test]
    fn write_and_read() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let mut start_addr = GuestAddress(0x1000);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];

            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5);

            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, start_addr).unwrap(), 5);
            assert_eq!(buf, sample_buf);

            start_addr = GuestAddress(0x13ff);
            assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1);
            assert_eq!(gm.read(buf, start_addr).unwrap(), 1);
            assert_eq!(buf[0], sample_buf[0]);
            start_addr = GuestAddress(0x1000);
        }
    }

    #[test]
    fn read_to_and_write_from_mem() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[(
            GuestAddress(0x1000),
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let addr = GuestAddress(0x1010);
            let mut file = if cfg!(unix) {
                File::open(Path::new("/dev/zero")).unwrap()
            } else {
                File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap()
            };
            gm.write_obj(!0u32, addr).unwrap();
            gm.read_exact_from(addr, &mut file, mem::size_of::<u32>())
                .unwrap();
            let value: u32 = gm.read_obj(addr).unwrap();
            if cfg!(unix) {
                assert_eq!(value, 0);
            } else {
                assert_eq!(value, 0x0090_5a4d);
            }

            let mut sink = Vec::new();
            gm.write_all_to(addr, &mut sink, mem::size_of::<u32>())
                .unwrap();
            if cfg!(unix) {
                assert_eq!(sink, vec![0; mem::size_of::<u32>()]);
            } else {
                assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]);
            };
        }
    }

    #[test]
    fn create_vec_with_regions() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gm = GuestMemoryMmap::from_ranges(&regions).unwrap();

        for region in gm.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in gm.iter() {
            iterated_regions.push((region.start_addr(), region.len() as usize));
        }
        assert_eq!(regions, iterated_regions);

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        assert_eq!(gm.regions[0].guest_base, regions[0].0);
        assert_eq!(gm.regions[1].guest_base, regions[1].0);
    }

    #[test]
    fn test_memory() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gm = Arc::new(GuestMemoryMmap::from_ranges(&regions).unwrap());
        let mem = gm.memory();

        for region in mem.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in mem.iter() {
            iterated_regions.push((region.start_addr(), region.len() as usize));
        }
        assert_eq!(regions, iterated_regions);

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        assert_eq!(gm.regions[0].guest_base, regions[0].0);
        assert_eq!(gm.regions[1].guest_base, regions[1].0);
    }

    #[test]
    fn test_access_cross_boundary() {
        let f1 = TempFile::new().unwrap().into_file();
        f1.set_len(0x1000).unwrap();
        let f2 = TempFile::new().unwrap().into_file();
        f2.set_len(0x1000).unwrap();

        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm =
            GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();
        let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[
            (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))),
            (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))),
        ])
        .unwrap();

        let gm_list = vec![gm, gm_backed_by_file];
        for gm in gm_list.iter() {
            let sample_buf = &[1, 2, 3, 4, 5];
            assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5);
            let buf = &mut [0u8; 5];
            assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5);
            assert_eq!(buf, sample_buf);
        }
    }

    #[test]
    fn test_retrieve_fd_backing_memory_region() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x400).unwrap();

        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, 0)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
    }

    // Windows needs a dedicated test where it will retrieve the allocation
    // granularity to determine a proper offset (other than 0) that can be
    // used for the backing file. Refer to Microsoft docs here:
    // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile
    #[test]
    #[cfg(unix)]
    fn test_retrieve_offset_from_fd_backing_memory_region() {
        let f = TempFile::new().unwrap().into_file();
        f.set_len(0x1400).unwrap();
        // Needs to be aligned on 4k, otherwise mmap will fail.
        let offset = 0x1000;

        let start_addr = GuestAddress(0x0);
        let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_none());

        let gm = GuestMemoryMmap::from_ranges_with_files(&[(
            start_addr,
            0x400,
            Some(FileOffset::new(f, offset)),
        )])
        .unwrap();
        assert!(gm.find_region(start_addr).is_some());
        let region = gm.find_region(start_addr).unwrap();
        assert!(region.file_offset().is_some());
        assert_eq!(region.file_offset().unwrap().start(), offset);
    }
    */

    #[test]
    fn test_mmap_insert_region() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x10_0000);

        let guest_mem = GuestMemoryHybrid::<()>::new();
        let mut raw_buf = [0u8; 0x1000];
        let raw_ptr = &mut raw_buf as *mut u8;
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, raw_ptr, 0x1000) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, raw_ptr, 0x1000) };
        let gm = &guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

        let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x8000), raw_ptr, 0x1000) };
        let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
        let gm = gm.insert_region(mmap).unwrap();
        let reg = unsafe { GuestRegionRaw::new(GuestAddress(0x4000), raw_ptr, 0x1000) };
        let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
        let gm = gm.insert_region(mmap).unwrap();
        let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
        let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
        let gm = gm.insert_region(mmap).unwrap();
        let reg = unsafe { GuestRegionRaw::new(GuestAddress(0xc000), raw_ptr, 0x1000) };
        let mmap = Arc::new(GuestRegionHybrid::from_raw_region(reg));
        gm.insert_region(mmap).unwrap_err();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 5);

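        // Regions are kept sorted by guest base address, regardless of insertion order.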
        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(gm.regions[1].start_addr(), GuestAddress(0x4000));
        assert_eq!(gm.regions[2].start_addr(), GuestAddress(0x8000));
        assert_eq!(gm.regions[3].start_addr(), GuestAddress(0xc000));
        assert_eq!(gm.regions[4].start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_mmap_remove_region() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x10_0000);

        let guest_mem = GuestMemoryHybrid::<()>::new();
        let mut raw_buf = [0u8; 0x1000];
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x1000) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x1000) };
        let gm = &guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

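        // Removal requires an exact match on both the base address and the region size.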
        gm.remove_region(GuestAddress(0), 128).unwrap_err();
        gm.remove_region(GuestAddress(0x4000), 128).unwrap_err();
        let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap();

        assert_eq!(mem_orig.num_regions(), 2);
        assert_eq!(gm.num_regions(), 1);

        assert_eq!(gm.regions[0].start_addr(), GuestAddress(0x0000));
        assert_eq!(region.start_addr(), GuestAddress(0x10_0000));
    }

    #[test]
    fn test_guest_memory_mmap_get_slice() {
        let start_addr1 = GuestAddress(0);
        let mut raw_buf = [0u8; 0x400];
        let region =
            unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };

        // Normal case.
        let slice_addr = MemoryRegionAddress(0x100);
        let slice_size = 0x200;
        let slice = region.get_slice(slice_addr, slice_size).unwrap();
        assert_eq!(slice.len(), slice_size);

        // Empty slice.
        let slice_addr = MemoryRegionAddress(0x200);
        let slice_size = 0x0;
        let slice = region.get_slice(slice_addr, slice_size).unwrap();
        assert!(slice.is_empty());

        // Error case when slice_size is beyond the boundary.
        let slice_addr = MemoryRegionAddress(0x300);
        let slice_size = 0x200;
        assert!(region.get_slice(slice_addr, slice_size).is_err());
    }

    #[test]
    fn test_guest_memory_mmap_as_volatile_slice() {
        let start_addr1 = GuestAddress(0);
        let mut raw_buf = [0u8; 0x400];
        let region =
            unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
        let region_size = 0x400;

        // Test slice length.
        let slice = region.as_volatile_slice().unwrap();
        assert_eq!(slice.len(), region_size);

        // Test slice data.
        let v = 0x1234_5678u32;
        let r = slice.get_ref::<u32>(0x200).unwrap();
        r.store(v);
        assert_eq!(r.load(), v);
    }

    #[test]
    fn test_guest_memory_get_slice() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);

        let guest_mem = GuestMemoryHybrid::<()>::new();
        let mut raw_buf = [0u8; 0x400];
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();

        // Normal cases.
        let slice_size = 0x200;
        let slice = guest_mem
            .get_slice(GuestAddress(0x100), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        let slice_size = 0x400;
        let slice = guest_mem
            .get_slice(GuestAddress(0x800), slice_size)
            .unwrap();
        assert_eq!(slice.len(), slice_size);

        // Empty slice.
        assert!(guest_mem
            .get_slice(GuestAddress(0x900), 0)
            .unwrap()
            .is_empty());

        // Error cases: the slice extends beyond its region, or the base address is not
        // covered by any region.
        assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err());
        assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err());
    }

    #[test]
    fn test_checked_offset() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);

        let guest_mem = GuestMemoryHybrid::<()>::new();
        let mut raw_buf = [0u8; 0x400];
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();

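        // `checked_offset` only requires the *resulting* address to fall inside some
        // region; the base and the result may live in different regions.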
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x200),
            Some(GuestAddress(0x200))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0xa00),
            Some(GuestAddress(0xa00))
        );
        assert_eq!(
            guest_mem.checked_offset(start_addr2, 0x7ff),
            Some(GuestAddress(0xfff))
        );
        assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None);
        assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None);

        assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None);
        assert_eq!(
            guest_mem.checked_offset(start_addr1, 0x400 - 1),
            Some(GuestAddress(0x400 - 1))
        );
    }

    #[test]
    fn test_check_range() {
        let start_addr1 = GuestAddress(0);
        let start_addr2 = GuestAddress(0x800);
        let start_addr3 = GuestAddress(0xc00);

        let guest_mem = GuestMemoryHybrid::<()>::new();
        let mut raw_buf = [0u8; 0x400];
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr1, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr2, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();
        let reg = unsafe { GuestRegionRaw::<()>::new(start_addr3, &mut raw_buf as *mut _, 0x400) };
        let guest_mem = guest_mem
            .insert_region(Arc::new(GuestRegionHybrid::from_raw_region(reg)))
            .unwrap();

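        // `check_range` succeeds only when the whole range is covered by contiguous
        // regions: [0x800, 0x1000) spans two adjacent regions and passes, while any
        // range touching the hole at [0x400, 0x800) fails.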
        assert!(guest_mem.check_range(start_addr1, 0x0));
        assert!(guest_mem.check_range(start_addr1, 0x200));
        assert!(guest_mem.check_range(start_addr1, 0x400));
        assert!(!guest_mem.check_range(start_addr1, 0xa00));
        assert!(guest_mem.check_range(start_addr2, 0x7ff));
        assert!(guest_mem.check_range(start_addr2, 0x800));
        assert!(!guest_mem.check_range(start_addr2, 0x801));
        assert!(!guest_mem.check_range(start_addr2, 0xc00));
        assert!(!guest_mem.check_range(start_addr1, usize::MAX));
    }
}