1use bitflags::bitflags;
11use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE};
12use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result};
13
14use vmm_sys_util::{
15 fam::{Error as FamError, FamStruct, FamStructWrapper},
16 generate_fam_struct_impl,
17 ioctl::{ioctl_expr, _IOC_NONE},
18};
19
20#[cfg(not(test))]
22use vmm_sys_util::ioctl::ioctl_with_ref;
23
24#[cfg(test)]
25use tests::ioctl_with_ref;
26
27use crate::bitmap::{Bitmap, NewBitmap, BS};
28use crate::guest_memory::{FileOffset, GuestAddress};
29use crate::volatile_memory::{self, VolatileMemory, VolatileSlice};
30
/// Errors that can occur while creating or mapping a Xen memory region.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// Adding the file offset and length overflowed.
    #[error("The specified file offset and length cause overflow when added")]
    InvalidOffsetLength,
    /// `MAP_FIXED` is rejected because the kernel would clobber existing mappings.
    #[error("The forbidden `MAP_FIXED` flag was specified")]
    MapFixed,
    /// The requested range extends past the end of the backing file.
    #[error("The specified file offset and length is greater then file length")]
    MappingPastEof,
    /// The underlying `mmap(2)` or ioctl call failed; carries the OS error.
    #[error("{0}")]
    Mmap(io::Error),
    /// A backing file was required but missing, or its offset was invalid.
    #[error("Invalid file offset")]
    InvalidFileOffset,
    /// On-demand slice mapping was requested on a region mapped in advance.
    #[error("Memory mapped in advance")]
    MappedInAdvance,
    /// The `MmapXenFlags` bit combination is unsupported.
    #[error("Invalid Xen Mmap flags: {0:x}")]
    MmapFlags(u32),
    /// Error from the flexible-array-member wrapper allocation.
    #[error("Fam error: {0}")]
    Fam(FamError),
    /// Internal invariant violated (e.g. a field expected to be `Some` was `None`).
    #[error("Unexpected error")]
    UnexpectedError,
}

/// Shorthand result type used throughout this module.
type Result<T> = result::Result<T, Error>;
64
/// Describes a memory range to be mapped, carrying both the generic mmap
/// parameters and the Xen-specific mechanism selection.
#[derive(Clone, Debug)]
pub struct MmapRange {
    // Length of the mapping in bytes.
    size: usize,
    // Backing file and offset; `None` for anonymous mappings.
    file_offset: Option<FileOffset>,
    // mmap protection bits; defaulted to read+write when left `None`.
    prot: Option<i32>,
    // mmap flags; defaulted when left `None`.
    flags: Option<i32>,
    // Optional hugetlbfs hint, surfaced via `MmapRegion::is_hugetlbfs`.
    hugetlbfs: Option<bool>,
    // Guest physical address of the range.
    addr: GuestAddress,
    // `MmapXenFlags` bits selecting the Xen mapping mechanism.
    mmap_flags: u32,
    // Mechanism-specific payload (used as the domain id for foreign/grant).
    mmap_data: u32,
}
77
78impl MmapRange {
79 pub fn new(
81 size: usize,
82 file_offset: Option<FileOffset>,
83 addr: GuestAddress,
84 mmap_flags: u32,
85 mmap_data: u32,
86 ) -> Self {
87 Self {
88 size,
89 file_offset,
90 prot: None,
91 flags: None,
92 hugetlbfs: None,
93 addr,
94 mmap_flags,
95 mmap_data,
96 }
97 }
98
99 pub fn new_unix(size: usize, file_offset: Option<FileOffset>, addr: GuestAddress) -> Self {
101 let flags = Some(match file_offset {
102 Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED,
103 None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
104 });
105
106 Self {
107 size,
108 file_offset,
109 prot: None,
110 flags,
111 hugetlbfs: None,
112 addr,
113 mmap_flags: MmapXenFlags::UNIX.bits(),
114 mmap_data: 0,
115 }
116 }
117
118 pub fn set_prot(&mut self, prot: i32) {
120 self.prot = Some(prot)
121 }
122
123 pub fn set_flags(&mut self, flags: i32) {
125 self.flags = Some(flags)
126 }
127
128 pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
130 self.hugetlbfs = Some(hugetlbfs)
131 }
132}
133
/// Helper structure for working with mmapped memory regions under Xen.
///
/// The region is backed by one of three mapping mechanisms (unix, foreign or
/// grant), chosen from the flags in the originating `MmapRange`.
#[derive(Debug)]
pub struct MmapRegion<B = ()> {
    // Dirty-tracking bitmap (or `()` when tracking is disabled).
    bitmap: B,
    // Size of the region in bytes.
    size: usize,
    // Protection bits the region was mapped with.
    prot: i32,
    // mmap flags the region was mapped with.
    flags: i32,
    // Backing file and offset, if any.
    file_offset: Option<FileOffset>,
    // Optional hugetlbfs hint (see `is_hugetlbfs`).
    hugetlbfs: Option<bool>,
    // The Xen mapping backing this region.
    mmap: MmapXen,
}

// SAFETY: `Send` is not auto-derived because the inner mapping holds raw
// pointers. The region exclusively owns its mapping, so moving it to another
// thread is sound whenever the bitmap itself is `Send`.
unsafe impl<B: Send> Send for MmapRegion<B> {}
// SAFETY: as above; a shared reference only allows reading the region's
// metadata, which is sound whenever the bitmap itself is `Sync`.
unsafe impl<B: Sync> Sync for MmapRegion<B> {}
161
162impl<B: NewBitmap> MmapRegion<B> {
163 pub fn from_range(mut range: MmapRange) -> Result<Self> {
241 if range.prot.is_none() {
242 range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
243 }
244
245 match range.flags {
246 Some(flags) => {
247 if flags & libc::MAP_FIXED != 0 {
248 return Err(Error::MapFixed);
251 }
252 }
253 None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED),
254 }
255
256 let mmap = MmapXen::new(&range)?;
257
258 Ok(MmapRegion {
259 bitmap: B::with_len(range.size),
260 size: range.size,
261 prot: range.prot.ok_or(Error::UnexpectedError)?,
262 flags: range.flags.ok_or(Error::UnexpectedError)?,
263 file_offset: range.file_offset,
264 hugetlbfs: range.hugetlbfs,
265 mmap,
266 })
267 }
268}
269
270impl<B: Bitmap> MmapRegion<B> {
271 pub fn as_ptr(&self) -> *mut u8 {
277 self.mmap.addr()
278 }
279
280 pub fn size(&self) -> usize {
282 self.size
283 }
284
285 pub fn file_offset(&self) -> Option<&FileOffset> {
287 self.file_offset.as_ref()
288 }
289
290 pub fn prot(&self) -> i32 {
292 self.prot
293 }
294
295 pub fn flags(&self) -> i32 {
297 self.flags
298 }
299
300 pub fn fds_overlap<T: Bitmap>(&self, other: &MmapRegion<T>) -> bool {
306 if let Some(f_off1) = self.file_offset() {
307 if let Some(f_off2) = other.file_offset() {
308 if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() {
309 let s1 = f_off1.start();
310 let s2 = f_off2.start();
311 let l1 = self.len() as u64;
312 let l2 = other.len() as u64;
313
314 if s1 < s2 {
315 return s1 + l1 > s2;
316 } else {
317 return s2 + l2 > s1;
318 }
319 }
320 }
321 }
322 false
323 }
324
325 pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
327 self.hugetlbfs = Some(hugetlbfs)
328 }
329
330 pub fn is_hugetlbfs(&self) -> Option<bool> {
332 self.hugetlbfs
333 }
334
335 pub fn bitmap(&self) -> &B {
337 &self.bitmap
338 }
339
340 pub fn xen_mmap_flags(&self) -> u32 {
342 self.mmap.flags()
343 }
344
345 pub fn xen_mmap_data(&self) -> u32 {
347 self.mmap.data()
348 }
349}
350
impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
    type B = B;

    /// Total length of the region in bytes.
    fn len(&self) -> usize {
        self.size
    }

    /// Returns a `VolatileSlice` covering `offset..offset + count`.
    ///
    /// For grant regions that are not mapped in advance, the slice carries a
    /// reference to the `MmapXen` object so the range can be mapped on demand.
    fn get_slice(
        &self,
        offset: usize,
        count: usize,
    ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
        // Rejects ranges that would run past the end of the region.
        let _ = self.compute_end_offset(offset, count)?;

        let mmap_info = if self.mmap.mmap_in_advance() {
            None
        } else {
            Some(&self.mmap)
        };

        Ok(
            // SAFETY: the range was validated by `compute_end_offset` above,
            // and the backing memory is either mapped already or will be
            // mapped on demand through `mmap_info`.
            unsafe {
                VolatileSlice::with_bitmap(
                    self.as_ptr().add(offset),
                    count,
                    self.bitmap.slice_at(offset),
                    mmap_info,
                )
            },
        )
    }
}
385
/// A plain `mmap(2)` mapping together with its size; unmapped on drop.
//
// NOTE(review): the type derives `Clone` while `Drop` calls `munmap`; cloning
// a live mapping would lead to a double unmap. Confirm clones only ever
// happen through containers whose mapping is `None` at clone time.
#[derive(Clone, Debug, PartialEq)]
struct MmapUnix {
    // Start address returned by mmap(2).
    addr: *mut u8,
    // Length of the mapping in bytes.
    size: usize,
}

impl MmapUnix {
    /// Thin wrapper over `mmap(2)`: lets the kernel choose the address and
    /// returns `Error::Mmap` with the OS error on failure.
    fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result<Self> {
        // SAFETY: a null hint lets the kernel pick the address; all other
        // arguments are forwarded verbatim and the result is checked below.
        let addr =
            unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) };

        if addr == libc::MAP_FAILED {
            return Err(Error::Mmap(io::Error::last_os_error()));
        }

        Ok(Self {
            addr: addr as *mut u8,
            size,
        })
    }

    /// Start address of the mapping.
    fn addr(&self) -> *mut u8 {
        self.addr
    }
}

impl Drop for MmapUnix {
    fn drop(&mut self) {
        // SAFETY: this struct owns the mapping created in `new()`, so it is
        // valid to unmap it exactly once here.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}
424
bitflags! {
    /// Flags selecting the Xen mapping mechanism, carried in `MmapRange::mmap_flags`.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct MmapXenFlags: u32 {
        /// Standard unix mmap; no Xen-specific mechanism.
        const UNIX = 0x0;
        /// Xen foreign memory, mapped via the privcmd mmapbatch ioctl.
        const FOREIGN = 0x1;
        /// Xen grant memory, mapped via the gntdev ioctls.
        const GRANT = 0x2;
        /// Do not map the whole region up front; map slices on demand instead.
        const NO_ADVANCE_MAP = 0x8;
        /// Mask of both mechanism-selecting flags.
        const ALL = Self::FOREIGN.bits() | Self::GRANT.bits();
    }
}
442
443impl MmapXenFlags {
444 pub fn is_valid(&self) -> bool {
446 if self.is_grant() {
449 !self.is_foreign()
450 } else if self.is_foreign() || self.is_unix() {
451 self.mmap_in_advance()
452 } else {
453 false
454 }
455 }
456
457 pub fn is_unix(&self) -> bool {
459 self.bits() == Self::UNIX.bits()
460 }
461
462 pub fn is_foreign(&self) -> bool {
464 self.contains(Self::FOREIGN)
465 }
466
467 pub fn is_grant(&self) -> bool {
469 self.contains(Self::GRANT)
470 }
471
472 pub fn mmap_in_advance(&self) -> bool {
474 !self.contains(Self::NO_ADVANCE_MAP)
475 }
476}
477
478fn page_size() -> u64 {
479 unsafe { libc::sysconf(_SC_PAGESIZE) as u64 }
481}
482
483fn pages(size: usize) -> (usize, usize) {
484 let page_size = page_size() as usize;
485 let num = size.div_ceil(page_size);
486
487 (num, page_size * num)
488}
489
490fn validate_file(file_offset: &Option<FileOffset>) -> Result<(i32, u64)> {
491 let file_offset = match file_offset {
492 Some(f) => f,
493 None => return Err(Error::InvalidFileOffset),
494 };
495
496 let fd = file_offset.file().as_raw_fd();
497 let f_offset = file_offset.start();
498
499 if f_offset != 0 {
501 return Err(Error::InvalidOffsetLength);
502 }
503
504 Ok((fd, f_offset))
505}
506
/// Common interface over the three Xen mapping mechanisms.
trait MmapXenTrait: std::fmt::Debug {
    /// Maps a slice of the region on demand (supported by grant mappings only).
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice>;
    /// Base address of the in-advance mapping; null when mapped on demand.
    fn addr(&self) -> *mut u8;
}

/// Standard unix mapping; no Xen-specific ioctl is involved.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenUnix(MmapUnix);
516
517impl MmapXenUnix {
518 fn new(range: &MmapRange) -> Result<Self> {
519 let (fd, offset) = if let Some(ref f_off) = range.file_offset {
520 (f_off.file().as_raw_fd(), f_off.start())
521 } else {
522 (-1, 0)
523 };
524
525 Ok(Self(MmapUnix::new(
526 range.size,
527 range.prot.ok_or(Error::UnexpectedError)?,
528 range.flags.ok_or(Error::UnexpectedError)?,
529 fd,
530 offset,
531 )?))
532 }
533}
534
535impl MmapXenTrait for MmapXenUnix {
536 #[allow(unused_variables)]
537 fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
538 Err(Error::MappedInAdvance)
539 }
540
541 fn addr(&self) -> *mut u8 {
542 self.0.addr()
543 }
544}
545
/// Layout-compatible mirror of the kernel's `privcmd_mmapbatch_v2` request,
/// passed to the privcmd mmapbatch ioctl below.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct PrivCmdMmapBatchV2 {
    // Number of pages to map.
    num: u32,
    // ID of the domain whose pages are mapped.
    domid: u16,
    // Virtual address the pages get mapped at (start of an existing mapping).
    addr: *mut c_void,
    // Array of `num` page frame numbers to map.
    arr: *const u64,
    // Per-page error codes, filled in by the kernel.
    err: *mut c_int,
}

// Ioctl type byte for the Xen privcmd driver ('P').
const XEN_PRIVCMD_TYPE: u32 = 'P' as u32;

// Encodes `_IO('P', 4)` with the request struct size — presumably matching
// the kernel's IOCTL_PRIVCMD_MMAPBATCH_V2; verify against privcmd.h.
fn ioctl_privcmd_mmapbatch_v2() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_PRIVCMD_TYPE,
        4,
        size_of::<PrivCmdMmapBatchV2>() as u32,
    )
}
575
/// State for Xen foreign memory mappings: another domain's memory mapped
/// through the privcmd device file.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenForeign {
    // ID of the domain whose memory is mapped (taken from `mmap_data`).
    domid: u32,
    // Guest physical address of the region.
    guest_base: GuestAddress,
    // The unix mapping the ioctl populates with the foreign pages.
    unix_mmap: MmapUnix,
    // File descriptor of the backing (privcmd) device file.
    fd: i32,
}

impl AsRawFd for MmapXenForeign {
    fn as_raw_fd(&self) -> i32 {
        self.fd
    }
}
590
591impl MmapXenForeign {
592 fn new(range: &MmapRange) -> Result<Self> {
593 let (fd, f_offset) = validate_file(&range.file_offset)?;
594 let (count, size) = pages(range.size);
595
596 let unix_mmap = MmapUnix::new(
597 size,
598 range.prot.ok_or(Error::UnexpectedError)?,
599 range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED,
600 fd,
601 f_offset,
602 )?;
603
604 let foreign = Self {
605 domid: range.mmap_data,
606 guest_base: range.addr,
607 unix_mmap,
608 fd,
609 };
610
611 foreign.mmap_ioctl(count)?;
612 Ok(foreign)
613 }
614
615 fn mmap_ioctl(&self, count: usize) -> Result<()> {
617 let base = self.guest_base.0 / page_size();
618
619 let mut pfn = Vec::with_capacity(count);
620 for i in 0..count {
621 pfn.push(base + i as u64);
622 }
623
624 let mut err: Vec<c_int> = vec![0; count];
625
626 let map = PrivCmdMmapBatchV2 {
627 num: count as u32,
628 domid: self.domid as u16,
629 addr: self.addr() as *mut c_void,
630 arr: pfn.as_ptr(),
631 err: err.as_mut_ptr(),
632 };
633
634 let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) };
636
637 if ret == 0 {
638 Ok(())
639 } else {
640 Err(Error::Mmap(io::Error::last_os_error()))
641 }
642 }
643}
644
645impl MmapXenTrait for MmapXenForeign {
646 #[allow(unused_variables)]
647 fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
648 Err(Error::MappedInAdvance)
649 }
650
651 fn addr(&self) -> *mut u8 {
652 self.unix_mmap.addr()
653 }
654}
655
// Marker bit carried by grant addresses; masked off before converting an
// address to a page frame number in `MmapXenGrant::mmap_ioctl`.
const XEN_GRANT_ADDR_OFF: u64 = 1 << 63;

// Layout-compatible mirror of the kernel gntdev grant-ref entry.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, PartialEq)]
struct GntDevGrantRef {
    // ID of the domain owning the grant.
    domid: u32,
    // Grant reference number to map.
    reference: u32,
}

// Bindgen-style placeholder for a C flexible array member: zero-sized, the
// actual entries live in memory directly past the containing struct.
#[repr(C)]
#[derive(Debug, Default, PartialEq, Eq)]
struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
    // Pointer to the first trailing element (may point past the struct).
    #[inline]
    unsafe fn as_ptr(&self) -> *const T {
        self as *const __IncompleteArrayField<T> as *const T
    }
    #[inline]
    unsafe fn as_mut_ptr(&mut self) -> *mut T {
        self as *mut __IncompleteArrayField<T> as *mut T
    }
    // Caller must guarantee that `len` elements are actually allocated
    // past `self` (upheld by `FamStructWrapper`).
    #[inline]
    unsafe fn as_slice(&self, len: usize) -> &[T] {
        ::std::slice::from_raw_parts(self.as_ptr(), len)
    }
    #[inline]
    unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
    }
}

// Layout-compatible mirror of the kernel's gntdev map-grant-ref request.
#[repr(C)]
#[derive(Debug, Default)]
struct GntDevMapGrantRef {
    // Number of grant references to map.
    count: u32,
    // Unused padding.
    pad: u32,
    // Filled in by the kernel: offset to pass to a subsequent mmap() call.
    index: u64,
    // Flexible array of `count` grant references.
    refs: __IncompleteArrayField<GntDevGrantRef>,
}

// Implements the FAM-struct interface so `FamStructWrapper` can manage the
// variable-length `refs` array (length tracked by `count`).
generate_fam_struct_impl!(
    GntDevMapGrantRef,
    GntDevGrantRef,
    refs,
    u32,
    count,
    usize::MAX
);

// Safe owner of a heap-allocated `GntDevMapGrantRef` plus its entries.
type GntDevMapGrantRefWrapper = FamStructWrapper<GntDevMapGrantRef>;
720
721impl GntDevMapGrantRef {
722 fn new(domid: u32, base: u32, count: usize) -> Result<GntDevMapGrantRefWrapper> {
723 let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?;
724 let refs = wrapper.as_mut_slice();
725
726 for (i, r) in refs.iter_mut().enumerate().take(count) {
728 r.domid = domid;
729 r.reference = base + i as u32;
730 }
731
732 Ok(wrapper)
733 }
734}
735
// Layout-compatible mirror of the kernel's gntdev unmap-grant-ref request.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct GntDevUnmapGrantRef {
    // mmap index returned by the earlier map-grant-ref call.
    index: u64,
    // Number of pages to unmap.
    count: u32,
    // Unused padding.
    pad: u32,
}

impl GntDevUnmapGrantRef {
    // Builds an unmap request for `count` pages at mapping `index`.
    fn new(index: u64, count: u32) -> Self {
        Self {
            index,
            count,
            pad: 0,
        }
    }
}
759
// Ioctl type byte for the Xen gntdev driver ('G').
const XEN_GNTDEV_TYPE: u32 = 'G' as u32;

// Encodes `_IO('G', 0)` with the map-grant-ref struct size plus one trailing
// grant-ref entry — presumably IOCTL_GNTDEV_MAP_GRANT_REF; verify against
// the kernel's gntdev.h.
fn ioctl_gntdev_map_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        0,
        (size_of::<GntDevMapGrantRef>() + size_of::<GntDevGrantRef>()) as u32,
    )
}

// Encodes `_IO('G', 1)` with the unmap struct size — presumably
// IOCTL_GNTDEV_UNMAP_GRANT_REF.
fn ioctl_gntdev_unmap_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        1,
        size_of::<GntDevUnmapGrantRef>() as u32,
    )
}
781
/// State for Xen grant mappings performed through the gntdev device file.
///
/// With `NO_ADVANCE_MAP`, `unix_mmap` stays `None` and slices of the region
/// are mapped on demand via `mmap_slice`.
#[derive(Clone, Debug)]
struct MmapXenGrant {
    // Guest physical address of the region.
    guest_base: GuestAddress,
    // The in-advance mapping, when one was made.
    unix_mmap: Option<MmapUnix>,
    // Backing (gntdev) device file; its offset is validated to be 0.
    file_offset: FileOffset,
    // mmap flags used for the (possibly on-demand) mappings.
    flags: i32,
    // Size of the in-advance mapping; 0 when mapping on demand.
    size: usize,
    // mmap index returned by the map-grant-ref ioctl.
    index: u64,
    // ID of the domain owning the grants (taken from `mmap_data`).
    domid: u32,
}

impl AsRawFd for MmapXenGrant {
    fn as_raw_fd(&self) -> i32 {
        self.file_offset.file().as_raw_fd()
    }
}
799
impl MmapXenGrant {
    /// Creates a grant-mapping context for `range`, mapping the whole range
    /// immediately unless `NO_ADVANCE_MAP` was requested.
    fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result<Self> {
        validate_file(&range.file_offset)?;

        let mut grant = Self {
            guest_base: range.addr,
            unix_mmap: None,
            // unwrap() can't fail: `validate_file` already returned Ok above.
            file_offset: range.file_offset.as_ref().unwrap().clone(),
            flags: range.flags.ok_or(Error::UnexpectedError)?,
            size: 0,
            index: 0,
            domid: range.mmap_data,
        };

        if mmap_flags.mmap_in_advance() {
            let (unix_mmap, index) = grant.mmap_range(
                range.addr,
                range.size,
                range.prot.ok_or(Error::UnexpectedError)?,
            )?;

            grant.unix_mmap = Some(unix_mmap);
            grant.index = index;
            grant.size = range.size;
        }

        Ok(grant)
    }

    /// Registers the grant references with the kernel, then mmaps the device
    /// at the index the ioctl returned.
    fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> {
        let (count, size) = pages(size);
        let index = self.mmap_ioctl(addr, count)?;
        let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?;

        Ok((unix_mmap, index))
    }

    /// Unmaps a previously mapped range: munmap first (via drop), then
    /// release the grant references.
    fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) {
        let (count, _) = pages(size);

        drop(unix_mmap);
        // NOTE(review): this runs from Drop impls; a failing unmap ioctl will
        // panic here. Confirm panicking (vs. logging) is the intended policy.
        self.unmap_ioctl(count as u32, index).unwrap();
    }

    fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result<u64> {
        // Strip the grant-address marker bit and convert to a frame number.
        let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32;
        let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?;
        let reference = wrapper.as_fam_struct_ref();

        // SAFETY: `reference` points into `wrapper`, which outlives the call.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) };

        if ret == 0 {
            // On success the kernel filled in the mmap index to use.
            Ok(reference.index)
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }

    fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> {
        let unmap = GntDevUnmapGrantRef::new(index, count);

        // SAFETY: `unmap` is a plain stack value that outlives the call.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) };

        if ret == 0 {
            Ok(())
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }
}
875
876impl MmapXenTrait for MmapXenGrant {
877 fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
879 MmapXenSlice::new_with(self.clone(), addr as usize, prot, len)
880 }
881
882 fn addr(&self) -> *mut u8 {
883 if let Some(ref unix_mmap) = self.unix_mmap {
884 unix_mmap.addr()
885 } else {
886 null_mut()
887 }
888 }
889}
890
891impl Drop for MmapXenGrant {
892 fn drop(&mut self) {
893 if let Some(unix_mmap) = self.unix_mmap.take() {
894 self.unmap_range(unix_mmap, self.size, self.index);
895 }
896 }
897}
898
/// An on-demand mapped window of a grant region, unmapped on drop.
///
/// `raw` slices (no grant involved) own nothing and drop is a no-op.
#[derive(Debug)]
pub(crate) struct MmapXenSlice {
    // Grant that performed the mapping; used again to unmap it.
    grant: Option<MmapXenGrant>,
    // The page-aligned mapping backing this slice.
    unix_mmap: Option<MmapUnix>,
    // Address of the requested data inside the mapping.
    addr: *mut u8,
    // Mapping size: in-page offset plus the requested length.
    size: usize,
    // gntdev index of the mapping, needed to unmap.
    index: u64,
}
907
impl MmapXenSlice {
    // Wraps an already-mapped address; nothing to unmap on drop.
    fn raw(addr: *mut u8) -> Self {
        Self {
            grant: None,
            unix_mmap: None,
            addr,
            size: 0,
            index: 0,
        }
    }

    /// Maps a page-aligned window covering `offset..offset + size` of the
    /// grant region and returns a slice whose address points at `offset`.
    fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result<Self> {
        let page_size = page_size() as usize;
        // Align the start of the mapping down to a page boundary...
        let page_base: usize = (offset / page_size) * page_size;
        // ...and remember how far into the first page the data starts.
        let offset = offset - page_base;
        let size = offset + size;

        let addr = grant.guest_base.0 + page_base as u64;
        let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?;

        // SAFETY: `offset` lies within the mapping (`size >= offset` above).
        let addr = unsafe { unix_mmap.addr().add(offset) };

        Ok(Self {
            grant: Some(grant),
            unix_mmap: Some(unix_mmap),
            addr,
            size,
            index,
        })
    }

    /// Address of the requested data within the mapped window.
    pub(crate) fn addr(&self) -> *mut u8 {
        self.addr
    }
}
945
impl Drop for MmapXenSlice {
    fn drop(&mut self) {
        // A mapping exists only for slices built via `new_with`, which also
        // guarantees `grant` is `Some` (so the unwrap below cannot fail).
        if let Some(unix_mmap) = self.unix_mmap.take() {
            self.grant
                .as_ref()
                .unwrap()
                .unmap_range(unix_mmap, self.size, self.index);
        }
    }
}
957
/// Top-level Xen mapping object: the validated mechanism flags, the
/// mechanism data (domid) and the trait object implementing the mechanism.
#[derive(Debug)]
pub struct MmapXen {
    // Validated flags the mapping was created with.
    xen_flags: MmapXenFlags,
    // Mechanism-specific data (domid), echoed back via `data()`.
    domid: u32,
    // The chosen mapping implementation (unix, foreign or grant).
    mmap: Box<dyn MmapXenTrait>,
}
964
965impl MmapXen {
966 fn new(range: &MmapRange) -> Result<Self> {
967 let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) {
968 Some(flags) => flags,
969 None => return Err(Error::MmapFlags(range.mmap_flags)),
970 };
971
972 if !xen_flags.is_valid() {
973 return Err(Error::MmapFlags(xen_flags.bits()));
974 }
975
976 Ok(Self {
977 xen_flags,
978 domid: range.mmap_data,
979 mmap: if xen_flags.is_foreign() {
980 Box::new(MmapXenForeign::new(range)?)
981 } else if xen_flags.is_grant() {
982 Box::new(MmapXenGrant::new(range, xen_flags)?)
983 } else {
984 Box::new(MmapXenUnix::new(range)?)
985 },
986 })
987 }
988
989 fn addr(&self) -> *mut u8 {
990 self.mmap.addr()
991 }
992
993 fn flags(&self) -> u32 {
994 self.xen_flags.bits()
995 }
996
997 fn data(&self) -> u32 {
998 self.domid
999 }
1000
1001 fn mmap_in_advance(&self) -> bool {
1002 self.xen_flags.mmap_in_advance()
1003 }
1004
1005 pub(crate) fn mmap(
1006 mmap_xen: Option<&Self>,
1007 addr: *mut u8,
1008 prot: i32,
1009 len: usize,
1010 ) -> MmapXenSlice {
1011 match mmap_xen {
1012 Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(),
1013 None => MmapXenSlice::raw(addr),
1014 }
1015 }
1016}
1017
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]

    use super::*;
    use matches::assert_matches;
    use vmm_sys_util::tempfile::TempFile;

    impl Error {
        // Raw OS error carried by `Error::Mmap`; `i32::MIN` sentinel otherwise.
        fn raw_os_error(&self) -> i32 {
            match self {
                Error::Mmap(e) => e.raw_os_error().unwrap(),
                _ => i32::MIN,
            }
        }
    }

    // Test double for the real `ioctl_with_ref`, selected by the
    // `#[cfg(test)]` import at the top of the file: always reports success
    // so the Xen mapping paths can be exercised without a Xen host.
    #[allow(unused_variables)]
    pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
        0
    }

    impl MmapRange {
        // A 4 KiB range at guest address 0x1000, RW, with mmap_data
        // (domid) set to 1; optionally backed by a temp file.
        fn initialized(is_file: bool) -> Self {
            let file_offset = if is_file {
                Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0))
            } else {
                None
            };

            let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000));
            range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
            range.mmap_data = 1;

            range
        }
    }

    impl MmapRegion {
        // Anonymous unix-region constructor used by crate tests.
        pub fn new(size: usize) -> Result<Self> {
            let range = MmapRange::new_unix(size, None, GuestAddress(0));
            Self::from_range(range)
        }
    }

    #[test]
    fn test_mmap_xen_failures() {
        let mut range = MmapRange::initialized(true);
        // 16 (0x10) is not a defined `MmapXenFlags` bit.
        range.mmap_flags = 16;

        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags);

        // FOREIGN and GRANT together are rejected by `is_valid`.
        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits());

        // FOREIGN must be mapped in advance, so NO_ADVANCE_MAP is rejected.
        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits());
    }

    #[test]
    fn test_mmap_xen_success() {
        let mut range = MmapRange::initialized(true);
        range.mmap_flags = MmapXenFlags::FOREIGN.bits();

        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        range.mmap_flags = MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        // On-demand grant mappings expose a null base address.
        range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_eq!(r.addr(), null_mut());
        assert!(!r.mmap_in_advance());
    }

    #[test]
    fn test_foreign_map_failure() {
        // Missing prot -> UnexpectedError.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0));
        range.prot = None;
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Missing flags -> UnexpectedError.
        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Non-zero file offset is rejected by `validate_file`.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength);

        // Zero-sized mmap fails at the mmap(2) level with EINVAL.
        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenForeign::new(&range);
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_foreign_map_success() {
        let range = MmapRange::initialized(true);
        let r = MmapXenForeign::new(&range).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    #[test]
    fn test_grant_map_failure() {
        // Missing prot only matters when mapping in advance.
        let mut range = MmapRange::initialized(true);
        range.prot = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Prot is not required for on-demand (NO_ADVANCE_MAP) mappings.
        let mut range = MmapRange::initialized(true);
        range.prot = None;
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        // Flags are required in all cases.
        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Non-zero file offset is rejected by `validate_file`.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength);

        // Zero-sized in-advance mapping fails at the mmap(2) level.
        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_grant_map_success() {
        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
        assert_eq!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);

        // Zero size is fine when not mapping in advance (no mmap happens).
        let mut range = MmapRange::initialized(true);
        range.size = 0;
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    #[test]
    fn test_grant_ref_alloc() {
        let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap();
        let r = wrapper.as_fam_struct_ref();
        assert_eq!(r.count, 0x100);
        assert_eq!(r.pad, 0);
        assert_eq!(r.index, 0);
    }
}