vm_memory/mmap/
xen.rs

1// Copyright 2023 Linaro Ltd. All Rights Reserved.
2//          Viresh Kumar <viresh.kumar@linaro.org>
3//
4// Xen specific memory mapping implementations
5//
6// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
7
8//! Helper structure for working with mmap'ed memory regions on Xen.
9
10use bitflags::bitflags;
11use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE};
12use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result};
13
14use vmm_sys_util::{
15    fam::{Error as FamError, FamStruct, FamStructWrapper},
16    generate_fam_struct_impl,
17    ioctl::{ioctl_expr, _IOC_NONE},
18};
19
20// Use a dummy ioctl implementation for tests instead.
21#[cfg(not(test))]
22use vmm_sys_util::ioctl::ioctl_with_ref;
23
24#[cfg(test)]
25use tests::ioctl_with_ref;
26
27use crate::bitmap::{Bitmap, NewBitmap, BS};
28use crate::guest_memory::{FileOffset, GuestAddress};
29use crate::volatile_memory::{self, VolatileMemory, VolatileSlice};
30
31/// Error conditions that may arise when creating a new `MmapRegion` object.
32#[derive(Debug, thiserror::Error)]
33pub enum Error {
34    /// The specified file offset and length cause overflow when added.
35    #[error("The specified file offset and length cause overflow when added")]
36    InvalidOffsetLength,
37    /// The forbidden `MAP_FIXED` flag was specified.
38    #[error("The forbidden `MAP_FIXED` flag was specified")]
39    MapFixed,
40    /// A mapping with offset + length > EOF was attempted.
41    #[error("The specified file offset and length is greater then file length")]
42    MappingPastEof,
43    /// The `mmap` call returned an error.
44    #[error("{0}")]
45    Mmap(io::Error),
46    /// Invalid file offset.
47    #[error("Invalid file offset")]
48    InvalidFileOffset,
49    /// Memory mapped in advance.
50    #[error("Memory mapped in advance")]
51    MappedInAdvance,
52    /// Invalid Xen mmap flags.
53    #[error("Invalid Xen Mmap flags: {0:x}")]
54    MmapFlags(u32),
55    /// Fam error.
56    #[error("Fam error: {0}")]
57    Fam(FamError),
58    /// Unexpected error.
59    #[error("Unexpected error")]
60    UnexpectedError,
61}
62
// Module-local `Result` alias, specialized over this module's `Error` type.
type Result<T> = result::Result<T, Error>;
64
/// `MmapRange` represents a range of arguments required to create Mmap regions.
#[derive(Clone, Debug)]
pub struct MmapRange {
    // Size of the mapping, in bytes.
    size: usize,
    // Optional backing file plus the offset within it where the mapping starts.
    file_offset: Option<FileOffset>,
    // `prot` argument for `mmap()`; a default is filled in by `MmapRegion::from_range()`.
    prot: Option<i32>,
    // `flags` argument for `mmap()`; a default is filled in by `MmapRegion::from_range()`.
    flags: Option<i32>,
    // Whether the mapping is backed by hugetlbfs (`None` when unknown).
    hugetlbfs: Option<bool>,
    // Guest physical address where the region starts.
    addr: GuestAddress,
    // Xen-specific mapping flags (see `MmapXenFlags`).
    mmap_flags: u32,
    // Xen-specific data; used as the domain id for foreign and grant mappings.
    mmap_data: u32,
}
77
78impl MmapRange {
79    /// Creates instance of the range with multiple arguments.
80    pub fn new(
81        size: usize,
82        file_offset: Option<FileOffset>,
83        addr: GuestAddress,
84        mmap_flags: u32,
85        mmap_data: u32,
86    ) -> Self {
87        Self {
88            size,
89            file_offset,
90            prot: None,
91            flags: None,
92            hugetlbfs: None,
93            addr,
94            mmap_flags,
95            mmap_data,
96        }
97    }
98
99    /// Creates instance of the range for `MmapXenFlags::UNIX` type mapping.
100    pub fn new_unix(size: usize, file_offset: Option<FileOffset>, addr: GuestAddress) -> Self {
101        let flags = Some(match file_offset {
102            Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED,
103            None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
104        });
105
106        Self {
107            size,
108            file_offset,
109            prot: None,
110            flags,
111            hugetlbfs: None,
112            addr,
113            mmap_flags: MmapXenFlags::UNIX.bits(),
114            mmap_data: 0,
115        }
116    }
117
118    /// Set the prot of the range.
119    pub fn set_prot(&mut self, prot: i32) {
120        self.prot = Some(prot)
121    }
122
123    /// Set the flags of the range.
124    pub fn set_flags(&mut self, flags: i32) {
125        self.flags = Some(flags)
126    }
127
128    /// Set the hugetlbfs of the range.
129    pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
130        self.hugetlbfs = Some(hugetlbfs)
131    }
132}
133
/// Helper structure for working with mmaped memory regions with Xen.
///
/// The structure is used for accessing the guest's physical memory by mmapping it into
/// the current process.
///
/// # Limitations
/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's
/// physical memory may be mapped into the current process due to the limited virtual address
/// space size of the process.
#[derive(Debug)]
pub struct MmapRegion<B = ()> {
    // Dirty-page tracking bitmap for the region.
    bitmap: B,
    // Size of the region, in bytes.
    size: usize,
    // `prot` value the region is mmapped with.
    prot: i32,
    // `flags` value the region is mmapped with.
    flags: i32,
    // Backing file and starting offset, when the mapping is file-backed.
    file_offset: Option<FileOffset>,
    // Whether the region is backed by hugetlbfs (`None` when unknown).
    hugetlbfs: Option<bool>,
    // Xen-specific mapping state (unix, foreign or grant flavor).
    mmap: MmapXen,
}
153
// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl<B: Send> Send for MmapRegion<B> {}
// SAFETY: See the Send comment above; the same reasoning applies to shared references.
unsafe impl<B: Sync> Sync for MmapRegion<B> {}
161
162impl<B: NewBitmap> MmapRegion<B> {
163    /// Creates a shared anonymous mapping of `size` bytes.
164    ///
165    /// # Arguments
166    /// * `range` - An instance of type `MmapRange`.
167    ///
168    /// # Examples
169    /// * Write a slice at guest address 0x1200 with Xen's Grant mapping.
170    ///
171    /// ```
172    /// use std::fs::File;
173    /// use std::path::Path;
174    /// use vm_memory::{
175    ///     Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
176    ///     MmapXenFlags,
177    /// };
178    /// # use vmm_sys_util::tempfile::TempFile;
179    ///
180    /// let addr = GuestAddress(0x1000);
181    /// # if false {
182    /// let file = Some(FileOffset::new(
183    ///     File::open(Path::new("/dev/xen/gntdev")).expect("Could not open file"),
184    ///     0,
185    /// ));
186    ///
187    /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::GRANT.bits(), 0);
188    /// # }
189    /// # // We need a UNIX mapping for tests to succeed.
190    /// # let range = MmapRange::new_unix(0x400, None, addr);
191    ///
192    /// let r = GuestRegionMmap::new(
193    ///     MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
194    ///     addr,
195    /// )
196    /// .expect("Could not create guest region");
197    ///
198    /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
199    /// let res = gm
200    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
201    ///     .expect("Could not write to guest memory");
202    /// assert_eq!(5, res);
203    /// ```
204    ///
205    /// * Write a slice at guest address 0x1200 with Xen's Foreign mapping.
206    ///
207    /// ```
208    /// use std::fs::File;
209    /// use std::path::Path;
210    /// use vm_memory::{
211    ///     Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion,
212    ///     MmapXenFlags,
213    /// };
214    /// # use vmm_sys_util::tempfile::TempFile;
215    ///
216    /// let addr = GuestAddress(0x1000);
217    /// # if false {
218    /// let file = Some(FileOffset::new(
219    ///     File::open(Path::new("/dev/xen/privcmd")).expect("Could not open file"),
220    ///     0,
221    /// ));
222    ///
223    /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::FOREIGN.bits(), 0);
224    /// # }
225    /// # // We need a UNIX mapping for tests to succeed.
226    /// # let range = MmapRange::new_unix(0x400, None, addr);
227    ///
228    /// let r = GuestRegionMmap::new(
229    ///     MmapRegion::<()>::from_range(range).expect("Could not create mmap region"),
230    ///     addr,
231    /// )
232    /// .expect("Could not create guest region");
233    ///
234    /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory");
235    /// let res = gm
236    ///     .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200))
237    ///     .expect("Could not write to guest memory");
238    /// assert_eq!(5, res);
239    /// ```
240    pub fn from_range(mut range: MmapRange) -> Result<Self> {
241        if range.prot.is_none() {
242            range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
243        }
244
245        match range.flags {
246            Some(flags) => {
247                if flags & libc::MAP_FIXED != 0 {
248                    // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous
249                    // in general.
250                    return Err(Error::MapFixed);
251                }
252            }
253            None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED),
254        }
255
256        let mmap = MmapXen::new(&range)?;
257
258        Ok(MmapRegion {
259            bitmap: B::with_len(range.size),
260            size: range.size,
261            prot: range.prot.ok_or(Error::UnexpectedError)?,
262            flags: range.flags.ok_or(Error::UnexpectedError)?,
263            file_offset: range.file_offset,
264            hugetlbfs: range.hugetlbfs,
265            mmap,
266        })
267    }
268}
269
270impl<B: Bitmap> MmapRegion<B> {
271    /// Returns a pointer to the beginning of the memory region. Mutable accesses performed
272    /// using the resulting pointer are not automatically accounted for by the dirty bitmap
273    /// tracking functionality.
274    ///
275    /// Should only be used for passing this region to ioctls for setting guest memory.
276    pub fn as_ptr(&self) -> *mut u8 {
277        self.mmap.addr()
278    }
279
280    /// Returns the size of this region.
281    pub fn size(&self) -> usize {
282        self.size
283    }
284
285    /// Returns information regarding the offset into the file backing this region (if any).
286    pub fn file_offset(&self) -> Option<&FileOffset> {
287        self.file_offset.as_ref()
288    }
289
290    /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region.
291    pub fn prot(&self) -> i32 {
292        self.prot
293    }
294
295    /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region.
296    pub fn flags(&self) -> i32 {
297        self.flags
298    }
299
300    /// Checks whether this region and `other` are backed by overlapping
301    /// [`FileOffset`](struct.FileOffset.html) objects.
302    ///
303    /// This is mostly a sanity check available for convenience, as different file descriptors
304    /// can alias the same file.
305    pub fn fds_overlap<T: Bitmap>(&self, other: &MmapRegion<T>) -> bool {
306        if let Some(f_off1) = self.file_offset() {
307            if let Some(f_off2) = other.file_offset() {
308                if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() {
309                    let s1 = f_off1.start();
310                    let s2 = f_off2.start();
311                    let l1 = self.len() as u64;
312                    let l2 = other.len() as u64;
313
314                    if s1 < s2 {
315                        return s1 + l1 > s2;
316                    } else {
317                        return s2 + l2 > s1;
318                    }
319                }
320            }
321        }
322        false
323    }
324
325    /// Set the hugetlbfs of the region
326    pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) {
327        self.hugetlbfs = Some(hugetlbfs)
328    }
329
330    /// Returns `true` if the region is hugetlbfs
331    pub fn is_hugetlbfs(&self) -> Option<bool> {
332        self.hugetlbfs
333    }
334
335    /// Returns a reference to the inner bitmap object.
336    pub fn bitmap(&self) -> &B {
337        &self.bitmap
338    }
339
340    /// Returns xen mmap flags.
341    pub fn xen_mmap_flags(&self) -> u32 {
342        self.mmap.flags()
343    }
344
345    /// Returns xen mmap data.
346    pub fn xen_mmap_data(&self) -> u32 {
347        self.mmap.data()
348    }
349}
350
351impl<B: Bitmap> VolatileMemory for MmapRegion<B> {
352    type B = B;
353
354    fn len(&self) -> usize {
355        self.size
356    }
357
358    fn get_slice(
359        &self,
360        offset: usize,
361        count: usize,
362    ) -> volatile_memory::Result<VolatileSlice<BS<B>>> {
363        let _ = self.compute_end_offset(offset, count)?;
364
365        let mmap_info = if self.mmap.mmap_in_advance() {
366            None
367        } else {
368            Some(&self.mmap)
369        };
370
371        Ok(
372            // SAFETY: Safe because we checked that offset + count was within our range and we only
373            // ever hand out volatile accessors.
374            unsafe {
375                VolatileSlice::with_bitmap(
376                    self.as_ptr().add(offset),
377                    count,
378                    self.bitmap.slice_at(offset),
379                    mmap_info,
380                )
381            },
382        )
383    }
384}
385
// A plain `mmap`-ed range, unmapped on drop.
#[derive(Clone, Debug, PartialEq)]
struct MmapUnix {
    // Host virtual address returned by `mmap`.
    addr: *mut u8,
    // Length of the mapping, in bytes.
    size: usize,
}
391
392impl MmapUnix {
393    fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result<Self> {
394        let addr =
395        // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters
396        // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or
397        // some wacky file).
398            unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) };
399
400        if addr == libc::MAP_FAILED {
401            return Err(Error::Mmap(io::Error::last_os_error()));
402        }
403
404        Ok(Self {
405            addr: addr as *mut u8,
406            size,
407        })
408    }
409
410    fn addr(&self) -> *mut u8 {
411        self.addr
412    }
413}
414
impl Drop for MmapUnix {
    // Unmaps the range automatically when the instance goes out of scope.
    fn drop(&mut self) {
        // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody
        // else is holding a reference to it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}
424
// Bit mask for the vhost-user xen mmap message.
bitflags! {
    /// Flags for the Xen mmap message.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
    pub struct MmapXenFlags: u32 {
        /// Standard Unix memory mapping.
        const UNIX = 0x0;
        /// Xen foreign memory (accessed via /dev/privcmd).
        const FOREIGN = 0x1;
        /// Xen grant memory (accessed via /dev/gntdev).
        const GRANT = 0x2;
        /// Xen no advance mapping: don't map the whole region up front, map pages on
        /// demand instead (only meaningful together with `GRANT`).
        const NO_ADVANCE_MAP = 0x8;
        /// All valid mappings.
        const ALL = Self::FOREIGN.bits() | Self::GRANT.bits();
    }
}
442
443impl MmapXenFlags {
444    /// Mmap flags are valid.
445    pub fn is_valid(&self) -> bool {
446        // only one of unix, foreign or grant should be set and mmap_in_advance() should be true
447        // with foreign and unix.
448        if self.is_grant() {
449            !self.is_foreign()
450        } else if self.is_foreign() || self.is_unix() {
451            self.mmap_in_advance()
452        } else {
453            false
454        }
455    }
456
457    /// Is standard Unix memory.
458    pub fn is_unix(&self) -> bool {
459        self.bits() == Self::UNIX.bits()
460    }
461
462    /// Is xen foreign memory.
463    pub fn is_foreign(&self) -> bool {
464        self.contains(Self::FOREIGN)
465    }
466
467    /// Is xen grant memory.
468    pub fn is_grant(&self) -> bool {
469        self.contains(Self::GRANT)
470    }
471
472    /// Can mmap entire region in advance.
473    pub fn mmap_in_advance(&self) -> bool {
474        !self.contains(Self::NO_ADVANCE_MAP)
475    }
476}
477
// Returns the system page size in bytes, via `sysconf(_SC_PAGESIZE)`.
fn page_size() -> u64 {
    // SAFETY: Safe because this call just returns the page size and doesn't have any side effects.
    unsafe { libc::sysconf(_SC_PAGESIZE) as u64 }
}
482
483fn pages(size: usize) -> (usize, usize) {
484    let page_size = page_size() as usize;
485    let num = size.div_ceil(page_size);
486
487    (num, page_size * num)
488}
489
490fn validate_file(file_offset: &Option<FileOffset>) -> Result<(i32, u64)> {
491    let file_offset = match file_offset {
492        Some(f) => f,
493        None => return Err(Error::InvalidFileOffset),
494    };
495
496    let fd = file_offset.file().as_raw_fd();
497    let f_offset = file_offset.start();
498
499    // We don't allow file offsets with Xen foreign mappings.
500    if f_offset != 0 {
501        return Err(Error::InvalidOffsetLength);
502    }
503
504    Ok((fd, f_offset))
505}
506
// Common interface implemented by all Xen memory mapping flavors (unix, foreign, grant).
trait MmapXenTrait: std::fmt::Debug {
    // Maps a slice of the region on demand (only supported by flavors that are not mapped
    // in advance; others return `Error::MappedInAdvance`).
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice>;
    // Host virtual address of the fully mapped region (null when not mapped in advance).
    fn addr(&self) -> *mut u8;
}
512
// Standard Unix memory mapping for testing other crates.
// Newtype around `MmapUnix`; always mapped in advance.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenUnix(MmapUnix);
516
517impl MmapXenUnix {
518    fn new(range: &MmapRange) -> Result<Self> {
519        let (fd, offset) = if let Some(ref f_off) = range.file_offset {
520            (f_off.file().as_raw_fd(), f_off.start())
521        } else {
522            (-1, 0)
523        };
524
525        Ok(Self(MmapUnix::new(
526            range.size,
527            range.prot.ok_or(Error::UnexpectedError)?,
528            range.flags.ok_or(Error::UnexpectedError)?,
529            fd,
530            offset,
531        )?))
532    }
533}
534
impl MmapXenTrait for MmapXenUnix {
    // Unix mappings are always mapped in advance; on-demand slice mapping isn't supported.
    #[allow(unused_variables)]
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
        Err(Error::MappedInAdvance)
    }

    fn addr(&self) -> *mut u8 {
        self.0.addr()
    }
}
545
// Privcmd mmap batch v2 command
//
// Mirrors the kernel ABI layout, so field order and types must not change.
// include/uapi/xen/privcmd.h: `privcmd_mmapbatch_v2`
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct PrivCmdMmapBatchV2 {
    // number of pages to populate
    num: u32,
    // target domain
    domid: u16,
    // virtual address
    addr: *mut c_void,
    // array of mfns
    arr: *const u64,
    // array of error codes
    err: *mut c_int,
}
563
// Ioctl type ('magic') byte used by the Xen privcmd driver.
const XEN_PRIVCMD_TYPE: u32 = 'P' as u32;

// Builds the privcmd mmap-batch ioctl request number.
// #define IOCTL_PRIVCMD_MMAPBATCH_V2 _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t))
fn ioctl_privcmd_mmapbatch_v2() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_PRIVCMD_TYPE,
        4,
        size_of::<PrivCmdMmapBatchV2>() as u32,
    )
}
575
// Xen foreign memory specific implementation.
#[derive(Clone, Debug, PartialEq)]
struct MmapXenForeign {
    // Domain whose memory is being mapped.
    domid: u32,
    // Guest physical address where the region starts.
    guest_base: GuestAddress,
    // The underlying mmap of the privcmd fd; unmapped on drop.
    unix_mmap: MmapUnix,
    // Raw fd of the privcmd device the region is backed by.
    fd: i32,
}

impl AsRawFd for MmapXenForeign {
    fn as_raw_fd(&self) -> i32 {
        self.fd
    }
}
590
591impl MmapXenForeign {
592    fn new(range: &MmapRange) -> Result<Self> {
593        let (fd, f_offset) = validate_file(&range.file_offset)?;
594        let (count, size) = pages(range.size);
595
596        let unix_mmap = MmapUnix::new(
597            size,
598            range.prot.ok_or(Error::UnexpectedError)?,
599            range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED,
600            fd,
601            f_offset,
602        )?;
603
604        let foreign = Self {
605            domid: range.mmap_data,
606            guest_base: range.addr,
607            unix_mmap,
608            fd,
609        };
610
611        foreign.mmap_ioctl(count)?;
612        Ok(foreign)
613    }
614
615    // Ioctl to pass additional information to mmap infrastructure of privcmd driver.
616    fn mmap_ioctl(&self, count: usize) -> Result<()> {
617        let base = self.guest_base.0 / page_size();
618
619        let mut pfn = Vec::with_capacity(count);
620        for i in 0..count {
621            pfn.push(base + i as u64);
622        }
623
624        let mut err: Vec<c_int> = vec![0; count];
625
626        let map = PrivCmdMmapBatchV2 {
627            num: count as u32,
628            domid: self.domid as u16,
629            addr: self.addr() as *mut c_void,
630            arr: pfn.as_ptr(),
631            err: err.as_mut_ptr(),
632        };
633
634        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `map`.
635        let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) };
636
637        if ret == 0 {
638            Ok(())
639        } else {
640            Err(Error::Mmap(io::Error::last_os_error()))
641        }
642    }
643}
644
impl MmapXenTrait for MmapXenForeign {
    // Foreign mappings are always mapped in advance; on-demand slice mapping isn't supported.
    #[allow(unused_variables)]
    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
        Err(Error::MappedInAdvance)
    }

    fn addr(&self) -> *mut u8 {
        self.unix_mmap.addr()
    }
}
655
// Xen Grant memory mapping interface.

// Marker bit set on guest addresses that refer to grant memory; it must be stripped
// before converting an address to a frame number (see `MmapXenGrant::mmap_ioctl`).
const XEN_GRANT_ADDR_OFF: u64 = 1 << 63;

// Grant reference
//
// Mirrors the kernel ABI layout, so field order and types must not change.
// include/uapi/xen/gntdev.h: `ioctl_gntdev_grant_ref`
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, PartialEq)]
struct GntDevGrantRef {
    // The domain ID of the grant to be mapped.
    domid: u32,
    // The grant reference of the grant to be mapped.
    reference: u32,
}
671
// Bindgen-style representation of a C flexible array member: zero-sized itself, typed so
// the trailing array can be reached through raw pointers.
#[repr(C)]
#[derive(Debug, Default, PartialEq, Eq)]
struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
impl<T> __IncompleteArrayField<T> {
    // Callers must ensure `self` is actually followed by storage for the elements accessed.
    #[inline]
    unsafe fn as_ptr(&self) -> *const T {
        self as *const __IncompleteArrayField<T> as *const T
    }
    #[inline]
    unsafe fn as_mut_ptr(&mut self) -> *mut T {
        self as *mut __IncompleteArrayField<T> as *mut T
    }
    // Callers must ensure at least `len` initialized elements follow `self`.
    #[inline]
    unsafe fn as_slice(&self, len: usize) -> &[T] {
        ::std::slice::from_raw_parts(self.as_ptr(), len)
    }
    #[inline]
    unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
        ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
    }
}
693
// Grant dev mapping reference
//
// Mirrors the kernel ABI layout, so field order and types must not change.
// include/uapi/xen/gntdev.h: `ioctl_gntdev_map_grant_ref`
#[repr(C)]
#[derive(Debug, Default)]
struct GntDevMapGrantRef {
    // The number of grants to be mapped.
    count: u32,
    // Unused padding
    pad: u32,
    // The offset to be used on a subsequent call to mmap().
    index: u64,
    // Array of grant references, of size @count.
    refs: __IncompleteArrayField<GntDevGrantRef>,
}

// Generates the flexible-array-member accessors (`as_mut_slice()` etc.) used below.
generate_fam_struct_impl!(
    GntDevMapGrantRef,
    GntDevGrantRef,
    refs,
    u32,
    count,
    usize::MAX
);

// Safe, heap-allocating wrapper around the FAM struct.
type GntDevMapGrantRefWrapper = FamStructWrapper<GntDevMapGrantRef>;
720
721impl GntDevMapGrantRef {
722    fn new(domid: u32, base: u32, count: usize) -> Result<GntDevMapGrantRefWrapper> {
723        let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?;
724        let refs = wrapper.as_mut_slice();
725
726        // GntDevMapGrantRef's pad and index are initialized to 0 by Fam layer.
727        for (i, r) in refs.iter_mut().enumerate().take(count) {
728            r.domid = domid;
729            r.reference = base + i as u32;
730        }
731
732        Ok(wrapper)
733    }
734}
735
// Grant dev un-mapping reference
//
// Mirrors the kernel ABI layout, so field order and types must not change.
// include/uapi/xen/gntdev.h: `ioctl_gntdev_unmap_grant_ref`
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct GntDevUnmapGrantRef {
    // The offset returned by the map operation.
    index: u64,
    // The number of grants to be unmapped.
    count: u32,
    // Unused padding
    pad: u32,
}
749
impl GntDevUnmapGrantRef {
    // Builds an unmap request for `count` grants previously mapped at `index`.
    fn new(index: u64, count: u32) -> Self {
        Self {
            index,
            count,
            pad: 0,
        }
    }
}
759
// Ioctl type ('magic') byte used by the Xen gntdev driver.
const XEN_GNTDEV_TYPE: u32 = 'G' as u32;

// Builds the gntdev map-grant ioctl request number.
// #define IOCTL_GNTDEV_MAP_GRANT_REF _IOC(_IOC_NONE, 'G', 0, sizeof(ioctl_gntdev_map_grant_ref))
fn ioctl_gntdev_map_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        0,
        // The kernel sizes the ioctl as the header plus one grant-ref element.
        (size_of::<GntDevMapGrantRef>() + size_of::<GntDevGrantRef>()) as u32,
    )
}

// Builds the gntdev unmap-grant ioctl request number.
// #define IOCTL_GNTDEV_UNMAP_GRANT_REF _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
fn ioctl_gntdev_unmap_grant_ref() -> c_ulong {
    ioctl_expr(
        _IOC_NONE,
        XEN_GNTDEV_TYPE,
        1,
        size_of::<GntDevUnmapGrantRef>() as u32,
    )
}
781
// Xen grant memory specific implementation.
#[derive(Clone, Debug)]
struct MmapXenGrant {
    // Guest physical address where the region starts.
    guest_base: GuestAddress,
    // The region-wide mapping; `None` when the region is mapped on demand instead.
    unix_mmap: Option<MmapUnix>,
    // The gntdev device backing the mapping (offset validated to be 0).
    file_offset: FileOffset,
    // `flags` used for the underlying `mmap` calls.
    flags: i32,
    // Size of the advance mapping in bytes (0 when mapped on demand).
    size: usize,
    // gntdev index returned by the map ioctl; used as the mmap offset and for unmapping.
    index: u64,
    // Domain ID the grants belong to.
    domid: u32,
}
793
impl AsRawFd for MmapXenGrant {
    // Raw fd of the backing gntdev device.
    fn as_raw_fd(&self) -> i32 {
        self.file_offset.file().as_raw_fd()
    }
}
799
impl MmapXenGrant {
    fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result<Self> {
        validate_file(&range.file_offset)?;

        let mut grant = Self {
            guest_base: range.addr,
            unix_mmap: None,
            // Unwrap can't fail: validate_file() above errors out when file_offset is None.
            file_offset: range.file_offset.as_ref().unwrap().clone(),
            flags: range.flags.ok_or(Error::UnexpectedError)?,
            size: 0,
            index: 0,
            domid: range.mmap_data,
        };

        // Map the whole region up front when allowed; otherwise the region can't be mapped
        // in advance and partial mapping will be done later via `MmapXenSlice`.
        if mmap_flags.mmap_in_advance() {
            let (unix_mmap, index) = grant.mmap_range(
                range.addr,
                range.size,
                range.prot.ok_or(Error::UnexpectedError)?,
            )?;

            grant.unix_mmap = Some(unix_mmap);
            grant.index = index;
            grant.size = range.size;
        }

        Ok(grant)
    }

    // Maps `size` bytes at guest address `addr`: first obtains a gntdev index via ioctl,
    // then mmaps the gntdev fd at that index.
    fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> {
        let (count, size) = pages(size);
        let index = self.mmap_ioctl(addr, count)?;
        let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?;

        Ok((unix_mmap, index))
    }

    fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) {
        let (count, _) = pages(size);

        // Unmap the address first.
        drop(unix_mmap);
        // Called from Drop paths where errors can't be returned; panics if the ioctl fails.
        self.unmap_ioctl(count as u32, index).unwrap();
    }

    fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result<u64> {
        // Strip the grant address marker bit and convert to a page (frame) number.
        let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32;
        let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?;
        let reference = wrapper.as_fam_struct_ref();

        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond reference.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) };

        if ret == 0 {
            // The driver fills in `index`, the offset to use for the subsequent mmap.
            Ok(reference.index)
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }

    fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> {
        let unmap = GntDevUnmapGrantRef::new(index, count);

        // SAFETY: This is safe because the ioctl guarantees to not access memory beyond unmap.
        let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) };

        if ret == 0 {
            Ok(())
        } else {
            Err(Error::Mmap(io::Error::last_os_error()))
        }
    }
}
875
876impl MmapXenTrait for MmapXenGrant {
877    // Maps a slice out of the entire region.
878    fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result<MmapXenSlice> {
879        MmapXenSlice::new_with(self.clone(), addr as usize, prot, len)
880    }
881
882    fn addr(&self) -> *mut u8 {
883        if let Some(ref unix_mmap) = self.unix_mmap {
884            unix_mmap.addr()
885        } else {
886            null_mut()
887        }
888    }
889}
890
impl Drop for MmapXenGrant {
    // Unmaps and releases the grants of the advance mapping, if any.
    fn drop(&mut self) {
        if let Some(unix_mmap) = self.unix_mmap.take() {
            self.unmap_range(unix_mmap, self.size, self.index);
        }
    }
}
898
// A (possibly on-demand) mapping of a sub-range of a region; unmapped on drop.
#[derive(Debug)]
pub(crate) struct MmapXenSlice {
    // Grant mapping that owns the pages; `None` for raw (already mapped) addresses.
    grant: Option<MmapXenGrant>,
    // The underlying mmap of the slice; `None` for raw addresses.
    unix_mmap: Option<MmapUnix>,
    // Host virtual address of the requested offset within the slice.
    addr: *mut u8,
    // Page-rounded size of the mapping in bytes (0 for raw addresses).
    size: usize,
    // gntdev index used to unmap the grants on drop.
    index: u64,
}
907
908impl MmapXenSlice {
909    fn raw(addr: *mut u8) -> Self {
910        Self {
911            grant: None,
912            unix_mmap: None,
913            addr,
914            size: 0,
915            index: 0,
916        }
917    }
918
919    fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result<Self> {
920        let page_size = page_size() as usize;
921        let page_base: usize = (offset / page_size) * page_size;
922        let offset = offset - page_base;
923        let size = offset + size;
924
925        let addr = grant.guest_base.0 + page_base as u64;
926        let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?;
927
928        // SAFETY: We have already mapped the range including offset.
929        let addr = unsafe { unix_mmap.addr().add(offset) };
930
931        Ok(Self {
932            grant: Some(grant),
933            unix_mmap: Some(unix_mmap),
934            addr,
935            size,
936            index,
937        })
938    }
939
940    // Mapped address for the region.
941    pub(crate) fn addr(&self) -> *mut u8 {
942        self.addr
943    }
944}
945
impl Drop for MmapXenSlice {
    fn drop(&mut self) {
        // Unmaps memory automatically once this instance goes out of scope.
        // `grant` is always `Some` whenever `unix_mmap` is (see `new_with`), so the
        // unwrap can't fail here.
        if let Some(unix_mmap) = self.unix_mmap.take() {
            self.grant
                .as_ref()
                .unwrap()
                .unmap_range(unix_mmap, self.size, self.index);
        }
    }
}
957
/// Xen-specific mapping state for a region: the flags it was created with, the domain id,
/// and the flavor-specific (unix/foreign/grant) mapping implementation.
#[derive(Debug)]
pub struct MmapXen {
    // Validated Xen mapping flags for the region.
    xen_flags: MmapXenFlags,
    // Domain ID used for foreign/grant mappings.
    domid: u32,
    // Flavor-specific mapping implementation.
    mmap: Box<dyn MmapXenTrait>,
}
964
965impl MmapXen {
966    fn new(range: &MmapRange) -> Result<Self> {
967        let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) {
968            Some(flags) => flags,
969            None => return Err(Error::MmapFlags(range.mmap_flags)),
970        };
971
972        if !xen_flags.is_valid() {
973            return Err(Error::MmapFlags(xen_flags.bits()));
974        }
975
976        Ok(Self {
977            xen_flags,
978            domid: range.mmap_data,
979            mmap: if xen_flags.is_foreign() {
980                Box::new(MmapXenForeign::new(range)?)
981            } else if xen_flags.is_grant() {
982                Box::new(MmapXenGrant::new(range, xen_flags)?)
983            } else {
984                Box::new(MmapXenUnix::new(range)?)
985            },
986        })
987    }
988
989    fn addr(&self) -> *mut u8 {
990        self.mmap.addr()
991    }
992
993    fn flags(&self) -> u32 {
994        self.xen_flags.bits()
995    }
996
997    fn data(&self) -> u32 {
998        self.domid
999    }
1000
1001    fn mmap_in_advance(&self) -> bool {
1002        self.xen_flags.mmap_in_advance()
1003    }
1004
1005    pub(crate) fn mmap(
1006        mmap_xen: Option<&Self>,
1007        addr: *mut u8,
1008        prot: i32,
1009        len: usize,
1010    ) -> MmapXenSlice {
1011        match mmap_xen {
1012            Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(),
1013            None => MmapXenSlice::raw(addr),
1014        }
1015    }
1016}
1017
#[cfg(test)]
mod tests {
    #![allow(clippy::undocumented_unsafe_blocks)]

    use super::*;
    use matches::assert_matches;
    use vmm_sys_util::tempfile::TempFile;

    // Adding a helper method to extract the errno within an Error::Mmap(e), or return a
    // distinctive value when the error is represented by another variant.
    impl Error {
        fn raw_os_error(&self) -> i32 {
            match self {
                Error::Mmap(e) => e.raw_os_error().unwrap(),
                // `i32::MIN` is not a valid errno, so it can never collide
                // with a real mmap failure code.
                _ => i32::MIN,
            }
        }
    }

    // Dummy ioctl that always reports success; test builds import this in
    // place of `vmm_sys_util::ioctl::ioctl_with_ref` so no real Xen device
    // node is required to run the tests.
    #[allow(unused_variables)]
    pub unsafe fn ioctl_with_ref<F: AsRawFd, T>(fd: &F, req: c_ulong, arg: &T) -> c_int {
        0
    }

    impl MmapRange {
        // Builds a 4 KiB test range at guest address 0x1000 with read/write
        // protection, optionally backed by a fresh temporary file.
        fn initialized(is_file: bool) -> Self {
            let file_offset = if is_file {
                Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0))
            } else {
                None
            };

            let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000));
            range.prot = Some(libc::PROT_READ | libc::PROT_WRITE);
            // Interpreted as the domain id by the Xen mapping code.
            range.mmap_data = 1;

            range
        }
    }

    impl MmapRegion {
        /// Create an `MmapRegion` with specified `size` at GuestAddress(0)
        pub fn new(size: usize) -> Result<Self> {
            let range = MmapRange::new_unix(size, None, GuestAddress(0));
            Self::from_range(range)
        }
    }

    // Undefined or contradictory flag combinations must be rejected with
    // `Error::MmapFlags` carrying the offending bits.
    #[test]
    fn test_mmap_xen_failures() {
        let mut range = MmapRange::initialized(true);
        // Invalid flags
        range.mmap_flags = 16;

        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags);

        // FOREIGN and GRANT are mutually exclusive.
        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits());

        // FOREIGN combined with NO_ADVANCE_MAP is rejected as well.
        range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range);
        assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags ==  MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits());
    }

    // Valid flag combinations: flags/data round-trip through the accessors,
    // and the address is non-null exactly when mapping happens in advance.
    #[test]
    fn test_mmap_xen_success() {
        let mut range = MmapRange::initialized(true);
        range.mmap_flags = MmapXenFlags::FOREIGN.bits();

        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        range.mmap_flags = MmapXenFlags::GRANT.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_ne!(r.addr(), null_mut());
        assert!(r.mmap_in_advance());

        // With NO_ADVANCE_MAP nothing is mapped yet, so the address is null.
        range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits();
        let r = MmapXen::new(&range).unwrap();
        assert_eq!(r.flags(), range.mmap_flags);
        assert_eq!(r.data(), range.mmap_data);
        assert_eq!(r.addr(), null_mut());
        assert!(!r.mmap_in_advance());
    }

    #[test]
    fn test_foreign_map_failure() {
        // Missing protection flags.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0));
        range.prot = None;
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Missing mmap flags.
        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Non-zero file offset is rejected for foreign mappings.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenForeign::new(&range);
        assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength);

        // Zero-sized mapping: the underlying mmap fails with EINVAL.
        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenForeign::new(&range);
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_foreign_map_success() {
        let range = MmapRange::initialized(true);
        let r = MmapXenForeign::new(&range).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    #[test]
    fn test_grant_map_failure() {
        // Missing protection flags fail an in-advance grant mapping...
        let mut range = MmapRange::initialized(true);
        range.prot = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        let mut range = MmapRange::initialized(true);
        range.prot = None;
        // Protection isn't used for no-advance mappings
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        // ...but missing mmap flags fail even a no-advance mapping.
        let mut range = MmapRange::initialized(true);
        range.flags = None;
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_matches!(r.unwrap_err(), Error::UnexpectedError);

        // Non-zero file offset is rejected for grant mappings.
        let mut range = MmapRange::initialized(true);
        range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1));
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP);
        assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength);

        // Zero-sized in-advance mapping: mmap fails with EINVAL.
        let mut range = MmapRange::initialized(true);
        range.size = 0;
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty());
        assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL);
    }

    #[test]
    fn test_grant_map_success() {
        // No-advance mapping: nothing mapped yet, so the address is null.
        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();
        assert_eq!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);

        let mut range = MmapRange::initialized(true);
        // Size isn't used with no-advance mapping.
        range.size = 0;
        MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap();

        // In-advance mapping: the region is mapped immediately.
        let range = MmapRange::initialized(true);
        let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap();
        assert_ne!(r.addr(), null_mut());
        assert_eq!(r.domid, range.mmap_data);
        assert_eq!(r.guest_base, range.addr);
    }

    // FAM-struct allocation for grant refs: header fields are initialized
    // from the constructor arguments, padding is zeroed.
    #[test]
    fn test_grant_ref_alloc() {
        let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap();
        let r = wrapper.as_fam_struct_ref();
        assert_eq!(r.count, 0x100);
        assert_eq!(r.pad, 0);
        assert_eq!(r.index, 0);
    }
}