// mshv_ioctls/ioctls/vm.rs
1// Copyright © 2020, Microsoft Corporation
2//
3// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
4//
5use crate::ioctls::device::{new_device, DeviceFd};
6use crate::ioctls::vcpu::{new_vcpu, VcpuFd};
7use crate::ioctls::{MshvError, Result};
8use crate::mshv_ioctls::*;
9use crate::set_bits;
10use mshv_bindings::*;
11
12use std::cmp;
13use std::convert::TryFrom;
14use std::fs::File;
15
16use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
17use vmm_sys_util::errno;
18use vmm_sys_util::eventfd::EventFd;
19use vmm_sys_util::ioctl::{ioctl, ioctl_with_mut_ref, ioctl_with_ref};
20
/// Batch size for processing page access states: the maximum number of guest
/// pages queried per `get_gpap_access_bitmap` call when walking a large range
/// in `get_dirty_log`.
const PAGE_ACCESS_STATES_BATCH_SIZE: u64 = 0x10000;
23
/// An address either in programmable I/O space or in memory mapped I/O space.
///
/// The `IoEventAddress` is used for specifying the type when registering an event
/// in [register_ioevent](struct.VmFd.html#method.register_ioevent).
#[derive(Eq, PartialEq, Hash, Clone, Debug, Copy)]
pub enum IoEventAddress {
    /// Representation of a programmable I/O address.
    Pio(u64),
    /// Representation of a memory mapped I/O address.
    Mmio(u64),
}
35
/// VMType represents the type of VM.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Copy)]
pub enum VmType {
    /// Normal VM with no support for confidential computing
    Normal,
    /// AMD's SEV-SNP
    Snp,
}

impl TryFrom<u64> for VmType {
    type Error = ();

    /// Map a raw discriminant back to a `VmType`; any value that does not
    /// correspond to a known variant yields `Err(())`.
    fn try_from(v: u64) -> std::result::Result<Self, Self::Error> {
        if v == VmType::Normal as u64 {
            Ok(VmType::Normal)
        } else if v == VmType::Snp as u64 {
            Ok(VmType::Snp)
        } else {
            Err(())
        }
    }
}
56
/// Helper structure for disabling datamatch.
///
/// Pass this zero-sized type as the `datamatch` argument of
/// [`register_ioevent`](struct.VmFd.html#method.register_ioevent)
/// to disable filtering of events based on the datamatch flag.
#[derive(Debug)]
pub struct NoDatamatch;

impl From<NoDatamatch> for u64 {
    /// A `NoDatamatch` always converts to zero.
    fn from(_: NoDatamatch) -> u64 {
        0
    }
}
70
/// Structure for injecting an interrupt.
///
/// This struct is passed to the request_virtual_interrupt function as an argument.
#[derive(Debug)]
pub struct InterruptRequest {
    /// Type of interrupt
    pub interrupt_type: hv_interrupt_type,
    /// Advanced Programmable Interrupt Controller Identification Number
    pub apic_id: u64,
    /// APIC Vector (entry of Interrupt Vector Table i.e IVT)
    pub vector: u32,
    /// True means level triggered, false means edge triggered
    pub level_triggered: bool,
    /// True means the APIC ID is logical, false means physical
    pub logical_destination_mode: bool,
    /// True means CPU is in long mode
    pub long_mode: bool,
}
89
/// Wrapper over Mshv VM ioctls.
///
/// Owns the VM file descriptor; ioctls are issued against the wrapped `File`.
#[derive(Debug)]
pub struct VmFd {
    vm: File,
}
95
impl AsRawFd for VmFd {
    /// Expose the raw VM file descriptor (ownership stays with `VmFd`).
    fn as_raw_fd(&self) -> RawFd {
        self.vm.as_raw_fd()
    }
}
101
102impl VmFd {
103    /// Initialize the partition after creation
104    pub fn initialize(&self) -> Result<()> {
105        // SAFETY: IOCTL with correct types
106        let ret = unsafe { ioctl(self, MSHV_INITIALIZE_PARTITION()) };
107        if ret == 0 {
108            Ok(())
109        } else {
110            Err(errno::Error::last().into())
111        }
112    }
113
    /// Install intercept to enable some VM exits like MSR, CPUId etc.
    ///
    /// Thin wrapper that unpacks the ioctl-style argument struct and
    /// forwards to the generic hypercall helper.
    pub fn install_intercept(&self, install_intercept_args: mshv_install_intercept) -> Result<()> {
        self.hvcall_install_intercept(
            install_intercept_args.access_type_mask,
            install_intercept_args.intercept_type,
            install_intercept_args.intercept_parameter,
        )
    }
122
    /// Generic hvcall version of install_intercept.
    ///
    /// Builds the `HVCALL_INSTALL_INTERCEPT` input from the supplied access
    /// mask, intercept type and parameter, then issues it via `hvcall`.
    fn hvcall_install_intercept(
        &self,
        access_type_mask: u32,
        intercept_type: u32,
        intercept_param: hv_intercept_parameters,
    ) -> Result<()> {
        let input = hv_input_install_intercept {
            access_type: access_type_mask,
            intercept_type,
            intercept_parameter: intercept_param,
            ..Default::default() // NOTE: Kernel will populate partition_id field
        };

        let mut args = make_args!(HVCALL_INSTALL_INTERCEPT, input);
        self.hvcall(&mut args)
    }
140
141    /// Modify host visibility for a range of GPA
142    pub fn modify_gpa_host_access(
143        &self,
144        gpa_host_access_args: &mshv_modify_gpa_host_access,
145    ) -> Result<()> {
146        // SAFETY: IOCTL with correct types
147        let ret =
148            unsafe { ioctl_with_ref(self, MSHV_MODIFY_GPA_HOST_ACCESS(), gpa_host_access_args) };
149        if ret == 0 {
150            Ok(())
151        } else {
152            Err(errno::Error::last().into())
153        }
154    }
155
156    /// Import the isolated pages
157    pub fn import_isolated_pages(
158        &self,
159        isolate_page_list: &mshv_import_isolated_pages,
160    ) -> Result<()> {
161        // SAFETY: IOCTL with correct types
162        let ret = unsafe { ioctl_with_ref(self, MSHV_IMPORT_ISOLATED_PAGES(), isolate_page_list) };
163        if ret == 0 {
164            Ok(())
165        } else {
166            Err(errno::Error::last().into())
167        }
168    }
169
170    /// Mark completion of importing the isoalted pages
171    pub fn complete_isolated_import(&self, data: &mshv_complete_isolated_import) -> Result<()> {
172        // SAFETY: IOCTL with correct types
173        let ret = unsafe { ioctl_with_ref(self, MSHV_COMPLETE_ISOLATED_IMPORT(), data) };
174        if ret == 0 {
175            Ok(())
176        } else {
177            Err(errno::Error::last().into())
178        }
179    }
180
181    /// Issue PSP request from guest side
182    pub fn psp_issue_guest_request(&self, data: &mshv_issue_psp_guest_request) -> Result<()> {
183        // SAFETY: IOCTL with correct types
184        let ret = unsafe { ioctl_with_ref(self, MSHV_ISSUE_PSP_GUEST_REQUEST(), data) };
185        if ret == 0 {
186            Ok(())
187        } else {
188            Err(errno::Error::last().into())
189        }
190    }
191
192    /// Create AP threads for SEV-SNP guest
193    pub fn sev_snp_ap_create(&self, data: &mshv_sev_snp_ap_create) -> Result<()> {
194        // SAFETY: IOCTL with correct types
195        let ret = unsafe { ioctl_with_ref(self, MSHV_SEV_SNP_AP_CREATE(), data) };
196        if ret == 0 {
197            Ok(())
198        } else {
199            Err(errno::Error::last().into())
200        }
201    }
202
203    /// Creates/removes a guest memory mapping to userspace
204    pub fn set_guest_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
205        // SAFETY: IOCTL with correct types
206        let ret = unsafe { ioctl_with_ref(self, MSHV_SET_GUEST_MEMORY(), &user_memory_region) };
207        if ret == 0 {
208            Ok(())
209        } else {
210            Err(errno::Error::last().into())
211        }
212    }
213
214    /// Helper for mapping region
215    pub fn map_user_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
216        let mut region = user_memory_region;
217        region.flags &= !set_bits!(u8, MSHV_SET_MEM_BIT_UNMAP);
218        self.set_guest_memory(region)
219    }
220
221    /// Helper for unmapping region
222    pub fn unmap_user_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
223        let mut region = user_memory_region;
224        region.flags = set_bits!(u8, MSHV_SET_MEM_BIT_UNMAP);
225        self.set_guest_memory(region)
226    }
227
228    /// Creates a new MSHV vCPU file descriptor
229    pub fn create_vcpu(&self, id: u8) -> Result<VcpuFd> {
230        let vp_arg = mshv_create_vp {
231            vp_index: id as __u32,
232        };
233        // SAFETY: IOCTL with correct types
234        let vcpu_fd = unsafe { ioctl_with_ref(&self.vm, MSHV_CREATE_VP(), &vp_arg) };
235        if vcpu_fd < 0 {
236            return Err(errno::Error::last().into());
237        }
238
239        // Wrap the vCPU now in case the following ? returns early. This is safe because we verified
240        // the value of the fd and we own the fd.
241        // SAFETY: we're sure vcpu_fd is valid.
242        let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };
243
244        // SAFETY: Safe to call as VCPU has this map already available upon creation
245        let addr = unsafe {
246            libc::mmap(
247                std::ptr::null_mut(),
248                HV_PAGE_SIZE,
249                libc::PROT_READ | libc::PROT_WRITE,
250                libc::MAP_SHARED,
251                vcpu_fd,
252                MSHV_VP_MMAP_OFFSET_REGISTERS as i64 * libc::sysconf(libc::_SC_PAGE_SIZE),
253            )
254        };
255        let vp_page = if addr == libc::MAP_FAILED {
256            // If the MSHV driver returns ENODEV that means it is not supported
257            // We just set None in that case.
258            // Otherise there is an error with mmap, return the error.
259            let err_no = errno::Error::last();
260            if err_no.errno() != libc::ENODEV {
261                return Err(errno::Error::last().into());
262            }
263            None
264        } else {
265            Some(RegisterPage(addr as *mut hv_vp_register_page))
266        };
267
268        Ok(new_vcpu(id as u32, vcpu, vp_page))
269    }
270
    /// Inject an interrupt into the guest.
    #[cfg(target_arch = "x86_64")]
    pub fn request_virtual_interrupt(&self, request: &InterruptRequest) -> Result<()> {
        self.hvcall_assert_virtual_interrupt(request)
    }
276
    /// MSHV_ROOT_HVCALL version of request_virtual_interrupt.
    #[cfg(target_arch = "x86_64")]
    fn hvcall_assert_virtual_interrupt(&self, request: &InterruptRequest) -> Result<()> {
        // Control flags end up in the upper 32 bits of hv_interrupt_control.
        let mut control_flags: u32 = 0;
        if request.level_triggered {
            // bit 0: level-triggered (edge-triggered when clear)
            control_flags |= 0x1;
        }
        if request.logical_destination_mode {
            // bit 1: logical destination mode (physical when clear)
            control_flags |= 0x2;
        }
        if request.long_mode {
            // bit 30: target CPU is in long mode
            control_flags |= 1 << 30;
        }

        // Low 32 bits carry the interrupt type, high 32 bits the flags above.
        let input = hv_input_assert_virtual_interrupt {
            control: hv_interrupt_control {
                as_uint64: request.interrupt_type as u64 | ((control_flags as u64) << 32),
            },
            dest_addr: request.apic_id,
            vector: request.vector,
            ..Default::default() // NOTE: Kernel will populate partition_id field
        };

        let mut args = make_args!(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input);
        self.hvcall(&mut args)
    }
303
    /// signal_event_direct: Send a sint signal event to the vp.
    ///
    /// Returns `true` if the event was newly signaled (see the hvcall helper).
    #[cfg(target_arch = "x86_64")]
    pub fn signal_event_direct(&self, vp: u32, sint: u8, flag: u16) -> Result<bool> {
        self.hvcall_signal_event_direct(vp, sint, flag)
    }
309
    /// MSHV_ROOT_HVCALL version of signal_event_direct.
    ///
    /// Targets VTL 0 of the given vp/sint. The hypervisor reports via
    /// `newly_signaled` whether this call actually transitioned the event
    /// to the signaled state.
    #[cfg(target_arch = "x86_64")]
    fn hvcall_signal_event_direct(&self, vp: u32, sint: u8, flag: u16) -> Result<bool> {
        let input = hv_input_signal_event_direct {
            target_vp: vp,
            target_vtl: 0,
            target_sint: sint,
            flag_number: flag,
            ..Default::default() // NOTE: Kernel will populate partition_id field
        };
        let mut output = hv_output_signal_event_direct {
            newly_signaled: 0,
            ..Default::default()
        };

        let mut args = make_args!(HVCALL_SIGNAL_EVENT_DIRECT, input, output);
        self.hvcall(&mut args)?;

        Ok(output.newly_signaled != 0)
    }
330
    /// post_message_direct: Post a message to the vp using a given sint.
    ///
    /// Fails with `EINVAL` if `msg` exceeds the fixed-size message buffer.
    #[cfg(target_arch = "x86_64")]
    pub fn post_message_direct(&self, vp: u32, sint: u8, msg: &[u8]) -> Result<()> {
        self.hvcall_post_message_direct(vp, sint, msg)
    }
336
337    /// MSHV_ROOT_HVCALL version of post_message_direct
338    #[cfg(target_arch = "x86_64")]
339    fn hvcall_post_message_direct(&self, vp: u32, sint: u8, msg: &[u8]) -> Result<()> {
340        let mut input = hv_input_post_message_direct {
341            vp_index: vp,
342            vtl: 0,
343            sint_index: sint as u32,
344            ..Default::default() // NOTE: Kernel will populate partition_id field
345        };
346        if msg.len() > input.message.len() {
347            return Err(errno::Error::new(libc::EINVAL).into());
348        }
349        let len = cmp::min(msg.len(), input.message.len());
350        input.message[..len].copy_from_slice(&msg[..len]);
351
352        let mut args = make_args!(HVCALL_POST_MESSAGE_DIRECT, input);
353        self.hvcall(&mut args)
354    }
355
    /// register_deliverabilty_notifications: Register for a notification when
    /// hypervisor is ready to process more post_message_direct(s).
    ///
    /// NOTE: the misspelling ("deliverabilty") is part of the public API and
    /// is kept for backward compatibility.
    pub fn register_deliverabilty_notifications(&self, vp: u32, flag: u64) -> Result<()> {
        self.hvcall_register_deliverability_notifications(vp, flag)
    }
361
    /// Generic hypercall version of set_reg, with vp specified by index.
    ///
    /// Issues a rep (repeated) hypercall carrying all register associations.
    /// If the hypervisor completed fewer reps than requested, the write was
    /// partial and `EINTR` is returned.
    fn hvcall_set_reg(&self, vp: u32, reg_assocs: &[hv_register_assoc]) -> Result<()> {
        let input = make_rep_input!(
            hv_input_set_vp_registers {
                vp_index: vp,
                ..Default::default()
            },
            elements,
            reg_assocs
        );
        let mut args = make_rep_args!(HVCALL_SET_VP_REGISTERS, input);
        self.hvcall(&mut args)?;

        // args.reps is updated by the kernel with the number of completed reps.
        if args.reps as usize != reg_assocs.len() {
            return Err(libc::EINTR.into());
        }

        Ok(())
    }
381
    /// MSHV_ROOT_HVCALL version of register_deliverability_notifications.
    ///
    /// Implemented as a write of `flag` to the per-vp
    /// DELIVERABILITY_NOTIFICATIONS synthetic register.
    fn hvcall_register_deliverability_notifications(&self, vp: u32, flag: u64) -> Result<()> {
        self.hvcall_set_reg(
            vp,
            &[hv_register_assoc {
                name: hv_register_name_HV_REGISTER_DELIVERABILITY_NOTIFICATIONS,
                value: hv_register_value { reg64: flag },
                ..Default::default()
            }],
        )
    }
393
394    /// irqfd: Passes in an eventfd which is to be used for injecting
395    /// interrupts from userland.
396    fn irqfd(&self, fd: RawFd, resamplefd: RawFd, gsi: u32, flags: u32) -> Result<()> {
397        let irqfd_arg = mshv_user_irqfd {
398            fd,
399            flags,
400            resamplefd,
401            gsi,
402        };
403
404        // SAFETY: IOCTL with correct types
405        let ret = unsafe { ioctl_with_ref(&self.vm, MSHV_IRQFD(), &irqfd_arg) };
406        if ret == 0 {
407            Ok(())
408        } else {
409            Err(errno::Error::last().into())
410        }
411    }
412
    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    /// # Arguments
    ///
    /// * `fd` - `EventFd` to be signaled.
    /// * `gsi` - IRQ to be triggered.
    ///
    /// # Example
    /// ```no_run
    /// # extern crate libc;
    /// # extern crate vmm_sys_util;
    /// # use libc::EFD_NONBLOCK;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// # use crate::mshv_ioctls::*;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd(&evtfd, 30).unwrap();
    /// ```
    pub fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
        // flags == 0: plain assignment, no resample fd, no deassign.
        self.irqfd(fd.as_raw_fd(), 0, gsi, 0)
    }
437
    /// Registers an event that will, when signaled, assert the `gsi` IRQ.
    /// If the irqchip is resampled by the guest, the IRQ is de-asserted,
    /// and `resamplefd` is notified.
    ///
    /// # Arguments
    ///
    /// * `fd` - `EventFd` to be signaled.
    /// * `resamplefd` - `Eventfd` to be notified on resample.
    /// * `gsi` - IRQ to be triggered.
    ///
    /// # Example
    /// ```no_run
    /// # extern crate libc;
    /// # extern crate vmm_sys_util;
    /// # use libc::EFD_NONBLOCK;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// # use crate::mshv_ioctls::*;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// let resamplefd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd_with_resample(&evtfd, &resamplefd, 30)
    ///     .unwrap();
    /// ```
    pub fn register_irqfd_with_resample(
        &self,
        fd: &EventFd,
        resamplefd: &EventFd,
        gsi: u32,
    ) -> Result<()> {
        // RESAMPLE bit tells the kernel to use resamplefd for de-assert.
        self.irqfd(
            fd.as_raw_fd(),
            resamplefd.as_raw_fd(),
            gsi,
            set_bits!(u32, MSHV_IRQFD_BIT_RESAMPLE),
        )
    }
477
    /// Unregisters an event that will, when signaled, trigger the `gsi` IRQ.
    ///
    /// # Arguments
    ///
    /// * `fd` - `EventFd` to be signaled.
    /// * `gsi` - IRQ to be triggered.
    ///
    /// # Example
    /// ```no_run
    /// # extern crate libc;
    /// # extern crate vmm_sys_util;
    /// # use libc::EFD_NONBLOCK;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// # use crate::mshv_ioctls::*;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd(&evtfd, 30).unwrap();
    /// vm.unregister_irqfd(&evtfd, 30).unwrap();
    /// ```
    pub fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
        // DEASSIGN bit tells the kernel to remove the existing binding.
        self.irqfd(
            fd.as_raw_fd(),
            0,
            gsi,
            set_bits!(u32, MSHV_IRQFD_BIT_DEASSIGN),
        )
    }
507
508    /// Sets the MSI routing table entries, overwriting any previously set
509    /// entries, as per the `MSHV_SET_MSI_ROUTING` ioctl.
510    ///
511    /// Returns an io::Error when the table could not be updated.
512    ///
513    /// # Arguments
514    ///
515    /// * msi_routing - MSI routing configuration.
516    ///
517    /// # Example
518    /// ```no_run
519    /// # extern crate libc;
520    /// # extern crate vmm_sys_util;
521    /// # use libc::EFD_NONBLOCK;
522    /// # use vmm_sys_util::eventfd::EventFd;
523    /// # use crate::mshv_ioctls::*;
524    /// # use mshv_bindings::*;
525    /// let hv = Mshv::new().unwrap();
526    /// let vm = hv.create_vm().unwrap();
527    ///
528    /// let msi_routing = mshv_user_irq_table::default();
529    /// vm.set_msi_routing(&msi_routing).unwrap();
530    /// ```
531    pub fn set_msi_routing(&self, msi_routing: &mshv_user_irq_table) -> Result<()> {
532        // SAFETY: we allocated the structure and we know the kernel
533        // will read exactly the size of the structure.
534        let ret = unsafe { ioctl_with_ref(self, MSHV_SET_MSI_ROUTING(), msi_routing) };
535        if ret == 0 {
536            Ok(())
537        } else {
538            Err(errno::Error::last().into())
539        }
540    }
541
    /// ioeventfd: Passes in an eventfd which the kernel would signal when
    /// an mmio region is written into.
    ///
    /// `datamatch` is any type convertible to u64; its *size* determines the
    /// access width to match (a zero-sized type such as `NoDatamatch`
    /// disables datamatch entirely — see the size_of check below).
    fn ioeventfd<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
        mut flags: u32,
    ) -> Result<()> {
        //
        // mshv doesn't support PIO ioeventfds now.
        //
        let mmio_addr = match addr {
            IoEventAddress::Pio(_) => {
                return Err(libc::ENOTSUP.into());
            }
            IoEventAddress::Mmio(ref m) => *m,
        };

        // Only enable datamatch for non-zero-sized types; `NoDatamatch` is a
        // zero-sized struct, so passing it leaves the flag clear.
        if std::mem::size_of::<T>() > 0 {
            flags |= set_bits!(u32, MSHV_IOEVENTFD_BIT_DATAMATCH);
        }

        let ioeventfd = mshv_user_ioeventfd {
            datamatch: datamatch.into(),
            len: std::mem::size_of::<T>() as u32,
            addr: mmio_addr,
            fd: fd.as_raw_fd(),
            flags,
            ..Default::default()
        };
        // SAFETY: we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, MSHV_IOEVENTFD(), &ioeventfd) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }
582
    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// # Arguments
    ///
    /// * `fd` - `EventFd` which will be signaled. When signaling, the usual `vmexit` to userspace
    ///   is prevented.
    /// * `addr` - Address being written to.
    /// * `datamatch` - Limits signaling `fd` to only the cases where the value being written is
    ///   equal to this parameter. The size of `datamatch` is important and it must
    ///   match the expected size of the guest's write.
    ///
    /// # Example
    /// ```no_run
    /// # extern crate libc;
    /// # extern crate vmm_sys_util;
    /// # use libc::EFD_NONBLOCK;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// # use crate::mshv_ioctls::*;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_ioevent(&evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch)
    ///     .unwrap();
    /// ```
    pub fn register_ioevent<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
    ) -> Result<()> {
        // flags == 0: plain assignment.
        self.ioeventfd(fd, addr, datamatch, 0)
    }
616
    /// Unregisters an event from a certain address it has been previously registered to.
    ///
    /// # Arguments
    ///
    /// * `fd` - FD which will be unregistered.
    /// * `addr` - Address being written to.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it relies on RawFd.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # extern crate libc;
    /// # extern crate vmm_sys_util;
    /// # use libc::EFD_NONBLOCK;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// # use crate::mshv_ioctls::*;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// let evtfd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_ioevent(&evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch)
    ///     .unwrap();
    /// vm.unregister_ioevent(&evtfd, &IoEventAddress::Mmio(0x1000), NoDatamatch)
    ///     .unwrap();
    /// ```
    pub fn unregister_ioevent<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
    ) -> Result<()> {
        // DEASSIGN bit tells the kernel to remove the existing binding.
        self.ioeventfd(
            fd,
            addr,
            datamatch,
            set_bits!(u32, MSHV_IOEVENTFD_BIT_DEASSIGN),
        )
    }
658
    /// Get a property of the VM partition: for example, CPU frequency, size of the Xsave state and more.
    /// For more of the codes, please see the hv_partition_property_code type definitions in the bindings.rs.
    pub fn get_partition_property(&self, code: u32) -> Result<u64> {
        self.hvcall_get_partition_property(code)
    }
664
665    /// Generic hvcall version of get_partition_property
666    fn hvcall_get_partition_property(&self, code: u32) -> Result<u64> {
667        let input = hv_input_get_partition_property {
668            property_code: code,
669            ..Default::default() // NOTE: Kernel will populate partition_id field
670        };
671        let mut output = hv_output_get_partition_property {
672            ..Default::default()
673        };
674        let mut args = make_args!(HVCALL_GET_PARTITION_PROPERTY, input, output);
675        self.hvcall(&mut args)?;
676
677        Ok(output.property_value)
678    }
679
    /// Sets a partition property.
    pub fn set_partition_property(&self, code: u32, value: u64) -> Result<()> {
        self.hvcall_set_partition_property(code, value)
    }
684
    /// Generic hvcall version of set_partition_property: issues
    /// `HVCALL_SET_PARTITION_PROPERTY` with the given code/value pair.
    fn hvcall_set_partition_property(&self, code: u32, value: u64) -> Result<()> {
        let input = hv_input_set_partition_property {
            property_code: code,
            property_value: value,
            ..Default::default() // NOTE: Kernel will populate partition_id field
        };
        let mut args = make_args!(HVCALL_SET_PARTITION_PROPERTY, input);
        self.hvcall(&mut args)
    }
695
696    /// Enable dirty page tracking by hypervisor
697    /// Flags:
698    ///         bit 1: Enabled
699    ///         bit 2: Granularity
700    pub fn enable_dirty_page_tracking(&self) -> Result<()> {
701        let flag: u64 = 0x1;
702        self.set_partition_property(
703            hv_partition_property_code_HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING,
704            flag,
705        )
706    }
707
708    /// Disable dirty page tracking by hypervisor
709    /// Prerequisite: It is required to set the dirty bits if cleared
710    /// previously, otherwise this hypercall will be failed.
711    /// Flags:
712    ///         bit 1: Enabled
713    ///         bit 2: Granularity
714    pub fn disable_dirty_page_tracking(&self) -> Result<()> {
715        let flag: u64 = 0x0;
716        self.set_partition_property(
717            hv_partition_property_code_HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING,
718            flag,
719        )
720    }
721
    /// Get page access states as a bitmap.
    /// A bitmap of dirty or accessed bits for a range of guest pages.
    /// Prerequisite: Need to enable page_access_tracking.
    /// Args:
    ///     base_pfn: Guest page number
    ///     page_count: Number of pages
    ///     access_type: MSHV_GPAP_ACCESS_TYPE_*
    ///     access_op: MSHV_GPAP_ACCESS_OP_* to optionally clear or set bits
    pub fn get_gpap_access_bitmap(
        &self,
        base_pfn: u64,
        page_count: u64,
        access_type: u8,
        access_op: u8,
    ) -> Result<Vec<u64>> {
        // One u64 holds the bits for 64 pages; round up for partial words.
        let buf_sz = page_count.div_ceil(64);
        let mut bitmap: Vec<u64> = vec![0u64; buf_sz as usize];
        let mut args = mshv_gpap_access_bitmap {
            access_type,
            access_op,
            page_count,
            gpap_base: base_pfn,
            // The kernel writes the result through this userspace pointer;
            // `bitmap` must stay alive (and unmoved) across the ioctl.
            bitmap_ptr: bitmap.as_mut_ptr() as u64,
            ..Default::default()
        };

        // SAFETY: IOCTL with correct types
        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_GET_GPAP_ACCESS_BITMAP(), &mut args) };
        if ret == 0 {
            Ok(bitmap)
        } else {
            Err(errno::Error::last().into())
        }
    }
756
757    /// Gets the bitmap of pages dirtied since the last call of this function
758    /// Args:
759    ///     base_pfn: Guest page number
760    ///     memory_size: In bytes
761    ///     access_op: MSHV_GPAP_ACCESS_OP_* to optionally clear or set bits
762    pub fn get_dirty_log(
763        &self,
764        base_pfn: u64,
765        memory_size: usize,
766        access_op: u8,
767    ) -> Result<Vec<u64>> {
768        // For ease of access we are saving the bitmap in a u64 vector. We are using ceil to
769        // make sure we count all dirty pages even when `memory_size` is not a multiple of
770        // `page_size * 64`.
771        let div_ceil = |dividend: usize, divisor| dividend.div_ceil(divisor);
772        let bitmap_size = div_ceil(memory_size, HV_PAGE_SIZE * 64);
773        let mut bitmap: Vec<u64> = Vec::with_capacity(bitmap_size);
774        let mut completed = 0;
775        let total = (memory_size / HV_PAGE_SIZE) as u64;
776
777        while completed < total {
778            let remaining = total - completed;
779            let batch_size = cmp::min(PAGE_ACCESS_STATES_BATCH_SIZE, remaining);
780            let mut bitmap_part = self.get_gpap_access_bitmap(
781                base_pfn + completed,
782                batch_size,
783                MSHV_GPAP_ACCESS_TYPE_DIRTY as u8,
784                access_op,
785            )?;
786            bitmap.append(&mut bitmap_part);
787            completed += batch_size;
788        }
789        Ok(bitmap)
790    }
791
792    /// Create an in-kernel device
793    ///
794    /// See the documentation for `MSHV_CREATE_DEVICE`.
795    pub fn create_device(&self, device: &mut mshv_create_device) -> Result<DeviceFd> {
796        // SAFETY: IOCTL with correct types
797        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_CREATE_DEVICE(), device) };
798        if ret == 0 {
799            // SAFETY: fd is valid
800            Ok(new_device(unsafe { File::from_raw_fd(device.fd as i32) }))
801        } else {
802            Err(errno::Error::last().into())
803        }
804    }
805
    /// Execute a hypercall for this partition.
    ///
    /// `args` is updated in place by the kernel (e.g. rep counts, status).
    /// On failure the hypercall status in `args` is folded into the returned
    /// `MshvError` alongside the errno.
    pub fn hvcall(&self, args: &mut mshv_root_hvcall) -> Result<()> {
        // SAFETY: IOCTL with correct types
        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_ROOT_HVCALL(), args) };
        if ret == 0 {
            Ok(())
        } else {
            Err(MshvError::from_hvcall(errno::Error::last(), *args))
        }
    }
816
    #[cfg(target_arch = "x86_64")]
    /// X86 specific call to get list of supported MSRs.
    ///
    /// Queries the partition's xsave, processor (two 64-bit banks) and
    /// synthetic processor feature properties, then derives the supported
    /// MSR set from them via `get_partition_supported_msrs`.
    pub fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        let xsave_feature_val = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
        )?;
        let proc_features0 = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_FEATURES0,
        )?;
        let proc_features1 = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_FEATURES1,
        )?;
        let syn_feature = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES,
        )?;
        let mut proc_features = hv_partition_processor_features::default();
        // SAFETY: access union fields; the raw u64 view is written, the
        // bitfield views are read later by get_partition_supported_msrs.
        unsafe {
            proc_features.as_uint64[0] = proc_features0;
            proc_features.as_uint64[1] = proc_features1;
        }
        let synthetic_features = hv_partition_synthetic_processor_features {
            as_uint64: [syn_feature],
        };
        let xsave_features = hv_partition_processor_xsave_features {
            as_uint64: xsave_feature_val,
        };
        let vp_features: VpFeatures = VpFeatures {
            proc_features,
            xsave_features,
            synthetic_features,
        };
        Ok(get_partition_supported_msrs(&vp_features))
    }
851}
/// Helper function to create a new `VmFd` from an already-created VM `File`.
///
/// This should not be exported as a public function because the preferred way is to use
/// `create_vm` from `Mshv`. The function cannot be part of the `VmFd` implementation because
/// then it would be exported with the public `VmFd` interface.
pub fn new_vmfd(vm: File) -> VmFd {
    VmFd { vm }
}
#[cfg(test)]
mod tests {
    // NOTE(review): these tests exercise real hypervisor ioctls and therefore
    // require a host with /dev/mshv (and typically root) to run.
    use libc::c_void;

    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    use crate::ioctls::system::Mshv;
    use crate::ioctls::MshvError;
    #[cfg(target_arch = "x86_64")]
    use std::mem;

    /// Maps a single anonymous 4 KiB host page into the guest, then unmaps it.
    #[test]
    fn test_user_memory() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        // NOTE(review): the mmap result is not checked against MAP_FAILED; a
        // failure would only surface later via map_user_memory — TODO confirm
        // whether an explicit assert is wanted here.
        let addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                0x1000,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        };
        // Writable + executable region backed by the page above, at guest PFN 1.
        let mem = mshv_user_mem_region {
            flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
            guest_pfn: 0x1,
            size: 0x1000,
            userspace_addr: addr as u64,
            ..Default::default()
        };

        vm.map_user_memory(mem).unwrap();

        vm.unmap_user_memory(mem).unwrap();
    }

    /// Smoke test: a vCPU can be created on an initialized partition.
    #[test]
    fn test_create_vcpu() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0);
        assert!(vcpu.is_ok());
    }

    /// Requests an EXTINT toward vCPU 0 via both the dedicated ioctl and the
    /// generic hvcall path.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_assert_virtual_interrupt() {
        /* TODO better test with some code */
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let state = vcpu.get_lapic().unwrap();
        let buffer = Buffer::try_from(&state).unwrap();
        // SAFETY: reinterpret the LAPIC state buffer to read the APIC id.
        // NOTE(review): assumes buffer.buf holds at least an
        // hv_local_interrupt_controller_state — TODO confirm against get_lapic.
        let hv_state = unsafe { &*(buffer.buf as *const hv_local_interrupt_controller_state) };
        let cfg = InterruptRequest {
            interrupt_type: hv_interrupt_type_HV_X64_INTERRUPT_TYPE_EXTINT,
            apic_id: hv_state.apic_id as u64,
            vector: 0,
            level_triggered: false,
            logical_destination_mode: false,
            long_mode: false,
        };
        vm.request_virtual_interrupt(&cfg).unwrap();
        vm.hvcall_assert_virtual_interrupt(&cfg).unwrap();
    }

    /// Installs a CPUID execute intercept via both the ioctl and hvcall paths.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_install_intercept() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let intercept_args = mshv_install_intercept {
            access_type_mask: HV_INTERCEPT_ACCESS_MASK_EXECUTE,
            intercept_type: hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
            intercept_parameter: hv_intercept_parameters { cpuid_index: 0x100 },
        };
        assert!(vm.install_intercept(intercept_args).is_ok());
        // Different cpuid_index so this does not collide with the one above.
        assert!(vm
            .hvcall_install_intercept(
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
                hv_intercept_parameters { cpuid_index: 0x101 },
            )
            .is_ok());
    }

    /// For several partition properties, the legacy ioctl getter and the
    /// generic hvcall getter must agree; also round-trips one settable property.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_get_property() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();

        let mut val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
            )
            .unwrap();
        let mut hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Max xsave data size: {val} bytes");
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
            )
            .unwrap();
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Xsave feature: {val}");
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY,
            )
            .unwrap();
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Processor frequency: {val}");
        // Set a property, then verify both getters observe the new value.
        vm.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .unwrap();
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            )
            .unwrap();
        assert!(
            val == hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO
                .into()
        );
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            )
            .unwrap();
        assert!(val == hvcall_val);
    }

    /// Round-trips the UNIMPLEMENTED_MSR_ACTION property through both the
    /// ioctl setter and the hvcall setter.
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_set_property() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();

        let code = hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION;
        let ignore =
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64;
        let fault = hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_FAULT as u64;

        vm.set_partition_property(code, ignore).unwrap();
        let ignore_ret = vm.get_partition_property(code).unwrap();
        assert!(ignore_ret == ignore);

        vm.set_partition_property(code, fault).unwrap();
        let fault_ret = vm.get_partition_property(code).unwrap();
        assert!(fault_ret == fault);

        // Test the same with hvcall_ equivalent
        vm.hvcall_set_partition_property(code, ignore).unwrap();
        let ignore_ret = vm.get_partition_property(code).unwrap();
        assert!(ignore_ret == ignore);

        vm.hvcall_set_partition_property(code, fault).unwrap();
        let fault_ret = vm.get_partition_property(code).unwrap();
        assert!(fault_ret == fault);
    }

    /// Setting PRIVILEGE_FLAGS after initialization must fail; the hvcall path
    /// must surface the precise hypervisor status, not a bare errno.
    #[test]
    fn test_set_partition_property_invalid() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let code = hv_partition_property_code_HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS;

        // old IOCTL
        let res_0 = vm.set_partition_property(code, 0);
        assert!(res_0.is_err());

        // generic hvcall
        let res_1 = vm.hvcall_set_partition_property(code, 0);
        let mshv_err_check = MshvError::Hypercall {
            code: HVCALL_SET_PARTITION_PROPERTY as u16,
            status_raw: HV_STATUS_INVALID_PARTITION_STATE as u16,
            status: Some(HvError::InvalidPartitionState),
        };
        assert!(res_1.err().unwrap() == mshv_err_check);
    }

    /// Registers and unregisters an irqfd on GSI 30.
    #[test]
    fn test_irqfd() {
        use libc::EFD_NONBLOCK;
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let efd = EventFd::new(EFD_NONBLOCK).unwrap();
        vm.register_irqfd(&efd, 30).unwrap();
        vm.unregister_irqfd(&efd, 30).unwrap();
    }

    /// Registers and unregisters an ioeventfd on an MMIO address with
    /// datamatch disabled.
    #[test]
    fn test_ioeventfd() {
        let efd = EventFd::new(0).unwrap();
        let addr = IoEventAddress::Mmio(0xe7e85004);
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        vm.register_ioevent(&efd, &addr, NoDatamatch).unwrap();
        vm.unregister_ioevent(&efd, &addr, NoDatamatch).unwrap();
    }

    /// An empty MSI routing table must be accepted.
    #[test]
    fn test_set_msi_routing() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let msi_routing = mshv_user_irq_table::default();
        assert!(vm.set_msi_routing(&msi_routing).is_ok());
    }

    /// Shared body for the dirty-log tests: maps `mem_size` bytes of guest
    /// memory, enables tracking, and exercises the CLEAR/SET/NOOP access ops.
    fn _test_clear_set_get_dirty_log(mem_size: usize) {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        // Allocate `mem_size` bytes of anonymous memory as guest backing.
        // NOTE(review): mmap result unchecked against MAP_FAILED here too.
        let load_addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                mem_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        } as *mut u8;
        let mem_region = mshv_user_mem_region {
            flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
            guest_pfn: 0x0_u64,
            size: mem_size as u64,
            userspace_addr: load_addr as u64,
            ..Default::default()
        };
        vm.map_user_memory(mem_region).unwrap();
        vm.enable_dirty_page_tracking().unwrap();

        // One u64 word per 64 guest pages (page count rounded up).
        let bitmap_len = ((mem_size + HV_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT) / 64;
        {
            // Read the log while clearing the dirty bits.
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_CLEAR as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
        }
        // get the clear bits and verify cleared, set the bits again
        // (not all are really set; due to mmio or overlay pages gaps)
        let clear_bitmap = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_SET as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        // Every word must be zero after the CLEAR pass above.
        for x in clear_bitmap {
            assert!(x == 0);
        }
        // get the set bits, noop
        let set_bitmap_0 = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_NOOP as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        // get the set bits after noop
        let set_bitmap_1 = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_NOOP as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        // NOOP must not modify the log: two consecutive reads agree.
        for i in 0..bitmap_len {
            assert!(set_bitmap_0[i] == set_bitmap_1[i]);
        }

        vm.disable_dirty_page_tracking().unwrap();
        vm.unmap_user_memory(mem_region).unwrap();
        unsafe { libc::munmap(load_addr as *mut c_void, mem_size) };
    }

    /// Dirty-log round trip with a 32 MiB region.
    #[test]
    fn test_get_dirty_log_32M() {
        let mem_size = 32 * 1024 * 1024;
        _test_clear_set_get_dirty_log(mem_size);
    }

    /// Dirty-log round trip with an 8 GiB region (crosses the batch size used
    /// when fetching page access states).
    #[test]
    fn test_get_dirty_log_8G() {
        let mem_size = 8 * 1024 * 1024 * 1024;
        _test_clear_set_get_dirty_log(mem_size);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    #[ignore]
    fn test_signal_event_direct() {
        // TODO this is used by MSHV synic.
        // Enable the test once synic is implemented.
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        vm.signal_event_direct(0, 0, 1).unwrap();
        vm.hvcall_signal_event_direct(0, 0, 1).unwrap();
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    #[ignore]
    fn test_post_message_direct() {
        // TODO this is used by MSHV synic.
        // Enable the test once synic is implemented.
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        // Zeroed message payload, sized exactly like HvMessage.
        let hv_message: [u8; mem::size_of::<HvMessage>()] = [0; mem::size_of::<HvMessage>()];
        vm.post_message_direct(0, 0, &hv_message).unwrap();
        vm.hvcall_post_message_direct(0, 0, &hv_message).unwrap();
    }

    /// Registering deliverability notifications on vCPU 0 succeeds with flags
    /// 0 and fails with an invalid flag value; the failure must carry the
    /// precise hypercall status. ("deliverabilty" matches the API's spelling.)
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_register_deliverabilty_notifications() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        vm.register_deliverabilty_notifications(0, 0).unwrap();
        vm.hvcall_register_deliverability_notifications(0, 0)
            .unwrap();
        // flags=1 is invalid: expect a Hypercall error mapping to EIO with
        // HV_STATUS_INVALID_PARAMETER from HVCALL_SET_VP_REGISTERS.
        let res = vm.register_deliverabilty_notifications(0, 1);
        assert!(res.is_err());
        if let Err(e) = res {
            assert!(matches!(e, MshvError::Hypercall { .. }));
            assert!(e.errno() == libc::EIO);
            match e {
                MshvError::Hypercall {
                    code,
                    status_raw,
                    status,
                } => {
                    assert!(code == HVCALL_SET_VP_REGISTERS as u16);
                    assert!(status_raw == HV_STATUS_INVALID_PARAMETER as u16);
                    assert!(status.unwrap() as u32 == HV_STATUS_INVALID_PARAMETER);
                }
                _ => unreachable!(),
            }
        }
    }

    /// The system MSR list must contain IA32_MSR_SYSENTER_CS, and every MSR in
    /// the per-VM list must be gettable and settable on a vCPU.
    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_get_msr_index_list() {
        /* Check system list contains IA32_MSR_SYSENTER_CS */
        let hv = Mshv::new().unwrap();
        let msr_list = hv.get_msr_index_list().unwrap();

        let mut found = false;
        for index in msr_list {
            if index == IA32_MSR_SYSENTER_CS {
                found = true;
                break;
            }
        }
        assert!(found);

        /* Test vm list: each MSR returned should be settable/gettable */
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        // Count failures instead of unwrapping so every MSR gets reported.
        let mut num_errors = 0;
        for idx in vm.get_msr_index_list().unwrap() {
            let mut get_set_msrs = Msrs::from_entries(&[msr_entry {
                index: idx,
                ..Default::default()
            }])
            .unwrap();
            vcpu.get_msrs(&mut get_set_msrs).unwrap_or_else(|_| {
                println!("Error getting MSR: 0x{idx:x}");
                num_errors += 1;
                0
            });
            vcpu.set_msrs(&get_set_msrs).unwrap_or_else(|_| {
                println!("Error setting MSR: 0x{idx:x}");
                num_errors += 1;
                0
            });
        }
        assert!(num_errors == 0);
    }
}