use crate::ioctls::device::{new_device, DeviceFd};
use crate::ioctls::vcpu::{new_vcpu, VcpuFd};
use crate::ioctls::{MshvError, Result};
use crate::mshv_ioctls::*;
use crate::set_bits;
use mshv_bindings::*;

use std::cmp;
use std::convert::TryFrom;
use std::fs::File;

use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use vmm_sys_util::errno;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::ioctl::{ioctl, ioctl_with_mut_ref, ioctl_with_ref};

/// Batch size, in pages, used when querying page access states.
const PAGE_ACCESS_STATES_BATCH_SIZE: u64 = 0x10000;

/// An address either in programmable I/O space or in memory-mapped I/O space.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Copy)]
pub enum IoEventAddress {
    /// Representation of a programmable I/O address.
    Pio(u64),
    /// Representation of a memory-mapped I/O address.
    Mmio(u64),
}

/// The type of the virtual machine.
#[derive(Eq, PartialEq, Hash, Clone, Debug, Copy)]
pub enum VmType {
    /// Normal (non-isolated) VM.
    Normal,
    /// AMD SEV-SNP isolated VM.
    Snp,
}

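/// Conversion from the raw `u64` representation back to a `VmType`. A minimal
/// sketch of the round trip (assumes `VmType` is re-exported at the crate
/// root, and relies on `Normal` being the zero discriminant as declared above):
///
/// ```no_run
/// # use mshv_ioctls::VmType;
/// # use std::convert::TryFrom;
/// assert_eq!(VmType::try_from(VmType::Snp as u64), Ok(VmType::Snp));
/// assert!(VmType::try_from(0xdead_beef).is_err());
/// ```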
impl TryFrom<u64> for VmType {
    type Error = ();

    fn try_from(v: u64) -> std::result::Result<Self, Self::Error> {
        match v {
            x if x == VmType::Normal as u64 => Ok(VmType::Normal),
            x if x == VmType::Snp as u64 => Ok(VmType::Snp),
            _ => Err(()),
        }
    }
}

/// A marker type that converts to a datamatch value of zero, i.e. no datamatch.
#[derive(Debug)]
pub struct NoDatamatch;

impl From<NoDatamatch> for u64 {
    fn from(_s: NoDatamatch) -> u64 {
        0
    }
}

/// A request to assert a virtual interrupt in a guest.
#[derive(Debug)]
pub struct InterruptRequest {
    /// The type of interrupt to assert (e.g. fixed, NMI, ExtInt).
    pub interrupt_type: hv_interrupt_type,
    /// APIC ID of the destination processor.
    pub apic_id: u64,
    /// Interrupt vector to deliver.
    pub vector: u32,
    /// Whether the interrupt is level-triggered (as opposed to edge-triggered).
    pub level_triggered: bool,
    /// Whether the destination is in logical (as opposed to physical) mode.
    pub logical_destination_mode: bool,
    /// Whether the target processor is in long mode.
    pub long_mode: bool,
}

/// Wrapper over the MSHV VM (partition) file descriptor.
#[derive(Debug)]
pub struct VmFd {
    vm: File,
}
95
96impl AsRawFd for VmFd {
97 fn as_raw_fd(&self) -> RawFd {
98 self.vm.as_raw_fd()
99 }
100}
101
impl VmFd {
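    /// Initializes the partition. This must be called after creating the VM
    /// and before creating vCPUs or mapping memory. A minimal usage sketch,
    /// assuming a `/dev/mshv` device is available:
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// ```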
    pub fn initialize(&self) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl(self, MSHV_INITIALIZE_PARTITION()) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

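    /// Installs an intercept so the given guest events exit to the VMM.
    /// A minimal sketch installing a CPUID intercept, adapted from the
    /// `test_install_intercept` test below (assumes an initialized partition):
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let args = mshv_install_intercept {
    ///     access_type_mask: HV_INTERCEPT_ACCESS_MASK_EXECUTE,
    ///     intercept_type: hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
    ///     intercept_parameter: hv_intercept_parameters { cpuid_index: 0x100 },
    /// };
    /// vm.install_intercept(args).unwrap();
    /// ```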
    pub fn install_intercept(&self, install_intercept_args: mshv_install_intercept) -> Result<()> {
        self.hvcall_install_intercept(
            install_intercept_args.access_type_mask,
            install_intercept_args.intercept_type,
            install_intercept_args.intercept_parameter,
        )
    }

    /// Generic hypercall version of install_intercept.
    fn hvcall_install_intercept(
        &self,
        access_type_mask: u32,
        intercept_type: u32,
        intercept_param: hv_intercept_parameters,
    ) -> Result<()> {
        let input = hv_input_install_intercept {
            access_type: access_type_mask,
            intercept_type,
            intercept_parameter: intercept_param,
            ..Default::default()
        };

        let mut args = make_args!(HVCALL_INSTALL_INTERCEPT, input);
        self.hvcall(&mut args)
    }

    /// Modify host access to guest physical pages; used with isolated
    /// (e.g. SEV-SNP) partitions.
    pub fn modify_gpa_host_access(
        &self,
        gpa_host_access_args: &mshv_modify_gpa_host_access,
    ) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret =
            unsafe { ioctl_with_ref(self, MSHV_MODIFY_GPA_HOST_ACCESS(), gpa_host_access_args) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Import a list of isolated pages into the partition.
    pub fn import_isolated_pages(
        &self,
        isolate_page_list: &mshv_import_isolated_pages,
    ) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_IMPORT_ISOLATED_PAGES(), isolate_page_list) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Complete the isolated-page import flow for the partition.
    pub fn complete_isolated_import(&self, data: &mshv_complete_isolated_import) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_COMPLETE_ISOLATED_IMPORT(), data) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Issue a PSP (Platform Security Processor) guest request.
    pub fn psp_issue_guest_request(&self, data: &mshv_issue_psp_guest_request) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_ISSUE_PSP_GUEST_REQUEST(), data) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Create an AP (application processor) for an SEV-SNP guest.
    pub fn sev_snp_ap_create(&self, data: &mshv_sev_snp_ap_create) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_SEV_SNP_AP_CREATE(), data) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Map or unmap a region of userspace memory in the guest, according to
    /// the flags in `user_memory_region`.
    pub fn set_guest_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_SET_GUEST_MEMORY(), &user_memory_region) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

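    /// Maps a region of userspace memory into the guest, clearing the unmap
    /// bit regardless of what the caller passed in `flags`. A minimal sketch,
    /// adapted from the `test_user_memory` test below (assumes `/dev/mshv`
    /// and that the crate exports the `set_bits!` macro):
    ///
    /// ```no_run
    /// # use mshv_ioctls::{set_bits, Mshv};
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// // SAFETY: anonymous mmap; the result is used as the backing address.
    /// let addr = unsafe {
    ///     libc::mmap(
    ///         std::ptr::null_mut(),
    ///         0x1000,
    ///         libc::PROT_READ | libc::PROT_WRITE,
    ///         libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
    ///         -1,
    ///         0,
    ///     )
    /// };
    /// let mem = mshv_user_mem_region {
    ///     flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
    ///     guest_pfn: 0x1,
    ///     size: 0x1000,
    ///     userspace_addr: addr as u64,
    ///     ..Default::default()
    /// };
    /// vm.map_user_memory(mem).unwrap();
    /// vm.unmap_user_memory(mem).unwrap();
    /// ```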
    pub fn map_user_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
        let mut region = user_memory_region;
        region.flags &= !set_bits!(u8, MSHV_SET_MEM_BIT_UNMAP);
        self.set_guest_memory(region)
    }

    /// Unmaps a region of userspace memory from the guest, setting the unmap
    /// bit and ignoring any other flags the caller passed.
    pub fn unmap_user_memory(&self, user_memory_region: mshv_user_mem_region) -> Result<()> {
        let mut region = user_memory_region;
        region.flags = set_bits!(u8, MSHV_SET_MEM_BIT_UNMAP);
        self.set_guest_memory(region)
    }

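    /// Creates a vCPU with the given id and maps its register page when the
    /// kernel supports one (an ENODEV from mmap is tolerated and simply
    /// leaves the register page unset). A minimal sketch (assumes `/dev/mshv`):
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let vcpu = vm.create_vcpu(0).unwrap();
    /// ```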
    pub fn create_vcpu(&self, id: u8) -> Result<VcpuFd> {
        let vp_arg = mshv_create_vp {
            vp_index: id as __u32,
        };
        // SAFETY: IOCTL call with the correct types.
        let vcpu_fd = unsafe { ioctl_with_ref(&self.vm, MSHV_CREATE_VP(), &vp_arg) };
        if vcpu_fd < 0 {
            return Err(errno::Error::last().into());
        }

        // SAFETY: we verified the result of the IOCTL above, so vcpu_fd is a
        // valid, owned file descriptor.
        let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };

        // SAFETY: mmap over a file descriptor we own; the result is checked below.
        let addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                HV_PAGE_SIZE,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                vcpu_fd,
                MSHV_VP_MMAP_OFFSET_REGISTERS as i64 * libc::sysconf(libc::_SC_PAGE_SIZE),
            )
        };
        let vp_page = if addr == libc::MAP_FAILED {
            let err_no = errno::Error::last();
            // The register page may not be implemented; tolerate ENODEV and
            // fall back to running without it.
            if err_no.errno() != libc::ENODEV {
                return Err(err_no.into());
            }
            None
        } else {
            Some(RegisterPage(addr as *mut hv_vp_register_page))
        };

        Ok(new_vcpu(id as u32, vcpu, vp_page))
    }

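    /// Requests that a virtual interrupt be asserted in the guest. A minimal
    /// sketch asserting an ExtInt on APIC ID 0, adapted from the
    /// `test_assert_virtual_interrupt` test below (assumes an initialized
    /// partition with a vCPU whose APIC ID is 0):
    ///
    /// ```no_run
    /// # use mshv_ioctls::{InterruptRequest, Mshv};
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let _vcpu = vm.create_vcpu(0).unwrap();
    /// let cfg = InterruptRequest {
    ///     interrupt_type: hv_interrupt_type_HV_X64_INTERRUPT_TYPE_EXTINT,
    ///     apic_id: 0,
    ///     vector: 0,
    ///     level_triggered: false,
    ///     logical_destination_mode: false,
    ///     long_mode: false,
    /// };
    /// vm.request_virtual_interrupt(&cfg).unwrap();
    /// ```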
    #[cfg(target_arch = "x86_64")]
    pub fn request_virtual_interrupt(&self, request: &InterruptRequest) -> Result<()> {
        self.hvcall_assert_virtual_interrupt(request)
    }

    /// Generic hypercall version of request_virtual_interrupt.
    #[cfg(target_arch = "x86_64")]
    fn hvcall_assert_virtual_interrupt(&self, request: &InterruptRequest) -> Result<()> {
        let mut control_flags: u32 = 0;
        if request.level_triggered {
            control_flags |= 0x1;
        }
        if request.logical_destination_mode {
            control_flags |= 0x2;
        }
        if request.long_mode {
            control_flags |= 1 << 30;
        }

        let input = hv_input_assert_virtual_interrupt {
            control: hv_interrupt_control {
                as_uint64: request.interrupt_type as u64 | ((control_flags as u64) << 32),
            },
            dest_addr: request.apic_id,
            vector: request.vector,
            ..Default::default()
        };

        let mut args = make_args!(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input);
        self.hvcall(&mut args)
    }

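    /// Signals an event on the given SINT of the given virtual processor,
    /// returning whether the event was newly signaled. A minimal sketch
    /// (assumes a guest that has configured the target SynIC event; the
    /// corresponding test below is ignored for that reason):
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let _vcpu = vm.create_vcpu(0).unwrap();
    /// let newly_signaled = vm.signal_event_direct(0, 0, 1).unwrap();
    /// ```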
    #[cfg(target_arch = "x86_64")]
    pub fn signal_event_direct(&self, vp: u32, sint: u8, flag: u16) -> Result<bool> {
        self.hvcall_signal_event_direct(vp, sint, flag)
    }

    /// Generic hypercall version of signal_event_direct.
    #[cfg(target_arch = "x86_64")]
    fn hvcall_signal_event_direct(&self, vp: u32, sint: u8, flag: u16) -> Result<bool> {
        let input = hv_input_signal_event_direct {
            target_vp: vp,
            target_vtl: 0,
            target_sint: sint,
            flag_number: flag,
            ..Default::default()
        };
        let mut output = hv_output_signal_event_direct {
            newly_signaled: 0,
            ..Default::default()
        };

        let mut args = make_args!(HVCALL_SIGNAL_EVENT_DIRECT, input, output);
        self.hvcall(&mut args)?;

        Ok(output.newly_signaled != 0)
    }

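    /// Posts a message to the given SINT of the given virtual processor.
    /// A minimal sketch posting a zeroed message (assumes a guest that has
    /// configured the target SynIC message page; the corresponding test
    /// below is ignored for that reason):
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// # use std::mem;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let _vcpu = vm.create_vcpu(0).unwrap();
    /// let msg: [u8; mem::size_of::<HvMessage>()] = [0; mem::size_of::<HvMessage>()];
    /// vm.post_message_direct(0, 0, &msg).unwrap();
    /// ```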
    #[cfg(target_arch = "x86_64")]
    pub fn post_message_direct(&self, vp: u32, sint: u8, msg: &[u8]) -> Result<()> {
        self.hvcall_post_message_direct(vp, sint, msg)
    }

    /// Generic hypercall version of post_message_direct. Returns EINVAL if
    /// the message does not fit in the hypercall input page.
    #[cfg(target_arch = "x86_64")]
    fn hvcall_post_message_direct(&self, vp: u32, sint: u8, msg: &[u8]) -> Result<()> {
        let mut input = hv_input_post_message_direct {
            vp_index: vp,
            vtl: 0,
            sint_index: sint as u32,
            ..Default::default()
        };
        if msg.len() > input.message.len() {
            return Err(errno::Error::new(libc::EINVAL).into());
        }
        let len = cmp::min(msg.len(), input.message.len());
        input.message[..len].copy_from_slice(&msg[..len]);

        let mut args = make_args!(HVCALL_POST_MESSAGE_DIRECT, input);
        self.hvcall(&mut args)
    }

    /// Registers for deliverability notifications on the given virtual
    /// processor by writing the deliverability-notifications register.
    pub fn register_deliverabilty_notifications(&self, vp: u32, flag: u64) -> Result<()> {
        self.hvcall_register_deliverability_notifications(vp, flag)
    }

    /// Sets a sequence of registers on a virtual processor via the
    /// HvSetVpRegisters hypercall, returning EINTR if the hypercall
    /// processed fewer registers than requested.
    fn hvcall_set_reg(&self, vp: u32, reg_assocs: &[hv_register_assoc]) -> Result<()> {
        let input = make_rep_input!(
            hv_input_set_vp_registers {
                vp_index: vp,
                ..Default::default()
            },
            elements,
            reg_assocs
        );
        let mut args = make_rep_args!(HVCALL_SET_VP_REGISTERS, input);
        self.hvcall(&mut args)?;

        if args.reps as usize != reg_assocs.len() {
            return Err(libc::EINTR.into());
        }

        Ok(())
    }

    /// Generic hypercall version of register_deliverability_notifications.
    fn hvcall_register_deliverability_notifications(&self, vp: u32, flag: u64) -> Result<()> {
        self.hvcall_set_reg(
            vp,
            &[hv_register_assoc {
                name: hv_register_name_HV_REGISTER_DELIVERABILITY_NOTIFICATIONS,
                value: hv_register_value { reg64: flag },
                ..Default::default()
            }],
        )
    }

    /// Common helper for the irqfd ioctl; `flags` selects assign/deassign
    /// and resample behavior.
    fn irqfd(&self, fd: RawFd, resamplefd: RawFd, gsi: u32, flags: u32) -> Result<()> {
        let irqfd_arg = mshv_user_irqfd {
            fd,
            flags,
            resamplefd,
            gsi,
        };

        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(&self.vm, MSHV_IRQFD(), &irqfd_arg) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

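    /// Registers an event that will, when signaled, trigger the `gsi` IRQ.
    /// A minimal sketch, adapted from the `test_irqfd` test below:
    ///
    /// ```no_run
    /// # use libc::EFD_NONBLOCK;
    /// # use mshv_ioctls::Mshv;
    /// # use vmm_sys_util::eventfd::EventFd;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let efd = EventFd::new(EFD_NONBLOCK).unwrap();
    /// vm.register_irqfd(&efd, 30).unwrap();
    /// vm.unregister_irqfd(&efd, 30).unwrap();
    /// ```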
    pub fn register_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
        self.irqfd(fd.as_raw_fd(), 0, gsi, 0)
    }

    /// Registers an event that will, when signaled, trigger the `gsi` IRQ;
    /// `resamplefd` is notified when the interrupt is acknowledged, so the
    /// caller can re-arm level-triggered interrupts.
    pub fn register_irqfd_with_resample(
        &self,
        fd: &EventFd,
        resamplefd: &EventFd,
        gsi: u32,
    ) -> Result<()> {
        self.irqfd(
            fd.as_raw_fd(),
            resamplefd.as_raw_fd(),
            gsi,
            set_bits!(u32, MSHV_IRQFD_BIT_RESAMPLE),
        )
    }

    /// Unregisters an event that would trigger the `gsi` IRQ.
    pub fn unregister_irqfd(&self, fd: &EventFd, gsi: u32) -> Result<()> {
        self.irqfd(
            fd.as_raw_fd(),
            0,
            gsi,
            set_bits!(u32, MSHV_IRQFD_BIT_DEASSIGN),
        )
    }

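    /// Sets the MSI routing table entries, overwriting any previously set
    /// entries. A minimal sketch with an empty table, adapted from the
    /// `test_set_msi_routing` test below:
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let msi_routing = mshv_user_irq_table::default();
    /// vm.set_msi_routing(&msi_routing).unwrap();
    /// ```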
    pub fn set_msi_routing(&self, msi_routing: &mshv_user_irq_table) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_SET_MSI_ROUTING(), msi_routing) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Common helper for the ioeventfd ioctl. PIO addresses are not
    /// supported, so only MMIO addresses are accepted; a non-zero-sized
    /// `datamatch` type turns on datamatch semantics.
    fn ioeventfd<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
        mut flags: u32,
    ) -> Result<()> {
        let mmio_addr = match addr {
            IoEventAddress::Pio(_) => {
                return Err(libc::ENOTSUP.into());
            }
            IoEventAddress::Mmio(ref m) => *m,
        };

        if std::mem::size_of::<T>() > 0 {
            flags |= set_bits!(u32, MSHV_IOEVENTFD_BIT_DATAMATCH);
        }

        let ioeventfd = mshv_user_ioeventfd {
            datamatch: datamatch.into(),
            len: std::mem::size_of::<T>() as u32,
            addr: mmio_addr,
            fd: fd.as_raw_fd(),
            flags,
            ..Default::default()
        };
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_ref(self, MSHV_IOEVENTFD(), &ioeventfd) };
        if ret == 0 {
            Ok(())
        } else {
            Err(errno::Error::last().into())
        }
    }

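    /// Registers an event to be signaled whenever the guest writes to the
    /// given MMIO address (pass `NoDatamatch` to match any write). A minimal
    /// sketch, adapted from the `test_ioeventfd` test below:
    ///
    /// ```no_run
    /// # use mshv_ioctls::{IoEventAddress, Mshv, NoDatamatch};
    /// # use vmm_sys_util::eventfd::EventFd;
    /// let efd = EventFd::new(0).unwrap();
    /// let addr = IoEventAddress::Mmio(0xe7e85004);
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// vm.register_ioevent(&efd, &addr, NoDatamatch).unwrap();
    /// vm.unregister_ioevent(&efd, &addr, NoDatamatch).unwrap();
    /// ```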
    pub fn register_ioevent<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
    ) -> Result<()> {
        self.ioeventfd(fd, addr, datamatch, 0)
    }

    /// Unregisters an event previously registered with `register_ioevent`;
    /// the same `fd`, `addr`, and `datamatch` must be passed.
    pub fn unregister_ioevent<T: Into<u64>>(
        &self,
        fd: &EventFd,
        addr: &IoEventAddress,
        datamatch: T,
    ) -> Result<()> {
        self.ioeventfd(
            fd,
            addr,
            datamatch,
            set_bits!(u32, MSHV_IOEVENTFD_BIT_DEASSIGN),
        )
    }

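    /// Gets a partition property. A minimal sketch querying the processor
    /// clock frequency, adapted from the `test_get_property` test below:
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let freq = vm
    ///     .get_partition_property(
    ///         hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY,
    ///     )
    ///     .unwrap();
    /// ```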
    pub fn get_partition_property(&self, code: u32) -> Result<u64> {
        self.hvcall_get_partition_property(code)
    }

    /// Generic hypercall version of get_partition_property.
    fn hvcall_get_partition_property(&self, code: u32) -> Result<u64> {
        let input = hv_input_get_partition_property {
            property_code: code,
            ..Default::default()
        };
        let mut output = hv_output_get_partition_property {
            ..Default::default()
        };
        let mut args = make_args!(HVCALL_GET_PARTITION_PROPERTY, input, output);
        self.hvcall(&mut args)?;

        Ok(output.property_value)
    }

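    /// Sets a partition property. A minimal sketch configuring the action on
    /// unimplemented MSR accesses, adapted from the tests below:
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// vm.set_partition_property(
    ///     hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
    ///     hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
    /// )
    /// .unwrap();
    /// ```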
    pub fn set_partition_property(&self, code: u32, value: u64) -> Result<()> {
        self.hvcall_set_partition_property(code, value)
    }

    /// Generic hypercall version of set_partition_property.
    fn hvcall_set_partition_property(&self, code: u32, value: u64) -> Result<()> {
        let input = hv_input_set_partition_property {
            property_code: code,
            property_value: value,
            ..Default::default()
        };
        let mut args = make_args!(HVCALL_SET_PARTITION_PROPERTY, input);
        self.hvcall(&mut args)
    }

    /// Enables dirty page tracking by setting the GPA page access tracking
    /// partition property.
    pub fn enable_dirty_page_tracking(&self) -> Result<()> {
        let flag: u64 = 0x1;
        self.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING,
            flag,
        )
    }

    /// Disables dirty page tracking by clearing the GPA page access tracking
    /// partition property.
    pub fn disable_dirty_page_tracking(&self) -> Result<()> {
        let flag: u64 = 0x0;
        self.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING,
            flag,
        )
    }

    /// Returns the page access bitmap (one bit per page, packed into `u64`
    /// words) for `page_count` pages starting at `base_pfn`. `access_type`
    /// selects dirty or accessed state, and `access_op` selects whether that
    /// state is cleared, set, or left unchanged while reading.
    pub fn get_gpap_access_bitmap(
        &self,
        base_pfn: u64,
        page_count: u64,
        access_type: u8,
        access_op: u8,
    ) -> Result<Vec<u64>> {
        let buf_sz = page_count.div_ceil(64);
        let mut bitmap: Vec<u64> = vec![0u64; buf_sz as usize];
        let mut args = mshv_gpap_access_bitmap {
            access_type,
            access_op,
            page_count,
            gpap_base: base_pfn,
            bitmap_ptr: bitmap.as_mut_ptr() as u64,
            ..Default::default()
        };

        // SAFETY: IOCTL call with the correct types; the kernel writes into
        // the bitmap buffer, which stays alive for the duration of the call.
        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_GET_GPAP_ACCESS_BITMAP(), &mut args) };
        if ret == 0 {
            Ok(bitmap)
        } else {
            Err(errno::Error::last().into())
        }
    }

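    /// Returns the dirty-page bitmap for a memory slot, querying it in
    /// batches of `PAGE_ACCESS_STATES_BATCH_SIZE` pages. A minimal sketch,
    /// adapted from the dirty-log tests below (assumes a mapped memory slot
    /// at PFN 0 with dirty page tracking enabled):
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// # use mshv_bindings::*;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// vm.enable_dirty_page_tracking().unwrap();
    /// let mem_size = 32 * 1024 * 1024;
    /// let bitmap = vm
    ///     .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_CLEAR as u8)
    ///     .unwrap();
    /// ```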
    /// Returns the dirty bitmap for `memory_size` bytes of guest memory
    /// starting at `base_pfn`, issuing one get_gpap_access_bitmap call per
    /// batch of pages.
    pub fn get_dirty_log(
        &self,
        base_pfn: u64,
        memory_size: usize,
        access_op: u8,
    ) -> Result<Vec<u64>> {
        // One bit per page, 64 pages per u64 word.
        let bitmap_size = memory_size.div_ceil(HV_PAGE_SIZE * 64);
        let mut bitmap: Vec<u64> = Vec::with_capacity(bitmap_size);
        let mut completed = 0;
        let total = (memory_size / HV_PAGE_SIZE) as u64;

        while completed < total {
            let remaining = total - completed;
            let batch_size = cmp::min(PAGE_ACCESS_STATES_BATCH_SIZE, remaining);
            let mut bitmap_part = self.get_gpap_access_bitmap(
                base_pfn + completed,
                batch_size,
                MSHV_GPAP_ACCESS_TYPE_DIRTY as u8,
                access_op,
            )?;
            bitmap.append(&mut bitmap_part);
            completed += batch_size;
        }
        Ok(bitmap)
    }

    /// Creates an emulated device in the kernel, returning a wrapper over
    /// the new device's file descriptor.
    pub fn create_device(&self, device: &mut mshv_create_device) -> Result<DeviceFd> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_CREATE_DEVICE(), device) };
        if ret == 0 {
            // SAFETY: we verified the result of the IOCTL above, so the
            // kernel gave us a valid, owned file descriptor.
            Ok(new_device(unsafe { File::from_raw_fd(device.fd as i32) }))
        } else {
            Err(errno::Error::last().into())
        }
    }

    /// Issues the raw hypercall described by `args` through the root-partition
    /// ioctl, converting hypercall failures into `MshvError::Hypercall`.
    pub fn hvcall(&self, args: &mut mshv_root_hvcall) -> Result<()> {
        // SAFETY: IOCTL call with the correct types.
        let ret = unsafe { ioctl_with_mut_ref(self, MSHV_ROOT_HVCALL(), args) };
        if ret == 0 {
            Ok(())
        } else {
            Err(MshvError::from_hvcall(errno::Error::last(), *args))
        }
    }

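    /// Returns the list of MSR indices supported by this partition, derived
    /// from its processor, synthetic, and xsave feature properties. A minimal
    /// sketch, adapted from the `test_get_msr_index_list` test below:
    ///
    /// ```no_run
    /// # use mshv_ioctls::Mshv;
    /// let hv = Mshv::new().unwrap();
    /// let vm = hv.create_vm().unwrap();
    /// vm.initialize().unwrap();
    /// let msr_list = vm.get_msr_index_list().unwrap();
    /// ```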
    #[cfg(target_arch = "x86_64")]
    pub fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        let xsave_feature_val = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
        )?;
        let proc_features0 = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_FEATURES0,
        )?;
        let proc_features1 = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_FEATURES1,
        )?;
        let syn_feature = self.get_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES,
        )?;
        let mut proc_features = hv_partition_processor_features::default();
        // SAFETY: writing the raw property words into the feature union.
        unsafe {
            proc_features.as_uint64[0] = proc_features0;
            proc_features.as_uint64[1] = proc_features1;
        }
        let synthetic_features = hv_partition_synthetic_processor_features {
            as_uint64: [syn_feature],
        };
        let xsave_features = hv_partition_processor_xsave_features {
            as_uint64: xsave_feature_val,
        };
        let vp_features: VpFeatures = VpFeatures {
            proc_features,
            xsave_features,
            synthetic_features,
        };
        Ok(get_partition_supported_msrs(&vp_features))
    }
}

/// Helper function to create a new `VmFd` from a partition `File`.
pub fn new_vmfd(vm: File) -> VmFd {
    VmFd { vm }
}

#[cfg(test)]
mod tests {
    use libc::c_void;

    use super::*;
    use crate::ioctls::system::Mshv;
    use crate::ioctls::MshvError;
    #[cfg(target_arch = "x86_64")]
    use std::mem;

    #[test]
    fn test_user_memory() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                0x1000,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        };
        let mem = mshv_user_mem_region {
            flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
            guest_pfn: 0x1,
            size: 0x1000,
            userspace_addr: addr as u64,
            ..Default::default()
        };

        vm.map_user_memory(mem).unwrap();

        vm.unmap_user_memory(mem).unwrap();
    }

    #[test]
    fn test_create_vcpu() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0);
        assert!(vcpu.is_ok());
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_assert_virtual_interrupt() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let state = vcpu.get_lapic().unwrap();
        let buffer = Buffer::try_from(&state).unwrap();
        // SAFETY: the buffer is at least as large as the local interrupt
        // controller state it is reinterpreted as.
        let hv_state = unsafe { &*(buffer.buf as *const hv_local_interrupt_controller_state) };
        let cfg = InterruptRequest {
            interrupt_type: hv_interrupt_type_HV_X64_INTERRUPT_TYPE_EXTINT,
            apic_id: hv_state.apic_id as u64,
            vector: 0,
            level_triggered: false,
            logical_destination_mode: false,
            long_mode: false,
        };
        vm.request_virtual_interrupt(&cfg).unwrap();
        vm.hvcall_assert_virtual_interrupt(&cfg).unwrap();
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_install_intercept() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let intercept_args = mshv_install_intercept {
            access_type_mask: HV_INTERCEPT_ACCESS_MASK_EXECUTE,
            intercept_type: hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
            intercept_parameter: hv_intercept_parameters { cpuid_index: 0x100 },
        };
        assert!(vm.install_intercept(intercept_args).is_ok());
        assert!(vm
            .hvcall_install_intercept(
                HV_INTERCEPT_ACCESS_MASK_EXECUTE,
                hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
                hv_intercept_parameters { cpuid_index: 0x101 },
            )
            .is_ok());
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_get_property() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();

        let mut val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
            )
            .unwrap();
        let mut hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Max xsave data size: {val} bytes");
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
            )
            .unwrap();
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Xsave feature: {val}");
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY,
            )
            .unwrap();
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY,
            )
            .unwrap();
        assert!(val == hvcall_val);
        println!("Processor frequency: {val}");
        vm.set_partition_property(
            hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64,
        )
        .unwrap();
        val = vm
            .get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            )
            .unwrap();
        assert!(
            val == hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO
                .into()
        );
        hvcall_val = vm
            .hvcall_get_partition_property(
                hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION,
            )
            .unwrap();
        assert!(val == hvcall_val);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_set_property() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();

        let code = hv_partition_property_code_HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION;
        let ignore =
            hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO as u64;
        let fault = hv_unimplemented_msr_action_HV_UNIMPLEMENTED_MSR_ACTION_FAULT as u64;

        vm.set_partition_property(code, ignore).unwrap();
        let ignore_ret = vm.get_partition_property(code).unwrap();
        assert!(ignore_ret == ignore);

        vm.set_partition_property(code, fault).unwrap();
        let fault_ret = vm.get_partition_property(code).unwrap();
        assert!(fault_ret == fault);

        vm.hvcall_set_partition_property(code, ignore).unwrap();
        let ignore_ret = vm.get_partition_property(code).unwrap();
        assert!(ignore_ret == ignore);

        vm.hvcall_set_partition_property(code, fault).unwrap();
        let fault_ret = vm.get_partition_property(code).unwrap();
        assert!(fault_ret == fault);
    }

    #[test]
    fn test_set_partition_property_invalid() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let code = hv_partition_property_code_HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS;

        let res_0 = vm.set_partition_property(code, 0);
        assert!(res_0.is_err());

        let res_1 = vm.hvcall_set_partition_property(code, 0);
        let mshv_err_check = MshvError::Hypercall {
            code: HVCALL_SET_PARTITION_PROPERTY as u16,
            status_raw: HV_STATUS_INVALID_PARTITION_STATE as u16,
            status: Some(HvError::InvalidPartitionState),
        };
        assert!(res_1.err().unwrap() == mshv_err_check);
    }

    #[test]
    fn test_irqfd() {
        use libc::EFD_NONBLOCK;
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let efd = EventFd::new(EFD_NONBLOCK).unwrap();
        vm.register_irqfd(&efd, 30).unwrap();
        vm.unregister_irqfd(&efd, 30).unwrap();
    }

    #[test]
    fn test_ioeventfd() {
        let efd = EventFd::new(0).unwrap();
        let addr = IoEventAddress::Mmio(0xe7e85004);
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        vm.register_ioevent(&efd, &addr, NoDatamatch).unwrap();
        vm.unregister_ioevent(&efd, &addr, NoDatamatch).unwrap();
    }

    #[test]
    fn test_set_msi_routing() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let msi_routing = mshv_user_irq_table::default();
        assert!(vm.set_msi_routing(&msi_routing).is_ok());
    }

    fn _test_clear_set_get_dirty_log(mem_size: usize) {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        // SAFETY: anonymous mmap; the region backs the guest memory slot below.
        let load_addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                mem_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        } as *mut u8;
        let mem_region = mshv_user_mem_region {
            flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
            guest_pfn: 0x0_u64,
            size: mem_size as u64,
            userspace_addr: load_addr as u64,
            ..Default::default()
        };
        vm.map_user_memory(mem_region).unwrap();
        vm.enable_dirty_page_tracking().unwrap();

        let bitmap_len = ((mem_size + HV_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT) / 64;
        // Clear any dirty state left over from setup.
        {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_CLEAR as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
        }
        // Read the (now clear) bitmap while setting all the dirty bits.
        let clear_bitmap = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_SET as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        for x in clear_bitmap {
            assert!(x == 0);
        }
        // Two consecutive no-op reads must return identical bitmaps.
        let set_bitmap_0 = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_NOOP as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        let set_bitmap_1 = {
            let bitmap = vm
                .get_dirty_log(0, mem_size, MSHV_GPAP_ACCESS_OP_NOOP as u8)
                .unwrap();
            assert!(bitmap.len() == bitmap_len);
            bitmap
        };
        for i in 0..bitmap_len {
            assert!(set_bitmap_0[i] == set_bitmap_1[i]);
        }

        vm.disable_dirty_page_tracking().unwrap();
        vm.unmap_user_memory(mem_region).unwrap();
        // SAFETY: munmap of the region mapped above.
        unsafe { libc::munmap(load_addr as *mut c_void, mem_size) };
    }

    #[test]
    fn test_get_dirty_log_32M() {
        let mem_size = 32 * 1024 * 1024;
        _test_clear_set_get_dirty_log(mem_size);
    }

    #[test]
    fn test_get_dirty_log_8G() {
        let mem_size = 8 * 1024 * 1024 * 1024;
        _test_clear_set_get_dirty_log(mem_size);
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    #[ignore]
    fn test_signal_event_direct() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        vm.signal_event_direct(0, 0, 1).unwrap();
        vm.hvcall_signal_event_direct(0, 0, 1).unwrap();
    }

    #[cfg(target_arch = "x86_64")]
    #[test]
    #[ignore]
    fn test_post_message_direct() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        let hv_message: [u8; mem::size_of::<HvMessage>()] = [0; mem::size_of::<HvMessage>()];
        vm.post_message_direct(0, 0, &hv_message).unwrap();
        vm.hvcall_post_message_direct(0, 0, &hv_message).unwrap();
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_register_deliverabilty_notifications() {
        let hv = Mshv::new().unwrap();
        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let _vcpu = vm.create_vcpu(0).unwrap();
        vm.register_deliverabilty_notifications(0, 0).unwrap();
        vm.hvcall_register_deliverability_notifications(0, 0)
            .unwrap();
        let res = vm.register_deliverabilty_notifications(0, 1);
        assert!(res.is_err());
        if let Err(e) = res {
            assert!(matches!(e, MshvError::Hypercall { .. }));
            assert!(e.errno() == libc::EIO);
            match e {
                MshvError::Hypercall {
                    code,
                    status_raw,
                    status,
                } => {
                    assert!(code == HVCALL_SET_VP_REGISTERS as u16);
                    assert!(status_raw == HV_STATUS_INVALID_PARAMETER as u16);
                    assert!(status.unwrap() as u32 == HV_STATUS_INVALID_PARAMETER);
                }
                _ => unreachable!(),
            }
        }
    }

    #[test]
    #[cfg(target_arch = "x86_64")]
    fn test_get_msr_index_list() {
        let hv = Mshv::new().unwrap();
        let msr_list = hv.get_msr_index_list().unwrap();

        let mut found = false;
        for index in msr_list {
            if index == IA32_MSR_SYSENTER_CS {
                found = true;
                break;
            }
        }
        assert!(found);

        let vm = hv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let mut num_errors = 0;
        for idx in vm.get_msr_index_list().unwrap() {
            let mut get_set_msrs = Msrs::from_entries(&[msr_entry {
                index: idx,
                ..Default::default()
            }])
            .unwrap();
            vcpu.get_msrs(&mut get_set_msrs).unwrap_or_else(|_| {
                println!("Error getting MSR: 0x{idx:x}");
                num_errors += 1;
                0
            });
            vcpu.set_msrs(&get_set_msrs).unwrap_or_else(|_| {
                println!("Error setting MSR: 0x{idx:x}");
                num_errors += 1;
                0
            });
        }
        assert!(num_errors == 0);
    }
}