1use crate::ioctls::{MshvError, Result};
6use crate::mshv_ioctls::*;
7use mshv_bindings::*;
8#[cfg(target_arch = "x86_64")]
9use std::convert::TryFrom;
10use std::fs::File;
11use std::os::unix::io::{AsRawFd, RawFd};
12#[cfg(all(test, target_arch = "x86_64"))]
13use std::slice;
14use vmm_sys_util::errno;
15use vmm_sys_util::ioctl::ioctl_with_mut_ref;
16#[cfg(target_arch = "x86_64")]
17use vmm_sys_util::ioctl::ioctl_with_ref;
18
/// Builds a `Vec<hv_register_assoc>` from an iterable of
/// `(register name, u64 value)` pairs and hands it to `$vcpu.set_reg`.
/// Reserved fields of each association are left zeroed.
#[allow(unused_macros)]
#[macro_export]
macro_rules! set_registers_64 {
    ($vcpu:expr, $arr_t:expr ) => {{
        // Translate every (name, value) pair into a full register
        // association in one pass.
        let assocs: Vec<hv_register_assoc> = $arr_t
            .iter()
            .map(|(name, value)| hv_register_assoc {
                name: *name as u32,
                value: hv_register_value { reg64: *value },
                ..Default::default()
            })
            .collect();
        #[allow(unused_parens)]
        $vcpu.set_reg(&assocs)
    }};
}
45
/// Wrapper over the mshv vCPU file descriptor plus the optional
/// memory-mapped VP register page used as a fast register access path.
#[derive(Debug)]
pub struct VcpuFd {
    /// Index of this virtual processor; used as `vp_index` in every
    /// hypercall issued through this handle.
    index: u32,
    /// Open file descriptor of the vCPU device.
    vcpu: File,
    /// Optionally mapped VP register page (`None` when unavailable);
    /// when valid, common registers are read/written directly in it.
    vp_page: Option<RegisterPage>,
}
53
/// Creates a new [`VcpuFd`] from its parts.
///
/// * `index` - vp_index of the virtual processor.
/// * `vcpu` - file descriptor of the vCPU device.
/// * `vp_page` - optional mapped VP register page for this vCPU.
pub fn new_vcpu(index: u32, vcpu: File, vp_page: Option<RegisterPage>) -> VcpuFd {
    VcpuFd {
        index,
        vcpu,
        vp_page,
    }
}
66
impl AsRawFd for VcpuFd {
    /// Returns the raw fd of the underlying vCPU device file.
    fn as_raw_fd(&self) -> RawFd {
        self.vcpu.as_raw_fd()
    }
}
72
73#[cfg(not(target_arch = "aarch64"))]
77fn update_interrupt_bitmap(ret_regs: &mut SpecialRegisters, pending_reg: u64) {
78 if (pending_reg & 0x1) == 1 && (pending_reg >> 1).trailing_zeros() >= 3
80 {
81 let interrupt_nr = (pending_reg >> 16) & 0xFFFF;
83 if interrupt_nr > 255 {
84 panic!("Invalid interrupt vector number > 255");
85 }
86 let bit_offset = interrupt_nr & 0x3F; let index = interrupt_nr >> 6;
90 ret_regs.interrupt_bitmap[index as usize] |= 1 << bit_offset;
91 }
92}
93
#[cfg(not(target_arch = "aarch64"))]
// Special registers that are NOT mirrored in the VP register page and
// therefore must always be transferred through hypercalls, even when a
// valid register page is mapped. Order matters: get/set code below
// indexes results by position in this list.
static NON_VP_PAGE_SP_REGS: &[::std::os::raw::c_uint; 7] = &[
    hv_register_name_HV_X64_REGISTER_TR,
    hv_register_name_HV_X64_REGISTER_LDTR,
    hv_register_name_HV_X64_REGISTER_GDTR,
    hv_register_name_HV_X64_REGISTER_IDTR,
    hv_register_name_HV_X64_REGISTER_CR2,
    hv_register_name_HV_X64_REGISTER_APIC_BASE,
    hv_register_name_HV_REGISTER_PENDING_INTERRUPTION,
];
104
#[cfg(not(target_arch = "aarch64"))]
// Special registers that ARE mirrored in the VP register page and can be
// accessed there directly when the page is valid. Order matters: the
// ioctl fallback path indexes results by position in this list.
static VP_PAGE_SP_REGS: &[::std::os::raw::c_uint; 11] = &[
    hv_register_name_HV_X64_REGISTER_CS,
    hv_register_name_HV_X64_REGISTER_DS,
    hv_register_name_HV_X64_REGISTER_ES,
    hv_register_name_HV_X64_REGISTER_FS,
    hv_register_name_HV_X64_REGISTER_GS,
    hv_register_name_HV_X64_REGISTER_SS,
    hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR0,
    hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR3,
    hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR4,
    hv_register_name_HV_X64_REGISTER_CR8,
    hv_register_name_HV_X64_REGISTER_EFER,
];
119
120impl VcpuFd {
    /// Returns a reference to the mapped VP register page, if one was
    /// supplied when this `VcpuFd` was created.
    pub fn get_vp_reg_page(&self) -> Option<&RegisterPage> {
        self.vp_page.as_ref()
    }
125
126 #[cfg(not(target_arch = "aarch64"))]
128 fn is_valid_vp_reg_page(&self) -> bool {
129 let vp_reg_page = match self.get_vp_reg_page() {
130 Some(page) => page.0,
131 None => return false,
132 };
133 unsafe { (*vp_reg_page).isvalid != 0 }
134 }
135
    /// Reads the registers named in `reg_names`, filling each entry's
    /// `value` field in place. Returns EINVAL on an empty slice.
    pub fn get_reg(&self, reg_names: &mut [hv_register_assoc]) -> Result<()> {
        self.hvcall_get_reg(reg_names)
    }
140 fn hvcall_get_reg(&self, reg_assocs: &mut [hv_register_assoc]) -> Result<()> {
142 if reg_assocs.is_empty() {
143 return Err(libc::EINVAL.into());
144 }
145 let reg_names: Vec<hv_register_name> = reg_assocs.iter().map(|assoc| assoc.name).collect();
146 let input = make_rep_input!(
147 hv_input_get_vp_registers {
148 vp_index: self.index,
149 ..Default::default()
150 },
151 names,
152 reg_names.as_slice()
153 );
154 let mut output: Vec<hv_register_value> = reg_names
155 .iter()
156 .map(|_| hv_register_value {
157 reg128: hv_u128 {
158 ..Default::default()
159 },
160 })
161 .collect();
162 let output_slice = output.as_mut_slice();
163
164 let mut args = make_rep_args!(HVCALL_GET_VP_REGISTERS, input, output_slice);
165 self.hvcall(&mut args)?;
166
167 if args.reps as usize != reg_assocs.len() {
168 return Err(libc::EINTR.into());
169 }
170
171 for (assoc, value) in reg_assocs.iter_mut().zip(output.iter()) {
172 assoc.value = *value;
173 }
174
175 Ok(())
176 }
    /// Writes the registers described by `regs` to this vCPU.
    pub fn set_reg(&self, regs: &[hv_register_assoc]) -> Result<()> {
        self.hvcall_set_reg(regs)
    }
181 fn hvcall_set_reg(&self, reg_assocs: &[hv_register_assoc]) -> Result<()> {
183 let input = make_rep_input!(
184 hv_input_set_vp_registers {
185 vp_index: self.index,
186 ..Default::default()
187 },
188 elements,
189 reg_assocs
190 );
191 let mut args = make_rep_args!(HVCALL_SET_VP_REGISTERS, input);
192 self.hvcall(&mut args)?;
193
194 if args.reps as usize != reg_assocs.len() {
195 return Err(libc::EINTR.into());
196 }
197
198 Ok(())
199 }
200
201 #[cfg(not(target_arch = "aarch64"))]
203 fn set_standard_regs_ioctl(&self, regs: &StandardRegisters) -> Result<()> {
204 let reg_assocs = [
205 hv_register_assoc {
206 name: hv_register_name_HV_X64_REGISTER_RAX,
207 value: hv_register_value { reg64: regs.rax },
208 ..Default::default()
209 },
210 hv_register_assoc {
211 name: hv_register_name_HV_X64_REGISTER_RBX,
212 value: hv_register_value { reg64: regs.rbx },
213 ..Default::default()
214 },
215 hv_register_assoc {
216 name: hv_register_name_HV_X64_REGISTER_RCX,
217 value: hv_register_value { reg64: regs.rcx },
218 ..Default::default()
219 },
220 hv_register_assoc {
221 name: hv_register_name_HV_X64_REGISTER_RDX,
222 value: hv_register_value { reg64: regs.rdx },
223 ..Default::default()
224 },
225 hv_register_assoc {
226 name: hv_register_name_HV_X64_REGISTER_RSI,
227 value: hv_register_value { reg64: regs.rsi },
228 ..Default::default()
229 },
230 hv_register_assoc {
231 name: hv_register_name_HV_X64_REGISTER_RDI,
232 value: hv_register_value { reg64: regs.rdi },
233 ..Default::default()
234 },
235 hv_register_assoc {
236 name: hv_register_name_HV_X64_REGISTER_RSP,
237 value: hv_register_value { reg64: regs.rsp },
238 ..Default::default()
239 },
240 hv_register_assoc {
241 name: hv_register_name_HV_X64_REGISTER_RBP,
242 value: hv_register_value { reg64: regs.rbp },
243 ..Default::default()
244 },
245 hv_register_assoc {
246 name: hv_register_name_HV_X64_REGISTER_R8,
247 value: hv_register_value { reg64: regs.r8 },
248 ..Default::default()
249 },
250 hv_register_assoc {
251 name: hv_register_name_HV_X64_REGISTER_R9,
252 value: hv_register_value { reg64: regs.r9 },
253 ..Default::default()
254 },
255 hv_register_assoc {
256 name: hv_register_name_HV_X64_REGISTER_R10,
257 value: hv_register_value { reg64: regs.r10 },
258 ..Default::default()
259 },
260 hv_register_assoc {
261 name: hv_register_name_HV_X64_REGISTER_R11,
262 value: hv_register_value { reg64: regs.r11 },
263 ..Default::default()
264 },
265 hv_register_assoc {
266 name: hv_register_name_HV_X64_REGISTER_R12,
267 value: hv_register_value { reg64: regs.r12 },
268 ..Default::default()
269 },
270 hv_register_assoc {
271 name: hv_register_name_HV_X64_REGISTER_R13,
272 value: hv_register_value { reg64: regs.r13 },
273 ..Default::default()
274 },
275 hv_register_assoc {
276 name: hv_register_name_HV_X64_REGISTER_R14,
277 value: hv_register_value { reg64: regs.r14 },
278 ..Default::default()
279 },
280 hv_register_assoc {
281 name: hv_register_name_HV_X64_REGISTER_R15,
282 value: hv_register_value { reg64: regs.r15 },
283 ..Default::default()
284 },
285 hv_register_assoc {
286 name: hv_register_name_HV_X64_REGISTER_RIP,
287 value: hv_register_value { reg64: regs.rip },
288 ..Default::default()
289 },
290 hv_register_assoc {
291 name: hv_register_name_HV_X64_REGISTER_RFLAGS,
292 value: hv_register_value { reg64: regs.rflags },
293 ..Default::default()
294 },
295 ];
296 self.set_reg(®_assocs)?;
297 Ok(())
298 }
299 #[cfg(target_arch = "aarch64")]
301 pub fn set_regs(&self, regs: &StandardRegisters) -> Result<()> {
302 let mut reg_assocs = Vec::with_capacity(38);
303
304 for i in 0..29 as usize {
305 reg_assocs.push(hv_register_assoc {
306 name: hv_register_name_HV_ARM64_REGISTER_X0 + i as u32,
307 value: hv_register_value {
308 reg64: regs.regs[i],
309 },
310 ..Default::default()
311 });
312 }
313
314 reg_assocs.push(hv_register_assoc {
315 name: hv_register_name_HV_ARM64_REGISTER_FP,
316 value: hv_register_value {
317 reg64: regs.regs[29],
318 },
319 ..Default::default()
320 });
321
322 reg_assocs.push(hv_register_assoc {
323 name: hv_register_name_HV_ARM64_REGISTER_LR,
324 value: hv_register_value {
325 reg64: regs.regs[30],
326 },
327 ..Default::default()
328 });
329
330 reg_assocs.push(hv_register_assoc {
331 name: hv_register_name_HV_ARM64_REGISTER_SP,
332 value: hv_register_value { reg64: regs.sp },
333 ..Default::default()
334 });
335
336 reg_assocs.push(hv_register_assoc {
337 name: hv_register_name_HV_ARM64_REGISTER_PC,
338 value: hv_register_value { reg64: regs.pc },
339 ..Default::default()
340 });
341
342 reg_assocs.push(hv_register_assoc {
343 name: hv_register_name_HV_ARM64_REGISTER_PSTATE,
344 value: hv_register_value { reg64: regs.pstate },
345 ..Default::default()
346 });
347
348 reg_assocs.push(hv_register_assoc {
349 name: hv_register_name_HV_ARM64_REGISTER_SP_EL1,
350 value: hv_register_value { reg64: regs.sp_el1 },
351 ..Default::default()
352 });
353
354 reg_assocs.push(hv_register_assoc {
355 name: hv_register_name_HV_ARM64_REGISTER_ELR_EL1,
356 value: hv_register_value {
357 reg64: regs.elr_el1,
358 },
359 ..Default::default()
360 });
361
362 reg_assocs.push(hv_register_assoc {
363 name: hv_register_name_HV_ARM64_REGISTER_FPSR,
364 value: hv_register_value { reg64: regs.fpsr },
365 ..Default::default()
366 });
367
368 reg_assocs.push(hv_register_assoc {
369 name: hv_register_name_HV_ARM64_REGISTER_FPCR,
370 value: hv_register_value { reg64: regs.fpcr },
371 ..Default::default()
372 });
373
374 self.hvcall_set_reg(®_assocs)?;
375 Ok(())
376 }
    #[cfg(not(target_arch = "aarch64"))]
    /// Writes the general-purpose registers, RIP and RFLAGS directly into
    /// the mapped VP register page, marking each touched register class
    /// dirty so the hypervisor reloads it on the next VP entry.
    ///
    /// NOTE(review): assumes the caller has already verified
    /// `is_valid_vp_reg_page()`; the `unwrap()` below panics otherwise.
    fn set_standard_regs_vp_page(&self, regs: &StandardRegisters) -> Result<()> {
        let vp_reg_page = self.get_vp_reg_page().unwrap().0;
        set_gp_regs_field_ptr!(vp_reg_page, rax, regs.rax);
        set_gp_regs_field_ptr!(vp_reg_page, rbx, regs.rbx);
        set_gp_regs_field_ptr!(vp_reg_page, rcx, regs.rcx);
        set_gp_regs_field_ptr!(vp_reg_page, rdx, regs.rdx);
        set_gp_regs_field_ptr!(vp_reg_page, rsi, regs.rsi);
        set_gp_regs_field_ptr!(vp_reg_page, rdi, regs.rdi);
        set_gp_regs_field_ptr!(vp_reg_page, rsp, regs.rsp);
        set_gp_regs_field_ptr!(vp_reg_page, rbp, regs.rbp);
        set_gp_regs_field_ptr!(vp_reg_page, r8, regs.r8);
        set_gp_regs_field_ptr!(vp_reg_page, r9, regs.r9);
        set_gp_regs_field_ptr!(vp_reg_page, r10, regs.r10);
        set_gp_regs_field_ptr!(vp_reg_page, r11, regs.r11);
        set_gp_regs_field_ptr!(vp_reg_page, r12, regs.r12);
        set_gp_regs_field_ptr!(vp_reg_page, r13, regs.r13);
        set_gp_regs_field_ptr!(vp_reg_page, r14, regs.r14);
        set_gp_regs_field_ptr!(vp_reg_page, r15, regs.r15);

        unsafe {
            // Flag the general-purpose register class as modified, then
            // write RIP and RFLAGS and flag their classes as well.
            (*vp_reg_page).dirty |= 1 << HV_X64_REGISTER_CLASS_GENERAL;
            (*vp_reg_page).__bindgen_anon_1.__bindgen_anon_1.rip = regs.rip;
            (*vp_reg_page).dirty |= 1 << HV_X64_REGISTER_CLASS_IP;
            (*vp_reg_page).__bindgen_anon_1.__bindgen_anon_1.rflags = regs.rflags;
            (*vp_reg_page).dirty |= 1 << HV_X64_REGISTER_CLASS_FLAGS;
        }
        Ok(())
    }
408
    #[cfg(not(target_arch = "aarch64"))]
    /// Sets the vCPU general-purpose registers, preferring the fast VP
    /// register page when it is valid and falling back to hypercalls.
    pub fn set_regs(&self, regs: &StandardRegisters) -> Result<()> {
        if self.is_valid_vp_reg_page() {
            self.set_standard_regs_vp_page(regs)
        } else {
            self.set_standard_regs_ioctl(regs)
        }
    }
418
    #[cfg(target_arch = "x86_64")]
    /// Reads the vCPU general-purpose registers, preferring the fast VP
    /// register page when it is valid and falling back to hypercalls.
    pub fn get_regs(&self) -> Result<StandardRegisters> {
        if self.is_valid_vp_reg_page() {
            self.get_standard_regs_vp_page()
        } else {
            self.get_standard_regs_ioctl()
        }
    }
428
429 #[cfg(not(target_arch = "aarch64"))]
431 fn get_standard_regs_ioctl(&self) -> Result<StandardRegisters> {
432 let reg_names = [
433 hv_register_name_HV_X64_REGISTER_RAX,
434 hv_register_name_HV_X64_REGISTER_RBX,
435 hv_register_name_HV_X64_REGISTER_RCX,
436 hv_register_name_HV_X64_REGISTER_RDX,
437 hv_register_name_HV_X64_REGISTER_RSI,
438 hv_register_name_HV_X64_REGISTER_RDI,
439 hv_register_name_HV_X64_REGISTER_RSP,
440 hv_register_name_HV_X64_REGISTER_RBP,
441 hv_register_name_HV_X64_REGISTER_R8,
442 hv_register_name_HV_X64_REGISTER_R9,
443 hv_register_name_HV_X64_REGISTER_R10,
444 hv_register_name_HV_X64_REGISTER_R11,
445 hv_register_name_HV_X64_REGISTER_R12,
446 hv_register_name_HV_X64_REGISTER_R13,
447 hv_register_name_HV_X64_REGISTER_R14,
448 hv_register_name_HV_X64_REGISTER_R15,
449 hv_register_name_HV_X64_REGISTER_RIP,
450 hv_register_name_HV_X64_REGISTER_RFLAGS,
451 ];
452
453 let mut reg_assocs: [hv_register_assoc; 18] = [hv_register_assoc::default(); 18];
454 for (it, elem) in reg_assocs.iter_mut().zip(reg_names) {
455 it.name = elem;
456 }
457
458 self.get_reg(&mut reg_assocs)?;
459 let mut ret_regs = StandardRegisters::default();
460 unsafe {
462 ret_regs.rax = reg_assocs[0].value.reg64;
463 ret_regs.rbx = reg_assocs[1].value.reg64;
464 ret_regs.rcx = reg_assocs[2].value.reg64;
465 ret_regs.rdx = reg_assocs[3].value.reg64;
466 ret_regs.rsi = reg_assocs[4].value.reg64;
467 ret_regs.rdi = reg_assocs[5].value.reg64;
468 ret_regs.rsp = reg_assocs[6].value.reg64;
469 ret_regs.rbp = reg_assocs[7].value.reg64;
470 ret_regs.r8 = reg_assocs[8].value.reg64;
471 ret_regs.r9 = reg_assocs[9].value.reg64;
472 ret_regs.r10 = reg_assocs[10].value.reg64;
473 ret_regs.r11 = reg_assocs[11].value.reg64;
474 ret_regs.r12 = reg_assocs[12].value.reg64;
475 ret_regs.r13 = reg_assocs[13].value.reg64;
476 ret_regs.r14 = reg_assocs[14].value.reg64;
477 ret_regs.r15 = reg_assocs[15].value.reg64;
478 ret_regs.rip = reg_assocs[16].value.reg64;
479 ret_regs.rflags = reg_assocs[17].value.reg64;
480 }
481
482 Ok(ret_regs)
483 }
484 #[cfg(target_arch = "aarch64")]
486 pub fn get_regs(&self) -> Result<StandardRegisters> {
487 let mut reg_assocs: Vec<hv_register_assoc> = Vec::with_capacity(38);
488 for i in 0..29 as usize {
489 reg_assocs.push(hv_register_assoc {
490 name: hv_register_name_HV_ARM64_REGISTER_X0 + i as u32,
491 ..Default::default()
492 });
493 }
494 reg_assocs.push(hv_register_assoc {
495 name: hv_register_name_HV_ARM64_REGISTER_FP,
496 ..Default::default()
497 });
498 reg_assocs.push(hv_register_assoc {
499 name: hv_register_name_HV_ARM64_REGISTER_LR,
500 ..Default::default()
501 });
502 reg_assocs.push(hv_register_assoc {
503 name: hv_register_name_HV_ARM64_REGISTER_SP,
504 ..Default::default()
505 });
506 reg_assocs.push(hv_register_assoc {
507 name: hv_register_name_HV_ARM64_REGISTER_PC,
508 ..Default::default()
509 });
510 reg_assocs.push(hv_register_assoc {
511 name: hv_register_name_HV_ARM64_REGISTER_PSTATE,
512 ..Default::default()
513 });
514 reg_assocs.push(hv_register_assoc {
515 name: hv_register_name_HV_ARM64_REGISTER_SP_EL1,
516 ..Default::default()
517 });
518 reg_assocs.push(hv_register_assoc {
519 name: hv_register_name_HV_ARM64_REGISTER_ELR_EL1,
520 ..Default::default()
521 });
522 reg_assocs.push(hv_register_assoc {
523 name: hv_register_name_HV_ARM64_REGISTER_FPSR,
524 ..Default::default()
525 });
526 reg_assocs.push(hv_register_assoc {
527 name: hv_register_name_HV_ARM64_REGISTER_FPCR,
528 ..Default::default()
529 });
530
531 self.hvcall_get_reg(&mut reg_assocs)?;
532 let mut ret_regs = StandardRegisters::default();
533 unsafe {
535 for i in 0..31 as usize {
536 ret_regs.regs[i] = reg_assocs[i].value.reg64;
537 }
538
539 ret_regs.sp = reg_assocs[31].value.reg64;
540 ret_regs.pc = reg_assocs[32].value.reg64;
541 ret_regs.pstate = reg_assocs[33].value.reg64;
542 ret_regs.sp_el1 = reg_assocs[34].value.reg64;
543 ret_regs.elr_el1 = reg_assocs[35].value.reg64;
544 ret_regs.fpsr = reg_assocs[36].value.reg64;
545 ret_regs.fpcr = reg_assocs[37].value.reg64;
546 }
547 Ok(ret_regs)
548 }
549
    #[cfg(not(target_arch = "aarch64"))]
    /// Reads the general-purpose registers, RIP and RFLAGS straight from
    /// the mapped VP register page (no hypercall required).
    ///
    /// NOTE(review): assumes the caller has already verified
    /// `is_valid_vp_reg_page()`; the `unwrap()` below panics otherwise.
    pub fn get_standard_regs_vp_page(&self) -> Result<StandardRegisters> {
        let vp_reg_page = self.get_vp_reg_page().unwrap().0;
        let mut ret_regs = StandardRegisters::default();
        // SAFETY: vp_reg_page points at the mapped register page owned
        // by this VcpuFd, which stays valid for the lifetime of &self.
        unsafe {
            ret_regs.rax = get_gp_regs_field_ptr!(vp_reg_page, rax);
            ret_regs.rbx = get_gp_regs_field_ptr!(vp_reg_page, rbx);
            ret_regs.rcx = get_gp_regs_field_ptr!(vp_reg_page, rcx);
            ret_regs.rdx = get_gp_regs_field_ptr!(vp_reg_page, rdx);
            ret_regs.rsi = get_gp_regs_field_ptr!(vp_reg_page, rsi);
            ret_regs.rdi = get_gp_regs_field_ptr!(vp_reg_page, rdi);
            ret_regs.rsp = get_gp_regs_field_ptr!(vp_reg_page, rsp);
            ret_regs.rbp = get_gp_regs_field_ptr!(vp_reg_page, rbp);
            ret_regs.r8 = get_gp_regs_field_ptr!(vp_reg_page, r8);
            ret_regs.r9 = get_gp_regs_field_ptr!(vp_reg_page, r9);
            ret_regs.r10 = get_gp_regs_field_ptr!(vp_reg_page, r10);
            ret_regs.r11 = get_gp_regs_field_ptr!(vp_reg_page, r11);
            ret_regs.r12 = get_gp_regs_field_ptr!(vp_reg_page, r12);
            ret_regs.r13 = get_gp_regs_field_ptr!(vp_reg_page, r13);
            ret_regs.r14 = get_gp_regs_field_ptr!(vp_reg_page, r14);
            ret_regs.r15 = get_gp_regs_field_ptr!(vp_reg_page, r15);
            ret_regs.rip = (*vp_reg_page).__bindgen_anon_1.__bindgen_anon_1.rip;
            ret_regs.rflags = (*vp_reg_page).__bindgen_anon_1.__bindgen_anon_1.rflags;
        }

        Ok(ret_regs)
    }
579
    #[cfg(not(target_arch = "aarch64"))]
    /// Reads the special registers using a mixed path: values mirrored in
    /// the VP register page (control regs, segment selectors, EFER) are
    /// read directly from the page, while the rest (TR, LDTR, GDTR, IDTR,
    /// CR2, APIC base, pending interruption) go through the hypercall.
    fn get_special_regs_vp_page(&self) -> Result<SpecialRegisters> {
        let vp_reg_page = match self.get_vp_reg_page() {
            Some(page) => page.0,
            None => return Err(libc::EINVAL.into()),
        };
        let mut ret_regs = SpecialRegisters::default();
        // SAFETY: vp_reg_page points at the mapped register page owned
        // by this VcpuFd, valid for the lifetime of &self.
        unsafe {
            ret_regs.cr0 = (*vp_reg_page).cr0;
            ret_regs.cr3 = (*vp_reg_page).cr3;
            ret_regs.cr4 = (*vp_reg_page).cr4;
            ret_regs.cr8 = (*vp_reg_page).cr8;
            ret_regs.cs = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.cs.into();
            ret_regs.ds = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.ds.into();
            ret_regs.es = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.es.into();
            ret_regs.fs = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.fs.into();
            ret_regs.gs = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.gs.into();
            ret_regs.ss = (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.ss.into();
            ret_regs.efer = (*vp_reg_page).efer;
        }

        // Fetch the registers that are never mirrored in the page.
        let mut reg_assocs: [hv_register_assoc; 7] = [hv_register_assoc::default(); 7];
        for (it, elem) in reg_assocs.iter_mut().zip(NON_VP_PAGE_SP_REGS) {
            it.name = *elem;
        }
        self.get_reg(&mut reg_assocs)?;
        // SAFETY: indices follow the NON_VP_PAGE_SP_REGS order, so each
        // union field read matches the register that was requested.
        unsafe {
            ret_regs.tr = SegmentRegister::from(reg_assocs[0].value.segment);
            ret_regs.ldt = SegmentRegister::from(reg_assocs[1].value.segment);
            ret_regs.gdt = TableRegister::from(reg_assocs[2].value.table);
            ret_regs.idt = TableRegister::from(reg_assocs[3].value.table);
            ret_regs.cr2 = reg_assocs[4].value.reg64;
            ret_regs.apic_base = reg_assocs[5].value.reg64;
            update_interrupt_bitmap(
                &mut ret_regs,
                reg_assocs[6].value.pending_interruption.as_uint64,
            );
        }
        Ok(ret_regs)
    }
623
    #[cfg(not(target_arch = "aarch64"))]
    /// Reads all special registers through the hypercall path; used when
    /// the VP register page is unavailable or invalid.
    fn get_special_regs_ioctl(&self) -> Result<SpecialRegisters> {
        // Query order: VP_PAGE_SP_REGS occupy indices 0..=10 and
        // NON_VP_PAGE_SP_REGS indices 11..=17; the decode below relies
        // on exactly this layout.
        let mut reg_names: [::std::os::raw::c_uint; 18] = [0u32; 18];
        reg_names[..11].copy_from_slice(VP_PAGE_SP_REGS);
        reg_names[11..].copy_from_slice(NON_VP_PAGE_SP_REGS);
        let mut reg_assocs: [hv_register_assoc; 18] = [hv_register_assoc::default(); 18];
        for (it, elem) in reg_assocs.iter_mut().zip(reg_names) {
            it.name = elem;
        }

        self.get_reg(&mut reg_assocs)?;
        let mut ret_regs = SpecialRegisters::default();
        // SAFETY: each union field read matches the register name placed
        // at the same index above (segment, table or 64-bit value).
        unsafe {
            ret_regs.cs = SegmentRegister::from(reg_assocs[0].value.segment);
            ret_regs.ds = SegmentRegister::from(reg_assocs[1].value.segment);
            ret_regs.es = SegmentRegister::from(reg_assocs[2].value.segment);
            ret_regs.fs = SegmentRegister::from(reg_assocs[3].value.segment);
            ret_regs.gs = SegmentRegister::from(reg_assocs[4].value.segment);
            ret_regs.ss = SegmentRegister::from(reg_assocs[5].value.segment);
            ret_regs.tr = SegmentRegister::from(reg_assocs[11].value.segment);
            ret_regs.ldt = SegmentRegister::from(reg_assocs[12].value.segment);
            ret_regs.gdt = TableRegister::from(reg_assocs[13].value.table);
            ret_regs.idt = TableRegister::from(reg_assocs[14].value.table);
            ret_regs.cr0 = reg_assocs[6].value.reg64;
            ret_regs.cr2 = reg_assocs[15].value.reg64;
            ret_regs.cr3 = reg_assocs[7].value.reg64;
            ret_regs.cr4 = reg_assocs[8].value.reg64;
            ret_regs.cr8 = reg_assocs[9].value.reg64;
            ret_regs.efer = reg_assocs[10].value.reg64;
            ret_regs.apic_base = reg_assocs[16].value.reg64;
            update_interrupt_bitmap(
                &mut ret_regs,
                reg_assocs[17].value.pending_interruption.as_uint64,
            );
        };

        Ok(ret_regs)
    }
686
    #[cfg(not(target_arch = "aarch64"))]
    /// Reads the vCPU special registers, preferring the fast VP register
    /// page when it is valid and falling back to hypercalls.
    pub fn get_sregs(&self) -> Result<SpecialRegisters> {
        if self.is_valid_vp_reg_page() {
            self.get_special_regs_vp_page()
        } else {
            self.get_special_regs_ioctl()
        }
    }
696
697 #[cfg(not(target_arch = "aarch64"))]
699 fn set_special_regs_vp_page(&self, sregs: &SpecialRegisters) -> Result<()> {
700 let vp_reg_page = self.get_vp_reg_page().unwrap().0;
701 unsafe {
702 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.cs = sregs.cs.into();
703 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.ds = sregs.ds.into();
704 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.es = sregs.es.into();
705 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.fs = sregs.fs.into();
706 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.gs = sregs.gs.into();
707 (*vp_reg_page).__bindgen_anon_3.__bindgen_anon_1.ss = sregs.ss.into();
708 (*vp_reg_page).dirty |= 1 << HV_X64_REGISTER_CLASS_SEGMENT;
710 }
711 let reg_assocs = [
712 hv_register_assoc {
713 name: hv_register_name_HV_X64_REGISTER_TR,
714 value: hv_register_value {
715 segment: sregs.tr.into(),
716 },
717 ..Default::default()
718 },
719 hv_register_assoc {
720 name: hv_register_name_HV_X64_REGISTER_LDTR,
721 value: hv_register_value {
722 segment: sregs.ldt.into(),
723 },
724 ..Default::default()
725 },
726 hv_register_assoc {
727 name: hv_register_name_HV_X64_REGISTER_GDTR,
728 value: hv_register_value {
729 table: sregs.gdt.into(),
730 },
731 ..Default::default()
732 },
733 hv_register_assoc {
734 name: hv_register_name_HV_X64_REGISTER_IDTR,
735 value: hv_register_value {
736 table: sregs.idt.into(),
737 },
738 ..Default::default()
739 },
740 hv_register_assoc {
741 name: hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR0,
742 value: hv_register_value { reg64: sregs.cr0 },
743 ..Default::default()
744 },
745 hv_register_assoc {
746 name: hv_register_name_HV_X64_REGISTER_CR2,
747 value: hv_register_value { reg64: sregs.cr2 },
748 ..Default::default()
749 },
750 hv_register_assoc {
751 name: hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR3,
752 value: hv_register_value { reg64: sregs.cr3 },
753 ..Default::default()
754 },
755 hv_register_assoc {
756 name: hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR4,
757 value: hv_register_value { reg64: sregs.cr4 },
758 ..Default::default()
759 },
760 hv_register_assoc {
761 name: hv_register_name_HV_X64_REGISTER_CR8,
762 value: hv_register_value { reg64: sregs.cr8 },
763 ..Default::default()
764 },
765 hv_register_assoc {
766 name: hv_register_name_HV_X64_REGISTER_EFER,
767 value: hv_register_value { reg64: sregs.efer },
768 ..Default::default()
769 },
770 hv_register_assoc {
771 name: hv_register_name_HV_X64_REGISTER_APIC_BASE,
772 value: hv_register_value {
773 reg64: sregs.apic_base,
774 },
775 ..Default::default()
776 },
777 ];
778
779 for bits in &sregs.interrupt_bitmap {
782 if *bits != 0 {
783 return Err(libc::EINVAL.into());
784 }
785 }
786 self.set_reg(®_assocs)?;
787 Ok(())
788 }
789
790 #[cfg(not(target_arch = "aarch64"))]
792 fn set_special_regs_ioctl(&self, sregs: &SpecialRegisters) -> Result<()> {
793 let reg_names: [hv_register_name; 17] = [
794 hv_register_name_HV_X64_REGISTER_CS,
795 hv_register_name_HV_X64_REGISTER_DS,
796 hv_register_name_HV_X64_REGISTER_ES,
797 hv_register_name_HV_X64_REGISTER_FS,
798 hv_register_name_HV_X64_REGISTER_GS,
799 hv_register_name_HV_X64_REGISTER_SS,
800 hv_register_name_HV_X64_REGISTER_TR,
801 hv_register_name_HV_X64_REGISTER_LDTR,
802 hv_register_name_HV_X64_REGISTER_GDTR,
803 hv_register_name_HV_X64_REGISTER_IDTR,
804 hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR0,
805 hv_register_name_HV_X64_REGISTER_CR2,
806 hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR3,
807 hv_register_name_HV_X64_REGISTER_INTERMEDIATE_CR4,
808 hv_register_name_HV_X64_REGISTER_CR8,
809 hv_register_name_HV_X64_REGISTER_EFER,
810 hv_register_name_HV_X64_REGISTER_APIC_BASE,
811 ];
812 let reg_values: [hv_register_value; 17] = [
813 hv_register_value {
814 segment: sregs.cs.into(),
815 },
816 hv_register_value {
817 segment: sregs.ds.into(),
818 },
819 hv_register_value {
820 segment: sregs.es.into(),
821 },
822 hv_register_value {
823 segment: sregs.fs.into(),
824 },
825 hv_register_value {
826 segment: sregs.gs.into(),
827 },
828 hv_register_value {
829 segment: sregs.ss.into(),
830 },
831 hv_register_value {
832 segment: sregs.tr.into(),
833 },
834 hv_register_value {
835 segment: sregs.ldt.into(),
836 },
837 hv_register_value {
838 table: sregs.gdt.into(),
839 },
840 hv_register_value {
841 table: sregs.idt.into(),
842 },
843 hv_register_value { reg64: sregs.cr0 },
844 hv_register_value { reg64: sregs.cr2 },
845 hv_register_value { reg64: sregs.cr3 },
846 hv_register_value { reg64: sregs.cr4 },
847 hv_register_value { reg64: sregs.cr8 },
848 hv_register_value { reg64: sregs.efer },
849 hv_register_value {
850 reg64: sregs.apic_base,
851 },
852 ];
853
854 for bits in &sregs.interrupt_bitmap {
857 if *bits != 0 {
858 return Err(libc::EINVAL.into());
859 }
860 }
861
862 let reg_assocs: Vec<hv_register_assoc> = reg_names
863 .iter()
864 .zip(reg_values.iter())
865 .map(|t| hv_register_assoc {
866 name: *t.0,
867 value: *t.1,
868 ..Default::default()
869 })
870 .collect();
871 self.set_reg(®_assocs)?;
872 Ok(())
873 }
874
    #[cfg(not(target_arch = "aarch64"))]
    /// Sets the vCPU special registers, preferring the fast VP register
    /// page when it is valid and falling back to hypercalls.
    pub fn set_sregs(&self, sregs: &SpecialRegisters) -> Result<()> {
        if self.is_valid_vp_reg_page() {
            self.set_special_regs_vp_page(sregs)
        } else {
            self.set_special_regs_ioctl(sregs)
        }
    }
884
    #[cfg(not(target_arch = "aarch64"))]
    /// Register names for the FPU state, in the fixed order shared by
    /// `get_fpu`/`set_fpu`: XMM0-15 at indices 0..=15, MMX0-7 at
    /// 16..=23, FP control/status at 24, XMM control/status at 25.
    fn fpu_registers() -> [hv_register_name; 26] {
        [
            hv_register_name_HV_X64_REGISTER_XMM0,
            hv_register_name_HV_X64_REGISTER_XMM1,
            hv_register_name_HV_X64_REGISTER_XMM2,
            hv_register_name_HV_X64_REGISTER_XMM3,
            hv_register_name_HV_X64_REGISTER_XMM4,
            hv_register_name_HV_X64_REGISTER_XMM5,
            hv_register_name_HV_X64_REGISTER_XMM6,
            hv_register_name_HV_X64_REGISTER_XMM7,
            hv_register_name_HV_X64_REGISTER_XMM8,
            hv_register_name_HV_X64_REGISTER_XMM9,
            hv_register_name_HV_X64_REGISTER_XMM10,
            hv_register_name_HV_X64_REGISTER_XMM11,
            hv_register_name_HV_X64_REGISTER_XMM12,
            hv_register_name_HV_X64_REGISTER_XMM13,
            hv_register_name_HV_X64_REGISTER_XMM14,
            hv_register_name_HV_X64_REGISTER_XMM15,
            hv_register_name_HV_X64_REGISTER_FP_MMX0,
            hv_register_name_HV_X64_REGISTER_FP_MMX1,
            hv_register_name_HV_X64_REGISTER_FP_MMX2,
            hv_register_name_HV_X64_REGISTER_FP_MMX3,
            hv_register_name_HV_X64_REGISTER_FP_MMX4,
            hv_register_name_HV_X64_REGISTER_FP_MMX5,
            hv_register_name_HV_X64_REGISTER_FP_MMX6,
            hv_register_name_HV_X64_REGISTER_FP_MMX7,
            hv_register_name_HV_X64_REGISTER_FP_CONTROL_STATUS,
            hv_register_name_HV_X64_REGISTER_XMM_CONTROL_STATUS,
        ]
    }
916
917 #[cfg(not(target_arch = "aarch64"))]
919 pub fn set_fpu(&self, fpu: &FloatingPointUnit) -> Result<()> {
920 let reg_names = Self::fpu_registers();
921 let mut reg_values: [hv_register_value; 26] = [hv_register_value { reg64: 0 }; 26];
922 for (i, reg) in reg_values.iter_mut().enumerate().take(16) {
924 unsafe {
926 *reg = hv_register_value {
927 reg128: std::mem::transmute::<[u8; 16usize], hv_u128>(fpu.xmm[i]),
928 };
929 }
930 }
931 for (i, reg) in reg_values.iter_mut().enumerate().take(24).skip(16) {
933 let fp_i = i - 16;
934 unsafe {
936 *reg = hv_register_value {
937 fp: hv_x64_fp_register {
938 as_uint128: std::mem::transmute::<[u8; 16usize], hv_u128>(fpu.fpr[fp_i]),
939 },
940 };
941 }
942 }
943 reg_values[24] = hv_register_value {
944 fp_control_status: hv_x64_fp_control_status_register {
945 __bindgen_anon_1: hv_x64_fp_control_status_register__bindgen_ty_1 {
946 fp_control: fpu.fcw,
947 fp_status: fpu.fsw,
948 fp_tag: fpu.ftwx,
949 reserved: 0x0,
950 last_fp_op: fpu.last_opcode,
951 __bindgen_anon_1:
952 hv_x64_fp_control_status_register__bindgen_ty_1__bindgen_ty_1 {
953 last_fp_rip: fpu.last_ip,
954 },
955 },
956 },
957 };
958 reg_values[25] = hv_register_value {
959 xmm_control_status: hv_x64_xmm_control_status_register {
960 __bindgen_anon_1: hv_x64_xmm_control_status_register__bindgen_ty_1 {
961 xmm_status_control: fpu.mxcsr,
962 xmm_status_control_mask: 0x0,
963 __bindgen_anon_1:
964 hv_x64_xmm_control_status_register__bindgen_ty_1__bindgen_ty_1 {
965 last_fp_rdp: fpu.last_dp,
966 },
967 },
968 },
969 };
970
971 let reg_assocs: Vec<hv_register_assoc> = reg_names
972 .iter()
973 .zip(reg_values.iter())
974 .map(|t| hv_register_assoc {
975 name: *t.0,
976 value: *t.1,
977 ..Default::default()
978 })
979 .collect();
980
981 self.set_reg(®_assocs)?;
982 Ok(())
983 }
    #[cfg(not(target_arch = "aarch64"))]
    /// Reads the FPU state (XMM0-15, MMX0-7, FP and XMM control/status)
    /// via hypercall, using the index layout of `Self::fpu_registers()`.
    pub fn get_fpu(&self) -> Result<FloatingPointUnit> {
        let reg_names = Self::fpu_registers();
        let mut reg_assocs: Vec<hv_register_assoc> = reg_names
            .iter()
            .map(|name| hv_register_assoc {
                name: *name,
                ..Default::default()
            })
            .collect();
        self.get_reg(&mut reg_assocs)?;

        // Indices 24 and 25 are the FP and XMM control/status registers
        // (see fpu_registers()), so reading those union fields is valid.
        let fp_control_status: hv_x64_fp_control_status_register__bindgen_ty_1 =
            unsafe { reg_assocs[24].value.fp_control_status.__bindgen_anon_1 };
        let xmm_control_status: hv_x64_xmm_control_status_register__bindgen_ty_1 =
            unsafe { reg_assocs[25].value.xmm_control_status.__bindgen_anon_1 };
        let mut ret_regs = unsafe {
            FloatingPointUnit {
                fpr: [[0x0; 16usize]; 8usize],
                fcw: fp_control_status.fp_control,
                fsw: fp_control_status.fp_status,
                ftwx: fp_control_status.fp_tag,
                pad1: 0x0,
                last_opcode: fp_control_status.last_fp_op,
                last_ip: fp_control_status.__bindgen_anon_1.last_fp_rip,
                last_dp: xmm_control_status.__bindgen_anon_1.last_fp_rdp,
                xmm: [[0; 16usize]; 16usize],
                mxcsr: xmm_control_status.xmm_status_control,
                pad2: 0x0,
            }
        };

        // Indices 0..=15: XMM registers, copied out as raw 16-byte arrays.
        for (i, reg) in reg_assocs.iter().enumerate().take(16) {
            // SAFETY: hv_u128 and [u8; 16] have identical size; plain
            // bit-level reinterpretation.
            unsafe {
                ret_regs.xmm[i] = std::mem::transmute::<hv_u128, [u8; 16usize]>(reg.value.reg128);
            }
        }
        // Indices 16..=23: MMX/x87 registers.
        for (i, reg) in reg_assocs.iter().enumerate().take(24).skip(16) {
            let fp_i = i - 16;
            // SAFETY: same 16-byte bit-level reinterpretation as above.
            unsafe {
                ret_regs.fpr[fp_i] =
                    std::mem::transmute::<hv_u128, [u8; 16usize]>(reg.value.fp.as_uint128);
            }
        }

        Ok(ret_regs)
    }
1037 #[cfg(not(target_arch = "aarch64"))]
1039 pub fn get_debug_regs(&self) -> Result<DebugRegisters> {
1040 let reg_names: [hv_register_name; 6] = [
1041 hv_register_name_HV_X64_REGISTER_DR0,
1042 hv_register_name_HV_X64_REGISTER_DR1,
1043 hv_register_name_HV_X64_REGISTER_DR2,
1044 hv_register_name_HV_X64_REGISTER_DR3,
1045 hv_register_name_HV_X64_REGISTER_DR6,
1046 hv_register_name_HV_X64_REGISTER_DR7,
1047 ];
1048
1049 let mut reg_assocs: Vec<hv_register_assoc> = reg_names
1050 .iter()
1051 .map(|name| hv_register_assoc {
1052 name: *name,
1053 ..Default::default()
1054 })
1055 .collect();
1056
1057 self.get_reg(&mut reg_assocs)?;
1058
1059 let ret_regs = unsafe {
1060 DebugRegisters {
1061 dr0: reg_assocs[0].value.reg64,
1062 dr1: reg_assocs[1].value.reg64,
1063 dr2: reg_assocs[2].value.reg64,
1064 dr3: reg_assocs[3].value.reg64,
1065 dr6: reg_assocs[4].value.reg64,
1066 dr7: reg_assocs[5].value.reg64,
1067 }
1068 };
1069
1070 Ok(ret_regs)
1071 }
1072 #[cfg(not(target_arch = "aarch64"))]
1074 pub fn set_debug_regs(&self, d_regs: &DebugRegisters) -> Result<()> {
1075 let reg_names = [
1076 hv_register_name_HV_X64_REGISTER_DR0,
1077 hv_register_name_HV_X64_REGISTER_DR1,
1078 hv_register_name_HV_X64_REGISTER_DR2,
1079 hv_register_name_HV_X64_REGISTER_DR3,
1080 hv_register_name_HV_X64_REGISTER_DR6,
1081 hv_register_name_HV_X64_REGISTER_DR7,
1082 ];
1083 let reg_values = [
1084 hv_register_value { reg64: d_regs.dr0 },
1085 hv_register_value { reg64: d_regs.dr1 },
1086 hv_register_value { reg64: d_regs.dr2 },
1087 hv_register_value { reg64: d_regs.dr3 },
1088 hv_register_value { reg64: d_regs.dr6 },
1089 hv_register_value { reg64: d_regs.dr7 },
1090 ];
1091
1092 let reg_assocs: Vec<hv_register_assoc> = reg_names
1093 .iter()
1094 .zip(reg_values.iter())
1095 .map(|t| hv_register_assoc {
1096 name: *t.0,
1097 value: *t.1,
1098 ..Default::default()
1099 })
1100 .collect();
1101
1102 self.set_reg(®_assocs)?;
1103 Ok(())
1104 }
1105 #[cfg(not(target_arch = "aarch64"))]
1107 pub fn get_msrs(&self, msrs: &mut Msrs) -> Result<usize> {
1108 let nmsrs = msrs.as_fam_struct_ref().nmsrs as usize;
1109 let mut reg_assocs: Vec<hv_register_assoc> = Vec::with_capacity(nmsrs);
1110
1111 for i in 0..nmsrs {
1112 let name = match msr_to_hv_reg_name(msrs.as_slice()[i].index) {
1113 Ok(n) => n,
1114 Err(_) => return Err(libc::EINVAL.into()),
1115 };
1116 reg_assocs.push(hv_register_assoc {
1117 name,
1118 ..Default::default()
1119 });
1120 }
1121
1122 self.get_reg(&mut reg_assocs)?;
1123
1124 for (i, reg) in reg_assocs.iter().enumerate().take(nmsrs) {
1125 unsafe {
1128 msrs.as_mut_slice()[i].data = reg.value.reg64;
1129 }
1130 }
1131
1132 Ok(nmsrs)
1133 }
1134 #[cfg(not(target_arch = "aarch64"))]
1137 pub fn set_msrs(&self, msrs: &Msrs) -> Result<usize> {
1138 let nmsrs = msrs.as_fam_struct_ref().nmsrs as usize;
1139 let mut reg_assocs: Vec<hv_register_assoc> = Vec::with_capacity(nmsrs);
1140
1141 for i in 0..nmsrs {
1142 let name = match msr_to_hv_reg_name(msrs.as_slice()[i].index) {
1143 Ok(n) => n,
1144 Err(_) => return Err(libc::EINVAL.into()),
1145 };
1146 reg_assocs.push(hv_register_assoc {
1147 name,
1148 value: hv_register_value {
1149 reg64: msrs.as_slice()[i].data,
1150 },
1151 ..Default::default()
1152 });
1153 }
1154
1155 self.set_reg(®_assocs)?;
1156 Ok(0_usize)
1157 }
1158 pub fn run(&self) -> Result<hv_message> {
1160 let mut msg = hv_message::default();
1161 let ret = unsafe { ioctl_with_mut_ref(self, MSHV_RUN_VP(), &mut msg) };
1163 if ret != 0 {
1164 return Err(errno::Error::last().into());
1165 }
1166 Ok(msg)
1167 }
1168 #[cfg(not(target_arch = "aarch64"))]
1171 pub fn get_vcpu_events(&self) -> Result<VcpuEvents> {
1172 let reg_names: [hv_register_name; 5] = [
1173 hv_register_name_HV_REGISTER_PENDING_INTERRUPTION,
1174 hv_register_name_HV_REGISTER_INTERRUPT_STATE,
1175 hv_register_name_HV_REGISTER_INTERNAL_ACTIVITY_STATE,
1176 hv_register_name_HV_REGISTER_PENDING_EVENT0,
1177 hv_register_name_HV_REGISTER_PENDING_EVENT1,
1178 ];
1179 let mut reg_assocs: Vec<hv_register_assoc> = reg_names
1180 .iter()
1181 .map(|name| hv_register_assoc {
1182 name: *name,
1183 ..Default::default()
1184 })
1185 .collect();
1186 self.get_reg(&mut reg_assocs)?;
1187 let mut ret_regs = VcpuEvents::default();
1188 unsafe {
1190 ret_regs.pending_interruption = reg_assocs[0].value.reg64;
1191 ret_regs.interrupt_state = reg_assocs[1].value.reg64;
1192 ret_regs.internal_activity_state = reg_assocs[2].value.reg64;
1193 ret_regs.pending_event0 =
1194 std::mem::transmute::<hv_u128, [u8; 16usize]>(reg_assocs[3].value.reg128);
1195 ret_regs.pending_event1 =
1196 std::mem::transmute::<hv_u128, [u8; 16usize]>(reg_assocs[4].value.reg128);
1197 }
1198 Ok(ret_regs)
1199 }
1200 #[cfg(not(target_arch = "aarch64"))]
1202 pub fn set_vcpu_events(&self, events: &VcpuEvents) -> Result<()> {
1203 let reg_names: [hv_register_name; 5] = [
1204 hv_register_name_HV_REGISTER_PENDING_INTERRUPTION,
1205 hv_register_name_HV_REGISTER_INTERRUPT_STATE,
1206 hv_register_name_HV_REGISTER_INTERNAL_ACTIVITY_STATE,
1207 hv_register_name_HV_REGISTER_PENDING_EVENT0,
1208 hv_register_name_HV_REGISTER_PENDING_EVENT1,
1209 ];
1210 let reg_values: [hv_register_value; 5] = unsafe {
1213 [
1214 hv_register_value {
1215 reg64: events.pending_interruption,
1216 },
1217 hv_register_value {
1218 reg64: events.interrupt_state,
1219 },
1220 hv_register_value {
1221 reg64: events.internal_activity_state,
1222 },
1223 hv_register_value {
1224 reg128: std::mem::transmute::<[u8; 16usize], hv_u128>(events.pending_event0),
1225 },
1226 hv_register_value {
1227 reg128: std::mem::transmute::<[u8; 16usize], hv_u128>(events.pending_event1),
1228 },
1229 ]
1230 };
1231
1232 let reg_assocs: Vec<hv_register_assoc> = reg_names
1233 .iter()
1234 .zip(reg_values.iter())
1235 .map(|t| hv_register_assoc {
1236 name: *t.0,
1237 value: *t.1,
1238 ..Default::default()
1239 })
1240 .collect();
1241 self.set_reg(®_assocs)?;
1242 Ok(())
1243 }
1244
1245 #[cfg(not(target_arch = "aarch64"))]
1248 fn get_xcrs_vp_page(&self) -> Result<Xcrs> {
1249 let vp_reg_page = self.get_vp_reg_page().unwrap().0;
1250 let ret_regs = unsafe {
1252 Xcrs {
1253 xcr0: (*vp_reg_page).xfem,
1254 }
1255 };
1256
1257 Ok(ret_regs)
1258 }
1259
1260 #[cfg(not(target_arch = "aarch64"))]
1262 fn get_xcrs_ioctl(&self) -> Result<Xcrs> {
1263 let mut reg_assocs: [hv_register_assoc; 1] = [hv_register_assoc {
1264 name: hv_register_name_HV_X64_REGISTER_XFEM,
1265 ..Default::default()
1266 }];
1267 self.get_reg(&mut reg_assocs)?;
1268
1269 let ret_regs = unsafe {
1271 Xcrs {
1272 xcr0: reg_assocs[0].value.reg64,
1273 }
1274 };
1275
1276 Ok(ret_regs)
1277 }
1278
1279 #[cfg(not(target_arch = "aarch64"))]
1281 pub fn get_xcrs(&self) -> Result<Xcrs> {
1282 if self.is_valid_vp_reg_page() {
1283 self.get_xcrs_vp_page()
1284 } else {
1285 self.get_xcrs_ioctl()
1286 }
1287 }
1288
1289 #[cfg(not(target_arch = "aarch64"))]
1291 pub fn set_xcrs(&self, xcrs: &Xcrs) -> Result<()> {
1292 self.set_reg(&[hv_register_assoc {
1293 name: hv_register_name_HV_X64_REGISTER_XFEM,
1294 value: hv_register_value { reg64: xcrs.xcr0 },
1295 ..Default::default()
1296 }])
1297 }
1298 #[cfg(not(target_arch = "aarch64"))]
1300 pub fn get_misc_regs(&self) -> Result<MiscRegs> {
1301 let mut reg_assocs: [hv_register_assoc; 1] = [hv_register_assoc {
1302 name: hv_register_name_HV_X64_REGISTER_HYPERCALL,
1303 ..Default::default()
1304 }];
1305 self.get_reg(&mut reg_assocs)?;
1306
1307 let mut ret_regs = unsafe {
1309 MiscRegs {
1310 hypercall: reg_assocs[0].value.reg64,
1311 ..Default::default()
1312 }
1313 };
1314 if let Some(vp_page) = self.get_vp_reg_page() {
1315 let vp_reg_page = vp_page.0;
1316 unsafe {
1318 ret_regs.int_vec = (*vp_reg_page).interrupt_vectors.as_uint64;
1319 }
1320 }
1321 Ok(ret_regs)
1322 }
1323 #[cfg(not(target_arch = "aarch64"))]
1325 pub fn set_misc_regs(&self, misc: &MiscRegs) -> Result<()> {
1326 if let Some(vp_page) = self.get_vp_reg_page() {
1327 let vp_reg_page = vp_page.0;
1328 unsafe {
1330 (*vp_reg_page).interrupt_vectors.as_uint64 = misc.int_vec;
1331 }
1332 }
1333
1334 self.set_reg(&[hv_register_assoc {
1335 name: hv_register_name_HV_X64_REGISTER_HYPERCALL,
1336 value: hv_register_value {
1337 reg64: misc.hypercall,
1338 },
1339 ..Default::default()
1340 }])
1341 }
1342 #[cfg(target_arch = "x86_64")]
1343 pub fn get_vp_state_ioctl(&self, state: &mut mshv_get_set_vp_state) -> Result<()> {
1345 let ret = unsafe { ioctl_with_mut_ref(self, MSHV_GET_VP_STATE(), state) };
1347 if ret != 0 {
1348 return Err(errno::Error::last().into());
1349 }
1350 Ok(())
1351 }
1352 #[cfg(target_arch = "x86_64")]
1353 pub fn set_vp_state_ioctl(&self, state: &mshv_get_set_vp_state) -> Result<()> {
1356 let ret = unsafe { ioctl_with_ref(self, MSHV_SET_VP_STATE(), state) };
1358 if ret != 0 {
1359 return Err(errno::Error::last().into());
1360 }
1361 Ok(())
1362 }
1363 #[cfg(target_arch = "x86_64")]
1364 pub fn get_lapic(&self) -> Result<LapicState> {
1366 let buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE)?;
1367 let mut vp_state = mshv_get_set_vp_state {
1368 buf_ptr: buffer.buf as u64,
1369 buf_sz: buffer.size() as u32,
1370 type_: MSHV_VP_STATE_LAPIC as u8,
1371 ..Default::default()
1372 };
1373 self.get_vp_state_ioctl(&mut vp_state)?;
1374 Ok(LapicState::try_from(buffer)?)
1375 }
1376 #[cfg(target_arch = "x86_64")]
1377 pub fn set_lapic(&self, lapic_state: &LapicState) -> Result<()> {
1379 let buffer = Buffer::try_from(lapic_state)?;
1380 let vp_state = mshv_get_set_vp_state {
1381 type_: MSHV_VP_STATE_LAPIC as u8,
1382 buf_sz: buffer.size() as u32,
1383 buf_ptr: buffer.buf as u64,
1384 ..Default::default()
1385 };
1386 self.set_vp_state_ioctl(&vp_state)
1387 }
1388 #[cfg(target_arch = "x86_64")]
1389 pub fn get_xsave(&self) -> Result<XSave> {
1391 let buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE)?;
1392 let mut vp_state = mshv_get_set_vp_state {
1393 buf_ptr: buffer.buf as u64,
1394 buf_sz: buffer.size() as u32,
1395 type_: MSHV_VP_STATE_XSAVE as u8,
1396 ..Default::default()
1397 };
1398 self.get_vp_state_ioctl(&mut vp_state)?;
1399 Ok(XSave::try_from(buffer)?)
1400 }
1401 #[cfg(target_arch = "x86_64")]
1402 pub fn set_xsave(&self, data: &XSave) -> Result<()> {
1404 let buffer = Buffer::try_from(data)?;
1405 let vp_state = mshv_get_set_vp_state {
1406 type_: MSHV_VP_STATE_XSAVE as u8,
1407 buf_sz: buffer.size() as u32,
1408 buf_ptr: buffer.buf as u64,
1409 ..Default::default()
1410 };
1411 self.set_vp_state_ioctl(&vp_state)
1412 }
    /// Translates guest virtual address `gva` to a guest physical address,
    /// under the given HV_TRANSLATE_GVA_* control `flags`.
    ///
    /// Returns the GPA together with the hypervisor's translation result.
    pub fn translate_gva(&self, gva: u64, flags: u64) -> Result<(u64, hv_translate_gva_result)> {
        self.hvcall_translate_gva(gva, flags)
    }
1417 fn hvcall_translate_gva(&self, gva: u64, flags: u64) -> Result<(u64, hv_translate_gva_result)> {
1419 let input = hv_input_translate_virtual_address {
1420 vp_index: self.index,
1421 control_flags: flags,
1422 gva_page: gva >> HV_HYP_PAGE_SHIFT,
1423 ..Default::default() };
1425 let mut output = hv_output_translate_virtual_address {
1426 ..Default::default()
1427 };
1428 let mut args = make_args!(HVCALL_TRANSLATE_VIRTUAL_ADDRESS, input, output);
1429 self.hvcall(&mut args)?;
1430
1431 let gpa = (output.gpa_page << HV_HYP_PAGE_SHIFT) | (gva & !(HV_HYP_PAGE_MASK as u64));
1432
1433 Ok((gpa, output.translation_result))
1434 }
1435
1436 #[cfg(not(target_arch = "aarch64"))]
1438 pub fn get_suspend_regs(&self) -> Result<SuspendRegisters> {
1439 let reg_names: [hv_register_name; 2] = [
1440 hv_register_name_HV_REGISTER_EXPLICIT_SUSPEND,
1441 hv_register_name_HV_REGISTER_INTERCEPT_SUSPEND,
1442 ];
1443
1444 let mut reg_assocs: Vec<hv_register_assoc> = reg_names
1445 .iter()
1446 .map(|name| hv_register_assoc {
1447 name: *name,
1448 ..Default::default()
1449 })
1450 .collect();
1451
1452 self.get_reg(&mut reg_assocs)?;
1453
1454 let ret_regs = unsafe {
1456 SuspendRegisters {
1457 explicit_register: reg_assocs[0].value.reg64,
1458 intercept_register: reg_assocs[1].value.reg64,
1459 }
1460 };
1461
1462 Ok(ret_regs)
1463 }
    #[cfg(target_arch = "x86_64")]
    /// Registers a fixed CPUID intercept result for the leaf described by `entry`.
    ///
    /// `always_override` and `subleaf_specific` are raw 0/1 flags; passing
    /// `None` selects the wrapper's defaults (override on, subleaf matching
    /// off — see `hvcall_register_intercept_result_cpuid_entry`).
    pub fn register_intercept_result_cpuid_entry(
        &self,
        entry: &hv_cpuid_entry,
        always_override: Option<u8>,
        subleaf_specific: Option<u8>,
    ) -> Result<()> {
        self.hvcall_register_intercept_result_cpuid_entry(entry, always_override, subleaf_specific)
    }
1474
    #[cfg(target_arch = "x86_64")]
    /// Builds and issues the HvCallRegisterInterceptResult hypercall for a
    /// single CPUID leaf.
    ///
    /// Flag defaults when the caller passes `None`:
    /// - `subleaf_specific` defaults to 0 (match on the leaf/eax only),
    /// - `always_override` defaults to 1 (always apply this result).
    ///
    /// Each result mask is set equal to its result value — presumably so the
    /// registered values take effect for exactly the bits provided in `entry`;
    /// confirm the mask semantics against the hypervisor specification.
    fn hvcall_register_intercept_result_cpuid_entry(
        &self,
        entry: &hv_cpuid_entry,
        always_override: Option<u8>,
        subleaf_specific: Option<u8>,
    ) -> Result<()> {
        let mshv_cpuid = hv_register_x64_cpuid_result_parameters {
            input: hv_register_x64_cpuid_result_parameters__bindgen_ty_1 {
                // Leaf (eax) and subleaf (ecx) being intercepted.
                eax: entry.function,
                ecx: entry.index,
                subleaf_specific: subleaf_specific.unwrap_or(0),
                always_override: always_override.unwrap_or(1),
                padding: 0,
            },
            result: hv_register_x64_cpuid_result_parameters__bindgen_ty_2 {
                eax: entry.eax,
                eax_mask: entry.eax,
                ebx: entry.ebx,
                ebx_mask: entry.ebx,
                ecx: entry.ecx,
                ecx_mask: entry.ecx,
                edx: entry.edx,
                edx_mask: entry.edx,
            },
        };
        let input = hv_input_register_intercept_result {
            vp_index: self.index,
            intercept_type: hv_intercept_type_HV_INTERCEPT_TYPE_X64_CPUID,
            parameters: hv_register_intercept_result_parameters { cpuid: mshv_cpuid },
            ..Default::default()
        };
        let mut args = make_args!(HVCALL_REGISTER_INTERCEPT_RESULT, input);
        self.hvcall(&mut args)?;

        Ok(())
    }
1528
1529 #[cfg(target_arch = "x86_64")]
1530 pub fn register_intercept_result_cpuid(&self, cpuid: &CpuId) -> Result<()> {
1532 let mut ret = Ok(());
1533
1534 for entry in cpuid.as_slice().iter() {
1535 let mut override_arg = None;
1536 let mut subleaf_specific = None;
1537
1538 match entry.function {
1539 0xb | 0x1f | 0x8000_001e | 0x8000_0026 => {
1546 subleaf_specific = Some(1);
1547 override_arg = None;
1548 }
1549 0x0000_0001 | 0x8000_0000 | 0x8000_0001 | 0x8000_0008 => {
1550 subleaf_specific = None;
1551 override_arg = Some(1);
1552 }
1553 _ => {}
1554 }
1555 let eret =
1556 self.register_intercept_result_cpuid_entry(entry, override_arg, subleaf_specific);
1557 if eret.is_err() && ret.is_ok() {
1558 ret = eret;
1559 }
1560 }
1561
1562 ret
1563 }
    #[cfg(not(target_arch = "aarch64"))]
    /// Queries the hypervisor for the CPUID result `[eax, ebx, ecx, edx]` the
    /// guest would observe for leaf `eax` / subleaf `ecx`, given the supplied
    /// XFEM and XSS values.
    pub fn get_cpuid_values(&self, eax: u32, ecx: u32, xfem: u64, xss: u64) -> Result<[u32; 4]> {
        self.hvcall_get_cpuid_values(eax, ecx, xfem, xss)
    }
    #[cfg(not(target_arch = "aarch64"))]
    /// Issues the HvCallGetVpCpuidValues rep hypercall for one leaf/subleaf.
    fn hvcall_get_cpuid_values(&self, eax: u32, ecx: u32, xfem: u64, xss: u64) -> Result<[u32; 4]> {
        // Single rep entry describing the requested leaf (eax), subleaf (ecx)
        // and the XFEM/XSS values it should be evaluated under.
        let mut input = make_rep_input!(
            hv_input_get_vp_cpuid_values {
                vp_index: self.index,
                ..Default::default()
            },
            cpuid_leaf_info,
            [hv_cpuid_leaf_info {
                eax,
                ecx,
                xfem,
                xss,
            }]
        );
        // NOTE(review): use_vp_xfem_xss is set even though explicit xfem/xss
        // are passed above — confirm which one the hypervisor honors.
        // SAFETY: accessing bindgen-generated bitfields of the input flags.
        unsafe {
            input
                .as_mut_struct_ref()
                .flags
                .__bindgen_anon_1
                .set_use_vp_xfem_xss(1);
            input
                .as_mut_struct_ref()
                .flags
                .__bindgen_anon_1
                .set_apply_registered_values(1);
        }
        let mut output_arr: [hv_output_get_vp_cpuid_values; 1] = [Default::default()];
        let mut args = make_rep_args!(HVCALL_GET_VP_CPUID_VALUES, input, output_arr);
        self.hvcall(&mut args)?;

        // SAFETY: the hypercall wrote the single output entry; view it as the
        // four 32-bit CPUID result registers.
        Ok(unsafe { output_arr[0].as_uint32 })
    }
1606 pub fn gpa_read(&self, input: &mut mshv_read_write_gpa) -> Result<mshv_read_write_gpa> {
1608 let flags = hv_access_gpa_control_flags {
1609 as_uint64: input.flags as u64,
1610 };
1611 let res = self.hvcall_gpa_read(input.byte_count, input.base_gpa, flags)?;
1612 input.data = res.data;
1613 Ok(*input)
1614 }
1615
1616 fn hvcall_gpa_read(
1618 &self,
1619 byte_count: u32,
1620 gpa: u64,
1621 flags: hv_access_gpa_control_flags,
1622 ) -> Result<hv_output_read_gpa> {
1623 let input = hv_input_read_gpa {
1624 vp_index: self.index,
1625 byte_count,
1626 base_gpa: gpa,
1627 control_flags: flags,
1628 ..Default::default() };
1630 let mut output = hv_output_read_gpa::default();
1631 let mut args = make_args!(HVCALL_READ_GPA, input, output);
1632 self.hvcall(&mut args)?;
1633
1634 Ok(output)
1635 }
1636
1637 pub fn gpa_write(&self, input: &mut mshv_read_write_gpa) -> Result<mshv_read_write_gpa> {
1639 let flags = hv_access_gpa_control_flags {
1640 as_uint64: input.flags as u64,
1641 };
1642 self.hvcall_gpa_write(input.byte_count, input.base_gpa, flags, input.data)?;
1644 Ok(*input)
1645 }
1646 fn hvcall_gpa_write(
1648 &self,
1649 byte_count: u32,
1650 gpa: u64,
1651 flags: hv_access_gpa_control_flags,
1652 data: [__u8; 16usize],
1653 ) -> Result<hv_output_write_gpa> {
1654 let input = hv_input_write_gpa {
1655 vp_index: self.index,
1656 byte_count,
1657 base_gpa: gpa,
1658 control_flags: flags,
1659 data,
1660 ..Default::default() };
1662 let mut output = hv_output_write_gpa::default();
1663 let mut args = make_args!(HVCALL_WRITE_GPA, input, output);
1664 self.hvcall(&mut args)?;
1665
1666 Ok(output)
1667 }
1668
1669 #[cfg(not(target_arch = "aarch64"))]
1671 pub fn set_sev_control_register(&self, reg: u64) -> Result<()> {
1672 let reg_assocs = [hv_register_assoc {
1673 name: hv_register_name_HV_X64_REGISTER_SEV_CONTROL,
1674 value: hv_register_value { reg64: reg },
1675 ..Default::default()
1676 }];
1677
1678 self.set_reg(®_assocs)?;
1679 Ok(())
1680 }
1681
1682 #[cfg(not(target_arch = "aarch64"))]
1684 pub fn get_all_vp_state_components(&self) -> Result<AllVpStateComponents> {
1685 let mut states: AllVpStateComponents = AllVpStateComponents::default();
1686 let mut buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE)?;
1687
1688 for i in 0..MSHV_VP_STATE_COUNT {
1689 buffer.zero_out_buf();
1690 let mut vp_state = mshv_get_set_vp_state {
1691 buf_ptr: buffer.buf as u64,
1692 buf_sz: buffer.size() as u32,
1693 type_: i as u8,
1694 ..Default::default()
1695 };
1696 self.get_vp_state_ioctl(&mut vp_state)?;
1697 states.copy_to_or_from_buffer(i as usize, &mut buffer, false);
1698 }
1699 Ok(states)
1700 }
1701
1702 #[cfg(not(target_arch = "aarch64"))]
1704 pub fn set_all_vp_state_components(&self, states: &mut AllVpStateComponents) -> Result<()> {
1705 let mut buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE)?;
1706
1707 for i in 0..MSHV_VP_STATE_COUNT {
1708 buffer.zero_out_buf();
1709 states.copy_to_or_from_buffer(i as usize, &mut buffer, true);
1710 let vp_state = mshv_get_set_vp_state {
1711 type_: i as u8,
1712 buf_sz: buffer.size() as u32,
1713 buf_ptr: buffer.buf as u64,
1714 ..Default::default()
1715 };
1716 self.set_vp_state_ioctl(&vp_state)?;
1717 }
1718 Ok(())
1719 }
1720
1721 pub fn hvcall(&self, args: &mut mshv_root_hvcall) -> Result<()> {
1723 let ret = unsafe { ioctl_with_mut_ref(self, MSHV_ROOT_HVCALL(), args) };
1725 if ret == 0 {
1726 Ok(())
1727 } else {
1728 Err(MshvError::from_hvcall(errno::Error::last(), *args))
1729 }
1730 }
1731
1732 #[cfg(target_arch = "aarch64")]
1734 pub fn get_reg_list(&self) -> Result<MshvRegList> {
1735 let mut reg_list = MshvRegList::default();
1736 reg_list.reg_list = vec![
1737 hv_register_name_HV_ARM64_REGISTER_CNTVCT_EL0,
1738 hv_register_name_HV_ARM64_REGISTER_PAR_EL1,
1739 hv_register_name_HV_ARM64_REGISTER_SPSR_EL1,
1740 hv_register_name_HV_ARM64_REGISTER_MPIDR_EL1,
1741 hv_register_name_HV_ARM64_REGISTER_MIDR_EL1,
1742 hv_register_name_HV_ARM64_REGISTER_SCTLR_EL1,
1743 hv_register_name_HV_ARM64_REGISTER_ACTLR_EL1,
1744 hv_register_name_HV_ARM64_REGISTER_TCR_EL1,
1745 hv_register_name_HV_ARM64_REGISTER_MAIR_EL1,
1746 hv_register_name_HV_ARM64_REGISTER_TPIDR_EL1,
1747 hv_register_name_HV_ARM64_REGISTER_AMAIR_EL1,
1748 hv_register_name_HV_ARM64_REGISTER_TPIDRRO_EL0,
1749 hv_register_name_HV_ARM64_REGISTER_TPIDR_EL0,
1750 hv_register_name_HV_ARM64_REGISTER_CONTEXTIDR_EL1,
1751 hv_register_name_HV_ARM64_REGISTER_CPACR_EL1,
1752 hv_register_name_HV_ARM64_REGISTER_CSSELR_EL1,
1753 hv_register_name_HV_ARM64_REGISTER_CNTKCTL_EL1,
1754 hv_register_name_HV_ARM64_REGISTER_CNTV_CTL_EL0,
1755 hv_register_name_HV_ARM64_REGISTER_CNTV_CVAL_EL0,
1756 hv_register_name_HV_ARM64_REGISTER_TTBR0_EL1,
1757 hv_register_name_HV_ARM64_REGISTER_TTBR1_EL1,
1758 hv_register_name_HV_ARM64_REGISTER_VBAR_EL1,
1759 hv_register_name_HV_ARM64_REGISTER_ESR_EL1,
1760 hv_register_name_HV_ARM64_REGISTER_FAR_EL1,
1761 hv_register_name_HV_ARM64_REGISTER_PAR_EL1,
1762 hv_register_name_HV_ARM64_REGISTER_SP_EL0,
1763 hv_register_name_HV_ARM64_REGISTER_SP_EL1,
1764 hv_register_name_HV_ARM64_REGISTER_AFSR0_EL1,
1765 hv_register_name_HV_ARM64_REGISTER_AFSR1_EL1,
1766 hv_register_name_HV_ARM64_REGISTER_SYNTHETIC_VBAR_EL1,
1767 hv_register_name_HV_REGISTER_PENDING_EVENT0,
1768 hv_register_name_HV_REGISTER_PENDING_EVENT1,
1769 hv_register_name_HV_REGISTER_DELIVERABILITY_NOTIFICATIONS,
1770 hv_register_name_HV_REGISTER_INTERNAL_ACTIVITY_STATE,
1771 ];
1772 Ok(reg_list)
1773 }
1774}
1775
impl Drop for VcpuFd {
    /// Unmaps the vCPU register page, if one was mapped, when the fd is dropped.
    fn drop(&mut self) {
        if let Some(vp_page) = &self.vp_page {
            // SAFETY: vp_page.0 is presumably a pointer obtained from a
            // page-sized mmap of this vCPU fd and is not used after drop —
            // TODO(review): confirm against the mapping site. The munmap
            // result is deliberately ignored; there is no way to recover
            // from a failed unmap in a destructor.
            unsafe {
                let _ = libc::munmap(vp_page.0 as *mut libc::c_void, HV_PAGE_SIZE);
            }
        }
    }
}
1786
1787#[allow(dead_code)]
1788#[cfg(test)]
1789mod tests {
1790 use super::*;
1792 use crate::ioctls::system::Mshv;
1793
1794 #[cfg(target_arch = "x86_64")]
1795 #[test]
1796 fn test_set_get_regs() {
1797 let set_reg_assocs: [hv_register_assoc; 2] = [
1798 hv_register_assoc {
1799 name: hv_register_name_HV_X64_REGISTER_RIP,
1800 value: hv_register_value { reg64: 0x1000 },
1801 ..Default::default()
1802 },
1803 hv_register_assoc {
1804 name: hv_register_name_HV_X64_REGISTER_RFLAGS,
1805 value: hv_register_value { reg64: 0x2 },
1806 ..Default::default()
1807 },
1808 ];
1809 let get_reg_assocs: [hv_register_assoc; 2] = [
1810 hv_register_assoc {
1811 name: hv_register_name_HV_X64_REGISTER_RIP,
1812 ..Default::default()
1813 },
1814 hv_register_assoc {
1815 name: hv_register_name_HV_X64_REGISTER_RFLAGS,
1816 ..Default::default()
1817 },
1818 ];
1819
1820 for i in [0, 1] {
1821 let hv = Mshv::new().unwrap();
1822 let vm = hv.create_vm().unwrap();
1823 vm.initialize().unwrap();
1824 let vcpu = vm.create_vcpu(0).unwrap();
1825
1826 if i == 0 {
1827 vcpu.set_reg(&set_reg_assocs).unwrap();
1828 } else {
1829 vcpu.hvcall_set_reg(&set_reg_assocs).unwrap();
1830 }
1831
1832 let mut get_regs: [hv_register_assoc; 2] = get_reg_assocs;
1833
1834 if i == 0 {
1835 vcpu.get_reg(&mut get_regs).unwrap();
1836 } else {
1837 vcpu.hvcall_get_reg(&mut get_regs).unwrap();
1838 }
1839
1840 unsafe {
1842 assert!(get_regs[0].value.reg64 == 0x1000);
1843 assert!(get_regs[1].value.reg64 == 0x2);
1844 }
1845 }
1846 }
1847
1848 #[cfg(target_arch = "aarch64")]
1849 #[test]
1850 fn test_set_get_regs() {
1851 let set_regs_assocs = [
1852 hv_register_assoc {
1853 name: hv_register_name_HV_ARM64_REGISTER_PC,
1854 value: hv_register_value { reg64: 0x1000 },
1855 ..Default::default()
1856 },
1857 hv_register_assoc {
1858 name: hv_register_name_HV_ARM64_REGISTER_ELR_EL1,
1859 value: hv_register_value { reg64: 0x2 },
1860 ..Default::default()
1861 },
1862 ];
1863
1864 let get_reg_assocs = [
1865 hv_register_assoc {
1866 name: hv_register_name_HV_ARM64_REGISTER_PC,
1867 ..Default::default()
1868 },
1869 hv_register_assoc {
1870 name: hv_register_name_HV_ARM64_REGISTER_ELR_EL1,
1871 ..Default::default()
1872 },
1873 ];
1874
1875 let hv = Mshv::new().unwrap();
1876 let vm = hv.create_vm().unwrap();
1877 vm.initialize().unwrap();
1878 let vcpu = vm.create_vcpu(0).unwrap();
1879
1880 vcpu.hvcall_set_reg(&set_regs_assocs).unwrap();
1881
1882 let mut get_regs = get_reg_assocs;
1883 vcpu.hvcall_get_reg(&mut get_regs).unwrap();
1884
1885 unsafe {
1887 assert!(get_regs[0].value.reg64 == 0x1000);
1888 assert!(get_regs[1].value.reg64 == 0x2);
1889 }
1890 }
1891
1892 #[cfg(target_arch = "x86_64")]
1893 #[test]
1894 fn test_set_get_sregs() {
1895 let hv = Mshv::new().unwrap();
1896 let vm = hv.create_vm().unwrap();
1897 vm.initialize().unwrap();
1898 let vcpu = vm.create_vcpu(0).unwrap();
1899 let s_sregs = vcpu.get_sregs().unwrap();
1900 vcpu.set_sregs(&s_sregs).unwrap();
1901 let g_sregs = vcpu.get_sregs().unwrap();
1902 assert!(g_sregs.cr0 == s_sregs.cr0);
1903 assert!(g_sregs.cr2 == s_sregs.cr2);
1904 assert!(g_sregs.cr3 == s_sregs.cr3);
1905 assert!(g_sregs.cr4 == s_sregs.cr4);
1906 assert!(g_sregs.cr8 == s_sregs.cr8);
1907 assert!(g_sregs.cr8 == s_sregs.cr8);
1908 assert!(g_sregs.apic_base == s_sregs.apic_base);
1909 assert!(g_sregs.efer == s_sregs.efer);
1910 }
1911
1912 #[cfg(target_arch = "x86_64")]
1913 #[test]
1914 fn test_set_get_standard_registers() {
1915 let hv = Mshv::new().unwrap();
1916 let vm = hv.create_vm().unwrap();
1917 vm.initialize().unwrap();
1918 let vcpu = vm.create_vcpu(0).unwrap();
1919
1920 let s_regs = vcpu.get_regs().unwrap();
1921 vcpu.set_regs(&s_regs).unwrap();
1922 let g_regs = vcpu.get_regs().unwrap();
1923 assert!(g_regs.rax == s_regs.rax);
1924 assert!(g_regs.rbx == s_regs.rbx);
1925 assert!(g_regs.rcx == s_regs.rcx);
1926 assert!(g_regs.rdx == s_regs.rdx);
1927 }
1928
1929 #[cfg(target_arch = "x86_64")]
1930 #[test]
1931 fn test_set_get_debug_registers() {
1932 let hv = Mshv::new().unwrap();
1933 let vm = hv.create_vm().unwrap();
1934 vm.initialize().unwrap();
1935 let vcpu = vm.create_vcpu(0).unwrap();
1936
1937 let s_regs = vcpu.get_debug_regs().unwrap();
1938 vcpu.set_debug_regs(&s_regs).unwrap();
1939 let g_regs = vcpu.get_debug_regs().unwrap();
1940 assert!(g_regs.dr0 == s_regs.dr0);
1941 assert!(g_regs.dr1 == s_regs.dr1);
1942 assert!(g_regs.dr2 == s_regs.dr2);
1943 assert!(g_regs.dr3 == s_regs.dr3);
1944 assert!(g_regs.dr6 == s_regs.dr6);
1945 assert!(g_regs.dr7 == s_regs.dr7);
1946 }
1947
1948 #[cfg(target_arch = "x86_64")]
1949 #[test]
1950 fn test_set_get_fpu() {
1951 let hv = Mshv::new().unwrap();
1952 let vm = hv.create_vm().unwrap();
1953 vm.initialize().unwrap();
1954 let vcpu = vm.create_vcpu(0).unwrap();
1955
1956 let s_regs = vcpu.get_fpu().unwrap();
1957 vcpu.set_fpu(&s_regs).unwrap();
1958 let g_regs = vcpu.get_fpu().unwrap();
1959 for i in 0..16 {
1960 for j in 0..16 {
1961 assert!(g_regs.xmm[i][j] == s_regs.xmm[i][j]);
1962 }
1963 }
1964 for i in 0..8 {
1965 for j in 0..16 {
1966 assert!(g_regs.fpr[i][j] == s_regs.fpr[i][j]);
1967 }
1968 }
1969 assert!(g_regs.fcw == s_regs.fcw);
1970 assert!(g_regs.fsw == s_regs.fsw);
1971 assert!(g_regs.ftwx == s_regs.ftwx);
1972 assert!(g_regs.last_opcode == s_regs.last_opcode);
1973 assert!(g_regs.last_ip == s_regs.last_ip);
1974 assert!(g_regs.last_dp == s_regs.last_dp);
1975 assert!(g_regs.mxcsr == s_regs.mxcsr);
1976 }
1977
    #[cfg(target_arch = "x86_64")]
    #[test]
    fn test_run_code() {
        use libc::c_void;

        use super::*;
        use crate::ioctls::system::Mshv;
        use crate::set_bits;
        use std::io::Write;

        let mshv = Mshv::new().unwrap();
        let vm = mshv.create_vm().unwrap();
        vm.initialize().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        // Guest code loaded at GPA 0x1000 — presumably 16-bit code that adds
        // %bl to %al, converts to ASCII, then writes two bytes to port 0x3f8
        // (each `out` triggers an IO-port intercept); verify against an
        // x86 disassembly of these bytes.
        #[rustfmt::skip]
        let code:[u8;11] = [
            0xba, 0xf8, 0x03, 0x00, 0xd8, 0x04, b'0', 0xee, 0xb0, b'\0', 0xee, ];

        let mem_size = 0x4000;
        // Anonymous shared mapping used as the guest memory backing store.
        let load_addr = unsafe {
            libc::mmap(
                std::ptr::null_mut(),
                mem_size,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                -1,
                0,
            )
        } as *mut u8;
        // Map one writable+executable page at guest PFN 1 (GPA 0x1000).
        let mem_region = mshv_user_mem_region {
            flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
            guest_pfn: 0x1,
            size: 0x1000,
            userspace_addr: load_addr as u64,
            ..Default::default()
        };

        vm.map_user_memory(mem_region).unwrap();

        unsafe {
            // Copy the guest code to the start of the mapped region.
            let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
            slice.write_all(&code).unwrap();
        }

        let mut cs_reg = hv_register_assoc {
            name: hv_register_name_HV_X64_REGISTER_CS,
            ..Default::default()
        };
        vcpu.get_reg(slice::from_mut(&mut cs_reg)).unwrap();

        // Sanity-check: the reset-state CS base/selector start out non-zero.
        unsafe {
            assert_ne!({ cs_reg.value.segment.base }, 0);
            assert_ne!({ cs_reg.value.segment.selector }, 0);
        };

        // Zero CS so RIP == 0x1000 addresses our code linearly.
        cs_reg.value.segment.base = 0;
        cs_reg.value.segment.selector = 0;

        // Set CS plus the inputs (RAX/RBX), entry point (RIP) and RFLAGS.
        vcpu.set_reg(&[
            cs_reg,
            hv_register_assoc {
                name: hv_register_name_HV_X64_REGISTER_RAX,
                value: hv_register_value { reg64: 2 },
                ..Default::default()
            },
            hv_register_assoc {
                name: hv_register_name_HV_X64_REGISTER_RBX,
                value: hv_register_value { reg64: 2 },
                ..Default::default()
            },
            hv_register_assoc {
                name: hv_register_name_HV_X64_REGISTER_RIP,
                value: hv_register_value { reg64: 0x1000 },
                ..Default::default()
            },
            hv_register_assoc {
                name: hv_register_name_HV_X64_REGISTER_RFLAGS,
                value: hv_register_value { reg64: 0x2 },
                ..Default::default()
            },
        ])
        .unwrap();

        // Run until the second IO intercept (or halt); `done` flips after the
        // first expected intercept ('4' == 2 + 2 + '0').
        let mut done = false;
        loop {
            let ret_hv_message = vcpu.run().unwrap();
            match ret_hv_message.header.message_type {
                hv_message_type_HVMSG_X64_HALT => {
                    println!("VM Halted!");
                    break;
                }
                hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
                    let io_message = ret_hv_message.to_ioport_info().unwrap();

                    if !done {
                        // First intercept: single-byte OUT of '4' to 0x3f8.
                        assert!(io_message.rax == b'4' as u64);
                        assert!(io_message.port_number == 0x3f8);
                        unsafe {
                            assert!(io_message.access_info.__bindgen_anon_1.string_op() == 0);
                            assert!(io_message.access_info.__bindgen_anon_1.access_size() == 1);
                        }
                        assert!(
                            io_message.header.intercept_access_type == 1_u8
                        );
                        done = true;
                        // Skip past the one-byte `out` instruction by hand.
                        vcpu.set_reg(&[hv_register_assoc {
                            name: hv_register_name_HV_X64_REGISTER_RIP,
                            value: hv_register_value {
                                reg64: io_message.header.rip + 1,
                            },
                            ..Default::default()
                        }])
                        .unwrap();
                    } else {
                        // Second intercept: single-byte OUT of '\0' to 0x3f8.
                        assert!(io_message.rax == b'\0' as u64);
                        assert!(io_message.port_number == 0x3f8);
                        unsafe {
                            assert!(io_message.access_info.__bindgen_anon_1.string_op() == 0);
                            assert!(io_message.access_info.__bindgen_anon_1.access_size() == 1);
                        }
                        assert!(
                            io_message.header.intercept_access_type == 1_u8
                        );
                        break;
                    }
                }
                _ => {
                    println!("Message type: 0x{:x?}", {
                        ret_hv_message.header.message_type
                    });
                    panic!("Unexpected Exit Type");
                }
            };
        }
        assert!(done);
        vm.unmap_user_memory(mem_region).unwrap();
        unsafe { libc::munmap(load_addr as *mut c_void, mem_size) };
    }
2128
2129 #[cfg(target_arch = "x86_64")]
2130 #[test]
2131 fn test_run_code_mmap() {
2132 use super::*;
2133 use crate::ioctls::system::Mshv;
2134 use crate::set_bits;
2135 use libc::c_void;
2136 use std::io::Write;
2137
2138 let mshv = Mshv::new().unwrap();
2139 let vm = mshv.create_vm().unwrap();
2140 vm.initialize().unwrap();
2141 let vcpu = vm.create_vcpu(0).unwrap();
2142 #[rustfmt::skip]
2144 let code:[u8;11] = [
2145 0xba, 0xf8, 0x03, 0x00, 0xd8, 0x04, b'0', 0xee, 0xb0, b'\0', 0xee, ];
2153
2154 let mem_size = 0x4000;
2155 let load_addr = unsafe {
2156 libc::mmap(
2157 std::ptr::null_mut(),
2158 mem_size,
2159 libc::PROT_READ | libc::PROT_WRITE,
2160 libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
2161 -1,
2162 0,
2163 )
2164 } as *mut u8;
2165 let mem_region = mshv_user_mem_region {
2166 flags: set_bits!(u8, MSHV_SET_MEM_BIT_WRITABLE, MSHV_SET_MEM_BIT_EXECUTABLE),
2167 guest_pfn: 0x1,
2168 size: 0x1000,
2169 userspace_addr: load_addr as u64,
2170 ..Default::default()
2171 };
2172
2173 let registers_addr = unsafe {
2174 libc::mmap(
2175 std::ptr::null_mut(),
2176 0x1000,
2177 libc::PROT_READ | libc::PROT_WRITE,
2178 libc::MAP_SHARED,
2179 vcpu.as_raw_fd(),
2180 MSHV_VP_MMAP_OFFSET_REGISTERS as i64 * libc::sysconf(libc::_SC_PAGE_SIZE),
2181 )
2182 } as *mut u8;
2183
2184 if registers_addr as *mut c_void == libc::MAP_FAILED {
2185 panic!(
2186 "Could not mmap register page, error:{}",
2187 std::io::Error::last_os_error()
2188 );
2189 }
2190
2191 let hv_msg_addr = unsafe {
2192 libc::mmap(
2193 std::ptr::null_mut(),
2194 0x1000,
2195 libc::PROT_READ | libc::PROT_WRITE,
2196 libc::MAP_SHARED,
2197 vcpu.as_raw_fd(),
2198 MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE as i64 * libc::sysconf(libc::_SC_PAGE_SIZE),
2199 )
2200 } as *mut u8;
2201
2202 if hv_msg_addr as *mut c_void == libc::MAP_FAILED {
2203 panic!(
2204 "Could not mmap HV page, error:{}",
2205 std::io::Error::last_os_error()
2206 );
2207 }
2208
2209 vm.map_user_memory(mem_region).unwrap();
2210
2211 let reg_page: *mut hv_vp_register_page = registers_addr as *mut hv_vp_register_page;
2212 let hv_msg_page: *mut hv_message = hv_msg_addr as *mut hv_message;
2213 let mut done = false;
2214
2215 unsafe {
2216 let mut slice = slice::from_raw_parts_mut(load_addr, mem_size);
2219 slice.write_all(&code).unwrap();
2220 }
2221
2222 let mut cs_reg = hv_register_assoc {
2224 name: hv_register_name_HV_X64_REGISTER_CS,
2225 ..Default::default()
2226 };
2227 vcpu.get_reg(slice::from_mut(&mut cs_reg)).unwrap();
2228
2229 unsafe {
2230 assert_ne!({ cs_reg.value.segment.base }, 0);
2231 assert_ne!({ cs_reg.value.segment.selector }, 0);
2232 };
2233
2234 cs_reg.value.segment.base = 0;
2235 cs_reg.value.segment.selector = 0;
2236
2237 vcpu.set_reg(&[
2238 cs_reg,
2239 hv_register_assoc {
2240 name: hv_register_name_HV_X64_REGISTER_RAX,
2241 value: hv_register_value { reg64: 2 },
2242 ..Default::default()
2243 },
2244 hv_register_assoc {
2245 name: hv_register_name_HV_X64_REGISTER_RBX,
2246 value: hv_register_value { reg64: 2 },
2247 ..Default::default()
2248 },
2249 hv_register_assoc {
2250 name: hv_register_name_HV_X64_REGISTER_RIP,
2251 value: hv_register_value { reg64: 0x1000 },
2252 ..Default::default()
2253 },
2254 hv_register_assoc {
2255 name: hv_register_name_HV_X64_REGISTER_RFLAGS,
2256 value: hv_register_value { reg64: 0x2 },
2257 ..Default::default()
2258 },
2259 ])
2260 .unwrap();
2261
2262 unsafe {
2263 assert!((*reg_page).version == HV_VP_REGISTER_PAGE_VERSION_1 as u16);
2264 assert!((*reg_page).isvalid == 1);
2265 assert!((*reg_page).dirty == 0);
2266 }
2267
2268 loop {
2269 vcpu.run().unwrap();
2270 let msg_header = unsafe { (*hv_msg_page).header };
2271 match msg_header.message_type {
2272 hv_message_type_HVMSG_X64_HALT => {
2273 println!("VM Halted!");
2274 break;
2275 }
2276 hv_message_type_HVMSG_X64_IO_PORT_INTERCEPT => {
2277 let io_message = unsafe { (*hv_msg_page).to_ioport_info().unwrap() };
2278 assert!(io_message.port_number == 0x3f8);
2279 unsafe {
2280 assert!(io_message.access_info.__bindgen_anon_1.string_op() == 0);
2281 assert!(io_message.access_info.__bindgen_anon_1.access_size() == 1);
2282 }
2283 if !done {
2284 assert!(io_message.rax == b'4' as u64);
2285 assert!(io_message.port_number == 0x3f8);
2286 unsafe {
2287 assert!(io_message.access_info.__bindgen_anon_1.string_op() == 0);
2288 assert!(io_message.access_info.__bindgen_anon_1.access_size() == 1);
2289 }
2290 assert!(
2291 io_message.header.intercept_access_type == 1_u8
2292 );
2293 done = true;
2294 unsafe {
2296 (*reg_page).__bindgen_anon_1.__bindgen_anon_1.rip =
2297 io_message.header.rip + 1;
2298 }
2299 unsafe {
2300 (*reg_page).dirty = 1 << HV_X64_REGISTER_CLASS_IP;
2301 }
2302 } else {
2303 assert!(io_message.rax == b'\0' as u64);
2304 assert!(
2305 io_message.header.intercept_access_type == 1_u8
2306 );
2307 break;
2308 }
2309 }
2310 _ => {
2311 println!("Message type: 0x{:x?}", { msg_header.message_type });
2312 panic!("Unexpected Exit Type");
2313 }
2314 };
2315 }
2316 assert!(done);
2317
2318 vm.unmap_user_memory(mem_region).unwrap();
2319 unsafe { libc::munmap(load_addr as *mut c_void, mem_size) };
2320 unsafe { libc::munmap(registers_addr as *mut c_void, 0x1000) };
2321 unsafe { libc::munmap(hv_msg_addr as *mut c_void, 0x1000) };
2322 }
2323
2324 #[cfg(target_arch = "x86_64")]
2325 #[test]
2326 fn test_set_get_msrs() {
2327 let hv = Mshv::new().unwrap();
2328 let vm = hv.create_vm().unwrap();
2329 vm.initialize().unwrap();
2330 let vcpu = vm.create_vcpu(0).unwrap();
2331
2332 let s_regs = Msrs::from_entries(&[
2333 msr_entry {
2334 index: IA32_MSR_SYSENTER_CS,
2335 data: 0x1,
2336 ..Default::default()
2337 },
2338 msr_entry {
2339 index: IA32_MSR_SYSENTER_ESP,
2340 data: 0x2,
2341 ..Default::default()
2342 },
2343 ])
2344 .unwrap();
2345 let mut g_regs = Msrs::from_entries(&[
2346 msr_entry {
2347 index: IA32_MSR_SYSENTER_CS,
2348 ..Default::default()
2349 },
2350 msr_entry {
2351 index: IA32_MSR_SYSENTER_ESP,
2352 ..Default::default()
2353 },
2354 ])
2355 .unwrap();
2356 vcpu.set_msrs(&s_regs).unwrap();
2357 vcpu.get_msrs(&mut g_regs).unwrap();
2358 assert!(g_regs.as_fam_struct_ref().nmsrs == s_regs.as_fam_struct_ref().nmsrs);
2359 assert!(g_regs.as_slice()[0].data == s_regs.as_slice()[0].data);
2360 assert!(g_regs.as_slice()[1].data == s_regs.as_slice()[1].data);
2361 }
2362
2363 #[cfg(target_arch = "x86_64")]
2364 #[test]
2365 fn test_set_get_vcpu_events() {
2366 let hv = Mshv::new().unwrap();
2367 let vm = hv.create_vm().unwrap();
2368 vm.initialize().unwrap();
2369 let vcpu = vm.create_vcpu(0).unwrap();
2370
2371 let s_regs = vcpu.get_vcpu_events().unwrap();
2372 vcpu.set_vcpu_events(&s_regs).unwrap();
2373 let g_regs = vcpu.get_vcpu_events().unwrap();
2374 assert!(g_regs.pending_interruption == s_regs.pending_interruption);
2375 assert!(g_regs.interrupt_state == s_regs.interrupt_state);
2376 assert!(g_regs.internal_activity_state == s_regs.internal_activity_state);
2377 for i in 0..16 {
2378 assert!(g_regs.pending_event0[i] == s_regs.pending_event0[i]);
2379 assert!(g_regs.pending_event1[i] == s_regs.pending_event1[i]);
2380 }
2381 }
2382
2383 #[cfg(target_arch = "x86_64")]
2384 #[test]
2385 fn test_set_get_xcrs() {
2386 let hv = Mshv::new().unwrap();
2387 let vm = hv.create_vm().unwrap();
2388 vm.initialize().unwrap();
2389 let vcpu = vm.create_vcpu(0).unwrap();
2390
2391 let s_regs = vcpu.get_xcrs().unwrap();
2392 vcpu.set_xcrs(&s_regs).unwrap();
2393 let g_regs = vcpu.get_xcrs().unwrap();
2394 assert!(g_regs.xcr0 == s_regs.xcr0);
2395 }
2396
2397 #[cfg(target_arch = "x86_64")]
2398 #[test]
2399 fn test_set_get_lapic() {
2400 let hv = Mshv::new().unwrap();
2401 let vm = hv.create_vm().unwrap();
2402 vm.initialize().unwrap();
2403 let vcpu = vm.create_vcpu(0).unwrap();
2404
2405 let state = vcpu.get_lapic().unwrap();
2406 vcpu.set_lapic(&state).unwrap();
2407 let g_state = vcpu.get_lapic().unwrap();
2408 for i in 0..1024 {
2409 assert!(state.regs[i] == g_state.regs[i]);
2410 }
2411 }
2412
2413 #[cfg(target_arch = "x86_64")]
2414 #[test]
2415 fn test_set_registers_64() {
2416 let hv = Mshv::new().unwrap();
2417 let vm = hv.create_vm().unwrap();
2418 vm.initialize().unwrap();
2419 let vcpu = vm.create_vcpu(0).unwrap();
2420 let arr_reg_name_value = [
2421 (hv_register_name_HV_X64_REGISTER_RIP, 0x1000),
2422 (hv_register_name_HV_X64_REGISTER_RFLAGS, 0x2),
2423 ];
2424 set_registers_64!(vcpu, &arr_reg_name_value).unwrap();
2425 let mut get_regs: [hv_register_assoc; 2] = [
2426 hv_register_assoc {
2427 name: hv_register_name_HV_X64_REGISTER_RIP,
2428 ..Default::default()
2429 },
2430 hv_register_assoc {
2431 name: hv_register_name_HV_X64_REGISTER_RFLAGS,
2432 ..Default::default()
2433 },
2434 ];
2435
2436 vcpu.get_reg(&mut get_regs).unwrap();
2437
2438 unsafe {
2439 assert!(get_regs[0].value.reg64 == 0x1000);
2441 assert!(get_regs[1].value.reg64 == 0x2);
2442 }
2443 }
2444
2445 #[cfg(target_arch = "x86_64")]
2446 #[test]
2447 fn test_get_set_xsave() {
2448 let hv = Mshv::new().unwrap();
2449 let vm = hv.create_vm().unwrap();
2450 vm.initialize().unwrap();
2451 let vcpu = vm.create_vcpu(0).unwrap();
2452
2453 let state = vcpu.get_xsave().unwrap();
2454
2455 vcpu.set_xsave(&state).unwrap();
2456 }
2457
2458 #[cfg(target_arch = "x86_64")]
2459 #[test]
2460 fn test_get_suspend_regs() {
2461 let hv = Mshv::new().unwrap();
2462 let vm = hv.create_vm().unwrap();
2463 vm.initialize().unwrap();
2464 let vcpu = vm.create_vcpu(0).unwrap();
2465
2466 let regs = vcpu.get_suspend_regs().unwrap();
2467 assert!(regs.explicit_register == 0x1);
2469 assert!(regs.intercept_register == 0x0);
2470 }
2471
2472 #[cfg(target_arch = "x86_64")]
2473 #[test]
2474 fn test_set_get_misc_regs() {
2475 let hv = Mshv::new().unwrap();
2476 let vm = hv.create_vm().unwrap();
2477 vm.initialize().unwrap();
2478 let vcpu = vm.create_vcpu(0).unwrap();
2479
2480 let s_regs = vcpu.get_misc_regs().unwrap();
2481 vcpu.set_misc_regs(&s_regs).unwrap();
2482 let g_regs = vcpu.get_misc_regs().unwrap();
2483 assert!(g_regs.hypercall == s_regs.hypercall);
2484 }
2485
2486 #[cfg(target_arch = "x86_64")]
2487 #[test]
2488 fn test_get_cpuid_values() {
2489 let hv = Mshv::new().unwrap();
2490 let vm = hv.create_vm().unwrap();
2491 vm.initialize().unwrap();
2492 let vcpu = vm.create_vcpu(0).unwrap();
2493 let res_0 = vcpu.get_cpuid_values(0, 0, 0, 0).unwrap();
2494 let max_function = res_0[0];
2495 assert!(max_function >= 1);
2496 let res_1 = vcpu.hvcall_get_cpuid_values(0, 0, 0, 0).unwrap();
2497 assert!(res_1[0] >= 1);
2498 assert!(res_0[0] == res_1[0]);
2499 }
2500
2501 #[cfg(target_arch = "x86_64")]
2502 #[test]
2503 fn test_get_set_vp_state_components() {
2504 let hv = Mshv::new().unwrap();
2505 let vm = hv.create_vm().unwrap();
2506 vm.initialize().unwrap();
2507 let vcpu = vm.create_vcpu(0).unwrap();
2508 let mut states = vcpu.get_all_vp_state_components().unwrap();
2509 vcpu.set_all_vp_state_components(&mut states).unwrap();
2510 let ret_states = vcpu.get_all_vp_state_components().unwrap();
2511 assert!(states
2512 .buffer
2513 .iter()
2514 .zip(ret_states.buffer)
2515 .all(|(a, b)| *a == b));
2516 }
2517}