mshv_bindings/x86_64/
regs.rs

1// Copyright © 2020, Microsoft Corporation
2//
3// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
4//
5
6use crate::bindings::*;
7use crate::HV_PAGE_SIZE;
8#[cfg(feature = "with-serde")]
9use serde_derive::{Deserialize, Serialize};
10use std::convert::TryFrom;
11use std::fmt;
12use std::ptr;
13use vmm_sys_util::errno;
14use zerocopy::{FromBytes, IntoBytes};
15
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// General-purpose x86_64 register state: the sixteen GPRs plus the
/// instruction pointer and flags register.
pub struct StandardRegisters {
    pub rax: u64,
    pub rbx: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub rsp: u64,
    pub rbp: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
    pub rip: u64,
    pub rflags: u64,
}
39
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Flattened x86 segment register plus its descriptor attributes.
///
/// Each attribute byte holds a small bitfield value unpacked from the
/// hypervisor's packed representation (see `From<hv_x64_segment_register>`);
/// the trailing comment on each field gives its width in bits.
pub struct SegmentRegister {
    /* segment register + descriptor */
    pub base: u64,
    pub limit: u32,
    pub selector: u16,
    pub type_: u8,   /* type, writeable etc: 4 */
    pub present: u8, /* if not present, exception generated: 1 */
    pub dpl: u8,     /* descriptor privilege level (ring): 2 */
    pub db: u8,      /* default/big (16 or 32 bit size offset): 1 */
    pub s: u8,       /* non-system segment */
    pub l: u8,       /* long (64 bit): 1 */
    pub g: u8,       /* granularity (bytes or 4096 byte pages): 1 */
    pub avl: u8,     /* available (free bit for software to use): 1 */
    pub unusable: __u8,
    pub padding: __u8,
}
59
60impl From<hv_x64_segment_register> for SegmentRegister {
61    fn from(hv_reg: hv_x64_segment_register) -> Self {
62        let mut reg = SegmentRegister {
63            base: hv_reg.base,
64            limit: hv_reg.limit,
65            selector: hv_reg.selector,
66            unusable: 0_u8,
67            padding: 0_u8,
68            ..Default::default()
69        };
70
71        // SAFETY: Getting a bunch of bitfields. Functions and unions are generated by bindgen
72        // so we have to use unsafe here. We trust bindgen to generate the correct accessors.
73        unsafe {
74            reg.type_ = hv_reg.__bindgen_anon_1.__bindgen_anon_1.segment_type() as u8;
75            reg.present = hv_reg.__bindgen_anon_1.__bindgen_anon_1.present() as u8;
76            reg.dpl = hv_reg
77                .__bindgen_anon_1
78                .__bindgen_anon_1
79                .descriptor_privilege_level() as u8;
80            reg.db = hv_reg.__bindgen_anon_1.__bindgen_anon_1._default() as u8;
81            reg.s = hv_reg
82                .__bindgen_anon_1
83                .__bindgen_anon_1
84                .non_system_segment() as u8;
85            reg.l = hv_reg.__bindgen_anon_1.__bindgen_anon_1._long() as u8;
86            reg.g = hv_reg.__bindgen_anon_1.__bindgen_anon_1.granularity() as u8;
87            reg.avl = hv_reg.__bindgen_anon_1.__bindgen_anon_1.available() as u8;
88        }
89
90        reg
91    }
92}
93impl From<SegmentRegister> for hv_x64_segment_register {
94    fn from(reg: SegmentRegister) -> Self {
95        let mut hv_reg = hv_x64_segment_register {
96            base: reg.base,
97            limit: reg.limit,
98            selector: reg.selector,
99            ..Default::default()
100        };
101
102        // SAFETY: Setting a bunch of bitfields. Functions and unions are generated by bindgen
103        // so we have to use unsafe here. We trust bindgen to generate the correct accessors.
104        unsafe {
105            hv_reg
106                .__bindgen_anon_1
107                .__bindgen_anon_1
108                .set_segment_type(reg.type_ as u16);
109            hv_reg
110                .__bindgen_anon_1
111                .__bindgen_anon_1
112                .set_present(reg.present as u16);
113            hv_reg
114                .__bindgen_anon_1
115                .__bindgen_anon_1
116                .set_descriptor_privilege_level(reg.dpl as u16);
117            hv_reg
118                .__bindgen_anon_1
119                .__bindgen_anon_1
120                .set__default(reg.db as u16);
121            hv_reg
122                .__bindgen_anon_1
123                .__bindgen_anon_1
124                .set_non_system_segment(reg.s as u16);
125            hv_reg
126                .__bindgen_anon_1
127                .__bindgen_anon_1
128                .set__long(reg.l as u16);
129            hv_reg
130                .__bindgen_anon_1
131                .__bindgen_anon_1
132                .set_granularity(reg.g as u16);
133            hv_reg
134                .__bindgen_anon_1
135                .__bindgen_anon_1
136                .set_available(reg.avl as u16);
137        }
138
139        hv_reg
140    }
141}
142
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Descriptor-table register (GDTR/IDTR): base linear address and limit.
pub struct TableRegister {
    pub base: u64,
    pub limit: u16,
}
150
151impl From<hv_x64_table_register> for TableRegister {
152    fn from(reg: hv_x64_table_register) -> Self {
153        TableRegister {
154            base: reg.base,
155            limit: reg.limit,
156        }
157    }
158}
159
160impl From<TableRegister> for hv_x64_table_register {
161    fn from(reg: TableRegister) -> Self {
162        hv_x64_table_register {
163            limit: reg.limit,
164            base: reg.base,
165            pad: [0; 3],
166        }
167    }
168}
169
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Segment, table, control and APIC-base register state, plus a 256-bit
/// pending-interrupt bitmap.
pub struct SpecialRegisters {
    pub cs: SegmentRegister,
    pub ds: SegmentRegister,
    pub es: SegmentRegister,
    pub fs: SegmentRegister,
    pub gs: SegmentRegister,
    pub ss: SegmentRegister,
    pub tr: SegmentRegister,
    pub ldt: SegmentRegister,
    pub gdt: TableRegister,
    pub idt: TableRegister,
    pub cr0: u64,
    pub cr2: u64,
    pub cr3: u64,
    pub cr4: u64,
    pub cr8: u64,
    pub efer: u64,
    pub apic_base: u64,
    pub interrupt_bitmap: [u64; 4usize],
}
193
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// x86 debug registers: four address registers (DR0-DR3) plus the
/// status (DR6) and control (DR7) registers.
pub struct DebugRegisters {
    pub dr0: u64,
    pub dr1: u64,
    pub dr2: u64,
    pub dr3: u64,
    pub dr6: u64,
    pub dr7: u64,
}
205
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// x87/SSE floating point state: eight 16-byte FP registers, control/status
/// words, sixteen XMM registers and MXCSR.
pub struct FloatingPointUnit {
    pub fpr: [[u8; 16usize]; 8usize],
    pub fcw: u16,
    pub fsw: u16,
    pub ftwx: u8,
    pub pad1: u8,
    pub last_opcode: u16,
    pub last_ip: u64,
    pub last_dp: u64,
    pub xmm: [[u8; 16usize]; 16usize],
    pub mxcsr: u32,
    pub pad2: u32,
}
222
// Architectural MSR numbers consumed by `msr_to_hv_reg_name` below.
pub const IA32_MSR_TSC: u32 = 0x00000010;
pub const IA32_MSR_EFER: u32 = 0xC0000080;
pub const IA32_MSR_KERNEL_GS_BASE: u32 = 0xC0000102;
pub const IA32_MSR_APIC_BASE: u32 = 0x0000001B;
pub const IA32_MSR_PAT: u32 = 0x0277;
pub const IA32_MSR_SYSENTER_CS: u32 = 0x00000174;
pub const IA32_MSR_SYSENTER_ESP: u32 = 0x00000175;
pub const IA32_MSR_SYSENTER_EIP: u32 = 0x00000176;
pub const IA32_MSR_STAR: u32 = 0xC0000081;
pub const IA32_MSR_LSTAR: u32 = 0xC0000082;
pub const IA32_MSR_CSTAR: u32 = 0xC0000083;
pub const IA32_MSR_SFMASK: u32 = 0xC0000084;

// Variable-range MTRR base/mask pairs.
pub const IA32_MSR_MTRR_CAP: u32 = 0x00FE;
pub const IA32_MSR_MTRR_DEF_TYPE: u32 = 0x02FF;
pub const IA32_MSR_MTRR_PHYSBASE0: u32 = 0x0200;
pub const IA32_MSR_MTRR_PHYSMASK0: u32 = 0x0201;
pub const IA32_MSR_MTRR_PHYSBASE1: u32 = 0x0202;
pub const IA32_MSR_MTRR_PHYSMASK1: u32 = 0x0203;
pub const IA32_MSR_MTRR_PHYSBASE2: u32 = 0x0204;
pub const IA32_MSR_MTRR_PHYSMASK2: u32 = 0x0205;
pub const IA32_MSR_MTRR_PHYSBASE3: u32 = 0x0206;
pub const IA32_MSR_MTRR_PHYSMASK3: u32 = 0x0207;
pub const IA32_MSR_MTRR_PHYSBASE4: u32 = 0x0208;
pub const IA32_MSR_MTRR_PHYSMASK4: u32 = 0x0209;
pub const IA32_MSR_MTRR_PHYSBASE5: u32 = 0x020A;
pub const IA32_MSR_MTRR_PHYSMASK5: u32 = 0x020B;
pub const IA32_MSR_MTRR_PHYSBASE6: u32 = 0x020C;
pub const IA32_MSR_MTRR_PHYSMASK6: u32 = 0x020D;
pub const IA32_MSR_MTRR_PHYSBASE7: u32 = 0x020E;
pub const IA32_MSR_MTRR_PHYSMASK7: u32 = 0x020F;

// Fixed-range MTRRs.
pub const IA32_MSR_MTRR_FIX64K_00000: u32 = 0x0250;
pub const IA32_MSR_MTRR_FIX16K_80000: u32 = 0x0258;
pub const IA32_MSR_MTRR_FIX16K_A0000: u32 = 0x0259;
pub const IA32_MSR_MTRR_FIX4K_C0000: u32 = 0x0268;
pub const IA32_MSR_MTRR_FIX4K_C8000: u32 = 0x0269;
pub const IA32_MSR_MTRR_FIX4K_D0000: u32 = 0x026A;
pub const IA32_MSR_MTRR_FIX4K_D8000: u32 = 0x026B;
pub const IA32_MSR_MTRR_FIX4K_E0000: u32 = 0x026C;
pub const IA32_MSR_MTRR_FIX4K_E8000: u32 = 0x026D;
pub const IA32_MSR_MTRR_FIX4K_F0000: u32 = 0x026E;
pub const IA32_MSR_MTRR_FIX4K_F8000: u32 = 0x026F;

pub const IA32_MSR_TSC_AUX: u32 = 0xC0000103;
pub const IA32_MSR_BNDCFGS: u32 = 0x00000d90;
pub const IA32_MSR_DEBUG_CTL: u32 = 0x1D9;
pub const IA32_MSR_SPEC_CTRL: u32 = 0x00000048;
pub const IA32_MSR_TSC_ADJUST: u32 = 0x0000003b;

// CET (control-flow enforcement) shadow-stack MSRs.
pub const IA32_MSR_MISC_ENABLE: u32 = 0x000001a0;
pub const MSR_IA32_SSP: u32 = 0x000007a0;
pub const MSR_IA32_U_CET: u32 = 0x000006a0; /* user mode cet */
pub const MSR_IA32_S_CET: u32 = 0x000006a2; /* kernel mode cet */
pub const MSR_IA32_PL0_SSP: u32 = 0x000006a4; /* ring-0 shadow stack pointer */
pub const MSR_IA32_PL1_SSP: u32 = 0x000006a5; /* ring-1 shadow stack pointer */
pub const MSR_IA32_PL2_SSP: u32 = 0x000006a6; /* ring-2 shadow stack pointer */
pub const MSR_IA32_PL3_SSP: u32 = 0x000006a7; /* ring-3 shadow stack pointer */
pub const MSR_IA32_INTERRUPT_SSP_TABLE_ADDR: u32 = 0x000006A8;
pub const MSR_IA32_REGISTER_U_XSS: u32 = 0x8008B;
283
/// Maps an architectural MSR index to the corresponding Hyper-V register
/// name (`hv_register_name_*`) used by the set/get-registers hypercalls.
///
/// Returns `Err` for any MSR that has no Hyper-V register equivalent in
/// this table.
pub fn msr_to_hv_reg_name(msr: u32) -> Result<::std::os::raw::c_uint, &'static str> {
    match msr {
        IA32_MSR_TSC => Ok(hv_register_name_HV_X64_REGISTER_TSC),

        IA32_MSR_EFER => Ok(hv_register_name_HV_X64_REGISTER_EFER),
        IA32_MSR_KERNEL_GS_BASE => Ok(hv_register_name_HV_X64_REGISTER_KERNEL_GS_BASE),
        IA32_MSR_APIC_BASE => Ok(hv_register_name_HV_X64_REGISTER_APIC_BASE),
        IA32_MSR_PAT => Ok(hv_register_name_HV_X64_REGISTER_PAT),
        IA32_MSR_SYSENTER_CS => Ok(hv_register_name_HV_X64_REGISTER_SYSENTER_CS),
        IA32_MSR_SYSENTER_ESP => Ok(hv_register_name_HV_X64_REGISTER_SYSENTER_ESP),
        IA32_MSR_SYSENTER_EIP => Ok(hv_register_name_HV_X64_REGISTER_SYSENTER_EIP),
        IA32_MSR_STAR => Ok(hv_register_name_HV_X64_REGISTER_STAR),
        IA32_MSR_LSTAR => Ok(hv_register_name_HV_X64_REGISTER_LSTAR),
        IA32_MSR_CSTAR => Ok(hv_register_name_HV_X64_REGISTER_CSTAR),
        IA32_MSR_SFMASK => Ok(hv_register_name_HV_X64_REGISTER_SFMASK),

        IA32_MSR_MTRR_CAP => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_CAP),
        IA32_MSR_MTRR_DEF_TYPE => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_DEF_TYPE),
        IA32_MSR_MTRR_PHYSBASE0 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0),
        IA32_MSR_MTRR_PHYSMASK0 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0),
        IA32_MSR_MTRR_PHYSBASE1 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1),
        IA32_MSR_MTRR_PHYSMASK1 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1),
        IA32_MSR_MTRR_PHYSBASE2 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2),
        IA32_MSR_MTRR_PHYSMASK2 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2),
        IA32_MSR_MTRR_PHYSBASE3 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3),
        IA32_MSR_MTRR_PHYSMASK3 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3),
        IA32_MSR_MTRR_PHYSBASE4 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4),
        IA32_MSR_MTRR_PHYSMASK4 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4),
        IA32_MSR_MTRR_PHYSBASE5 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5),
        IA32_MSR_MTRR_PHYSMASK5 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5),
        IA32_MSR_MTRR_PHYSBASE6 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6),
        IA32_MSR_MTRR_PHYSMASK6 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6),
        IA32_MSR_MTRR_PHYSBASE7 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7),
        IA32_MSR_MTRR_PHYSMASK7 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7),

        IA32_MSR_MTRR_FIX64K_00000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX64K00000),
        IA32_MSR_MTRR_FIX16K_80000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX16K80000),
        IA32_MSR_MTRR_FIX16K_A0000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX16KA0000),
        IA32_MSR_MTRR_FIX4K_C0000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KC0000),
        IA32_MSR_MTRR_FIX4K_C8000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KC8000),
        IA32_MSR_MTRR_FIX4K_D0000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KD0000),
        IA32_MSR_MTRR_FIX4K_D8000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KD8000),
        IA32_MSR_MTRR_FIX4K_E0000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KE0000),
        IA32_MSR_MTRR_FIX4K_E8000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KE8000),
        IA32_MSR_MTRR_FIX4K_F0000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KF0000),
        IA32_MSR_MTRR_FIX4K_F8000 => Ok(hv_register_name_HV_X64_REGISTER_MSR_MTRR_FIX4KF8000),

        IA32_MSR_TSC_AUX => Ok(hv_register_name_HV_X64_REGISTER_TSC_AUX),
        IA32_MSR_BNDCFGS => Ok(hv_register_name_HV_X64_REGISTER_BNDCFGS),
        IA32_MSR_DEBUG_CTL => Ok(hv_register_name_HV_X64_REGISTER_DEBUG_CTL),
        IA32_MSR_TSC_ADJUST => Ok(hv_register_name_HV_X64_REGISTER_TSC_ADJUST),
        IA32_MSR_SPEC_CTRL => Ok(hv_register_name_HV_X64_REGISTER_SPEC_CTRL),
        // Synthetic (Hyper-V defined) MSRs: SynIC interrupt sources and
        // control pages.
        HV_X64_MSR_GUEST_OS_ID => Ok(hv_register_name_HV_REGISTER_GUEST_OS_ID),
        HV_X64_MSR_SINT0 => Ok(hv_register_name_HV_REGISTER_SINT0),
        HV_X64_MSR_SINT1 => Ok(hv_register_name_HV_REGISTER_SINT1),
        HV_X64_MSR_SINT2 => Ok(hv_register_name_HV_REGISTER_SINT2),
        HV_X64_MSR_SINT3 => Ok(hv_register_name_HV_REGISTER_SINT3),
        HV_X64_MSR_SINT4 => Ok(hv_register_name_HV_REGISTER_SINT4),
        HV_X64_MSR_SINT5 => Ok(hv_register_name_HV_REGISTER_SINT5),
        HV_X64_MSR_SINT6 => Ok(hv_register_name_HV_REGISTER_SINT6),
        HV_X64_MSR_SINT7 => Ok(hv_register_name_HV_REGISTER_SINT7),
        HV_X64_MSR_SINT8 => Ok(hv_register_name_HV_REGISTER_SINT8),
        HV_X64_MSR_SINT9 => Ok(hv_register_name_HV_REGISTER_SINT9),
        HV_X64_MSR_SINT10 => Ok(hv_register_name_HV_REGISTER_SINT10),
        HV_X64_MSR_SINT11 => Ok(hv_register_name_HV_REGISTER_SINT11),
        HV_X64_MSR_SINT12 => Ok(hv_register_name_HV_REGISTER_SINT12),
        HV_X64_MSR_SINT13 => Ok(hv_register_name_HV_REGISTER_SINT13),
        HV_X64_MSR_SINT14 => Ok(hv_register_name_HV_REGISTER_SINT14),
        HV_X64_MSR_SINT15 => Ok(hv_register_name_HV_REGISTER_SINT15),
        IA32_MSR_MISC_ENABLE => Ok(hv_register_name_HV_X64_REGISTER_MSR_IA32_MISC_ENABLE),
        HV_X64_MSR_SCONTROL => Ok(hv_register_name_HV_REGISTER_SCONTROL),
        HV_X64_MSR_SIEFP => Ok(hv_register_name_HV_REGISTER_SIEFP),
        HV_X64_MSR_SIMP => Ok(hv_register_name_HV_REGISTER_SIMP),
        HV_X64_MSR_REFERENCE_TSC => Ok(hv_register_name_HV_REGISTER_REFERENCE_TSC),
        HV_X64_MSR_EOM => Ok(hv_register_name_HV_REGISTER_EOM),
        // CET / shadow-stack registers.
        MSR_IA32_REGISTER_U_XSS => Ok(hv_register_name_HV_X64_REGISTER_U_XSS),
        MSR_IA32_U_CET => Ok(hv_register_name_HV_X64_REGISTER_U_CET),
        MSR_IA32_S_CET => Ok(hv_register_name_HV_X64_REGISTER_S_CET),
        MSR_IA32_SSP => Ok(hv_register_name_HV_X64_REGISTER_SSP),
        MSR_IA32_PL0_SSP => Ok(hv_register_name_HV_X64_REGISTER_PL0_SSP),
        MSR_IA32_PL1_SSP => Ok(hv_register_name_HV_X64_REGISTER_PL1_SSP),
        MSR_IA32_PL2_SSP => Ok(hv_register_name_HV_X64_REGISTER_PL2_SSP),
        MSR_IA32_PL3_SSP => Ok(hv_register_name_HV_X64_REGISTER_PL3_SSP),
        MSR_IA32_INTERRUPT_SSP_TABLE_ADDR => {
            Ok(hv_register_name_HV_X64_REGISTER_INTERRUPT_SSP_TABLE_ADDR)
        }
        _ => Err("Not a supported hv_register_name msr"),
    }
}
373
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// A single MSR index/value pair; `reserved` pads `data` to an 8-byte
/// boundary in the C layout.
pub struct msr_entry {
    pub index: u32,
    pub reserved: u32,
    pub data: u64,
}
382
#[repr(C)]
#[derive(Debug, Default)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Variable-length MSR list header: `nmsrs` entries follow inline via the
/// flexible-array-member field `entries` (hence the serde skips — the
/// trailing array cannot be serialized through this header alone).
pub struct msrs {
    pub nmsrs: u32,
    #[cfg_attr(feature = "with-serde", serde(skip))]
    pub pad: u32,
    #[cfg_attr(feature = "with-serde", serde(skip))]
    pub entries: __IncompleteArrayField<msr_entry>,
}
393
#[repr(C)]
#[derive(Debug, Default)]
/// Variable-length list of MSR indices (no values); `nmsrs` indices follow
/// inline after the header.
pub struct msr_list {
    pub nmsrs: u32,
    pub indices: __IncompleteArrayField<u32>,
}
400
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Pending-event state of a virtual processor (interruption, interrupt
/// state, activity state, and two raw 16-byte pending-event records).
pub struct VcpuEvents {
    pub pending_interruption: u64,
    pub interrupt_state: u64,
    pub internal_activity_state: u64,
    pub pending_event0: [u8; 16usize],
    pub pending_event1: [u8; 16usize],
}
411
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Extended control registers; currently only XCR0 is exposed.
pub struct Xcrs {
    pub xcr0: u64,
}
418
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
/// One CPUID leaf: input function/index plus the four output registers.
pub struct hv_cpuid_entry {
    pub function: __u32,
    pub index: __u32,
    pub flags: __u32,
    pub eax: __u32,
    pub ebx: __u32,
    pub ecx: __u32,
    pub edx: __u32,
    pub padding: [__u32; 3usize],
}
431
#[repr(C)]
#[derive(Debug, Default)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Variable-length CPUID table header: `nent` entries follow inline via the
/// flexible-array-member field `entries` (hence the serde skips).
pub struct hv_cpuid {
    pub nent: __u32,
    #[cfg_attr(feature = "with-serde", serde(skip))]
    pub padding: __u32,
    #[cfg_attr(feature = "with-serde", serde(skip))]
    pub entries: __IncompleteArrayField<hv_cpuid_entry>,
}
442
// Byte offsets of the memory-mapped local-APIC registers inside the 1 KiB
// `LapicState::regs` buffer, used by the `LapicState` <-> `Buffer`
// conversions below.
pub const LOCAL_APIC_OFFSET_APIC_ID: isize = 0x20; // APIC ID Register.
pub const LOCAL_APIC_OFFSET_VERSION: isize = 0x30; // APIC Version Register.
pub const LOCAL_APIC_OFFSET_TPR: isize = 0x80; // Task Priority Register
pub const LOCAL_APIC_OFFSET_APR: isize = 0x90; // Arbitration Priority Register.
pub const LOCAL_APIC_OFFSET_PPR: isize = 0xA0; // Processor Priority Register.
pub const LOCAL_APIC_OFFSET_EOI: isize = 0xB0; // End Of Interrupt Register.
pub const LOCAL_APIC_OFFSET_REMOTE_READ: isize = 0xC0; // Remote Read Register
pub const LOCAL_APIC_OFFSET_LDR: isize = 0xD0; // Logical Destination Register.
pub const LOCAL_APIC_OFFSET_DFR: isize = 0xE0; // Destination Format Register.
pub const LOCAL_APIC_OFFSET_SPURIOUS: isize = 0xF0; // Spurious Interrupt Vector.
pub const LOCAL_APIC_OFFSET_ISR: isize = 0x100; // In-Service Register.
pub const LOCAL_APIC_OFFSET_TMR: isize = 0x180; // Trigger Mode Register.
pub const LOCAL_APIC_OFFSET_IRR: isize = 0x200; // Interrupt Request Register.
pub const LOCAL_APIC_OFFSET_ERROR: isize = 0x280; // Error Status Register.
pub const LOCAL_APIC_OFFSET_ICR_LOW: isize = 0x300; // ICR Low.
pub const LOCAL_APIC_OFFSET_ICR_HIGH: isize = 0x310; // ICR High.
pub const LOCAL_APIC_OFFSET_TIMER_LVT: isize = 0x320; // LVT Timer Register.
pub const LOCAL_APIC_OFFSET_THERMAL_LVT: isize = 0x330; // LVT Thermal Register.
pub const LOCAL_APIC_OFFSET_PERFMON_LVT: isize = 0x340; // LVT Performance Monitor Register.
pub const LOCAL_APIC_OFFSET_LINT0_LVT: isize = 0x350; // LVT Local Int0; Register.
pub const LOCAL_APIC_OFFSET_LINT1_LVT: isize = 0x360; // LVT Local Int1 Register.
pub const LOCAL_APIC_OFFSET_ERROR_LVT: isize = 0x370; // LVT Error Register.
pub const LOCAL_APIC_OFFSET_INITIAL_COUNT: isize = 0x380; // Initial count Register.
pub const LOCAL_APIC_OFFSET_CURRENT_COUNT: isize = 0x390; // R/O Current count Register.
pub const LOCAL_APIC_OFFSET_DIVIDER: isize = 0x3e0; // Divide configuration Register.
pub const LOCAL_X2APIC_OFFSET_SELF_IPI: isize = 0x3f0; // Self IPI register, only present in x2APIC.
469
/// Owned, manually-managed heap allocation used to exchange page-aligned
/// state with the hypervisor. The allocation is described by `layout` and
/// freed on `Drop`.
///
/// NOTE(review): fields are raw and public; external mutation of `buf` or
/// `layout` can cause an invalid `dealloc` on drop.
pub struct Buffer {
    pub layout: std::alloc::Layout,
    pub buf: *mut u8,
}
474
475impl Buffer {
476    pub fn new(size: usize, align: usize) -> Result<Buffer, errno::Error> {
477        let layout = std::alloc::Layout::from_size_align(size, align).unwrap();
478        // SAFETY: layout is valid
479        let buf = unsafe { std::alloc::alloc(layout) };
480        if buf.is_null() {
481            return Err(errno::Error::new(libc::ENOMEM));
482        }
483
484        let buf = Buffer { layout, buf };
485
486        Ok(buf)
487    }
488
489    pub fn dealloc(&mut self) {
490        // SAFETY: buf was allocated with layout
491        unsafe {
492            std::alloc::dealloc(self.buf, self.layout);
493        }
494    }
495
496    pub fn size(&self) -> usize {
497        self.layout.size()
498    }
499
500    pub fn zero_out_buf(&mut self) {
501        // SAFETY: We write zeros to a valid pointer and the size is valid and allocated from a valid layout.
502        unsafe {
503            ::std::ptr::write_bytes(self.buf, 0u8, self.size());
504        }
505    }
506}
507
impl Drop for Buffer {
    // Frees the heap allocation when the Buffer goes out of scope.
    fn drop(&mut self) {
        self.dealloc();
    }
}
513
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, FromBytes)]
/// Fixed buffer for lapic state
///
/// Holds the 1 KiB memory-mapped local-APIC register window; individual
/// registers live at the `LOCAL_APIC_OFFSET_*` byte offsets.
pub struct LapicState {
    pub regs: [::std::os::raw::c_char; 1024usize],
}
520
521impl Default for LapicState {
522    fn default() -> Self {
523        unsafe { ::std::mem::zeroed() }
524    }
525}
526
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, FromBytes)]
/// Fixed buffer for xsave state
///
/// One 4 KiB page holding the raw XSAVE area exchanged with the hypervisor.
pub struct XSave {
    pub buffer: [u8; 4096usize],
}
533
534impl Default for XSave {
535    fn default() -> Self {
536        unsafe { ::std::mem::zeroed() }
537    }
538}
539
540impl TryFrom<Buffer> for XSave {
541    type Error = errno::Error;
542    fn try_from(buf: Buffer) -> Result<Self, Self::Error> {
543        let mut ret = XSave {
544            ..Default::default()
545        };
546        let ret_size = std::mem::size_of_val(&ret.buffer);
547        if ret_size < buf.size() {
548            return Err(errno::Error::new(libc::EINVAL));
549        }
550        // SAFETY: ret is large enough to hold buffer
551        unsafe { ptr::copy(buf.buf, ret.buffer.as_mut_ptr(), buf.size()) };
552        Ok(ret)
553    }
554}
555
556impl TryFrom<&XSave> for Buffer {
557    type Error = errno::Error;
558    fn try_from(reg: &XSave) -> Result<Self, Self::Error> {
559        let reg_size = std::mem::size_of_val(&reg.buffer);
560        let num_pages = (reg_size + HV_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT;
561        let buffer = Buffer::new(num_pages * HV_PAGE_SIZE, HV_PAGE_SIZE)?;
562        // SAFETY: buffer is large enough to hold reg
563        unsafe { ptr::copy(reg.buffer.as_ptr(), buffer.buf, reg_size) };
564        Ok(buffer)
565    }
566}
567
impl TryFrom<Buffer> for LapicState {
    type Error = errno::Error;
    /// Converts a hypervisor `hv_local_interrupt_controller_state` blob into
    /// the memory-mapped local-APIC register layout, writing each field at
    /// its architectural `LOCAL_APIC_OFFSET_*` byte offset.
    ///
    /// Fails with `EINVAL` if the buffer is too small to contain the
    /// hypervisor state struct.
    fn try_from(buf: Buffer) -> Result<Self, Self::Error> {
        let mut ret: LapicState = LapicState::default();
        let state = ret.regs.as_mut_ptr();
        if buf.size() < std::mem::size_of::<hv_local_interrupt_controller_state>() {
            return Err(errno::Error::new(libc::EINVAL));
        }
        // SAFETY: buf is large enough for hv_local_interrupt_controller_state
        // (checked above), and every offset written below is < 1024 so the
        // stores stay inside ret.regs.
        unsafe {
            let hv_state = &*(buf.buf as *const hv_local_interrupt_controller_state);
            *(state.offset(LOCAL_APIC_OFFSET_APIC_ID) as *mut u32) = hv_state.apic_id;
            *(state.offset(LOCAL_APIC_OFFSET_VERSION) as *mut u32) = hv_state.apic_version;
            *(state.offset(LOCAL_APIC_OFFSET_REMOTE_READ) as *mut u32) = hv_state.apic_remote_read;
            *(state.offset(LOCAL_APIC_OFFSET_LDR) as *mut u32) = hv_state.apic_ldr;
            *(state.offset(LOCAL_APIC_OFFSET_DFR) as *mut u32) = hv_state.apic_dfr;
            *(state.offset(LOCAL_APIC_OFFSET_SPURIOUS) as *mut u32) = hv_state.apic_spurious;
            *(state.offset(LOCAL_APIC_OFFSET_ERROR) as *mut u32) = hv_state.apic_esr;
            *(state.offset(LOCAL_APIC_OFFSET_ICR_LOW) as *mut u32) = hv_state.apic_icr_low;
            *(state.offset(LOCAL_APIC_OFFSET_ICR_HIGH) as *mut u32) = hv_state.apic_icr_high;
            *(state.offset(LOCAL_APIC_OFFSET_TIMER_LVT) as *mut u32) = hv_state.apic_lvt_timer;
            *(state.offset(LOCAL_APIC_OFFSET_THERMAL_LVT) as *mut u32) = hv_state.apic_lvt_thermal;
            *(state.offset(LOCAL_APIC_OFFSET_PERFMON_LVT) as *mut u32) = hv_state.apic_lvt_perfmon;
            *(state.offset(LOCAL_APIC_OFFSET_LINT0_LVT) as *mut u32) = hv_state.apic_lvt_lint0;
            *(state.offset(LOCAL_APIC_OFFSET_LINT1_LVT) as *mut u32) = hv_state.apic_lvt_lint1;
            *(state.offset(LOCAL_APIC_OFFSET_ERROR_LVT) as *mut u32) = hv_state.apic_lvt_error;
            *(state.offset(LOCAL_APIC_OFFSET_INITIAL_COUNT) as *mut u32) =
                hv_state.apic_initial_count;
            *(state.offset(LOCAL_APIC_OFFSET_CURRENT_COUNT) as *mut u32) =
                hv_state.apic_counter_value;
            *(state.offset(LOCAL_APIC_OFFSET_DIVIDER) as *mut u32) =
                hv_state.apic_divide_configuration;

            /* vectors ISR TMR IRR */
            // Each 256-bit register is stored as eight u32 words spaced 16
            // bytes apart in the MMIO layout.
            for i in 0..8 {
                *(state.offset(LOCAL_APIC_OFFSET_ISR + i * 16) as *mut u32) =
                    hv_state.apic_isr[i as usize];
                *(state.offset(LOCAL_APIC_OFFSET_TMR + i * 16) as *mut u32) =
                    hv_state.apic_tmr[i as usize];
                *(state.offset(LOCAL_APIC_OFFSET_IRR + i * 16) as *mut u32) =
                    hv_state.apic_irr[i as usize];
            }

            // Highest priority interrupt (isr = in service register) this is how WHP computes it
            let mut isrv: u32 = 0;
            for i in (0..8).rev() {
                let val: u32 = hv_state.apic_isr[i as usize];
                if val != 0 {
                    isrv = 31 - val.leading_zeros(); // index of most significant set bit
                    isrv += i * 4 * 8; // each u32 word covers 32 vectors (4 bytes * 8 bits)
                    break;
                }
            }

            // TODO This is meant to be max(tpr, isrv), but tpr is not populated!
            *(state.offset(LOCAL_APIC_OFFSET_PPR) as *mut u32) = isrv;
        }
        Ok(ret)
    }
}
628
impl TryFrom<&LapicState> for Buffer {
    type Error = errno::Error;
    /// Converts the memory-mapped local-APIC register layout back into a
    /// page-aligned `hv_local_interrupt_controller_state` buffer, reading
    /// each field from its `LOCAL_APIC_OFFSET_*` byte offset.
    fn try_from(reg: &LapicState) -> Result<Self, Self::Error> {
        let hv_state_size = std::mem::size_of::<hv_local_interrupt_controller_state>();
        // Round the struct size up to a whole number of hypervisor pages.
        let num_pages = (hv_state_size + HV_PAGE_SIZE - 1) >> HV_HYP_PAGE_SHIFT;
        let buffer = Buffer::new(num_pages * HV_PAGE_SIZE, HV_PAGE_SIZE)?;
        // SAFETY: buf is large enough for hv_local_interrupt_controller_state
        // and every offset read below is < 1024, inside reg.regs. The
        // *const-to-*mut casts are only ever used for reads here.
        unsafe {
            let state = reg.regs.as_ptr();
            let hv_state = &mut *(buffer.buf as *mut hv_local_interrupt_controller_state);
            *hv_state = hv_local_interrupt_controller_state {
                apic_id: *(state.offset(LOCAL_APIC_OFFSET_APIC_ID) as *mut u32),
                apic_version: *(state.offset(LOCAL_APIC_OFFSET_VERSION) as *mut u32),
                apic_remote_read: *(state.offset(LOCAL_APIC_OFFSET_REMOTE_READ) as *mut u32),
                apic_ldr: *(state.offset(LOCAL_APIC_OFFSET_LDR) as *mut u32),
                apic_dfr: *(state.offset(LOCAL_APIC_OFFSET_DFR) as *mut u32),
                apic_spurious: *(state.offset(LOCAL_APIC_OFFSET_SPURIOUS) as *mut u32),
                apic_esr: *(state.offset(LOCAL_APIC_OFFSET_ERROR) as *mut u32),
                apic_icr_low: *(state.offset(LOCAL_APIC_OFFSET_ICR_LOW) as *mut u32),
                apic_icr_high: *(state.offset(LOCAL_APIC_OFFSET_ICR_HIGH) as *mut u32),
                apic_lvt_timer: *(state.offset(LOCAL_APIC_OFFSET_TIMER_LVT) as *mut u32),
                apic_lvt_thermal: *(state.offset(LOCAL_APIC_OFFSET_THERMAL_LVT) as *mut u32),
                apic_lvt_perfmon: *(state.offset(LOCAL_APIC_OFFSET_PERFMON_LVT) as *mut u32),
                apic_lvt_lint0: *(state.offset(LOCAL_APIC_OFFSET_LINT0_LVT) as *mut u32),
                apic_lvt_lint1: *(state.offset(LOCAL_APIC_OFFSET_LINT1_LVT) as *mut u32),
                apic_lvt_error: *(state.offset(LOCAL_APIC_OFFSET_ERROR_LVT) as *mut u32),
                apic_initial_count: *(state.offset(LOCAL_APIC_OFFSET_INITIAL_COUNT) as *mut u32),
                apic_counter_value: *(state.offset(LOCAL_APIC_OFFSET_CURRENT_COUNT) as *mut u32),
                apic_divide_configuration: *(state.offset(LOCAL_APIC_OFFSET_DIVIDER) as *mut u32),
                // Fields with no MMIO counterpart in this layout are zeroed;
                // the vector arrays are filled in by the loop below.
                apic_error_status: 0,
                apic_lvt_cmci: 0,
                apic_isr: [0; 8],
                apic_tmr: [0; 8],
                apic_irr: [0; 8],
            };

            /* vectors ISR TMR IRR */
            // Each 256-bit register is stored as eight u32 words spaced 16
            // bytes apart in the MMIO layout.
            for i in 0..8 {
                hv_state.apic_isr[i as usize] =
                    *(state.offset(LOCAL_APIC_OFFSET_ISR + i * 16) as *mut u32);
                hv_state.apic_tmr[i as usize] =
                    *(state.offset(LOCAL_APIC_OFFSET_TMR + i * 16) as *mut u32);
                hv_state.apic_irr[i as usize] =
                    *(state.offset(LOCAL_APIC_OFFSET_IRR + i * 16) as *mut u32);
            }
        }

        Ok(buffer)
    }
}
679
680// implement `Display` for `XSave`
681impl fmt::Display for XSave {
682    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
683        write!(
684            f,
685            "buffer: {:?}\n data: {:02X?}",
686            self.buffer.as_ptr(),
687            self.buffer,
688        )
689    }
690}
691
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Virtual-processor suspend state: explicit and intercept suspend registers.
pub struct SuspendRegisters {
    pub explicit_register: u64,
    pub intercept_register: u64,
}
699
#[repr(C)]
#[derive(Debug, Default, Copy, Clone, Eq, PartialEq, IntoBytes, FromBytes)]
#[cfg_attr(feature = "with-serde", derive(Deserialize, Serialize))]
/// Miscellaneous per-vCPU registers: hypercall page and pending
/// interruption vector values.
pub struct MiscRegs {
    pub hypercall: u64,
    pub int_vec: u64,
}
707
/// Builds, at compile time, the per-component buffer sizes for each
/// `MSHV_VP_STATE_*` state component; indices not listed stay 0.
const fn initialize_comp_sizes() -> [usize; MSHV_VP_STATE_COUNT as usize] {
    let mut vp_state_comp_size = [0; MSHV_VP_STATE_COUNT as usize];

    vp_state_comp_size[MSHV_VP_STATE_LAPIC as usize] = std::mem::size_of::<LapicState>();
    vp_state_comp_size[MSHV_VP_STATE_XSAVE as usize] = std::mem::size_of::<XSave>();
    vp_state_comp_size[MSHV_VP_STATE_SIMP as usize] = HV_PAGE_SIZE; // Assuming SIMP page is
                                                                    // allocated by the Hypervisor
                                                                    // which is of PAGE_SIZE
    vp_state_comp_size[MSHV_VP_STATE_SIEFP as usize] = HV_PAGE_SIZE; // Assuming SIEFP page is
                                                                     // allocated by the Hypervisor
                                                                     // which is of PAGE_SIZE
    vp_state_comp_size[MSHV_VP_STATE_SYNTHETIC_TIMERS as usize] =
        std::mem::size_of::<hv_synthetic_timers_state>();

    vp_state_comp_size
}
724
// Total size: 13512 bytes
// 1. MSHV_VP_STATE_LAPIC, Size: 1024 bytes;
// 2. MSHV_VP_STATE_XSAVE, Size: 4096 bytes;
// 3. MSHV_VP_STATE_SIMP, Size: 4096 bytes;
// 4. MSHV_VP_STATE_SIEFP, Size: 4096 bytes;
// 5. MSHV_VP_STATE_SYNTHETIC_TIMERS, Size: 200 bytes;
/// Per-component sizes, indexed by the `MSHV_VP_STATE_*` identifiers.
const VP_STATE_COMP_SIZES: [usize; MSHV_VP_STATE_COUNT as usize] = initialize_comp_sizes();

/// Total number of bytes needed to hold all five VP state components
/// stored back to back (the sum of every `VP_STATE_COMP_SIZES` entry).
pub const VP_STATE_COMPONENTS_BUFFER_SIZE: usize = VP_STATE_COMP_SIZES
    [MSHV_VP_STATE_LAPIC as usize]
    + VP_STATE_COMP_SIZES[MSHV_VP_STATE_XSAVE as usize]
    + VP_STATE_COMP_SIZES[MSHV_VP_STATE_SIMP as usize]
    + VP_STATE_COMP_SIZES[MSHV_VP_STATE_SIEFP as usize]
    + VP_STATE_COMP_SIZES[MSHV_VP_STATE_SYNTHETIC_TIMERS as usize];
739
740#[inline(always)]
741fn get_vp_state_comp_start_offset(index: usize) -> usize {
742    VP_STATE_COMP_SIZES[0..index].iter().copied().sum()
743}
744
// Total five components are stored in a single buffer serially
// Components are:
// Local APIC, Xsave, Synthetic Message Page, Synthetic Event Flags Page
// and Synthetic Timers.
#[repr(C)]
#[derive(Copy, Clone, Debug, IntoBytes, FromBytes)]
/// Fixed buffer for VP state components
pub struct AllVpStateComponents {
    // Packed storage; each component lives at the offset returned by
    // `get_vp_state_comp_start_offset` with length `VP_STATE_COMP_SIZES[i]`.
    pub buffer: [u8; VP_STATE_COMPONENTS_BUFFER_SIZE],
}
755
756impl Default for AllVpStateComponents {
757    fn default() -> Self {
758        unsafe { ::std::mem::zeroed() }
759    }
760}
761
impl AllVpStateComponents {
    /// Copies one state component between this packed buffer and `buffer`.
    ///
    /// `index` selects the component (a `MSHV_VP_STATE_*` value, used to
    /// index `VP_STATE_COMP_SIZES`). When `to_buffer` is true the component
    /// is copied from `self` into `buffer`; otherwise `buffer`'s first `len`
    /// bytes are copied into the component's slot in `self`.
    ///
    /// # Panics
    ///
    /// Panics if the selected component is larger than `buffer`.
    pub fn copy_to_or_from_buffer(&mut self, index: usize, buffer: &mut Buffer, to_buffer: bool) {
        let len: usize = VP_STATE_COMP_SIZES[index];

        if len > buffer.size() {
            panic!("Invalid buffer length for state components");
        }

        let start = get_vp_state_comp_start_offset(index);
        let end = start + len;

        if to_buffer {
            // SAFETY: `len <= buffer.size()` was checked above, and the
            // source slice `self.buffer[start..end]` is exactly `len` bytes.
            unsafe { ptr::copy(self.buffer[start..end].as_ptr(), buffer.buf, len) };
        } else {
            // SAFETY: `len <= buffer.size()` was checked above, so reading
            // `len` bytes from `buffer.buf` stays within the source buffer;
            // the destination slice is exactly `len` bytes.
            unsafe { ptr::copy(buffer.buf, self.buffer[start..end].as_mut_ptr(), len) };
        }
    }
}
782
/// Writes `$value` into field `$name` of the innermost bindgen-generated
/// anonymous union reachable through the raw pointer `$this`.
/// The expansion contains its own `unsafe` block for the pointer
/// dereference and union-field write.
#[macro_export]
macro_rules! set_gp_regs_field_ptr {
    ($this: ident, $name: ident, $value: expr) => {
        #[allow(clippy::macro_metavars_in_unsafe)]
        // SAFETY: access union fields
        unsafe {
            (*$this)
                .__bindgen_anon_1
                .__bindgen_anon_1
                .__bindgen_anon_1
                .__bindgen_anon_1
                .$name = $value;
        }
    };
}
798
/// Reads field `$name` from the innermost bindgen-generated anonymous
/// union reachable through the raw pointer `$this`.
/// Unlike `set_gp_regs_field_ptr!`, the expansion is NOT wrapped in
/// `unsafe` — the caller must place the macro invocation inside an
/// `unsafe` block that justifies the pointer dereference and union read.
#[macro_export]
macro_rules! get_gp_regs_field_ptr {
    ($this: ident, $name: ident) => {
        (*$this)
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_1
            .$name
    };
}
810
/// Synthetic interrupt controller (SynIC) MSRs: the sixteen SINT
/// registers plus SCONTROL, SIEFP and SIMP. Added to the supported-MSR
/// list when the partition reports SynIC register access.
pub static MSRS_SYNIC: &[u32; 19] = &[
    HV_X64_MSR_SINT0,
    HV_X64_MSR_SINT1,
    HV_X64_MSR_SINT2,
    HV_X64_MSR_SINT3,
    HV_X64_MSR_SINT4,
    HV_X64_MSR_SINT5,
    HV_X64_MSR_SINT6,
    HV_X64_MSR_SINT7,
    HV_X64_MSR_SINT8,
    HV_X64_MSR_SINT9,
    HV_X64_MSR_SINT10,
    HV_X64_MSR_SINT11,
    HV_X64_MSR_SINT12,
    HV_X64_MSR_SINT13,
    HV_X64_MSR_SINT14,
    HV_X64_MSR_SINT15,
    HV_X64_MSR_SCONTROL,
    HV_X64_MSR_SIEFP,
    HV_X64_MSR_SIMP,
];
832
/// MSRs unconditionally included in the supported-MSR list
/// (see `get_partition_supported_msrs`): core architectural MSRs,
/// the full MTRR set, debug control and the Hyper-V end-of-message MSR.
pub static MSRS_COMMON: &[u32; 42] = &[
    IA32_MSR_TSC,
    IA32_MSR_EFER,
    IA32_MSR_KERNEL_GS_BASE,
    IA32_MSR_APIC_BASE,
    IA32_MSR_PAT,
    IA32_MSR_SYSENTER_CS,
    IA32_MSR_SYSENTER_ESP,
    IA32_MSR_SYSENTER_EIP,
    IA32_MSR_STAR,
    IA32_MSR_LSTAR,
    IA32_MSR_CSTAR,
    IA32_MSR_SFMASK,
    IA32_MSR_MTRR_DEF_TYPE,
    IA32_MSR_MTRR_PHYSBASE0,
    IA32_MSR_MTRR_PHYSMASK0,
    IA32_MSR_MTRR_PHYSBASE1,
    IA32_MSR_MTRR_PHYSMASK1,
    IA32_MSR_MTRR_PHYSBASE2,
    IA32_MSR_MTRR_PHYSMASK2,
    IA32_MSR_MTRR_PHYSBASE3,
    IA32_MSR_MTRR_PHYSMASK3,
    IA32_MSR_MTRR_PHYSBASE4,
    IA32_MSR_MTRR_PHYSMASK4,
    IA32_MSR_MTRR_PHYSBASE5,
    IA32_MSR_MTRR_PHYSMASK5,
    IA32_MSR_MTRR_PHYSBASE6,
    IA32_MSR_MTRR_PHYSMASK6,
    IA32_MSR_MTRR_PHYSBASE7,
    IA32_MSR_MTRR_PHYSMASK7,
    IA32_MSR_MTRR_FIX64K_00000,
    IA32_MSR_MTRR_FIX16K_80000,
    IA32_MSR_MTRR_FIX16K_A0000,
    IA32_MSR_MTRR_FIX4K_C0000,
    IA32_MSR_MTRR_FIX4K_C8000,
    IA32_MSR_MTRR_FIX4K_D0000,
    IA32_MSR_MTRR_FIX4K_D8000,
    IA32_MSR_MTRR_FIX4K_E0000,
    IA32_MSR_MTRR_FIX4K_E8000,
    IA32_MSR_MTRR_FIX4K_F0000,
    IA32_MSR_MTRR_FIX4K_F8000,
    IA32_MSR_DEBUG_CTL,
    HV_X64_MSR_EOM,
];
877
/// CET shadow-stack MSRs, included when the partition reports
/// CET-SS support (see `get_partition_supported_msrs`).
pub static MSRS_CET_SS: &[u32; 8] = &[
    MSR_IA32_U_CET,
    MSR_IA32_S_CET,
    MSR_IA32_SSP,
    MSR_IA32_PL0_SSP,
    MSR_IA32_PL1_SSP,
    MSR_IA32_PL2_SSP,
    MSR_IA32_PL3_SSP,
    MSR_IA32_INTERRUPT_SSP_TABLE_ADDR,
];
888
/// Miscellaneous feature-gated MSRs.
/// NOTE(review): `get_partition_supported_msrs` in this file pushes these
/// four MSRs individually rather than through this slice — presumably this
/// list exists for external callers; confirm before removing.
pub static MSRS_OTHER: &[u32; 4] = &[
    MSR_IA32_REGISTER_U_XSS,
    IA32_MSR_TSC_AUX,
    HV_X64_MSR_REFERENCE_TSC,
    HV_X64_MSR_GUEST_OS_ID,
];
895
#[derive(Default, Copy, Clone)]
/// Bundle of partition feature banks consulted when computing the
/// supported MSR list (see `get_partition_supported_msrs`).
pub struct VpFeatures {
    // Processor feature bits (e.g. CET-SS, RDTSCP support).
    pub proc_features: hv_partition_processor_features,
    // XSAVE feature bits (e.g. supervisor-state support).
    pub xsave_features: hv_partition_processor_xsave_features,
    // Synthetic (Hyper-V) feature bits (SynIC, reference TSC, hypercall regs).
    pub synthetic_features: hv_partition_synthetic_processor_features,
}
902
903/// Return the MSR indexes based on supported CPU features
904pub fn get_partition_supported_msrs(features: &VpFeatures) -> Vec<u32> {
905    let mut msrs: Vec<u32> = Vec::new();
906    msrs.extend_from_slice(MSRS_COMMON);
907
908    // SAFETY: access union fields
909    unsafe {
910        if features.proc_features.__bindgen_anon_1.cet_ss_support() == 1u64 {
911            msrs.extend_from_slice(MSRS_CET_SS);
912        }
913        if features.proc_features.__bindgen_anon_1.rdtscp_support() == 1u64 {
914            msrs.push(IA32_MSR_TSC_AUX);
915        }
916        if features
917            .xsave_features
918            .__bindgen_anon_1
919            .xsave_supervisor_support()
920            == 1u64
921        {
922            msrs.push(MSR_IA32_REGISTER_U_XSS);
923        }
924        if features
925            .synthetic_features
926            .__bindgen_anon_1
927            .access_synic_regs()
928            == 1u64
929        {
930            msrs.extend_from_slice(MSRS_SYNIC);
931        }
932        if features
933            .synthetic_features
934            .__bindgen_anon_1
935            .access_partition_reference_tsc()
936            == 1u64
937        {
938            msrs.push(HV_X64_MSR_REFERENCE_TSC);
939        }
940        if features
941            .synthetic_features
942            .__bindgen_anon_1
943            .access_hypercall_regs()
944            == 1u64
945        {
946            msrs.push(HV_X64_MSR_GUEST_OS_ID);
947        }
948    }
949
950    /* return all the MSRs we currently support */
951    msrs
952}
953
#[cfg(test)]
mod tests {
    use super::*;
    use std::slice::from_raw_parts_mut;

    /// Fills the packed state buffer with a marker byte, copies each
    /// component out into `buffer`, and checks the copied bytes match.
    #[test]
    fn test_all_vp_state_components_copy_to_buffer() {
        let mut states: AllVpStateComponents = AllVpStateComponents::default();
        let mut buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE).unwrap();

        // Marker pattern so a missed copy is detectable.
        for i in 0..VP_STATE_COMPONENTS_BUFFER_SIZE {
            states.buffer[i] = 0xB9;
        }

        //test copy to buffer
        for i in 0..MSHV_VP_STATE_COUNT {
            let len = VP_STATE_COMP_SIZES[i as usize];
            let start = get_vp_state_comp_start_offset(i as usize);
            let end = start + len;
            states.copy_to_or_from_buffer(i as usize, &mut buffer, true);
            // SAFETY: We read less than or equal to buffer length and the slice is valid.
            let buf_arr = unsafe { std::slice::from_raw_parts(buffer.buf, len) };
            assert!(states.buffer[start..end]
                .iter()
                .zip(buf_arr)
                .all(|(a, b)| a == b));
        }
    }

    /// Fills a source buffer with a marker byte, copies each component
    /// into the packed state buffer, and checks the bytes arrived intact.
    #[test]
    fn test_all_vp_state_components_copy_from_buffer() {
        let mut states: AllVpStateComponents = AllVpStateComponents::default();
        let buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE).unwrap();
        let mut copy_buffer = Buffer::new(HV_PAGE_SIZE, HV_PAGE_SIZE).unwrap();

        // SAFETY: Safe because the entire buffer is accessible as bytes,
        // modifying them in the form of a byte slice is valid
        let mut_buf = unsafe { from_raw_parts_mut(buffer.buf, buffer.layout.size()) };
        for itm in mut_buf.iter_mut().take(HV_PAGE_SIZE) {
            *itm = 0xA5;
        }

        // SAFETY: buffer is large enough to hold state data
        unsafe { ptr::copy(mut_buf.as_mut_ptr(), copy_buffer.buf, HV_PAGE_SIZE) };

        //test copy to buffer
        for i in 0..MSHV_VP_STATE_COUNT {
            let len = VP_STATE_COMP_SIZES[i as usize];
            let start = get_vp_state_comp_start_offset(i as usize);
            let end = start + len;

            states.copy_to_or_from_buffer(i as usize, &mut copy_buffer, false);
            let buf_arr = &mut_buf[0..len];
            assert!(states.buffer[start..end]
                .iter()
                .zip(buf_arr)
                .all(|(a, b)| a == b));
        }
    }
}