1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
use alloc::boxed::Box;
use bit_field::BitField;
use crate::{error::HypervisorError, x86::{controlregs::{Cr4, cr4, cr4_write}, msr::{self, rdmsr}}, x86_64::addr::Addrtransfer};
/// A 4 KiB-aligned VMXON region as required by the `VMXON` instruction.
///
/// `#[repr(C, align(4096))]` guarantees the page alignment and field layout
/// the hardware expects; the struct is exactly one page (0x1000 bytes).
#[derive(Debug, Clone, Copy)]
#[repr(C, align(4096))]
pub struct __VMXON {
/// VMCS revision identifier, taken from IA32_VMX_BASIC[30:0]; bit 31 must be 0.
pub revision_id: u32,
/// Pads the region out to a full 4 KiB page; contents are hardware-owned.
pub reserved: [u8; 0x1000 - size_of::<u32>()],
}
impl __VMXON {
    /// Enters VMX root operation on the current CPU core.
    ///
    /// Steps (Intel SDM Vol. 3, "Enabling and Entering VMX Operation"):
    /// 1. write the VMCS revision identifier (IA32_VMX_BASIC[30:0]) into the
    ///    VMXON region, with bit 31 cleared as the architecture requires;
    /// 2. enable VMX via CR4/IA32_FEATURE_CONTROL and adjust CR0/CR4 to the
    ///    fixed-bit MSR constraints ([`Self::enable_vmx_operation`]);
    /// 3. translate the region's virtual address to a physical address with
    ///    `trans` and execute `VMXON` on it.
    ///
    /// `vgp_index` is only used for log output.
    ///
    /// # Errors
    /// Propagates [`HypervisorError`] from [`Self::enable_vmx_operation`] and
    /// from the `vmxon` instruction wrapper.
    pub fn vmxon_init_cpu<T: Addrtransfer>(vgp_index: usize, vmxon_region: &mut Box<__VMXON, impl core::alloc::Allocator>, trans: &mut T) -> Result<(), HypervisorError> {
        // The VMXON region must begin with the VMCS revision identifier;
        // bit 31 of the first dword must be 0.
        vmxon_region.revision_id = rdmsr(msr::IA32_VMX_BASIC) as u32;
        vmxon_region.revision_id.set_bit(31, false);
        log::debug!("rust: vmxon revision_id{}", vmxon_region.revision_id);
        Self::enable_vmx_operation()?;
        log::debug!("rust: cr0 cr4 vmxon executed for cpu core:{}", vgp_index);
        // VMXON takes the *physical* address of the page-aligned region.
        let va = vmxon_region.as_ref() as *const _ as u64;
        let phys_addr = trans.va_into_pa(va);
        log::debug!("rust: addr cast vmxon executed for cpu core:{} va:{:#16x} pa:{:#16x}", vgp_index, va, phys_addr);
        crate::x86::vmx_support::vmxon(phys_addr)?;
        Ok(())
    }

    /// Prepares the processor for VMX operation:
    /// 1. sets CR4.VMXE (bit 13);
    /// 2. ensures IA32_FEATURE_CONTROL permits VMXON outside SMX and is
    ///    locked ([`Self::set_lock_bit`]);
    /// 3. forces CR0/CR4 to satisfy the VMX fixed-bit MSRs.
    ///
    /// # Errors
    /// Returns [`HypervisorError::VMXBIOSLock`] if the BIOS locked the
    /// feature-control MSR with VMX disabled.
    pub fn enable_vmx_operation() -> Result<(), HypervisorError> {
        // CR4.VMXE — must be 1 before VMXON executes.
        const CR4_VMX_ENABLE_BIT: usize = 13;
        let mut raw = cr4().bits();
        raw.set_bit(CR4_VMX_ENABLE_BIT, true);
        cr4_write(Cr4::from_bits_truncate(raw));
        /* Intel® 64 and IA-32 Architectures Software Developer's Manual: 24.7 ENABLING AND ENTERING VMX OPERATION */
        Self::set_lock_bit()?;
        log::debug!("rust: Setting Lock Bit set via IA32_FEATURE_CONTROL");
        /* Intel® 64 and IA-32 Architectures Software Developer's Manual: 24.8 RESTRICTIONS ON VMX OPERATION */
        set_cr4_cr0();
        log::debug!("rust: Adjusting Control Registers");
        Ok(())
    }

    /// Ensures IA32_FEATURE_CONTROL allows VMXON outside SMX operation.
    ///
    /// If the lock bit is clear, this sets both the lock bit and the
    /// "VMXON outside SMX" bit (the MSR is writable until locked). If the
    /// BIOS already locked the MSR *without* enabling VMXON outside SMX,
    /// VMX cannot be enabled until reboot.
    ///
    /// # Errors
    /// [`HypervisorError::VMXBIOSLock`] when the MSR is locked with VMX
    /// disabled by firmware.
    pub fn set_lock_bit() -> Result<(), HypervisorError> {
        const VMX_LOCK_BIT: u64 = 1 << 0;
        const VMXON_OUTSIDE_SMX: u64 = 1 << 2;
        let ia32_feature_control = msr::rdmsr(msr::IA32_FEATURE_CONTROL);
        if (ia32_feature_control & VMX_LOCK_BIT) == 0 {
            // MSR not yet locked: enable VMXON outside SMX and lock it ourselves.
            unsafe {
                msr::wrmsr(
                    msr::IA32_FEATURE_CONTROL,
                    VMXON_OUTSIDE_SMX | VMX_LOCK_BIT | ia32_feature_control,
                )
            };
        } else if (ia32_feature_control & VMXON_OUTSIDE_SMX) == 0 {
            // Locked by firmware with VMX disabled — nothing we can do.
            return Err(HypervisorError::VMXBIOSLock);
        }
        Ok(())
    }
}
/// Modifies CR0 to set and clear mandatory bits for VMX operation.
///
/// Per the VMX fixed-bit MSRs: every bit set in IA32_VMX_CR0_FIXED0 must be
/// 1 in CR0, and every bit clear in IA32_VMX_CR0_FIXED1 must be 0.
pub fn set_cr0_bits() {
    let fixed0 = msr::rdmsr(msr::IA32_VMX_CR0_FIXED0);
    let fixed1 = msr::rdmsr(msr::IA32_VMX_CR0_FIXED1);
    let current = crate::x86::controlregs::cr0();
    // OR in the must-be-one bits, then AND with the allowed-to-be-one mask.
    let adjusted = (current
        | crate::x86::controlregs::Cr0::from_bits_truncate(fixed0 as usize))
        & crate::x86::controlregs::Cr0::from_bits_truncate(fixed1 as usize);
    crate::x86::controlregs::cr0_write(adjusted);
}
/// Modifies CR4 to set and clear mandatory bits.
pub fn set_cr4_bits() {
let ia32_vmx_cr4_fixed0 = msr::rdmsr(msr::IA32_VMX_CR4_FIXED0);
let ia32_vmx_cr4_fixed1 = msr::rdmsr(msr::IA32_VMX_CR4_FIXED1);
let mut cr4 = crate::x86::controlregs::cr4();
cr4 |= crate::x86::controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed0 as usize);
cr4 &= crate::x86::controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed1 as usize);
crate::x86::controlregs::cr4_write(cr4);
}
/// Adjusts CR0 then CR4 to satisfy the VMX fixed-bit requirements
/// (IA32_VMX_CR0_FIXED0/1 and IA32_VMX_CR4_FIXED0/1 MSRs).
/// Must run after CR4.VMXE is set and before executing VMXON.
pub fn set_cr4_cr0(){
set_cr0_bits();
set_cr4_bits();
}