//! Architecture-independent virtual CPU (vcpu) abstraction, plus tracking of the
//! vcpu currently running on each physical CPU.

use alloc::format;
use core::cell::{RefCell, UnsafeCell};

use axaddrspace::{GuestPhysAddr, HostPhysAddr};
use axerrno::{AxResult, ax_err};
use axvisor_api::vmm::{VCpuId, VMId};

use super::{AxArchVCpu, AxVCpuExitReason};
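
/// Immutable, architecture-independent per-vcpu data, fixed at creation time:
/// ids and physical-CPU affinity.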
struct AxVCpuInnerConst {
vm_id: VMId,
vcpu_id: VCpuId,
favor_phys_cpu: usize,
phys_cpu_set: Option<usize>,
}
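
/// The state of a virtual CPU.
///
/// The expected transitions are
/// `Created` --`setup`--> `Free` --`bind`--> `Ready` --`run`--> `Running` --> `Ready`,
/// and `Ready` --`unbind`--> `Free`. A failed transition poisons the vcpu to `Invalid`.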
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VCpuState {
    /// The vcpu is unusable; a failed state transition poisons it into this state.
    Invalid = 0,
    /// The vcpu has been created but not yet set up.
    Created = 1,
    /// The vcpu is set up and can be bound to a physical CPU.
    Free = 2,
    /// The vcpu is bound to a physical CPU and ready to run.
    Ready = 3,
    /// The vcpu is currently running.
    Running = 4,
    /// The vcpu is blocked and not runnable for now.
    Blocked = 5,
}
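
/// Mutable, architecture-independent per-vcpu state.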
pub struct AxVCpuInnerMut {
state: VCpuState,
}
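
/// A virtual CPU.
///
/// `AxVCpu` combines the architecture-independent bookkeeping (ids, CPU
/// affinity, state machine) with an architecture-specific vcpu `A`
/// implementing [`AxArchVCpu`].
///
/// A sketch of the intended lifecycle, assuming a hypothetical
/// `MyArchVCpu: AxArchVCpu` implementation and already-prepared `entry` and
/// `ept_root` addresses:
///
/// ```ignore
/// let vcpu = AxVCpu::<MyArchVCpu>::new(vm_id, vcpu_id, 0, None, create_cfg)?;
/// vcpu.setup(entry, ept_root, setup_cfg)?; // Created -> Free
/// vcpu.bind()?;                            // Free    -> Ready (on this physical CPU)
/// let exit = vcpu.run()?;                  // Ready   -> Running -> Ready
/// vcpu.unbind()?;                          // Ready   -> Free
/// ```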
pub struct AxVCpu<A: AxArchVCpu> {
    /// Data fixed at creation time.
    inner_const: AxVCpuInnerConst,
    /// Mutable state, with run-time borrow checking via `RefCell`.
    inner_mut: RefCell<AxVCpuInnerMut>,
    /// The architecture-specific vcpu, kept in an `UnsafeCell` so it can be mutated
    /// through `&self` on the physical CPU it is bound to (see [`AxVCpu::get_arch_vcpu`]).
    arch_vcpu: UnsafeCell<A>,
}
impl<A: AxArchVCpu> AxVCpu<A> {
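    /// Creates a new vcpu in the [`VCpuState::Created`] state.
    ///
    /// `favor_phys_cpu` is the preferred physical CPU, and `phys_cpu_set`
    /// optionally restricts which physical CPUs the vcpu may run on
    /// (`None` means no restriction).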
pub fn new(
vm_id: VMId,
vcpu_id: VCpuId,
favor_phys_cpu: usize,
phys_cpu_set: Option<usize>,
arch_config: A::CreateConfig,
) -> AxResult<Self> {
Ok(Self {
inner_const: AxVCpuInnerConst {
vm_id,
vcpu_id,
favor_phys_cpu,
phys_cpu_set,
},
inner_mut: RefCell::new(AxVCpuInnerMut {
state: VCpuState::Created,
}),
arch_vcpu: UnsafeCell::new(A::new(vm_id, vcpu_id, arch_config)?),
})
}
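
    /// Sets up the vcpu (`Created` -> `Free`): configures the guest entry point,
    /// the EPT (stage-2 page-table) root, and the architecture-specific setup
    /// configuration.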
pub fn setup(
&self,
entry: GuestPhysAddr,
ept_root: HostPhysAddr,
arch_config: A::SetupConfig,
) -> AxResult {
self.manipulate_arch_vcpu(VCpuState::Created, VCpuState::Free, |arch_vcpu| {
arch_vcpu.set_entry(entry)?;
arch_vcpu.set_ept_root(ept_root)?;
arch_vcpu.setup(arch_config)?;
Ok(())
})
}
    /// Returns the id of this vcpu.
    pub const fn id(&self) -> VCpuId {
        self.inner_const.vcpu_id
    }

    /// Returns the id of the VM this vcpu belongs to.
    pub const fn vm_id(&self) -> VMId {
        self.inner_const.vm_id
    }

    /// Returns the preferred physical CPU of this vcpu.
    pub const fn favor_phys_cpu(&self) -> usize {
        self.inner_const.favor_phys_cpu
    }

    /// Returns the set of physical CPUs this vcpu is allowed to run on,
    /// or `None` if it is not restricted.
    pub const fn phys_cpu_set(&self) -> Option<usize> {
        self.inner_const.phys_cpu_set
    }

    /// Returns whether this vcpu is the bootstrap processor (vcpu id 0).
    pub const fn is_bsp(&self) -> bool {
        self.inner_const.vcpu_id == 0
    }

    /// Returns the current state of this vcpu.
    pub fn state(&self) -> VCpuState {
        self.inner_mut.borrow().state
    }

    /// Sets the state of this vcpu directly, bypassing the checks performed by
    /// [`AxVCpu::with_state_transition`].
    ///
    /// # Safety
    ///
    /// The caller must ensure the new state is consistent with the actual
    /// condition of the vcpu; otherwise later state transitions may misbehave.
    pub unsafe fn set_state(&self, state: VCpuState) {
        self.inner_mut.borrow_mut().state = state;
    }
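
    /// Runs `f` while transitioning the vcpu from state `from` to state `to`.
    ///
    /// If the current state is not `from`, or if `f` returns an error, the vcpu
    /// is poisoned to [`VCpuState::Invalid`] and an error is returned; otherwise
    /// the state becomes `to` and the result of `f` is returned.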
pub fn with_state_transition<F, T>(&self, from: VCpuState, to: VCpuState, f: F) -> AxResult<T>
where
F: FnOnce() -> AxResult<T>,
{
        let mut inner_mut = self.inner_mut.borrow_mut();
        if inner_mut.state != from {
            // Record the offending state before poisoning it, so the error
            // message reports what the state actually was.
            let actual = inner_mut.state;
            inner_mut.state = VCpuState::Invalid;
            ax_err!(
                BadState,
                format!("VCpu state is not {:?}, but {:?}", from, actual)
            )
} else {
let result = f();
inner_mut.state = if result.is_err() {
VCpuState::Invalid
} else {
to
};
result
}
}
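
    /// Runs `f` with this vcpu registered as the current vcpu of this physical
    /// CPU, clearing the registration afterwards.
    ///
    /// # Panics
    ///
    /// Panics if another vcpu is already registered on this physical CPU, since
    /// nested vcpu operations are not allowed.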
pub fn with_current_cpu_set<F, T>(&self, f: F) -> T
where
F: FnOnce() -> T,
{
if get_current_vcpu::<A>().is_some() {
panic!("Nested vcpu operation is not allowed!");
} else {
unsafe {
set_current_vcpu(self);
}
let result = f();
unsafe {
clear_current_vcpu::<A>();
}
result
}
}
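
    /// Runs `f` on the architecture-specific vcpu, with this vcpu registered as
    /// the current vcpu, while performing the state transition `from` -> `to`.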
pub fn manipulate_arch_vcpu<F, T>(&self, from: VCpuState, to: VCpuState, f: F) -> AxResult<T>
where
F: FnOnce(&mut A) -> AxResult<T>,
{
self.with_state_transition(from, to, || {
self.with_current_cpu_set(|| f(self.get_arch_vcpu()))
})
}
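
    /// Transitions the vcpu from state `from` to state `to` without touching
    /// the architecture-specific vcpu.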
pub fn transition_state(&self, from: VCpuState, to: VCpuState) -> AxResult {
self.with_state_transition(from, to, || Ok(()))
}
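
    /// Returns a mutable reference to the architecture-specific vcpu.
    ///
    /// This relies on `AxVCpu` not being `Sync` (it contains `RefCell` and
    /// `UnsafeCell`) and on the arch vcpu being touched only by the physical CPU
    /// it is bound to; callers must not create overlapping mutable references.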
#[allow(clippy::mut_from_ref)]
pub fn get_arch_vcpu(&self) -> &mut A {
unsafe { &mut *self.arch_vcpu.get() }
}
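
    /// Runs the vcpu until it exits, returning the exit reason.
    ///
    /// The vcpu must be in the `Ready` state; it is `Running` while inside the
    /// architecture-specific `run` and returns to `Ready` afterwards.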
pub fn run(&self) -> AxResult<AxVCpuExitReason> {
self.transition_state(VCpuState::Ready, VCpuState::Running)?;
self.manipulate_arch_vcpu(VCpuState::Running, VCpuState::Ready, |arch_vcpu| {
arch_vcpu.run()
})
}
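
    /// Binds the vcpu to the current physical CPU (`Free` -> `Ready`).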
pub fn bind(&self) -> AxResult {
self.manipulate_arch_vcpu(VCpuState::Free, VCpuState::Ready, |arch_vcpu| {
arch_vcpu.bind()
})
}
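
    /// Unbinds the vcpu from the current physical CPU (`Ready` -> `Free`).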
pub fn unbind(&self) -> AxResult {
self.manipulate_arch_vcpu(VCpuState::Ready, VCpuState::Free, |arch_vcpu| {
arch_vcpu.unbind()
})
}
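
    /// Sets the guest entry point of the vcpu.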
pub fn set_entry(&self, entry: GuestPhysAddr) -> AxResult {
self.get_arch_vcpu().set_entry(entry)
}
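
    /// Sets a general-purpose register of the vcpu to `val`.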
pub fn set_gpr(&self, reg: usize, val: usize) {
self.get_arch_vcpu().set_gpr(reg, val);
}
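
    /// Injects an interrupt with the given `vector` into the vcpu.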
pub fn inject_interrupt(&self, vector: usize) -> AxResult {
self.get_arch_vcpu().inject_interrupt(vector)
}
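
    /// Sets the return value of the vcpu (which register this maps to is
    /// architecture-defined).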
pub fn set_return_value(&self, val: usize) {
self.get_arch_vcpu().set_return_value(val);
}
}
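
/// The vcpu currently registered on this physical CPU, stored as a type-erased
/// raw pointer because `AxVCpu<A>` is generic. `None` means no vcpu is registered.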
#[percpu::def_percpu]
static mut CURRENT_VCPU: Option<*mut u8> = None;
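/// Gets the vcpu currently registered on this physical CPU, if any.
///
/// The type parameter `A` must match the one used when the vcpu was registered
/// with [`set_current_vcpu`], since the stored pointer is type-erased.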
#[allow(static_mut_refs)]
pub fn get_current_vcpu<'a, A: AxArchVCpu>() -> Option<&'a AxVCpu<A>> {
unsafe {
CURRENT_VCPU
.current_ref_raw()
.as_ref()
.copied()
.and_then(|p| (p as *const AxVCpu<A>).as_ref())
}
}
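/// Gets a mutable reference to the vcpu currently registered on this physical
/// CPU, if any. The same type-parameter caveat as [`get_current_vcpu`] applies.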
#[allow(static_mut_refs)]
pub fn get_current_vcpu_mut<'a, A: AxArchVCpu>() -> Option<&'a mut AxVCpu<A>> {
unsafe {
CURRENT_VCPU
.current_ref_mut_raw()
.as_mut()
.copied()
.and_then(|p| (p as *mut AxVCpu<A>).as_mut())
}
}
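/// Registers `vcpu` as the current vcpu of this physical CPU.
///
/// # Safety
///
/// The caller must ensure that `vcpu` outlives its registration (only a raw
/// pointer is stored) and that registrations on the same physical CPU are not
/// nested or performed concurrently.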
#[allow(static_mut_refs)]
pub unsafe fn set_current_vcpu<A: AxArchVCpu>(vcpu: &AxVCpu<A>) {
unsafe {
CURRENT_VCPU
.current_ref_mut_raw()
.replace(vcpu as *const _ as *mut u8);
}
}
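/// Clears the current vcpu of this physical CPU.
///
/// # Safety
///
/// The caller must ensure that no other code still relies on the previously
/// registered vcpu being current.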
#[allow(static_mut_refs)]
pub unsafe fn clear_current_vcpu<A: AxArchVCpu>() {
unsafe {
CURRENT_VCPU.current_ref_mut_raw().take();
}
}