use aarch64_cpu::registers::*;
use ax_errno::AxResult;
use axaddrspace::{GuestPhysAddr, HostPhysAddr, device::SysRegAddr};
use axvcpu::{AxArchVCpu, AxVCpuExitReason};
use crate::{
TrapFrame,
context_frame::GuestSystemRegisters,
exception::{TrapKind, handle_exception_sync},
exception_utils::exception_class_value,
};
/// Per-CPU slot holding the host's `SP_EL0` while a guest is running on this CPU.
#[ax_percpu::def_percpu]
static HOST_SP_EL0: u64 = 0;

/// Save the host's current `SP_EL0` into this CPU's [`HOST_SP_EL0`] slot.
///
/// # Safety
/// The caller must ensure the current task cannot migrate between this call
/// and the matching [`restore_host_sp_el0`], so both touch the same CPU's slot.
unsafe fn save_host_sp_el0() {
    // SAFETY: caller guarantees we stay pinned to the current CPU.
    unsafe { HOST_SP_EL0.write_current_raw(SP_EL0.get()) }
}

/// Restore the host's `SP_EL0` from this CPU's [`HOST_SP_EL0`] slot.
///
/// # Safety
/// Must run on the same physical CPU that last called [`save_host_sp_el0`].
unsafe fn restore_host_sp_el0() {
    SP_EL0.set(unsafe { HOST_SP_EL0.read_current_raw() });
}
/// Complete register state of a virtual CPU: the trap-frame context plus the
/// guest's system registers.
#[repr(C)]
#[derive(Clone, Debug, Copy, Default)]
pub struct VmCpuRegisters {
    /// General-purpose/exception context captured by the trap entry path.
    pub trap_context_regs: TrapFrame,
    /// Guest EL1/EL2 system-register state.
    pub vm_system_regs: GuestSystemRegisters,
}
/// AArch64 implementation of [`axvcpu::AxArchVCpu`].
///
/// Layout matters: `#[repr(C)]` with `ctx` first and `host_stack_top`
/// immediately after is relied on by `run_guest`, which stores the host SP at
/// byte offset `size_of::<TrapFrame>()` from `&mut self` (x0). Do not reorder
/// these fields.
#[repr(C)]
#[derive(Debug)]
pub struct Aarch64VCpu {
    // Guest trap frame (GPRs, SPSR, ELR, ...). Must stay the first field.
    ctx: TrapFrame,
    // Host stack pointer saved on guest entry. Must directly follow `ctx`.
    host_stack_top: u64,
    // Guest system registers restored onto hardware before guest entry.
    guest_system_regs: GuestSystemRegisters,
    // MPIDR value exposed to the guest (mirrored into `VMPIDR_EL2`).
    mpidr: u64,
}
/// Per-vCPU configuration consumed by `AxArchVCpu::new`.
#[derive(Clone, Debug, Default)]
pub struct Aarch64VCpuCreateConfig {
    /// `MPIDR_EL1` value for this vCPU; becomes the guest-visible `VMPIDR_EL2`.
    pub mpidr_el1: u64,
    /// Guest-physical address of the device tree blob, handed to the guest as
    /// its boot argument (via `TrapFrame::set_argument`).
    pub dtb_addr: usize,
}
/// Per-vCPU configuration consumed by `AxArchVCpu::setup`.
#[derive(Clone, Debug, Default)]
pub struct Aarch64VCpuSetupConfig {
    /// If true, physical IRQ/FIQ are not routed to EL2 (no virtual-interrupt
    /// bits set in `HCR_EL2`); the guest takes interrupts directly.
    pub passthrough_interrupt: bool,
    /// If true, the guest gets direct access to the EL1 physical timer/counter
    /// (`CNTHCTL_EL2.EL1PCEN/EL1PCTEN` set); otherwise those accesses trap.
    pub passthrough_timer: bool,
}
impl axvcpu::AxArchVCpu for Aarch64VCpu {
    type CreateConfig = Aarch64VCpuCreateConfig;
    type SetupConfig = Aarch64VCpuSetupConfig;

    /// Create a new vCPU with a default trap frame; the DTB address is placed
    /// in the guest's boot-argument register.
    fn new(_vm_id: usize, _vcpu_id: usize, config: Self::CreateConfig) -> AxResult<Self> {
        let mut ctx = TrapFrame::default();
        ctx.set_argument(config.dtb_addr);
        Ok(Self {
            ctx,
            host_stack_top: 0,
            guest_system_regs: GuestSystemRegisters::default(),
            mpidr: config.mpidr_el1,
        })
    }

    /// One-time setup: initialize guest PSTATE and EL2 virtualization context.
    fn setup(&mut self, config: Self::SetupConfig) -> AxResult {
        self.init_hv(config);
        Ok(())
    }

    /// Set the guest entry point (the ELR the first guest ERET will use).
    fn set_entry(&mut self, entry: GuestPhysAddr) -> AxResult {
        debug!("set vcpu entry:{entry:?}");
        self.set_elr(entry.as_usize());
        Ok(())
    }

    /// Set the stage-2 page-table root; written to `VTTBR_EL2` on guest entry.
    fn set_ept_root(&mut self, ept_root: HostPhysAddr) -> AxResult {
        debug!("set vcpu ept root:{ept_root:#x}");
        self.guest_system_regs.vttbr_el2 = ept_root.as_usize() as u64;
        Ok(())
    }

    /// Run the guest until the next VM exit and translate the exit into an
    /// [`AxVCpuExitReason`].
    fn run(&mut self) -> AxResult<AxVCpuExitReason> {
        // Mask host IRQs (DAIF.I) while host state is stashed away.
        unsafe {
            core::arch::asm!("msr daifset, #2");
        }
        // Order matters: save host SP_EL0 first, then load the guest's system
        // registers, then drop into the guest. (fixed typo: `exit_reson`)
        let exit_reason = unsafe {
            save_host_sp_el0();
            self.restore_vm_system_regs();
            self.run_guest()
        };
        // Back in host context: unmask IRQs.
        unsafe {
            core::arch::asm!("msr daifclr, #2");
        }
        // The exit path returns a raw trap-kind discriminant; any other value
        // is a bug in the exception vectors, hence `expect`.
        let trap_kind = TrapKind::try_from(exit_reason as u8).expect("Invalid TrapKind");
        self.vmexit_handler(trap_kind)
    }

    fn bind(&mut self) -> AxResult {
        Ok(())
    }

    fn unbind(&mut self) -> AxResult {
        Ok(())
    }

    /// Write general-purpose register `idx` in the saved guest context.
    fn set_gpr(&mut self, idx: usize, val: usize) {
        self.ctx.set_gpr(idx, val);
    }

    /// Inject a virtual interrupt into the guest.
    /// NOTE: `vector` is truncated to `u8` by the underlying API.
    fn inject_interrupt(&mut self, vector: usize) -> AxResult {
        axvisor_api::arch::hardware_inject_virtual_interrupt(vector as u8);
        Ok(())
    }

    /// Set the value the guest observes as a call's return value (reuses the
    /// argument slot — x0 by AArch64 convention; see `TrapFrame::set_argument`).
    fn set_return_value(&mut self, val: usize) {
        self.ctx.set_argument(val);
    }
}
impl Aarch64VCpu {
    /// Initialize the guest's initial PSTATE and the EL2 virtualization context.
    fn init_hv(&mut self, config: Aarch64VCpuSetupConfig) {
        // Guest starts at EL1h with IRQ/FIQ/SError/Debug exceptions masked.
        self.ctx.spsr = (SPSR_EL1::M::EL1h
            + SPSR_EL1::I::Masked
            + SPSR_EL1::F::Masked
            + SPSR_EL1::A::Masked
            + SPSR_EL1::D::Masked)
            .value;
        self.init_vm_context(config);
    }

    /// Populate the guest system registers: timer, SCTLR, stage-2 translation
    /// control, hypervisor configuration and the guest-visible MPIDR.
    fn init_vm_context(&mut self, config: Aarch64VCpuSetupConfig) {
        // Snapshot the physical counter; using it as CNTVOFF_EL2 makes the
        // guest's virtual counter start from (approximately) zero.
        let cntpct: u64;
        unsafe { core::arch::asm!("mrs {0}, CNTPCT_EL0", out(reg) cntpct) };
        self.guest_system_regs.cntvoff_el2 = cntpct;
        self.guest_system_regs.cntkctl_el1 = 0;
        // EL1PCEN/EL1PCTEN set: guest accesses the physical timer/counter
        // directly; cleared: those accesses trap to EL2 for emulation.
        self.guest_system_regs.cnthctl_el2 = if config.passthrough_timer {
            (CNTHCTL_EL2::EL1PCEN::SET + CNTHCTL_EL2::EL1PCTEN::SET).into()
        } else {
            (CNTHCTL_EL2::EL1PCEN::CLEAR + CNTHCTL_EL2::EL1PCTEN::CLEAR).into()
        };
        // NOTE(review): presumably the architectural SCTLR_EL1 reset-style
        // value (MMU/caches off) — confirm bit meanings before changing.
        self.guest_system_regs.sctlr_el1 = 0x30C50830;
        self.guest_system_regs.pmcr_el0 = 0;
        // Stage-2 control: size/start-level/PS probed from hardware, plus a
        // 4 KiB granule with inner-shareable write-back attributes.
        self.guest_system_regs.vtcr_el2 = probe_vtcr_support()
            + (VTCR_EL2::TG0::Granule4KB
                + VTCR_EL2::SH0::Inner
                + VTCR_EL2::ORGN0::NormalWBRAWA
                + VTCR_EL2::IRGN0::NormalWBRAWA)
                .value;
        // Stage-2 translation on, EL1 SMC traps to EL2, guest EL1 is AArch64.
        let mut hcr_el2 =
            HCR_EL2::VM::Enable + HCR_EL2::TSC::EnableTrapEl1SmcToEl2 + HCR_EL2::RW::EL1IsAarch64;
        if !config.passthrough_interrupt {
            // Route physical IRQ/FIQ to EL2; the guest sees virtual ones.
            hcr_el2 += HCR_EL2::IMO::EnableVirtualIRQ + HCR_EL2::FMO::EnableVirtualFIQ;
        }
        self.guest_system_regs.hcr_el2 = hcr_el2.into();
        // VMPIDR_EL2: bit 31 is RES1; the low bits carry the configured MPIDR.
        let mut vmpidr = 1 << 31;
        vmpidr |= self.mpidr;
        self.guest_system_regs.vmpidr_el2 = vmpidr;
    }

    /// Set the guest's exception return address (its entry point / resume PC).
    fn set_elr(&mut self, elr: usize) {
        self.ctx.set_exception_pc(elr);
    }

    /// Read general-purpose register `idx` from the saved guest context.
    ///
    /// Fixed: this previously discarded the register value and returned `()`.
    #[allow(unused)]
    fn get_gpr(&self, idx: usize) -> usize {
        self.ctx.gpr(idx)
    }
}
impl Aarch64VCpu {
    /// Enter the guest.
    ///
    /// Pushes the host's registers on the stack, stores the resulting SP into
    /// `self.host_stack_top` (at byte offset `size_of::<TrapFrame>()` from
    /// `&mut self`, which arrives in x0 — see the `#[repr(C)]` layout of
    /// [`Aarch64VCpu`]), then branches to `context_vm_entry`. The function
    /// "returns" only via the exit path, which yields a raw trap-kind value.
    #[unsafe(naked)]
    unsafe extern "C" fn run_guest(&mut self) -> usize {
        core::arch::naked_asm!(
            save_regs_to_stack!(),
            // Remember the host SP so the exit path can unwind back to us.
            "mov x9, sp",
            "add x0, x0, {host_stack_top_offset}",
            "str x9, [x0]",
            "b context_vm_entry",
            // Unreachable guard: the unconditional branch above never falls
            // through; reaching here would be a bug.
            "b {run_guest_panic}",
            host_stack_top_offset = const core::mem::size_of::<TrapFrame>(),
            run_guest_panic = sym Self::run_guest_panic,
        );
    }

    /// Target of the unreachable guard branch in [`run_guest`].
    unsafe fn run_guest_panic() -> ! {
        panic!("run_guest_panic");
    }

    /// Load the guest's system registers onto the hardware, then invalidate
    /// the instruction cache and all EL1/EL2 TLB entries so stale host
    /// translations cannot leak into the guest.
    unsafe fn restore_vm_system_regs(&mut self) {
        unsafe {
            // Disable all EL1->EL2 trapping of FP/SIMD and friends.
            core::arch::asm!(
                "
mov x3, xzr // Trap nothing from EL1 to El2.
msr cptr_el2, x3"
            );
            self.guest_system_regs.restore();
            core::arch::asm!(
                "
ic iallu
tlbi alle2
tlbi alle1 // Flush tlb
dsb nsh
isb"
            );
        }
    }

    /// Handle a VM exit: save guest state, restore the host's `SP_EL0`, then
    /// translate the trap into an [`AxVCpuExitReason`]. `SysRegRead`/`Write`
    /// exits are first offered to the built-in handler (currently SGI/IPI
    /// emulation); unconsumed ones propagate to the caller.
    fn vmexit_handler(&mut self, exit_reason: TrapKind) -> AxResult<AxVCpuExitReason> {
        trace!(
            "Aarch64VCpu vmexit_handler() esr:{:#x} ctx:{:#x?}",
            exception_class_value(),
            self.ctx
        );
        unsafe {
            // Capture the guest's system registers, mirror its SP_EL0 into the
            // trap frame, then put the host's SP_EL0 back.
            self.guest_system_regs.store();
            self.ctx.sp_el0 = self.guest_system_regs.sp_el0;
            restore_host_sp_el0();
        }
        let result = match exit_reason {
            TrapKind::Synchronous => handle_exception_sync(&mut self.ctx),
            TrapKind::Irq => Ok(AxVCpuExitReason::ExternalInterrupt {
                vector: axvisor_api::arch::fetch_irq(),
            }),
            _ => panic!("Unhandled exception {:?}", exit_reason),
        };
        match result {
            Ok(AxVCpuExitReason::SysRegRead { addr, reg }) => {
                if let Some(exit_reason) =
                    self.builtin_sysreg_access_handler(addr, false, 0, reg)?
                {
                    return Ok(exit_reason);
                }
                result
            }
            Ok(AxVCpuExitReason::SysRegWrite { addr, value }) => {
                if let Some(exit_reason) =
                    self.builtin_sysreg_access_handler(addr, true, value, 0)?
                {
                    return Ok(exit_reason);
                }
                result
            }
            r => r,
        }
    }

    /// Built-in handling of trapped system-register accesses.
    ///
    /// Returns `Ok(Some(_))` if the access was consumed here, `Ok(None)` if
    /// the caller should forward it. Currently only `ICC_SGI1R_EL1` (GICv3
    /// SGI generation) is handled, turning writes into `SendIPI` exits.
    fn builtin_sysreg_access_handler(
        &mut self,
        addr: SysRegAddr,
        write: bool,
        value: u64,
        reg: usize,
    ) -> AxResult<Option<AxVCpuExitReason>> {
        // Encoded address of ICC_SGI1R_EL1 in this crate's SysRegAddr scheme.
        const SYSREG_ICC_SGI1R_EL1: SysRegAddr = SysRegAddr::new(0x3A_3016);
        match (addr, write) {
            (SYSREG_ICC_SGI1R_EL1, true) => {
                debug!("arm_vcpu ICC_SGI1R_EL1 write: {value:#x}");
                // Field layout per the GICv3 spec: INTID [27:24], IRM [40],
                // Aff3 [55:48], Aff2 [39:32], Aff1 [23:16], TargetList [15:0].
                let intid = (value >> 24) & 0b1111;
                let irm = ((value >> 40) & 0b1) != 0;
                if irm {
                    // IRM set: broadcast to all PEs except the requester.
                    debug!("arm_vcpu ICC_SGI1R_EL1 write: irm == 1, send to all except self");
                    return Ok(Some(AxVCpuExitReason::SendIPI {
                        target_cpu: 0,
                        target_cpu_aux: 0,
                        send_to_all: true,
                        send_to_self: false,
                        vector: intid,
                    }));
                }
                let aff3 = (value >> 48) & 0xff;
                let aff2 = (value >> 32) & 0xff;
                let aff1 = (value >> 16) & 0xff;
                let target_list = value & 0xffff;
                debug!(
                    "arm_vcpu ICC_SGI1R_EL1 write: aff3:{aff3:#x} aff2:{aff2:#x} aff1:{aff1:#x} \
                intid:{intid:#x} target_list:{target_list:#x}"
                );
                Ok(Some(AxVCpuExitReason::SendIPI {
                    // Pack Aff3/Aff2/Aff1 into the target id; the target list
                    // (one bit per Aff0 PE) travels in the aux field.
                    target_cpu: (aff3 << 24) | (aff2 << 16) | (aff1 << 8),
                    target_cpu_aux: target_list,
                    send_to_all: false,
                    send_to_self: false,
                    vector: intid,
                }))
            }
            (SYSREG_ICC_SGI1R_EL1, false) => {
                // Reads of the SGI register yield zero.
                self.set_gpr(reg, 0);
                Ok(Some(AxVCpuExitReason::Nothing))
            }
            _ => {
                // Not a register we emulate here; let the caller decide.
                Ok(None)
            }
        }
    }
}
/// Physical address width supported by this CPU, in bits, decoded from
/// `ID_AA64MMFR0_EL1.PARange`. Unknown/unrecognized encodings fall back to 32.
pub(crate) fn pa_bits() -> usize {
    match ID_AA64MMFR0_EL1.read_as_enum(ID_AA64MMFR0_EL1::PARange) {
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_32) => 32,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_36) => 36,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_40) => 40,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_42) => 42,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_44) => 44,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_48) => 48,
        Some(ID_AA64MMFR0_EL1::PARange::Value::Bits_52) => 52,
        // Conservative default for reserved/future encodings.
        _ => 32,
    }
}
/// Number of stage-2 page-table levels implied by the currently programmed
/// `VTCR_EL2.T0SZ` (level break-points presumably assume a 4 KiB granule).
#[allow(dead_code)]
pub(crate) fn current_gpt_level() -> usize {
    let t0sz = VTCR_EL2.read(VTCR_EL2::T0SZ) as usize;
    if (16..=25).contains(&t0sz) {
        4
    } else if (26..=35).contains(&t0sz) {
        3
    } else {
        2
    }
}
/// Maximum number of stage-2 translation levels needed for `pa_bits` of
/// physical address space: 4 levels for 44 bits and above, otherwise 3.
pub(crate) fn max_gpt_level(pa_bits: usize) -> usize {
    if pa_bits >= 44 { 4 } else { 3 }
}
/// Build the hardware-dependent part of the `VTCR_EL2` value: starting level
/// (SL0), input address size (T0SZ) and output physical address size (PS),
/// derived from the CPU's supported PA range.
fn probe_vtcr_support() -> u64 {
    let pa_bits = pa_bits();
    // 4 levels -> 48-bit IPA space (T0SZ = 16); 3 levels -> 39-bit (T0SZ = 25).
    let mut val = match max_gpt_level(pa_bits) {
        4 => VTCR_EL2::SL0::Granule4KBLevel0 + VTCR_EL2::T0SZ.val(64 - 48),
        _ => VTCR_EL2::SL0::Granule4KBLevel1 + VTCR_EL2::T0SZ.val(64 - 39),
    };
    // Map the reported PA width onto the nearest PS encoding (rounding down).
    match pa_bits {
        52..=64 => val += VTCR_EL2::PS::PA_52B_4PB,
        48..=51 => val += VTCR_EL2::PS::PA_48B_256TB,
        44..=47 => val += VTCR_EL2::PS::PA_44B_16TB,
        42..=43 => val += VTCR_EL2::PS::PA_42B_4TB,
        40..=41 => val += VTCR_EL2::PS::PA_40B_1TB,
        36..=39 => val += VTCR_EL2::PS::PA_36B_64GB,
        _ => val += VTCR_EL2::PS::PA_32B_4GB,
    }
    val.value
}