use core::cell::UnsafeCell;
use axaddrspace::{GuestPhysAddr, GuestPhysAddrRange, HostPhysAddr, device::AccessWidth};
use axdevice_base::{BaseDeviceOps, EmuDeviceType};
use axerrno::AxResult;
use axvisor_api::memory::phys_to_virt;
use bitmaps::Bitmap;
use log::debug;
use super::{
registers::*,
utils::{perform_mmio_read, perform_mmio_write},
};
/// Default size of the emulated GICD MMIO region (64 KiB).
pub const DEFAULT_GICD_SIZE: usize = 0x10000;

/// A virtual GICv3 distributor that forwards guest accesses to the physical
/// GICD, filtered so the guest can only observe/affect IRQs assigned to it.
pub struct VGicD {
    /// Guest-physical base address of the emulated register region.
    pub addr: GuestPhysAddr,
    /// Size in bytes of the emulated register region.
    pub size: usize,
    /// Bitmap of IRQs physically assigned to this guest.
    /// NOTE(review): `UnsafeCell` provides no synchronization — this assumes
    /// accesses are externally serialized; confirm against the callers.
    pub assigned_irqs: UnsafeCell<Bitmap<{ MAX_IRQ_V3 }>>,
    /// Host-physical base address of the real GICD.
    pub host_gicd_addr: HostPhysAddr,
}
impl VGicD {
    /// Create a virtual distributor emulated at guest-physical `addr`.
    ///
    /// `size` is the MMIO region size; `None` selects [`DEFAULT_GICD_SIZE`]
    /// (64 KiB). The physical GICD base is obtained from
    /// `crate::api_reexp::get_host_gicd_base()`.
    pub fn new(addr: GuestPhysAddr, size: Option<usize>) -> Self {
        let size = size.unwrap_or(DEFAULT_GICD_SIZE);
        Self {
            addr,
            size,
            // No IRQs are assigned until `assign_irq` is called.
            assigned_irqs: UnsafeCell::new(Bitmap::new()),
            host_gicd_addr: crate::api_reexp::get_host_gicd_base(),
        }
    }

    /// Record `irq` as assigned to this guest and program the *physical*
    /// distributor to deliver it to the given CPU.
    ///
    /// `target_cpu_affinity` is ordered `(Aff3, Aff2, Aff1, Aff0)`, judging
    /// by the bit positions written below (bits 39:32, 23:16, 15:8, 7:0 of
    /// `GICD_IROUTER`).
    ///
    /// # Panics
    /// Panics if `irq >= MAX_IRQ_V3`.
    pub fn assign_irq(&self, irq: u32, cpu_phys_id: usize, target_cpu_affinity: (u8, u8, u8, u8)) {
        debug!(
            "Physically assigning IRQ {irq} to CPU {cpu_phys_id} with affinity \
             {target_cpu_affinity:?}"
        );
        if irq >= MAX_IRQ_V3 as u32 {
            panic!("IRQ {} is out of range for VGicD", irq);
        }
        // SAFETY: no synchronization here — assumes assignment happens on a
        // single (VMM) thread; `UnsafeCell` alone is not thread-safe.
        // TODO(review): confirm external serialization.
        unsafe {
            (*self.assigned_irqs.get()).set(irq as usize, true);
        }
        // GICD_ITARGETSR holds one byte per IRQ (legacy routing).
        let gicd_itargetsr_paddr = self.host_gicd_addr + GICD_ITARGETSR + irq as usize;
        let gicd_itargetsr_vaddr = phys_to_virt(gicd_itargetsr_paddr);
        // SAFETY: volatile write to a device register mapped via `phys_to_virt`.
        // NOTE(review): `1u8 << cpu_phys_id` overflows for cpu_phys_id >= 8
        // (ITARGETSR is an 8-bit CPU mask) — confirm callers keep it < 8.
        unsafe {
            core::ptr::write_volatile(
                gicd_itargetsr_vaddr.as_mut_ptr_of::<u8>(),
                1u8 << (cpu_phys_id),
            );
        }
        // GICD_IROUTER<n> registers are 8 bytes per IRQ.
        let gicd_irouter_paddr = self.host_gicd_addr + GICD_IROUTER + (irq as usize) * 8;
        let gicd_irouter_vaddr = phys_to_virt(gicd_irouter_paddr);
        // SAFETY: volatile write to a device register mapped via `phys_to_virt`.
        // Value written: Aff3<<32 | bit 31 | Aff2<<16 | Aff1<<8 | Aff0.
        // NOTE(review): bit 31 is IROUTER.IRM (1-of-N routing); when set, the
        // GIC ignores the affinity fields — confirm both are intended here.
        unsafe {
            core::ptr::write_volatile(
                gicd_irouter_vaddr.as_mut_ptr_of::<u64>(),
                (target_cpu_affinity.0 as u64) << 32
                    | 1 << 31
                    | (target_cpu_affinity.1 as u64) << 16
                    | (target_cpu_affinity.2 as u64) << 8
                    | target_cpu_affinity.3 as u64,
            );
        }
    }
}
impl BaseDeviceOps<GuestPhysAddrRange> for VGicD {
fn emu_type(&self) -> axdevice_base::EmuDeviceType {
EmuDeviceType::GPPTDistributor
}
fn address_range(&self) -> GuestPhysAddrRange {
GuestPhysAddrRange::from_start_size(self.addr, self.size)
}
fn handle_read(
&self,
addr: <GuestPhysAddrRange as axaddrspace::device::DeviceAddrRange>::Addr,
width: axaddrspace::device::AccessWidth,
) -> axerrno::AxResult<usize> {
let gicd_base = self.host_gicd_addr;
let reg = addr - self.addr;
debug!("vGICD read reg {reg:#x} width {width:?}");
match reg {
reg if GICD_IROUTER_RANGE.contains(®) => {
let irq = (reg - GICD_IROUTER) as u32 / 8;
if self.is_irq_assigned(irq) && self.is_irq_spi(irq) {
perform_mmio_read(gicd_base + reg, width)
} else {
Ok(0)
}
}
reg if GICD_ITARGETSR_RANGE.contains(®) => {
let irq = (reg - GICD_ITARGETSR) as u32;
if self.is_irq_assigned(irq) && self.is_irq_spi(irq) {
perform_mmio_read(gicd_base + reg, width)
} else {
Ok(0)
}
}
reg if GICD_ICENABLER_RANGE.contains(®)
|| GICD_ISENABLER_RANGE.contains(®)
|| GICD_ICPENDR_RANGE.contains(®)
|| GICD_ISPENDR_RANGE.contains(®)
|| GICD_ICACTIVER_RANGE.contains(®)
|| GICD_ISACTIVER_RANGE.contains(®) =>
{
self.irq_masked_read(reg, reg & 0x7f, 0, width, true)
}
reg if GICD_IGROUPR_RANGE.contains(®) => {
self.irq_masked_read(reg, reg & 0x7f, 0, width, false)
}
reg if GICD_IGRPMODR_RANGE.contains(®) => {
self.irq_masked_read(reg, reg & 0x7f, 0, width, false)
}
reg if GICD_ICFGR_RANGE.contains(®) => {
self.irq_masked_read(reg, reg & 0xff, 1, width, false)
}
reg if GICD_IPRIORITYR_RANGE.contains(®) => {
self.irq_masked_read(reg, reg & 0x3ff, 3, width, false)
}
reg if GICDV3_PIDR0_RANGE.contains(®)
|| GICDV3_PIDR4_RANGE.contains(®)
|| GICDV3_CIDR0_RANGE.contains(®)
|| reg == GICD_CTLR
|| reg == GICD_TYPER
|| reg == GICD_IIDR
|| reg == GICD_TYPER2 =>
{
perform_mmio_read(gicd_base + reg, width)
}
_ => {
todo!("vgicdv3 read unimplemented for reg {:#x}", reg);
}
}
}
fn handle_write(
&self,
addr: <GuestPhysAddrRange as axaddrspace::device::DeviceAddrRange>::Addr,
width: axaddrspace::device::AccessWidth,
val: usize,
) -> axerrno::AxResult {
let gicd_base = self.host_gicd_addr;
let reg = addr - self.addr;
debug!("vGICD write reg {reg:#x} width {width:?} val {val:#x}");
match reg {
reg if GICD_IROUTER_RANGE.contains(®) => {
let irq = (reg - GICD_IROUTER) as u32 / 8;
if self.is_irq_assigned(irq) && self.is_irq_spi(irq) {
perform_mmio_write(gicd_base + reg, width, val)
} else {
Ok(())
}
}
reg if GICD_ITARGETSR_RANGE.contains(®) => {
let irq = (reg - GICD_ITARGETSR) as u32;
if self.is_irq_assigned(irq) && self.is_irq_spi(irq) {
perform_mmio_write(gicd_base + reg, width, val)
} else {
Ok(())
}
}
reg if GICD_ICENABLER_RANGE.contains(®)
|| GICD_ISENABLER_RANGE.contains(®)
|| GICD_ICPENDR_RANGE.contains(®)
|| GICD_ISPENDR_RANGE.contains(®)
|| GICD_ICACTIVER_RANGE.contains(®)
|| GICD_ISACTIVER_RANGE.contains(®) =>
{
self.irq_masked_write(reg, reg & 0x7f, 0, width, true, val)
}
reg if GICD_IGROUPR_RANGE.contains(®) => {
self.irq_masked_write(reg, reg & 0x7f, 0, width, false, val)
}
reg if GICD_IGRPMODR_RANGE.contains(®) => {
self.irq_masked_write(reg, reg & 0x7f, 0, width, false, val)
}
reg if GICD_ICFGR_RANGE.contains(®) => {
self.irq_masked_write(reg, reg & 0xff, 1, width, false, val)
}
reg if GICD_IPRIORITYR_RANGE.contains(®) => {
self.irq_masked_write(reg, reg & 0x3ff, 3, width, false, val)
}
reg if GICDV3_PIDR0_RANGE.contains(®)
|| GICDV3_PIDR4_RANGE.contains(®)
|| GICDV3_CIDR0_RANGE.contains(®)
|| reg == GICD_CTLR
|| reg == GICD_TYPER
|| reg == GICD_IIDR
|| reg == GICD_TYPER2 =>
{
Ok(())
}
_ => {
todo!("vgicdv3 write unimplemented for reg {:#x}", reg);
}
}
}
}
impl VGicD {
    /// Returns whether `irq` has been physically assigned to this guest
    /// via [`VGicD::assign_irq`].
    pub fn is_irq_assigned(&self, irq: u32) -> bool {
        // SAFETY: read of the assignment bitmap; assumes accesses are
        // externally serialized (the `UnsafeCell` itself provides no
        // synchronization) — TODO(review): confirm.
        unsafe { (*self.assigned_irqs.get()).get(irq as usize) }
    }

    /// Returns whether `irq` is a Software Generated Interrupt (INTID 0..=15).
    pub fn is_irq_sgi(&self, irq: u32) -> bool {
        irq < 16
    }

    /// Returns whether `irq` is treated as a Shared Peripheral Interrupt.
    ///
    /// NOTE(review): this also accepts 16..=31, which are PPIs in the GIC
    /// architecture (SPIs start at INTID 32) — confirm that is intentional
    /// before tightening the range, since it gates IROUTER/ITARGETSR
    /// pass-through.
    pub fn is_irq_spi(&self, irq: u32) -> bool {
        (16..1020).contains(&irq)
    }

    /// Build a bit mask selecting, within one MMIO access of `width` at byte
    /// offset `reg_offset` into a per-IRQ register bank, the bits that belong
    /// to IRQs assigned to this guest.
    ///
    /// `bits_per_irq_shift` is log2 of the bits each IRQ occupies in the
    /// bank: 0 for 1-bit banks (ISENABLER etc.), 1 for 2-bit banks (ICFGR),
    /// 3 for 8-bit banks (IPRIORITYR).
    ///
    /// # Panics
    /// Panics if `bits_per_irq_shift > 3`.
    pub fn irq_access_mask(
        &self,
        reg_offset: usize,
        bits_per_irq_shift: usize,
        width: AccessWidth,
    ) -> usize {
        if bits_per_irq_shift > 3 {
            panic!(
                "bits_per_irq_shift must be <= 3, got {}",
                bits_per_irq_shift
            );
        }
        // IRQs covered by this access: (width in bits) / (bits per IRQ).
        let irqs_in_access_width = width.size() << (3 - bits_per_irq_shift);
        // First IRQ described at this byte offset: (offset in bits) / (bits per IRQ).
        let first_irq = reg_offset << (3 - bits_per_irq_shift);
        // All-ones mask covering one IRQ's field. The field is
        // `1 << bits_per_irq_shift` bits wide, so the mask is
        // (1 << (1 << shift)) - 1.
        // Bug fix: the previous formula `(1 << (shift + 1)) - 1` only
        // coincides with the correct one for shifts 0 and 1; for shift 3
        // (IPRIORITYR) it produced 0xf instead of 0xff, truncating the upper
        // nibble of every 8-bit priority field.
        let single_irq_mask = (1usize << (1usize << bits_per_irq_shift)) - 1;
        let mut mask = 0;
        for irq in 0..irqs_in_access_width {
            if self.is_irq_assigned((first_irq + irq) as _) {
                // Place the field mask at this IRQ's bit position within the access.
                mask |= single_irq_mask << (irq << bits_per_irq_shift);
            }
        }
        mask
    }

    /// Read the physical register at `offset` and zero the bit fields of
    /// IRQs not assigned to this guest.
    pub fn irq_masked_read(
        &self,
        offset: usize,
        reg_offset: usize,
        bits_per_irq_shift: usize,
        width: AccessWidth,
        _is_poke: bool,
    ) -> AxResult<usize> {
        let mask = self.irq_access_mask(reg_offset, bits_per_irq_shift, width);
        Ok(perform_mmio_read(self.host_gicd_addr + offset, width)? & mask)
    }

    /// Write `val` to the physical register at `offset`, touching only the
    /// bit fields of IRQs assigned to this guest.
    ///
    /// For "poke" registers (set/clear banks where zero bits are ignored by
    /// the hardware) the masked value is written directly. Otherwise a
    /// read-modify-write under [`GICD_LOCK`] preserves the fields of
    /// unassigned IRQs from concurrent vGICD instances.
    pub fn irq_masked_write(
        &self,
        offset: usize,
        reg_offset: usize,
        bits_per_irq_shift: usize,
        width: AccessWidth,
        is_poke: bool,
        val: usize,
    ) -> AxResult<()> {
        let mask = self.irq_access_mask(reg_offset, bits_per_irq_shift, width);
        if is_poke {
            perform_mmio_write(self.host_gicd_addr + offset, width, val & mask)
        } else {
            let _lock = GICD_LOCK.lock();
            let current_value = perform_mmio_read(self.host_gicd_addr + offset, width)?;
            let new_value = (current_value & !mask) | (val & mask);
            perform_mmio_write(self.host_gicd_addr + offset, width, new_value)
        }
    }
}
/// Serializes the read-modify-write sequence used by `irq_masked_write` for
/// shared physical GICD registers ("poke" writes bypass this lock).
static GICD_LOCK: spin::Mutex<()> = spin::Mutex::new(());