use core::{
marker::PhantomData,
ptr::NonNull, };
use crate::{
alloc,
bindings,
device::{
Bound,
Device, },
devres::Devres,
error::to_result,
io::PhysAddr,
prelude::*, };
use bindings::io_pgtable_fmt;
/// IOMMU page protection flags, to be OR-combined and passed as the `prot`
/// argument of [`IoPageTable::map_pages`].
pub mod prot {
    /// Pages are readable by the device.
    pub const READ: u32 = bindings::IOMMU_READ;
    /// Pages are writable by the device.
    pub const WRITE: u32 = bindings::IOMMU_WRITE;
    /// Accesses to the pages may be cached.
    pub const CACHE: u32 = bindings::IOMMU_CACHE;
    /// Pages are not executable by the device.
    pub const NOEXEC: u32 = bindings::IOMMU_NOEXEC;
    /// Pages map MMIO (device) memory rather than RAM.
    pub const MMIO: u32 = bindings::IOMMU_MMIO;
    /// Pages are accessible to privileged device contexts only.
    pub const PRIVILEGED: u32 = bindings::IOMMU_PRIV;
}
/// Configuration for a new [`IoPageTable`], mirroring the fields of the C
/// `struct io_pgtable_cfg` that this abstraction exposes.
pub struct Config {
    /// Format-specific quirk bits (`IO_PGTABLE_QUIRK_*`).
    pub quirks: usize,
    /// Bitmap of supported page sizes; bit `n` set means pages of size
    /// `1 << n` are supported.
    pub pgsize_bitmap: usize,
    /// Input address space size, in bits.
    pub ias: u32,
    /// Output address space size, in bits.
    pub oas: u32,
    /// Whether page-table walks by the hardware are cache coherent.
    pub coherent_walk: bool,
}
/// An owned I/O page table allocated through the C `io-pgtable` framework.
///
/// The table is freed via `free_io_pgtable_ops` when this value is dropped.
pub struct IoPageTable<F: IoPageTableFmt> {
    // Non-null pointer returned by `alloc_io_pgtable_ops`; owned by this
    // struct (freed in `Drop`).
    ptr: NonNull<bindings::io_pgtable_ops>,
    // Ties the page-table format type parameter to this value without
    // storing any data.
    _marker: PhantomData<F>,
}
// SAFETY: `IoPageTable` only holds an owned pointer to a C `io_pgtable_ops`
// object, which is not tied to the creating thread. NOTE(review): this assumes
// the C io-pgtable code tolerates use from any thread — confirm.
unsafe impl<F: IoPageTableFmt> Send for IoPageTable<F> {}

// SAFETY: NOTE(review): `Sync` additionally asserts that concurrent calls into
// the C ops (e.g. `map_pages`/`unmap_pages` from multiple threads) are sound;
// confirm the chosen format's C implementation supports this.
unsafe impl<F: IoPageTableFmt> Sync for IoPageTable<F> {}
/// A page-table format supported by the C `io-pgtable` framework.
///
/// Implemented by uninhabited marker types (e.g. [`ARM64LPAES1`]) that select
/// the format at the type level.
pub trait IoPageTableFmt: 'static {
    /// The C format identifier passed to `alloc_io_pgtable_ops`.
    const FORMAT: io_pgtable_fmt;
}
impl<F: IoPageTableFmt> IoPageTable<F> {
    /// Allocates a new page table for `dev`, managed by [`Devres`] so that it
    /// is dropped (freeing the C object) no later than device unbind.
    #[inline]
    pub fn new(
        dev: &Device<Bound>,
        config: Config,
    ) -> impl PinInit<Devres<IoPageTable<F>>, Error> + '_ {
        // SAFETY: the raw table is immediately handed to `Devres`, which ties
        // its lifetime to the device binding. NOTE(review): `new_raw` has no
        // documented safety contract here — confirm this is the requirement
        // being discharged.
        Devres::new(dev, unsafe { Self::new_raw(dev, config) })
    }

    /// Allocates a new page table for `dev` without tying its lifetime to the
    /// device binding.
    ///
    /// Returns `ENOMEM` if the C framework fails to allocate the table.
    ///
    /// # Safety
    ///
    /// NOTE(review): no contract was documented. Presumably the caller must
    /// ensure the returned table does not outlive `dev`'s binding (as
    /// [`IoPageTable::new`] arranges via `Devres`) — confirm.
    #[inline]
    pub unsafe fn new_raw(dev: &Device<Bound>, config: Config) -> Result<IoPageTable<F>> {
        // Translate `Config` into the C `io_pgtable_cfg`, zeroing every field
        // not explicitly set.
        let mut raw_cfg = bindings::io_pgtable_cfg {
            quirks: config.quirks,
            pgsize_bitmap: config.pgsize_bitmap,
            ias: config.ias,
            oas: config.oas,
            coherent_walk: config.coherent_walk,
            // Point at the no-op TLB callbacks; the cookie passed below is
            // null since the callbacks ignore it.
            tlb: &raw const NOOP_FLUSH_OPS,
            iommu_dev: dev.as_raw(),
            // SAFETY: `io_pgtable_cfg` is a C struct for which the all-zero
            // bit pattern is valid.
            ..unsafe { core::mem::zeroed() }
        };
        // SAFETY: `raw_cfg` points to a live, initialized configuration for
        // the duration of the call, and `F::FORMAT` is a valid format id.
        let ops = unsafe {
            bindings::alloc_io_pgtable_ops(F::FORMAT, &mut raw_cfg, core::ptr::null_mut())
        };
        Ok(IoPageTable {
            // A null return from the C allocator is reported as `ENOMEM`.
            ptr: NonNull::new(ops).ok_or(ENOMEM)?,
            _marker: PhantomData,
        })
    }

    /// Returns the raw `io_pgtable_ops` pointer owned by this table.
    #[inline]
    pub fn raw_ops(&self) -> *mut bindings::io_pgtable_ops {
        self.ptr.as_ptr()
    }

    /// Returns a pointer to the enclosing C `io_pgtable` structure.
    #[inline]
    pub fn raw_pgtable(&self) -> *mut bindings::io_pgtable {
        // SAFETY: the ops pointer returned by `alloc_io_pgtable_ops` is the
        // `ops` field embedded in a `struct io_pgtable`, so `container_of`
        // recovers the enclosing object.
        unsafe { kernel::container_of!(self.raw_ops(), bindings::io_pgtable, ops) }
    }

    /// Returns a pointer to this table's `io_pgtable_cfg` as filled in by the
    /// C framework during allocation.
    #[inline]
    pub fn raw_cfg(&self) -> *mut bindings::io_pgtable_cfg {
        // SAFETY: `raw_pgtable` points at a valid `io_pgtable`, so taking the
        // address of its `cfg` field is in-bounds.
        unsafe { &raw mut (*self.raw_pgtable()).cfg }
    }

    /// Maps `pgcount` pages of size `pgsize` starting at physical address
    /// `paddr` to I/O virtual address `iova` with protection bits `prot`
    /// (see [`prot`]).
    ///
    /// Returns the number of bytes actually mapped together with the status;
    /// on error the mapping may be partial, as reported by the first element.
    ///
    /// # Safety
    ///
    /// NOTE(review): no contract was documented. Presumably the caller must
    /// ensure the range does not conflict with live mappings and that the
    /// addresses/sizes satisfy the format's constraints (e.g. `pgsize` set in
    /// `pgsize_bitmap`) — confirm against the C `map_pages` requirements.
    #[inline]
    pub unsafe fn map_pages(
        &self,
        iova: usize,
        paddr: PhysAddr,
        pgsize: usize,
        pgcount: usize,
        prot: u32,
        flags: alloc::Flags,
    ) -> (usize, Result) {
        // Out-parameter filled by the C callback with the number of bytes
        // actually mapped (meaningful even on partial failure).
        let mut mapped: usize = 0;
        // SAFETY: formats allocated through `alloc_io_pgtable_ops` populate
        // `map_pages`. NOTE(review): confirm this holds for every format this
        // abstraction may be instantiated with.
        let map_pages = unsafe { (*self.raw_ops()).map_pages.unwrap_unchecked() };
        // SAFETY: `raw_ops` is valid, `mapped` outlives the call, and the
        // remaining arguments are forwarded per the caller's contract.
        let ret = to_result(unsafe {
            (map_pages)(
                self.raw_ops(),
                iova,
                paddr,
                pgsize,
                pgcount,
                prot as i32,
                flags.as_raw(),
                &mut mapped,
            )
        });
        (mapped, ret)
    }

    /// Unmaps `pgcount` pages of size `pgsize` starting at `iova`, returning
    /// the number of bytes actually unmapped.
    ///
    /// # Safety
    ///
    /// NOTE(review): no contract was documented. Presumably the range must
    /// have been previously mapped with matching page size — confirm against
    /// the C `unmap_pages` requirements.
    #[inline]
    #[must_use]
    pub unsafe fn unmap_pages(&self, iova: usize, pgsize: usize, pgcount: usize) -> usize {
        // SAFETY: formats allocated through `alloc_io_pgtable_ops` populate
        // `unmap_pages` (see the matching note in `map_pages`).
        let unmap_pages = unsafe { (*self.raw_ops()).unmap_pages.unwrap_unchecked() };
        // SAFETY: `raw_ops` is valid; a null gather pointer requests no TLB
        // gather bookkeeping from the C side.
        unsafe { (unmap_pages)(self.raw_ops(), iova, pgsize, pgcount, core::ptr::null_mut()) }
    }
}
/// TLB flush callbacks that do nothing, installed as the `tlb` ops of every
/// table created by [`IoPageTable::new_raw`].
///
/// NOTE(review): this presumes users of this abstraction perform any required
/// TLB maintenance themselves — confirm with the drivers using it.
static NOOP_FLUSH_OPS: bindings::iommu_flush_ops = bindings::iommu_flush_ops {
    tlb_flush_all: Some(rust_tlb_flush_all_noop),
    tlb_flush_walk: Some(rust_tlb_flush_walk_noop),
    // Optional callback; leaving it unset is permitted by the C framework's
    // `Option<fn>` representation here.
    tlb_add_page: None,
};
/// No-op `tlb_flush_all` callback for [`NOOP_FLUSH_OPS`].
///
/// These callbacks are only ever reached through the `NOOP_FLUSH_OPS`
/// function-pointer table, so nothing resolves them by symbol name;
/// `#[no_mangle]` was therefore unnecessary and only exported non-`pub`
/// symbols into the kernel's global namespace (collision risk). The
/// attribute has been dropped.
extern "C" fn rust_tlb_flush_all_noop(_cookie: *mut core::ffi::c_void) {}

/// No-op `tlb_flush_walk` callback for [`NOOP_FLUSH_OPS`]; see
/// [`rust_tlb_flush_all_noop`] for why it carries no `#[no_mangle]`.
extern "C" fn rust_tlb_flush_walk_noop(
    _iova: usize,
    _size: usize,
    _granule: usize,
    _cookie: *mut core::ffi::c_void,
) {
}
impl<F: IoPageTableFmt> Drop for IoPageTable<F> {
    fn drop(&mut self) {
        // SAFETY: `self.ptr` was returned by `alloc_io_pgtable_ops` in
        // `new_raw` and is freed exactly once, here.
        unsafe { bindings::free_io_pgtable_ops(self.raw_ops()) };
    }
}
/// Marker type selecting the ARM 64-bit LPAE stage-1 page table format.
///
/// Uninhabited: only used as a type parameter for [`IoPageTable`].
pub enum ARM64LPAES1 {}

impl IoPageTableFmt for ARM64LPAES1 {
    const FORMAT: io_pgtable_fmt = bindings::io_pgtable_fmt_ARM_64_LPAE_S1 as io_pgtable_fmt;
}
impl IoPageTable<ARM64LPAES1> {
    /// Returns the TTBR (translation table base) value the C framework
    /// computed for this table.
    ///
    /// # Safety
    ///
    /// NOTE(review): no contract was documented, and the sibling `mair`
    /// performs the same kind of union read as a *safe* fn — confirm whether
    /// `unsafe` here is intentional and what the caller must uphold.
    #[inline]
    pub unsafe fn ttbr(&self) -> u64 {
        // SAFETY: with `ARM64LPAES1::FORMAT` the `arm_lpae_s1_cfg` union arm
        // is the one the C code initializes, and `raw_cfg` is valid.
        unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.ttbr }
    }

    /// Returns the MAIR (memory attribute indirection) value the C framework
    /// computed for this table.
    #[inline]
    pub fn mair(&self) -> u64 {
        // SAFETY: with `ARM64LPAES1::FORMAT` the `arm_lpae_s1_cfg` union arm
        // is the one the C code initializes, and `raw_cfg` is valid.
        unsafe { (*self.raw_cfg()).__bindgen_anon_1.arm_lpae_s1_cfg.mair }
    }
}