use core::{arch::asm, fmt};
use ax_page_table_entry::{GenericPTE, MappingFlags};
use ax_page_table_multiarch::PagingMetaData;
use crate::{GuestPhysAddr, HostPhysAddr};
bitflags::bitflags! {
    /// Attribute fields of a VMSAv8-64 stage-2 translation-table descriptor.
    ///
    /// Bit positions follow the Armv8-A VMSA descriptor layout; fields above
    /// bit 58 apply to table descriptors, the rest to block/page descriptors.
    #[derive(Debug)]
    pub struct DescriptorAttr: u64 {
        /// Whether the descriptor is valid.
        const VALID = 1 << 0;
        /// The descriptor gives the address of the next level of translation
        /// table or a 4K page (whether the descriptor is a table or a block
        /// when this bit is clear).
        const NON_BLOCK = 1 << 1;
        /// Stage-2 memory attributes field, `MemAttr[3:0]` (bits [5:2]).
        const ATTR = 0b1111 << 2;
        /// Stage-2 access permission: set on READ in `From<MappingFlags>`
        /// (S2AP[0]).
        const S2AP_RO = 1 << 6;
        /// Stage-2 access permission: set on WRITE in `From<MappingFlags>`
        /// (S2AP[1]).
        const S2AP_WO = 1 << 7;
        /// Low bit of the shareability field, SH[0].
        const INNER = 1 << 8;
        /// High bit of the shareability field, SH[1].
        const SHAREABLE = 1 << 9;
        /// The Access flag (AF).
        const AF = 1 << 10;
        /// The not global bit (nG).
        /// NOTE(review): bit 11 is a stage-1 attribute; confirm intended use
        /// for stage-2 descriptors.
        const NG = 1 << 11;
        /// The Contiguous hint bit.
        const CONTIGUOUS = 1 << 52;
        /// The execute-never field (XN).
        const XN = 1 << 54;
        /// The non-secure bit (NS).
        const NS = 1 << 55;
        /// (Table descriptor) PXN limit for subsequent levels of lookup.
        const PXN_TABLE = 1 << 59;
        /// (Table descriptor) XN limit for subsequent levels of lookup.
        const XN_TABLE = 1 << 60;
        /// (Table descriptor) Access permission limit: no EL0 access for
        /// subsequent levels of lookup.
        const AP_NO_EL0_TABLE = 1 << 61;
        /// (Table descriptor) Access permission limit: no write access for
        /// subsequent levels of lookup.
        const AP_NO_WRITE_TABLE = 1 << 62;
        /// (Table descriptor) Non-secure limit for subsequent levels of lookup.
        const NS_TABLE = 1 << 63;
    }
}
/// Coarse memory types that `DescriptorAttr` can encode in the stage-2
/// `MemAttr` field.
#[repr(u64)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum MemType {
    /// Device memory (all `MemAttr` bits clear).
    Device = 0,
    /// Normal memory, inner and outer write-back cacheable.
    Normal = 1,
    /// Normal memory, outer non-cacheable.
    NormalNonCache = 2,
}
impl DescriptorAttr {
    /// Mask of the stage-2 memory-attribute field `MemAttr[3:0]`
    /// (descriptor bits [5:2]).
    #[allow(clippy::unusual_byte_groupings)]
    const ATTR_INDEX_MASK: u64 = 0b1111_00;
    /// `MemAttr[1:0] = 0b11`: Normal memory, inner write-back cacheable.
    const PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 2;
    /// `MemAttr[3:2] = 0b11`: Normal memory, outer write-back cacheable.
    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 4;
    /// `MemAttr[3:2] = 0b01`: Normal memory, outer non-cacheable.
    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE: u64 = 0b1 << 4;
    /// Combined attribute bits for fully cacheable Normal memory.
    const NORMAL_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
        | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE;
    /// Combined attribute bits for inner-cacheable / outer-non-cacheable
    /// Normal memory, exactly as produced by
    /// `from_mem_type(MemType::NormalNonCache)`.
    const NORMAL_NONCACHE_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
        | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE;

    /// Encodes `mem_type` into the stage-2 memory-attribute and
    /// shareability bits of a descriptor.
    const fn from_mem_type(mem_type: MemType) -> Self {
        let bits = match mem_type {
            MemType::Normal => Self::NORMAL_BIT | Self::SHAREABLE.bits(),
            MemType::NormalNonCache => Self::NORMAL_NONCACHE_BIT | Self::SHAREABLE.bits(),
            MemType::Device => Self::SHAREABLE.bits(),
        };
        Self::from_bits_retain(bits)
    }

    /// Decodes the memory type from the descriptor's `MemAttr` field.
    ///
    /// # Panics
    ///
    /// Panics if the attribute index is not one this module produces.
    fn mem_type(&self) -> MemType {
        let idx = self.bits() & Self::ATTR_INDEX_MASK;
        match idx {
            Self::NORMAL_BIT => MemType::Normal,
            // Fix: `from_mem_type(MemType::NormalNonCache)` writes the inner
            // write-back bits *and* the outer-non-cacheable bit, so matching
            // only the outer bit made the round trip hit the panic arm. The
            // old single-bit pattern is kept for backward compatibility.
            Self::NORMAL_NONCACHE_BIT
            | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE => MemType::NormalNonCache,
            0 => MemType::Device,
            _ => panic!("Invalid memory attribute index"),
        }
    }
}
impl From<DescriptorAttr> for MappingFlags {
    /// Decodes stage-2 descriptor attribute bits into generic mapping flags.
    fn from(attr: DescriptorAttr) -> Self {
        let mut flags = Self::empty();
        if attr.contains(DescriptorAttr::VALID) {
            flags |= Self::READ;
        }
        // Fix: `From<MappingFlags>` *sets* S2AP_WO when WRITE is requested,
        // so its presence (not absence) marks a writable mapping. The old
        // negated test reported read-only mappings as writable and vice versa.
        if attr.contains(DescriptorAttr::S2AP_WO) {
            flags |= Self::WRITE;
        }
        if !attr.contains(DescriptorAttr::XN) {
            flags |= Self::EXECUTE;
        }
        if attr.mem_type() == MemType::Device {
            flags |= Self::DEVICE;
        }
        flags
    }
}
impl From<MappingFlags> for DescriptorAttr {
    /// Builds stage-2 descriptor attribute bits from generic mapping flags.
    fn from(flags: MappingFlags) -> Self {
        // Fix: DEVICE and UNCACHED are independent requests. Previously
        // UNCACHED was only honoured *inside* the DEVICE branch, so a
        // DEVICE | UNCACHED mapping got Normal (non-cacheable) memory
        // attributes instead of Device, and a plain UNCACHED mapping was
        // silently made fully cacheable.
        let mut attr = if flags.contains(MappingFlags::DEVICE) {
            Self::from_mem_type(MemType::Device)
        } else if flags.contains(MappingFlags::UNCACHED) {
            Self::from_mem_type(MemType::NormalNonCache)
        } else {
            Self::from_mem_type(MemType::Normal)
        };
        if flags.contains(MappingFlags::READ) {
            attr |= Self::VALID | Self::S2AP_RO;
        }
        if flags.contains(MappingFlags::WRITE) {
            attr |= Self::S2AP_WO;
        }
        // Fix: encode EXECUTE as well, so that non-executable mappings carry
        // XN and `From<DescriptorAttr> for MappingFlags` round-trips; before,
        // every mapping decoded as executable.
        if !flags.contains(MappingFlags::EXECUTE) {
            attr |= Self::XN;
        }
        attr
    }
}
/// A VMSAv8-64 stage-2 translation-table descriptor used by the hypervisor,
/// wrapping the raw 64-bit entry.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct A64PTEHV(u64);
impl A64PTEHV {
    /// Mask of the output-address field, descriptor bits [47:12].
    const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;
    /// Returns an all-zero (unused) entry.
    pub const fn empty() -> Self {
        Self(0)
    }
}
impl GenericPTE for A64PTEHV {
fn bits(self) -> usize {
self.0 as usize
}
fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
if !is_huge {
attr |= DescriptorAttr::NON_BLOCK;
}
Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
}
fn new_table(paddr: HostPhysAddr) -> Self {
let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID;
Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
}
fn paddr(&self) -> HostPhysAddr {
HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
}
fn flags(&self) -> MappingFlags {
DescriptorAttr::from_bits_truncate(self.0).into()
}
fn set_paddr(&mut self, paddr: HostPhysAddr) {
self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
}
fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
if !is_huge {
attr |= DescriptorAttr::NON_BLOCK;
}
self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits();
}
fn is_unused(&self) -> bool {
self.0 == 0
}
fn is_present(&self) -> bool {
DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID)
}
fn is_huge(&self) -> bool {
self.is_present()
&& !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK)
}
fn clear(&mut self) {
self.0 = 0
}
}
impl fmt::Debug for A64PTEHV {
    /// Formats the entry with its raw bits, decoded physical address,
    /// descriptor attributes, and generic mapping flags.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fix: report the actual type name (`A64PTEHV`), not `A64PTE`.
        let mut f = f.debug_struct("A64PTEHV");
        f.field("raw", &self.0)
            .field("paddr", &self.paddr())
            .field("attr", &DescriptorAttr::from_bits_truncate(self.0))
            .field("flags", &self.flags())
            .finish()
    }
}
/// Paging metadata for a 3-level stage-2 page table covering a 40-bit
/// guest-physical address space with 48-bit output addresses.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaDataL3;
impl PagingMetaData for A64HVPagingMetaDataL3 {
    const LEVELS: usize = 3;
    const VA_MAX_BITS: usize = 40;
    const PA_MAX_BITS: usize = 48;
    // Tables of this shape are indexed by guest-physical addresses.
    type VirtAddr = GuestPhysAddr;
    /// Invalidates TLB entries for `vaddr`, or the whole TLB when `None`.
    // SAFETY: TLB maintenance instructions plus dsb/isb barriers only discard
    // cached translations; they do not access memory.
    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
        unsafe {
            if let Some(vaddr) = vaddr {
                #[cfg(not(feature = "arm-el2"))]
                {
                    // Invalidate by VA, all ASIDs, EL1&0 regime, inner shareable.
                    // NOTE(review): stage-2 invalidation by IPA would normally
                    // use `tlbi ipas2e1is` — confirm the intended regime.
                    asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
                #[cfg(feature = "arm-el2")]
                {
                    // Invalidate by VA in the EL2 regime, inner shareable.
                    asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
            } else {
                #[cfg(not(feature = "arm-el2"))]
                {
                    // Invalidate all EL1&0 entries on the local core.
                    // NOTE(review): unlike the other paths this op is not
                    // inner-shareable (`vmalle1` vs `vmalle1is`) — confirm.
                    asm!("tlbi vmalle1; dsb sy; isb")
                }
                #[cfg(feature = "arm-el2")]
                {
                    // Invalidate all EL2 entries, inner shareable.
                    asm!("tlbi alle2is; dsb sy; isb")
                }
            }
        }
    }
}
/// Paging metadata for a 4-level stage-2 page table covering a 48-bit
/// guest-physical address space with 48-bit output addresses.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaDataL4;
impl PagingMetaData for A64HVPagingMetaDataL4 {
    const LEVELS: usize = 4;
    const VA_MAX_BITS: usize = 48;
    const PA_MAX_BITS: usize = 48;
    // Tables of this shape are indexed by guest-physical addresses.
    type VirtAddr = GuestPhysAddr;
    /// Invalidates TLB entries for `vaddr`, or the whole TLB when `None`.
    // SAFETY: TLB maintenance instructions plus dsb/isb barriers only discard
    // cached translations; they do not access memory.
    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
        unsafe {
            if let Some(vaddr) = vaddr {
                #[cfg(not(feature = "arm-el2"))]
                {
                    // Invalidate by VA, all ASIDs, EL1&0 regime, inner shareable.
                    // NOTE(review): stage-2 invalidation by IPA would normally
                    // use `tlbi ipas2e1is` — confirm the intended regime.
                    asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
                #[cfg(feature = "arm-el2")]
                {
                    // Invalidate by VA in the EL2 regime, inner shareable.
                    asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
            } else {
                #[cfg(not(feature = "arm-el2"))]
                {
                    // Invalidate all EL1&0 entries on the local core.
                    // NOTE(review): unlike the other paths this op is not
                    // inner-shareable (`vmalle1` vs `vmalle1is`) — confirm.
                    asm!("tlbi vmalle1; dsb sy; isb")
                }
                #[cfg(feature = "arm-el2")]
                {
                    // Invalidate all EL2 entries, inner shareable.
                    asm!("tlbi alle2is; dsb sy; isb")
                }
            }
        }
    }
}