//! axaddrspace 0.5.5
//!
//! ArceOS-Hypervisor guest address space management module.
// Copyright 2025 The Axvisor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::{arch::asm, fmt};

use ax_page_table_entry::{GenericPTE, MappingFlags};
use ax_page_table_multiarch::PagingMetaData;

// use ax_memory_addr::HostPhysAddr;
use crate::{GuestPhysAddr, HostPhysAddr};

bitflags::bitflags! {
    /// Memory attribute fields in the VMSAv8-64 translation table format descriptors.
    ///
    /// The `S2AP_*` bits use the stage-2 (IPA -> HPA) access-permission
    /// encoding; see the `MappingFlags` conversions below for how each bit
    /// is interpreted.
    #[derive(Debug)]
    pub struct DescriptorAttr: u64 {
        // Attribute fields in VMSAv8-64 Block and Page descriptors
        // (access-permission bits follow the stage-2 layout):

        /// Whether the descriptor is valid.
        const VALID =       1 << 0;
        /// The descriptor gives the address of the next level of translation table or 4KB page.
        /// (not a 2M, 1G block)
        const NON_BLOCK =   1 << 1;
        /// Memory attributes index field (MemAttr, descriptor bits [5:2]).
        const ATTR =   0b1111 << 2;
        /// Stage-2 access permission S2AP[0]: read access permitted when set.
        const S2AP_RO =      1 << 6;
        /// Stage-2 access permission S2AP[1]: write access permitted when set.
        const S2AP_WO =       1 << 7;
        /// Shareability: Inner Shareable (otherwise Outer Shareable).
        const INNER =       1 << 8;
        /// Shareability: Inner or Outer Shareable (otherwise Non-shareable).
        const SHAREABLE =   1 << 9;
        /// The Access flag.
        const AF =          1 << 10;
        /// The not global bit.
        const NG =          1 << 11;
        /// Indicates that 16 adjacent translation table entries point to contiguous memory regions.
        const CONTIGUOUS =  1 <<  52;
        // The Privileged execute-never field (stage-1 only) is intentionally
        // not defined here:
        // const PXN =         1 <<  53;
        /// The Execute-never or Unprivileged execute-never field.
        const XN =         1 <<  54;
        /// Non-secure bit. For memory accesses from Secure state, specifies whether the output
        /// address is in Secure or Non-secure memory.
        const NS =          1 << 55;
        // Next-level attributes in stage 1 VMSAv8-64 Table descriptors:

        /// PXN limit for subsequent levels of lookup.
        const PXN_TABLE =           1 << 59;
        /// XN limit for subsequent levels of lookup.
        const XN_TABLE =            1 << 60;
        /// Access permissions limit for subsequent levels of lookup: access at EL0 not permitted.
        const AP_NO_EL0_TABLE =     1 << 61;
        /// Access permissions limit for subsequent levels of lookup: write access not permitted.
        const AP_NO_WRITE_TABLE =   1 << 62;
        /// For memory accesses from Secure state, specifies the Security state for subsequent
        /// levels of lookup.
        const NS_TABLE =            1 << 63;
    }
}

/// Memory type selector encoded into a descriptor's MemAttr field.
#[repr(u64)]
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
enum MemType {
    /// Device memory (MemAttr index 0).
    Device = 0,
    /// Cacheable normal memory.
    Normal = 1,
    /// Normal memory with caching disabled on the outer domain.
    NormalNonCache = 2,
}

impl DescriptorAttr {
    /// Mask covering the MemAttr field (descriptor bits [5:2]).
    #[allow(clippy::unusual_byte_groupings)]
    const ATTR_INDEX_MASK: u64 = 0b1111_00;
    /// MemAttr[1:0] = 0b11: inner write-back cacheable normal memory.
    const PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 2;
    /// MemAttr[3:2] = 0b11: outer write-back cacheable normal memory.
    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE: u64 = 0b11 << 4;
    /// MemAttr[3:2] = 0b01: outer non-cacheable normal memory.
    const PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE: u64 = 0b1 << 4;
    /// Full MemAttr encoding for cacheable normal memory (inner + outer WB).
    const NORMAL_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
        | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_CACHEABLE;
    /// Full MemAttr encoding written by `from_mem_type(MemType::NormalNonCache)`:
    /// inner write-back cacheable, outer non-cacheable.
    const NORMAL_NONCACHE_BIT: u64 = Self::PTE_S2_MEM_ATTR_NORMAL_INNER_WRITE_BACK_CACHEABLE
        | Self::PTE_S2_MEM_ATTR_NORMAL_OUTER_WRITE_BACK_NOCACHEABLE;

    /// Builds the attribute bits (MemAttr + shareability) for a memory type.
    const fn from_mem_type(mem_type: MemType) -> Self {
        let bits = match mem_type {
            MemType::Normal => Self::NORMAL_BIT | Self::SHAREABLE.bits(),
            MemType::NormalNonCache => Self::NORMAL_NONCACHE_BIT | Self::SHAREABLE.bits(),
            MemType::Device => Self::SHAREABLE.bits(),
        };
        Self::from_bits_retain(bits)
    }

    /// Decodes the memory type back out of the MemAttr field.
    ///
    /// # Panics
    ///
    /// Panics if the MemAttr field holds an encoding that `from_mem_type`
    /// never produces.
    fn mem_type(&self) -> MemType {
        let idx = self.bits() & Self::ATTR_INDEX_MASK;
        match idx {
            Self::NORMAL_BIT => MemType::Normal,
            // Fix: match the full encoding produced by `from_mem_type`
            // (inner-WB | outer-non-cacheable, bits 2..=4). The previous code
            // compared against the outer-non-cacheable bit alone (bit 4), so
            // decoding any `NormalNonCache` descriptor fell through to the
            // panic arm.
            Self::NORMAL_NONCACHE_BIT => MemType::NormalNonCache,
            0 => MemType::Device,
            _ => panic!("Invalid memory attribute index"),
        }
    }
}

impl From<DescriptorAttr> for MappingFlags {
    /// Decodes stage-2 descriptor attribute bits into generic mapping flags.
    fn from(attr: DescriptorAttr) -> Self {
        let mut flags = Self::empty();
        if attr.contains(DescriptorAttr::VALID) {
            flags |= Self::READ;
        }
        // Fix: S2AP[1] (`S2AP_WO`) grants write access when SET — the reverse
        // conversion sets it for `MappingFlags::WRITE`. The previous code
        // negated this test, reporting WRITE exactly for non-writable entries.
        if attr.contains(DescriptorAttr::S2AP_WO) {
            flags |= Self::WRITE;
        }
        // XN set means execute-never, so execute is allowed when it is clear.
        if !attr.contains(DescriptorAttr::XN) {
            flags |= Self::EXECUTE;
        }
        if attr.mem_type() == MemType::Device {
            flags |= Self::DEVICE;
        }
        flags
    }
}

impl From<MappingFlags> for DescriptorAttr {
    /// Encodes generic mapping flags into stage-2 descriptor attribute bits.
    fn from(flags: MappingFlags) -> Self {
        // Fix: DEVICE takes precedence, and UNCACHED on its own selects
        // non-cacheable normal memory. Previously UNCACHED was only honored
        // when DEVICE was also set, and DEVICE|UNCACHED silently mapped to
        // normal (non-device) memory.
        let mut attr = if flags.contains(MappingFlags::DEVICE) {
            Self::from_mem_type(MemType::Device)
        } else if flags.contains(MappingFlags::UNCACHED) {
            Self::from_mem_type(MemType::NormalNonCache)
        } else {
            Self::from_mem_type(MemType::Normal)
        };
        if flags.contains(MappingFlags::READ) {
            // A readable mapping must also be valid.
            attr |= Self::VALID | Self::S2AP_RO;
        }
        if flags.contains(MappingFlags::WRITE) {
            attr |= Self::S2AP_WO;
        }
        attr
    }
}

/// A VMSAv8-64 translation table descriptor.
///
/// The memory-attribute field (descriptor bits \[5:2\]) encodes `0` for
/// device memory and non-zero values for normal memory; the system registers
/// (e.g. MAIR_ELx) must be configured to match these encodings.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct A64PTEHV(u64);

impl A64PTEHV {
    /// Mask selecting the output-address bits 12..48 of a descriptor.
    const PHYS_ADDR_MASK: u64 = 0x0000_ffff_ffff_f000;

    /// Creates an empty descriptor with all bits set to zero.
    pub const fn empty() -> Self {
        A64PTEHV(0)
    }
}

impl GenericPTE for A64PTEHV {
    fn bits(self) -> usize {
        self.0 as usize
    }
    fn new_page(paddr: HostPhysAddr, flags: MappingFlags, is_huge: bool) -> Self {
        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
        if !is_huge {
            attr |= DescriptorAttr::NON_BLOCK;
        }
        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
    }
    fn new_table(paddr: HostPhysAddr) -> Self {
        let attr = DescriptorAttr::NON_BLOCK | DescriptorAttr::VALID;
        Self(attr.bits() | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK))
    }
    fn paddr(&self) -> HostPhysAddr {
        HostPhysAddr::from((self.0 & Self::PHYS_ADDR_MASK) as usize)
    }
    fn flags(&self) -> MappingFlags {
        DescriptorAttr::from_bits_truncate(self.0).into()
    }
    fn set_paddr(&mut self, paddr: HostPhysAddr) {
        self.0 = (self.0 & !Self::PHYS_ADDR_MASK) | (paddr.as_usize() as u64 & Self::PHYS_ADDR_MASK)
    }
    fn set_flags(&mut self, flags: MappingFlags, is_huge: bool) {
        let mut attr = DescriptorAttr::from(flags) | DescriptorAttr::AF;
        if !is_huge {
            attr |= DescriptorAttr::NON_BLOCK;
        }
        self.0 = (self.0 & Self::PHYS_ADDR_MASK) | attr.bits();
    }
    fn is_unused(&self) -> bool {
        self.0 == 0
    }
    fn is_present(&self) -> bool {
        DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::VALID)
    }
    fn is_huge(&self) -> bool {
        self.is_present()
            && !DescriptorAttr::from_bits_truncate(self.0).contains(DescriptorAttr::NON_BLOCK)
    }
    fn clear(&mut self) {
        self.0 = 0
    }
}

impl fmt::Debug for A64PTEHV {
    /// Formats the descriptor with its raw bits, decoded address, raw
    /// attribute bits, and generic mapping flags.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fix: label the struct with the actual type name; the old label
        // "A64PTE" did not match `A64PTEHV` and made debug output misleading.
        let mut f = f.debug_struct("A64PTEHV");
        f.field("raw", &self.0)
            .field("paddr", &self.paddr())
            .field("attr", &DescriptorAttr::from_bits_truncate(self.0))
            .field("flags", &self.flags())
            .finish()
    }
}

/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 3.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaDataL3;

impl PagingMetaData for A64HVPagingMetaDataL3 {
    // The levels of the page table for 3-level configuration.
    const LEVELS: usize = 3;
    // The size of the IPA space for 3-level configuration (40-bit IPA).
    const VA_MAX_BITS: usize = 40;
    // In Armv8.0-A, the maximum size for a physical address is 48 bits.
    const PA_MAX_BITS: usize = 48;

    // Guest-physical (IPA) addresses are the "virtual" side of this table.
    type VirtAddr = GuestPhysAddr;

    /// Invalidates the TLB entry for `vaddr`, or the entire TLB when `None`.
    ///
    /// Without the `arm-el2` feature the EL1 operations are used
    /// (`tlbi vaae1is` per-address, `tlbi vmalle1` for all); with it, the EL2
    /// variants (`tlbi vae2is` / `tlbi alle2is`). Each is followed by
    /// `dsb sy; isb` so the invalidation completes before returning.
    ///
    /// NOTE(review): these TLBI forms target stage-1 translations for the
    /// selected EL; for invalidating stage-2 (IPA) mappings one would
    /// normally expect `tlbi ipas2e1is` / `tlbi vmalls12e1is` — confirm this
    /// matches the intended configuration. Also note `vmalle1` is the
    /// non-broadcast form while the others are Inner Shareable — verify the
    /// asymmetry is intentional.
    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
        // SAFETY: TLB maintenance instructions have no memory operands; they
        // are privileged, so this must run at an EL that permits them.
        unsafe {
            if let Some(vaddr) = vaddr {
                #[cfg(not(feature = "arm-el2"))]
                {
                    asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
                #[cfg(feature = "arm-el2")]
                {
                    asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
            } else {
                // flush the entire TLB
                #[cfg(not(feature = "arm-el2"))]
                {
                    asm!("tlbi vmalle1; dsb sy; isb")
                }
                #[cfg(feature = "arm-el2")]
                {
                    asm!("tlbi alle2is; dsb sy; isb")
                }
            }
        }
    }
}

/// Metadata of AArch64 hypervisor page tables (ipa to hpa) - Level 4.
#[derive(Copy, Clone)]
pub struct A64HVPagingMetaDataL4;

impl PagingMetaData for A64HVPagingMetaDataL4 {
    // The levels of the page table for 4-level configuration.
    const LEVELS: usize = 4;
    // The size of the IPA space for 4-level configuration (48-bit IPA).
    const VA_MAX_BITS: usize = 48;
    // In Armv8.0-A, the maximum size for a physical address is 48 bits.
    const PA_MAX_BITS: usize = 48;

    // Guest-physical (IPA) addresses are the "virtual" side of this table.
    type VirtAddr = GuestPhysAddr;

    /// Invalidates the TLB entry for `vaddr`, or the entire TLB when `None`.
    ///
    /// Without the `arm-el2` feature the EL1 operations are used
    /// (`tlbi vaae1is` per-address, `tlbi vmalle1` for all); with it, the EL2
    /// variants (`tlbi vae2is` / `tlbi alle2is`). Each is followed by
    /// `dsb sy; isb` so the invalidation completes before returning.
    ///
    /// NOTE(review): these TLBI forms target stage-1 translations for the
    /// selected EL; for invalidating stage-2 (IPA) mappings one would
    /// normally expect `tlbi ipas2e1is` / `tlbi vmalls12e1is` — confirm this
    /// matches the intended configuration.
    fn flush_tlb(vaddr: Option<Self::VirtAddr>) {
        // SAFETY: TLB maintenance instructions have no memory operands; they
        // are privileged, so this must run at an EL that permits them.
        unsafe {
            if let Some(vaddr) = vaddr {
                #[cfg(not(feature = "arm-el2"))]
                {
                    asm!("tlbi vaae1is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
                #[cfg(feature = "arm-el2")]
                {
                    asm!("tlbi vae2is, {}; dsb sy; isb", in(reg) vaddr.as_usize())
                }
            } else {
                // flush the entire TLB
                #[cfg(not(feature = "arm-el2"))]
                {
                    asm!("tlbi vmalle1; dsb sy; isb")
                }
                #[cfg(feature = "arm-el2")]
                {
                    asm!("tlbi alle2is; dsb sy; isb")
                }
            }
        }
    }
}