#![no_std]
#![deny(clippy::undocumented_unsafe_blocks)]
#![deny(unsafe_op_in_unsafe_fn)]
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod descriptor;
#[cfg(feature = "alloc")]
pub mod idmap;
#[cfg(feature = "alloc")]
pub mod linearmap;
pub mod mair;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod target;
#[cfg(any(test, feature = "alloc"))]
extern crate alloc;
#[cfg(target_arch = "aarch64")]
use core::arch::asm;
use core::sync::atomic::{AtomicUsize, Ordering};
use descriptor::{
Descriptor, DescriptorBits, PagingAttributes, PhysicalAddress, UpdatableDescriptor,
VirtualAddress,
};
use paging::{Constraints, MemoryRegion, RootTable, Translation, TranslationRegime, VaRange};
use thiserror::Error;
/// Errors that may be returned when mapping or updating page table entries.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MapError {
    /// The requested virtual address is outside the range covered by the page table.
    #[error("Virtual address {0} out of range")]
    AddressRange(VirtualAddress),
    /// The virtual address is not valid for the requested mapping.
    #[error("Invalid virtual address {0} for mapping")]
    InvalidVirtualAddress(VirtualAddress),
    /// The given memory region ends before it starts.
    #[error("End of memory region {0} is before start.")]
    RegionBackwards(MemoryRegion),
    /// Updating a page table entry failed; carries the raw descriptor bits involved.
    #[error("Error updating page table entry {0:?}")]
    PteUpdateFault(DescriptorBits),
    /// The given attribute flags are not supported for mapping.
    #[error("Flags {0:#x} unsupported for mapping.")]
    InvalidFlags(usize),
    /// The requested update would break the break-before-make rules while the
    /// translation is live (i.e. the mapping is active).
    #[error("Cannot remap region {0} while translation is live.")]
    BreakBeforeMakeViolation(MemoryRegion),
}
/// A page table mapping for some translation regime `R`, together with the
/// state needed to safely activate and update it.
#[derive(Debug)]
pub struct Mapping<T: Translation<R::Attributes>, R: TranslationRegime> {
    /// The root of the page table hierarchy.
    root: RootTable<R, T>,
    /// The address space ID; `()` for regimes without ASIDs.
    asid: R::Asid,
    /// Number of unbalanced `mark_active` calls; non-zero means the
    /// translation may currently be live, so updates must obey
    /// break-before-make and be followed by TLB maintenance.
    active_count: AtomicUsize,
}
/// Waits for previously issued TLB maintenance to complete.
///
/// On targets other than aarch64 this is a no-op.
fn wait_for_tlb_maintenance() {
    #[cfg(target_arch = "aarch64")]
    // SAFETY: The DSB and ISB barrier instructions only synchronise; they
    // don't access memory, clobber registers or otherwise affect memory
    // safety.
    unsafe {
        asm!("dsb ish", "isb", options(preserves_flags, nostack));
    }
}
impl<T: Translation<R::Attributes>, R: TranslationRegime<Asid = (), VaRange = ()>> Mapping<T, R> {
    /// Creates a new, inactive mapping for a translation regime which has
    /// neither ASIDs nor a choice of VA range, rooted at the given level.
    pub fn new(translation: T, rootlevel: usize, regime: R) -> Self {
        let root = RootTable::new(translation, rootlevel, regime);
        Self::new_impl(root, ())
    }
}
impl<T: Translation<R::Attributes>, R: TranslationRegime<Asid = usize, VaRange = VaRange>>
    Mapping<T, R>
{
    /// Creates a new, inactive mapping with the given ASID, for a translation
    /// regime which supports both ASIDs and a choice of VA range.
    pub fn with_asid_and_va_range(
        translation: T,
        asid: usize,
        rootlevel: usize,
        regime: R,
        va_range: VaRange,
    ) -> Self {
        let root = RootTable::with_va_range(translation, rootlevel, regime, va_range);
        Self::new_impl(root, asid)
    }
}
impl<T: Translation<R::Attributes>, R: TranslationRegime> Mapping<T, R> {
    /// Wraps a prepared root table and ASID into a mapping with an activation
    /// count of zero.
    fn new_impl(root: RootTable<R, T>, asid: R::Asid) -> Self {
        Self {
            root,
            asid,
            active_count: AtomicUsize::new(0),
        }
    }

    /// Returns a reference to the `Translation` used by this mapping.
    pub fn translation(&self) -> &T {
        self.root.translation()
    }

    /// Returns whether the mapping is currently considered active, i.e. there
    /// have been more `mark_active` calls than `mark_inactive` calls so far.
    pub fn active(&self) -> bool {
        self.active_count.load(Ordering::Acquire) != 0
    }

    /// Returns the size in bytes of the virtual address space covered by this
    /// page table.
    pub fn size(&self) -> usize {
        self.root.size()
    }

    /// Marks the mapping active and, on aarch64 (outside of tests), programs
    /// the root table into the appropriate translation register via
    /// `R::activate`, returning the previous register value.
    ///
    /// On other targets only the bookkeeping happens and `usize::MAX` is
    /// returned.
    ///
    /// # Safety
    ///
    /// The caller must ensure that activating this page table doesn't unmap
    /// any memory which the program is using, or introduce aliases which break
    /// Rust's aliasing rules.
    pub unsafe fn activate(&self) -> usize {
        #[allow(unused_mut, unused_assignments)]
        let mut previous_ttbr = usize::MAX;
        // Mark active before the hardware switch, so `active()` is already
        // true by the time the translation could be live.
        self.mark_active();
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: The DMB and ISB barriers only order accesses (here: making
        // page table writes visible before the switch) and don't themselves
        // affect memory safety. Switching the translation is sound under this
        // function's own safety contract, which the caller upholds.
        unsafe {
            asm!("dmb ishst", "isb", options(preserves_flags),);
            previous_ttbr =
                R::activate(self.root_address(), self.asid, self.root.va_range_or_unit());
        }
        previous_ttbr
    }

    /// Restores `previous_ttbr` (as returned by [`activate`](Self::activate))
    /// via `R::deactivate` on aarch64 (outside of tests), then marks the
    /// mapping inactive.
    ///
    /// # Panics
    ///
    /// Panics if the mapping is not currently active.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this switches
    /// back to doesn't unmap any memory which the program is using.
    pub unsafe fn deactivate(&self, #[allow(unused)] previous_ttbr: usize) {
        assert!(self.active());
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: Switching back to the previous translation is sound under
        // this function's safety contract, which the caller upholds.
        unsafe {
            R::deactivate(previous_ttbr, self.asid, self.root.va_range_or_unit());
        }
        self.mark_inactive();
    }

    /// Checks that applying `updater` across `range` would not violate
    /// break-before-make requirements while the translation is live.
    ///
    /// The candidate update is applied to a scratch copy of each descriptor so
    /// the live table is never touched; any rejected or disallowed change is
    /// reported as [`MapError::BreakBeforeMakeViolation`] for the offending
    /// region.
    fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        self.root.visit_range(
            range,
            &mut |mr: &MemoryRegion, d: &Descriptor<R::Attributes>, level: usize| {
                let err = MapError::BreakBeforeMakeViolation(mr.clone());
                // Trial-run the update on a copy of the descriptor.
                let mut desc = UpdatableDescriptor::clone_from(d, level);
                updater(mr, &mut desc)
                    .and_then(|_| {
                        // NOTE(review): flag changes to a valid descriptor are
                        // rejected when `mr` is not a whole block at this
                        // level — confirm against `MemoryRegion::is_block`.
                        if d.is_valid() && !mr.is_block(level) && d.flags() != desc.flags() {
                            Err(())
                        } else {
                            Ok(())
                        }
                    })
                    .or(Err(err))
            },
        )
    }

    /// Invalidates TLB entries covering `range` and waits for the maintenance
    /// to complete. No-op while the mapping is inactive.
    fn invalidate_range(&self, range: &MemoryRegion) {
        if self.active() {
            self.root
                .visit_range(
                    range,
                    &mut |mr: &MemoryRegion, _: &Descriptor<R::Attributes>, _: usize| {
                        R::invalidate_va(mr.start());
                        Ok(())
                    },
                )
                .unwrap();
            wait_for_tlb_maintenance();
        }
    }

    /// Maps the given range of virtual addresses to the physical addresses
    /// starting at `pa`, with the given flags, subject to `constraints`.
    ///
    /// While the mapping is active, the update is first checked against
    /// break-before-make rules, and afterwards the affected TLB entries are
    /// invalidated.
    ///
    /// # Errors
    ///
    /// Returns an error if the range is invalid for this table, the flags are
    /// unsupported, or the update would violate break-before-make while live.
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: R::Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if self.active() {
            let c = |mr: &MemoryRegion, d: &mut UpdatableDescriptor<R::Attributes>| {
                // Physical address this chunk will map to, aligned down to the
                // granularity of the descriptor's level.
                let mask = !(paging::granularity_at_level(d.level()) - 1);
                let pa = (mr.start() - range.start() + pa.0) & mask;
                // Level-3 descriptors additionally carry the TABLE_OR_PAGE bit.
                let flags = if d.level() == 3 {
                    flags | R::Attributes::TABLE_OR_PAGE
                } else {
                    flags
                };
                d.set(PhysicalAddress(pa), flags)
            };
            self.check_range_bbm(range, &c)?;
        }
        self.root.map_range(range, pa, flags, constraints)?;
        self.invalidate_range(range);
        Ok(())
    }

    /// Applies the given updater function to all page table descriptors
    /// covering the given memory range, with break-before-make checking and
    /// TLB maintenance while the mapping is active.
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        if self.active() {
            self.check_range_bbm(range, f)?;
        }
        // Only wait for TLB maintenance when the table reported a change
        // (presumably what the returned bool indicates — TODO confirm) and the
        // translation is live.
        if self.root.modify_range(range, f, self.active())? && self.active() {
            wait_for_tlb_maintenance();
        }
        Ok(())
    }

    /// Applies the given callback to all page table descriptors covering the
    /// given memory range, without modifying them.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor<R::Attributes>, usize) -> Result<(), ()>,
    {
        self.root.walk_range(range, f)
    }

    /// Compacts subtables of the root table where possible.
    pub fn compact_subtables(&mut self) {
        self.root.compact_subtables();
    }

    /// Returns the physical address of the root table.
    pub fn root_address(&self) -> PhysicalAddress {
        self.root.to_physical()
    }

    /// Returns the ASID associated with this mapping.
    pub fn asid(&self) -> R::Asid {
        self.asid
    }

    /// Records one more activation of this mapping.
    pub fn mark_active(&self) {
        self.active_count.fetch_add(1, Ordering::Release);
    }

    /// Records that one activation of this mapping has ended.
    ///
    /// Uses a CAS loop rather than `fetch_sub` so that an unbalanced call can
    /// never transiently publish a wrapped-around count (`usize::MAX`) to
    /// other threads before panicking.
    ///
    /// # Panics
    ///
    /// Panics if the mapping was not active.
    pub fn mark_inactive(&self) {
        if self
            .active_count
            .fetch_update(Ordering::Release, Ordering::Acquire, |count| {
                count.checked_sub(1)
            })
            .is_err()
        {
            panic!("Underflow in active count.");
        }
    }
}
impl<T: Translation<R::Attributes>, R: TranslationRegime> Drop for Mapping<T, R> {
    /// Refuses to drop a mapping that is still marked active, since its
    /// tables may still be in use by the hardware.
    fn drop(&mut self) {
        assert!(!self.active(), "Dropping active page table mapping!");
    }
}