// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.
//! A library to manipulate AArch64 VMSA page tables.
//!
//! Currently it only supports:
//! - stage 1 page tables
//! - EL1
//! - 4 KiB pages
//!
//! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
//! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
//! provide an implementation of the [`Translation`](paging::Translation) trait and then use
//! [`Mapping`] directly.
//!
//! # Example
//!
//! ```no_run
//! # #[cfg(feature = "alloc")] {
//! use aarch64_paging::{
//! idmap::IdMap,
//! paging::{Attributes, MemoryRegion},
//! };
//!
//! const ASID: usize = 1;
//! const ROOT_LEVEL: usize = 1;
//!
//! // Create a new page table with identity mapping.
//! let mut idmap = IdMap::new(ASID, ROOT_LEVEL);
//! // Map a 2 MiB region of memory as read-write.
//! idmap.map_range(
//! &MemoryRegion::new(0x80200000, 0x80400000),
//! Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
//! ).unwrap();
//! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
//! unsafe {
//! // Set `TTBR0_EL1` to activate the page table.
//! idmap.activate();
//! }
//! # }
//! ```
#![no_std]
#![deny(clippy::undocumented_unsafe_blocks)]
#[cfg(feature = "alloc")]
pub mod idmap;
#[cfg(feature = "alloc")]
pub mod linearmap;
pub mod paging;
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(target_arch = "aarch64")]
use core::arch::asm;
use core::fmt::{self, Display, Formatter};
use paging::{
Attributes, Constraints, Descriptor, MemoryRegion, PhysicalAddress, RootTable, Translation,
VaRange, VirtualAddress,
};
/// An error attempting to map some range in the page table.
#[derive(Clone, Debug, Eq, PartialEq)]
pub enum MapError {
/// The address requested to be mapped was out of the range supported by the page table
/// configuration.
AddressRange(VirtualAddress),
/// The address requested to be mapped was not valid for the mapping in use.
InvalidVirtualAddress(VirtualAddress),
/// The end of the memory region is before the start.
RegionBackwards(MemoryRegion),
/// There was an error while updating a page table entry.
PteUpdateFault(Descriptor),
/// The requested flags are not supported for this mapping.
InvalidFlags(Attributes),
/// Updating the range would violate break-before-make rules while the mapping is live.
BreakBeforeMakeViolation(MemoryRegion),
}
impl Display for MapError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
match self {
Self::AddressRange(va) => write!(f, "Virtual address {} out of range", va),
Self::InvalidVirtualAddress(va) => {
write!(f, "Invalid virtual address {} for mapping", va)
}
Self::RegionBackwards(region) => {
write!(f, "End of memory region {} is before start.", region)
}
Self::PteUpdateFault(desc) => {
write!(f, "Error updating page table entry {:?}", desc)
}
Self::InvalidFlags(flags) => {
write!(f, "Flags {flags:?} unsupported for mapping.")
}
Self::BreakBeforeMakeViolation(region) => {
write!(f, "Cannot remap region {region} while translation is live.")
}
}
}
}
/// Manages a level 1 page table and associated state.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics, you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
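///
/// # Example
///
/// A minimal sketch of that lifecycle, using the [`IdMap`](idmap::IdMap) wrapper (which
/// manages a `Mapping` internally); the addresses and attributes are illustrative only:
///
/// ```no_run
/// # #[cfg(feature = "alloc")] {
/// use aarch64_paging::{
/// idmap::IdMap,
/// paging::{Attributes, MemoryRegion},
/// };
///
/// let mut idmap = IdMap::new(1, 1);
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
/// ).unwrap();
/// // SAFETY: The program only uses memory within the region mapped above.
/// unsafe { idmap.activate(); }
/// // ...
/// // SAFETY: The previous page table still covers everything the program is using.
/// unsafe { idmap.deactivate(); }
/// // With the table inactive again, remap the region as read-only.
/// idmap.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::READ_ONLY | Attributes::VALID,
/// ).unwrap();
/// # }
/// ```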
#[derive(Debug)]
pub struct Mapping<T: Translation + Clone> {
root: RootTable<T>,
#[allow(unused)]
asid: usize,
#[allow(unused)]
previous_ttbr: Option<usize>,
}
impl<T: Translation + Clone> Mapping<T> {
/// Creates a new page table with the given ASID, root level and translation mapping.
pub fn new(translation: T, asid: usize, rootlevel: usize, va_range: VaRange) -> Self {
Self {
root: RootTable::new(translation, rootlevel, va_range),
asid,
previous_ttbr: None,
}
}
/// Returns whether this mapping is currently active.
pub fn active(&self) -> bool {
self.previous_ttbr.is_some()
}
/// Activates the page table by setting `TTBRn_EL1` to point to it, and saves the previous value
/// of `TTBRn_EL1` so that it may later be restored by [`deactivate`](Self::deactivate).
///
/// Panics if a previous value of `TTBRn_EL1` is already saved and not yet used by a call to
/// `deactivate`.
///
/// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
///
/// # Safety
///
/// The caller must ensure that the page table doesn't unmap any memory which the program is
/// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
/// dropped as long as its mappings are required, as it will automatically be deactivated when
/// it is dropped.
pub unsafe fn activate(&mut self) {
assert!(!self.active());
#[allow(unused)]
let mut previous_ttbr = usize::MAX;
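// The page table base physical address occupies the low bits of `TTBRn_EL1`, while the
// ASID goes in bits [63:48], hence the shift below.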
#[cfg(all(not(test), target_arch = "aarch64"))]
// SAFETY: Safe because we trust that self.root.to_physical() returns a valid physical
// address of a page table, and the `Drop` implementation will reset `TTBRn_EL1` before it
// becomes invalid.
unsafe {
match self.root.va_range() {
VaRange::Lower => asm!(
"mrs {previous_ttbr}, ttbr0_el1",
"msr ttbr0_el1, {ttbrval}",
"isb",
ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
previous_ttbr = out(reg) previous_ttbr,
options(preserves_flags),
),
VaRange::Upper => asm!(
"mrs {previous_ttbr}, ttbr1_el1",
"msr ttbr1_el1, {ttbrval}",
"isb",
ttbrval = in(reg) self.root.to_physical().0 | (self.asid << 48),
previous_ttbr = out(reg) previous_ttbr,
options(preserves_flags),
),
}
}
self.previous_ttbr = Some(previous_ttbr);
}
/// Deactivates the page table, by setting `TTBRn_EL1` back to the value it had before
/// [`activate`](Self::activate) was called, and invalidating the TLB for this page table's
/// configured ASID.
///
/// Panics if there is no saved `TTBRn_EL1` value because `activate` has not previously been
/// called.
///
/// In test builds or builds that do not target aarch64, the `TTBRn_EL1` access is omitted.
///
/// # Safety
///
/// The caller must ensure that the previous page table which this is switching back to doesn't
/// unmap any memory which the program is using.
pub unsafe fn deactivate(&mut self) {
assert!(self.active());
#[cfg(all(not(test), target_arch = "aarch64"))]
// SAFETY: Safe because this just restores the previously saved value of `TTBRn_EL1`, which
// must have been valid.
unsafe {
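// `tlbi aside1` takes the ASID in bits [63:48] of its register operand, so `self.asid << 48`
// below invalidates all TLB entries tagged with this page table's ASID.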
match self.root.va_range() {
VaRange::Lower => asm!(
"msr ttbr0_el1, {ttbrval}",
"isb",
"tlbi aside1, {asid}",
"dsb nsh",
"isb",
asid = in(reg) self.asid << 48,
ttbrval = in(reg) self.previous_ttbr.unwrap(),
options(preserves_flags),
),
VaRange::Upper => asm!(
"msr ttbr1_el1, {ttbrval}",
"isb",
"tlbi aside1, {asid}",
"dsb nsh",
"isb",
asid = in(reg) self.asid << 48,
ttbrval = in(reg) self.previous_ttbr.unwrap(),
options(preserves_flags),
),
}
}
self.previous_ttbr = None;
}
/// Checks whether the given range can be mapped or updated while the translation is live,
/// without violating architectural break-before-make (BBM) requirements.
///
/// A change to a live (valid) descriptor is accepted only if it clears the valid bit, or if
/// it leaves the output address and memory type unchanged and does not turn a non-global
/// mapping into a global one; changes which would require splitting a live block mapping are
/// always rejected.
fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
where
F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
{
self.walk_range(
range,
&mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
if d.is_valid() {
if !mr.is_block(level) {
// Cannot split a live block mapping
return Err(());
}
// Get the new flags and output address for this descriptor by applying
// the updater function to a copy
let (flags, oa) = {
let mut dd = *d;
updater(mr, &mut dd, level)?;
(dd.flags().ok_or(())?, dd.output_address())
};
if !flags.contains(Attributes::VALID) {
// Removing the valid bit is always ok
return Ok(());
}
if oa != d.output_address() {
// Cannot change output address on a live mapping
return Err(());
}
let desc_flags = d.flags().unwrap();
if (desc_flags ^ flags).intersects(Attributes::NORMAL) {
// Cannot change memory type
return Err(());
}
if (desc_flags - flags).contains(Attributes::NON_GLOBAL) {
// Cannot convert from non-global to global
return Err(());
}
}
Ok(())
},
)
.map_err(|_| MapError::BreakBeforeMakeViolation(range.clone()))?;
Ok(())
}
/// Maps the given range of virtual addresses to the corresponding range of physical addresses
/// starting at `pa`, with the given flags, taking the given constraints into account.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
/// table is active. This function writes block and page entries, but only maps them if `flags`
/// contains `Attributes::VALID`; otherwise the entries remain invalid.
///
/// # Errors
///
/// Returns [`MapError::RegionBackwards`] if the range is backwards.
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
///
/// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
///
/// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects live mappings and
/// modifying them would violate architectural break-before-make (BBM) requirements.
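///
/// # Example
///
/// A minimal sketch (not compiled as a doc-test); `MyTranslation` stands in for a
/// user-provided [`Translation`](paging::Translation) implementation and is not part of this
/// crate:
///
/// ```ignore
/// use aarch64_paging::{
/// paging::{Attributes, Constraints, MemoryRegion, PhysicalAddress, VaRange},
/// Mapping,
/// };
///
/// let mut mapping = Mapping::new(MyTranslation::new(), 1, 1, VaRange::Lower);
/// // Map a 2 MiB range of virtual addresses to physical memory starting at 0x8000_0000.
/// mapping.map_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// PhysicalAddress(0x8000_0000),
/// Attributes::NORMAL | Attributes::NON_GLOBAL | Attributes::VALID,
/// Constraints::empty(),
/// ).unwrap();
/// ```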
pub fn map_range(
&mut self,
range: &MemoryRegion,
pa: PhysicalAddress,
flags: Attributes,
constraints: Constraints,
) -> Result<(), MapError> {
if self.active() {
let c = |mr: &MemoryRegion, d: &mut Descriptor, lvl: usize| {
let mask = !(paging::granularity_at_level(lvl) - 1);
let pa = (mr.start() - range.start() + pa.0) & mask;
d.set(PhysicalAddress(pa), flags);
Ok(())
};
self.check_range_bbm(range, &c)?;
}
self.root.map_range(range, pa, flags, constraints)?;
#[cfg(target_arch = "aarch64")]
// SAFETY: Safe because this is just a memory barrier.
unsafe {
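// `dsb ishst` waits for the page table writes made above to complete, so that
// subsequent translation table walks observe them.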
asm!("dsb ishst");
}
Ok(())
}
/// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
///
/// This may involve splitting block entries if the provided range is not currently mapped
/// down to its precise boundaries. For visiting all the descriptors covering a memory range
/// without potential splitting (and no descriptor updates), use
/// [`walk_range`](Self::walk_range) instead.
///
/// This should generally only be called while the page table is not active. In particular, any
/// change that may require break-before-make per the architecture must be made while the page
/// table is inactive. Mapping a previously unmapped memory range may be done while the page
/// table is active.
///
/// # Errors
///
/// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
///
/// Returns [`MapError::RegionBackwards`] if the range is backwards.
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
///
/// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects live mappings and
/// modifying them would violate architectural break-before-make (BBM) requirements.
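///
/// # Example
///
/// A minimal sketch (not compiled as a doc-test), continuing from a `mapping` built as in
/// [`map_range`](Self::map_range); it marks every valid entry in the range as read-only
/// without changing its output address, which stays within break-before-make limits:
///
/// ```ignore
/// use aarch64_paging::paging::{Attributes, MemoryRegion};
///
/// mapping.modify_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// &|_range, entry, _level| {
/// if entry.is_valid() {
/// let flags = entry.flags().ok_or(())?;
/// entry.set(entry.output_address(), flags | Attributes::READ_ONLY);
/// }
/// Ok(())
/// },
/// ).unwrap();
/// ```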
pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
where
F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
{
if self.active() {
self.check_range_bbm(range, f)?;
}
self.root.modify_range(range, f)?;
#[cfg(target_arch = "aarch64")]
// SAFETY: Safe because this is just a memory barrier.
unsafe {
asm!("dsb ishst");
}
Ok(())
}
/// Applies the provided function to a number of PTEs corresponding to a given memory range.
///
/// The virtual address range passed to the callback function may be expanded compared to the
/// `range` parameter, due to alignment to block boundaries.
///
/// # Errors
///
/// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
///
/// Returns [`MapError::RegionBackwards`] if the range is backwards.
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
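///
/// # Example
///
/// A minimal sketch (not compiled as a doc-test) which counts the valid entries covering a
/// range, continuing from a `mapping` built as in [`map_range`](Self::map_range):
///
/// ```ignore
/// use aarch64_paging::paging::MemoryRegion;
///
/// let mut valid_entries = 0;
/// mapping.walk_range(
/// &MemoryRegion::new(0x80200000, 0x80400000),
/// &mut |_range, entry, _level| {
/// if entry.is_valid() {
/// valid_entries += 1;
/// }
/// Ok(())
/// },
/// ).unwrap();
/// ```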
pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
where
F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
{
self.root.walk_range(range, f)
}
}
impl<T: Translation + Clone> Drop for Mapping<T> {
fn drop(&mut self) {
if self.previous_ttbr.is_some() {
#[cfg(target_arch = "aarch64")]
// SAFETY: When activate was called the caller promised that they wouldn't drop the page
// table until its mappings were no longer needed.
unsafe {
self.deactivate();
}
}
}
}