// aarch64_paging/lib.rs
1// Copyright 2022 The aarch64-paging Authors.
2// This project is dual-licensed under Apache 2.0 and MIT terms.
3// See LICENSE-APACHE and LICENSE-MIT for details.
4
5//! A library to manipulate AArch64 VMSA page tables.
6//!
7//! Currently it only supports:
8//! - stage 1 page tables
9//! - 4 KiB pages
10//! - EL3, NS-EL2, NS-EL2&0 and NS-EL1&0 translation regimes
11//!
12//! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
13//! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
14//! provide an implementation of the [`Translation`] trait and then use [`Mapping`] directly.
15//!
16//! # Example
17//!
18//! ```no_run
19//! # #[cfg(feature = "alloc")] {
20//! use aarch64_paging::{
21//! idmap::IdMap,
22//! descriptor::Attributes,
23//! paging::{MemoryRegion, TranslationRegime},
24//! };
25//!
26//! const ASID: usize = 1;
27//! const ROOT_LEVEL: usize = 1;
28//! const NORMAL_CACHEABLE: Attributes = Attributes::ATTRIBUTE_INDEX_1.union(Attributes::INNER_SHAREABLE);
29//!
30//! // Create a new EL1 page table with identity mapping.
31//! let mut idmap = IdMap::new(ASID, ROOT_LEVEL, TranslationRegime::El1And0);
32//! // Map a 2 MiB region of memory as read-write.
33//! idmap.map_range(
34//! &MemoryRegion::new(0x80200000, 0x80400000),
35//! NORMAL_CACHEABLE | Attributes::NON_GLOBAL | Attributes::VALID | Attributes::ACCESSED,
36//! ).unwrap();
37//! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
38//! unsafe {
39//! // Set `TTBR0_EL1` to activate the page table.
40//! idmap.activate();
41//! }
42//! # }
43//! ```
44
45#![no_std]
46#![deny(clippy::undocumented_unsafe_blocks)]
47#![deny(unsafe_op_in_unsafe_fn)]
48
49pub mod descriptor;
50#[cfg(feature = "alloc")]
51pub mod idmap;
52#[cfg(feature = "alloc")]
53pub mod linearmap;
54pub mod mair;
55pub mod paging;
56#[cfg(feature = "alloc")]
57pub mod target;
58
59#[cfg(any(test, feature = "alloc"))]
60extern crate alloc;
61
62#[cfg(target_arch = "aarch64")]
63use core::arch::asm;
64use core::sync::atomic::{AtomicUsize, Ordering};
65use descriptor::{
66 Attributes, Descriptor, DescriptorBits, PhysicalAddress, UpdatableDescriptor, VirtualAddress,
67};
68use paging::{Constraints, MemoryRegion, RootTable, Translation, TranslationRegime, VaRange};
69use thiserror::Error;
70
/// An error attempting to map some range in the page table.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MapError {
    /// The address requested to be mapped was out of the range supported by the page table
    /// configuration.
    #[error("Virtual address {0} out of range")]
    AddressRange(VirtualAddress),
    /// The address requested to be mapped was not valid for the mapping in use.
    #[error("Invalid virtual address {0} for mapping")]
    InvalidVirtualAddress(VirtualAddress),
    /// The end of the memory region is before the start.
    #[error("End of memory region {0} is before start.")]
    RegionBackwards(MemoryRegion),
    /// There was an error while updating a page table entry.
    #[error("Error updating page table entry {0:?}")]
    PteUpdateFault(DescriptorBits),
    /// The requested flags are not supported for this mapping.
    #[error("Flags {0:?} unsupported for mapping.")]
    InvalidFlags(Attributes),
    /// Updating the range would violate break-before-make rules, and the mapping is live.
    #[error("Cannot remap region {0} while translation is live.")]
    BreakBeforeMakeViolation(MemoryRegion),
}
94
/// Manages a page table (with configurable root level) and associated state.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
#[derive(Debug)]
pub struct Mapping<T: Translation> {
    /// The root of the page table hierarchy, along with the `Translation` used to manage it.
    root: RootTable<T>,
    /// The address space ID; ORed into the TTBR value (bits 48+) and passed to `tlbi` on
    /// deactivation. Must be 0 for translation regimes which don't support ASIDs.
    asid: usize,
    /// The number of outstanding activations of this page table, maintained by
    /// [`mark_active`](Self::mark_active) and [`mark_inactive`](Self::mark_inactive).
    active_count: AtomicUsize,
}
108
/// Issues an inner-shareable data synchronization barrier (DSB) followed by an instruction
/// synchronization barrier (ISB) so that execution does not proceed until all TLB maintenance is
/// completed.
///
/// On targets other than aarch64 this is a no-op.
fn wait_for_tlb_maintenance() {
    // SAFETY: Barriers have no side effects that are observable by the program.
    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("dsb ish", "isb", options(preserves_flags, nostack));
    }
}
119
120impl<T: Translation> Mapping<T> {
121 /// Creates a new page table with the given ASID, root level and translation mapping.
122 pub fn new(
123 translation: T,
124 asid: usize,
125 rootlevel: usize,
126 translation_regime: TranslationRegime,
127 va_range: VaRange,
128 ) -> Self {
129 if !translation_regime.supports_asid() && asid != 0 {
130 panic!("{:?} doesn't support ASID, must be 0.", translation_regime);
131 }
132 Self {
133 root: RootTable::new(translation, rootlevel, translation_regime, va_range),
134 asid,
135 active_count: AtomicUsize::new(0),
136 }
137 }
138
139 /// Returns a reference to the translation used for this page table.
140 pub fn translation(&self) -> &T {
141 self.root.translation()
142 }
143
144 /// Returns whether this mapping is currently active.
145 pub fn active(&self) -> bool {
146 self.active_count.load(Ordering::Acquire) != 0
147 }
148
149 /// Returns the size in bytes of the virtual address space which can be mapped in this page
150 /// table.
151 ///
152 /// This is a function of the chosen root level.
153 pub fn size(&self) -> usize {
154 self.root.size()
155 }
156
157 /// Activates the page table by programming the physical address of the root page table into
158 /// `TTBRn_ELx`, along with the provided ASID. The previous value of `TTBRn_ELx` is returned so
159 /// that it may later be restored by passing it to [`deactivate`](Self::deactivate).
160 ///
161 /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
162 ///
163 /// # Safety
164 ///
165 /// The caller must ensure that the page table doesn't unmap any memory which the program is
166 /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
167 /// dropped while it is still active on any CPU.
168 pub unsafe fn activate(&self) -> usize {
169 #[allow(unused_mut, unused_assignments)]
170 let mut previous_ttbr = usize::MAX;
171
172 // Mark the page tables as active before actually activating them, to avoid a race
173 // condition where a CPU observing the counter at zero might assume that the page tables
174 // are not active yet, while they have already been loaded into the TTBR of another CPU.
175 self.mark_active();
176
177 #[cfg(all(not(test), target_arch = "aarch64"))]
178 // SAFETY: We trust that self.root_address() returns a valid physical address of a page
179 // table, and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
180 unsafe {
181 // Ensure that all page table updates, as well as the increment of the active counter,
182 // are visible to all observers before proceeding
183 asm!("dmb ishst", "isb", options(preserves_flags),);
184 match (self.root.translation_regime(), self.root.va_range()) {
185 (TranslationRegime::El1And0, VaRange::Lower) => asm!(
186 "mrs {previous_ttbr}, ttbr0_el1",
187 "msr ttbr0_el1, {ttbrval}",
188 "isb",
189 ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
190 previous_ttbr = out(reg) previous_ttbr,
191 options(preserves_flags),
192 ),
193 (TranslationRegime::El1And0, VaRange::Upper) => asm!(
194 "mrs {previous_ttbr}, ttbr1_el1",
195 "msr ttbr1_el1, {ttbrval}",
196 "isb",
197 ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
198 previous_ttbr = out(reg) previous_ttbr,
199 options(preserves_flags),
200 ),
201 (TranslationRegime::El2And0, VaRange::Lower) => asm!(
202 "mrs {previous_ttbr}, ttbr0_el2",
203 "msr ttbr0_el2, {ttbrval}",
204 "isb",
205 ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
206 previous_ttbr = out(reg) previous_ttbr,
207 options(preserves_flags),
208 ),
209 (TranslationRegime::El2And0, VaRange::Upper) => asm!(
210 "mrs {previous_ttbr}, s3_4_c2_c0_1", // ttbr1_el2
211 "msr s3_4_c2_c0_1, {ttbrval}",
212 "isb",
213 ttbrval = in(reg) self.root_address().0 | (self.asid << 48),
214 previous_ttbr = out(reg) previous_ttbr,
215 options(preserves_flags),
216 ),
217 (TranslationRegime::El2, VaRange::Lower) => asm!(
218 "mrs {previous_ttbr}, ttbr0_el2",
219 "msr ttbr0_el2, {ttbrval}",
220 "isb",
221 ttbrval = in(reg) self.root_address().0,
222 previous_ttbr = out(reg) previous_ttbr,
223 options(preserves_flags),
224 ),
225 (TranslationRegime::El3, VaRange::Lower) => asm!(
226 "mrs {previous_ttbr}, ttbr0_el3",
227 "msr ttbr0_el3, {ttbrval}",
228 "isb",
229 ttbrval = in(reg) self.root_address().0,
230 previous_ttbr = out(reg) previous_ttbr,
231 options(preserves_flags),
232 ),
233 _ => {
234 panic!("Invalid combination of exception level and VA range.");
235 }
236 }
237 }
238 previous_ttbr
239 }
240
241 /// Deactivates the page table, by setting `TTBRn_ELx` to the provided value, and invalidating
242 /// the TLB for this page table's configured ASID. The provided TTBR value should be the value
243 /// returned by the preceding [`activate`](Self::activate) call.
244 ///
245 /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
246 ///
247 /// # Safety
248 ///
249 /// The caller must ensure that the previous page table which this is switching back to doesn't
250 /// unmap any memory which the program is using.
251 pub unsafe fn deactivate(&self, #[allow(unused)] previous_ttbr: usize) {
252 assert!(self.active());
253
254 #[cfg(all(not(test), target_arch = "aarch64"))]
255 // SAFETY: This just restores the previously saved value of `TTBRn_ELx`, which must have
256 // been valid.
257 unsafe {
258 match (self.root.translation_regime(), self.root.va_range()) {
259 (TranslationRegime::El1And0, VaRange::Lower) => asm!(
260 "msr ttbr0_el1, {ttbrval}",
261 "isb",
262 "tlbi aside1, {asid}",
263 "dsb nsh",
264 "isb",
265 asid = in(reg) self.asid << 48,
266 ttbrval = in(reg) previous_ttbr,
267 options(preserves_flags),
268 ),
269 (TranslationRegime::El1And0, VaRange::Upper) => asm!(
270 "msr ttbr1_el1, {ttbrval}",
271 "isb",
272 "tlbi aside1, {asid}",
273 "dsb nsh",
274 "isb",
275 asid = in(reg) self.asid << 48,
276 ttbrval = in(reg) previous_ttbr,
277 options(preserves_flags),
278 ),
279 (TranslationRegime::El2And0, VaRange::Lower) => asm!(
280 "msr ttbr0_el2, {ttbrval}",
281 "isb",
282 "tlbi aside1, {asid}",
283 "dsb nsh",
284 "isb",
285 asid = in(reg) self.asid << 48,
286 ttbrval = in(reg) previous_ttbr,
287 options(preserves_flags),
288 ),
289 (TranslationRegime::El2And0, VaRange::Upper) => asm!(
290 "msr s3_4_c2_c0_1, {ttbrval}", // ttbr1_el2
291 "isb",
292 "tlbi aside1, {asid}",
293 "dsb nsh",
294 "isb",
295 asid = in(reg) self.asid << 48,
296 ttbrval = in(reg) previous_ttbr,
297 options(preserves_flags),
298 ),
299 (TranslationRegime::El2, VaRange::Lower) => {
300 panic!("EL2 page table can't safety be deactivated.");
301 }
302 (TranslationRegime::El3, VaRange::Lower) => {
303 panic!("EL3 page table can't safety be deactivated.");
304 }
305 _ => {
306 panic!("Invalid combination of exception level and VA range.");
307 }
308 }
309 }
310 self.mark_inactive();
311 }
312
313 /// Checks whether the given range can be mapped or updated while the translation is live,
314 /// without violating architectural break-before-make (BBM) requirements.
315 fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
316 where
317 F: Fn(&MemoryRegion, &mut UpdatableDescriptor) -> Result<(), ()> + ?Sized,
318 {
319 self.root.visit_range(
320 range,
321 &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
322 let err = MapError::BreakBeforeMakeViolation(mr.clone());
323 let mut desc = UpdatableDescriptor::clone_from(d, level);
324
325 updater(mr, &mut desc)
326 .and_then(|_| {
327 if d.is_valid() && !mr.is_block(level) && d.flags() != desc.flags() {
328 // Cannot split a live block mapping
329 Err(())
330 } else {
331 Ok(())
332 }
333 })
334 .or(Err(err))
335 },
336 )
337 }
338
339 /// Invalidates `range` in the TLBs, so that permission changes are guaranteed to have taken
340 /// effect by the time the function returns
341 fn invalidate_range(&self, range: &MemoryRegion) {
342 if self.active() {
343 // If the mapping is active, no modifications are permitted that add or remove paging
344 // levels. This means it is not necessary to iterate over the entire range at page
345 // granularity, as invalidating a 2MiB block mapping or larger only requires a single
346 // TLBI call.
347 // If the mapping is not active, it was either never activated, or has previously been
348 // deactivated, at which point TLB invalidation would have occurred, and so no TLB
349 // maintenance is needed.
350 self.root
351 .visit_range(range, &mut |mr: &MemoryRegion, _: &Descriptor, _: usize| {
352 Ok(self.root.translation_regime().invalidate_va(mr.start()))
353 })
354 .unwrap();
355
356 wait_for_tlb_maintenance();
357 }
358 }
359
360 /// Maps the given range of virtual addresses to the corresponding range of physical addresses
361 /// starting at `pa`, with the given flags, taking the given constraints into account.
362 ///
363 /// To unmap a range, pass `flags` which don't contain the `Attributes::VALID` bit. In this case
364 /// the `pa` is ignored.
365 ///
366 /// This should generally only be called while the page table is not active. In particular, any
367 /// change that may require break-before-make per the architecture must be made while the page
368 /// table is inactive. Mapping a previously unmapped memory range may be done while the page
369 /// table is active. This function writes block and page entries, but only maps them if `flags`
370 /// contains `Attributes::VALID`, otherwise the entries remain invalid.
371 ///
372 /// # Errors
373 ///
374 /// Returns [`MapError::RegionBackwards`] if the range is backwards.
375 ///
376 /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
377 /// largest virtual address covered by the page table given its root level.
378 ///
379 /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
380 ///
381 /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
382 /// and modifying those would violate architectural break-before-make (BBM) requirements.
383 pub fn map_range(
384 &mut self,
385 range: &MemoryRegion,
386 pa: PhysicalAddress,
387 flags: Attributes,
388 constraints: Constraints,
389 ) -> Result<(), MapError> {
390 if self.active() {
391 let c = |mr: &MemoryRegion, d: &mut UpdatableDescriptor| {
392 let mask = !(paging::granularity_at_level(d.level()) - 1);
393 let pa = (mr.start() - range.start() + pa.0) & mask;
394 let flags = if d.level() == 3 {
395 flags | Attributes::TABLE_OR_PAGE
396 } else {
397 flags
398 };
399 d.set(PhysicalAddress(pa), flags)
400 };
401 self.check_range_bbm(range, &c)?;
402 }
403 self.root.map_range(range, pa, flags, constraints)?;
404 self.invalidate_range(range);
405 Ok(())
406 }
407
408 /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
409 ///
410 /// This may involve splitting block entries if the provided range is not currently mapped
411 /// down to its precise boundaries. For visiting all the descriptors covering a memory range
412 /// without potential splitting (and no descriptor updates), use
413 /// [`walk_range`](Self::walk_range) instead.
414 ///
415 /// The updater function receives the following arguments:
416 ///
417 /// - The virtual address range mapped by each page table descriptor. A new descriptor will
418 /// have been allocated before the invocation of the updater function if a page table split
419 /// was needed.
420 /// - An `UpdatableDescriptor`, which includes a mutable reference to the page table descriptor
421 /// that permits modifications and the level of a translation table the descriptor belongs to.
422 ///
423 /// The updater function should return:
424 ///
425 /// - `Ok` to continue updating the remaining entries.
426 /// - `Err` to signal an error and stop updating the remaining entries.
427 ///
428 /// This should generally only be called while the page table is not active. In particular, any
429 /// change that may require break-before-make per the architecture must be made while the page
430 /// table is inactive. Mapping a previously unmapped memory range may be done while the page
431 /// table is active.
432 ///
433 /// # Errors
434 ///
435 /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
436 ///
437 /// Returns [`MapError::RegionBackwards`] if the range is backwards.
438 ///
439 /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
440 /// largest virtual address covered by the page table given its root level.
441 ///
442 /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
443 /// and modifying those would violate architectural break-before-make (BBM) requirements.
444 pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
445 where
446 F: Fn(&MemoryRegion, &mut UpdatableDescriptor) -> Result<(), ()> + ?Sized,
447 {
448 if self.active() {
449 self.check_range_bbm(range, f)?;
450 }
451
452 // modify_range() might fail halfway, in which case its Err() result will be returned
453 // directly, and no barrier will be issued. The purpose of the barrier is to ensure that
454 // the new state is visible to all observers before proceeding, but in case of a failure,
455 // what that new state entails is uncertain anyway, and so there is no point in
456 // synchronizing it.
457 if self.root.modify_range(range, f, self.active())? && self.active() {
458 wait_for_tlb_maintenance();
459 }
460 Ok(())
461 }
462
463 /// Applies the provided function to a number of PTEs corresponding to a given memory range.
464 ///
465 /// The virtual address range passed to the callback function may be expanded compared to the
466 /// `range` parameter, due to alignment to block boundaries.
467 ///
468 /// # Errors
469 ///
470 /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
471 ///
472 /// Returns [`MapError::RegionBackwards`] if the range is backwards.
473 ///
474 /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
475 /// largest virtual address covered by the page table given its root level.
476 pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
477 where
478 F: FnMut(&MemoryRegion, &Descriptor, usize) -> Result<(), ()>,
479 {
480 self.root.walk_range(range, f)
481 }
482
483 /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
484 /// freeing the subtable.
485 ///
486 /// This requires walking the whole hierarchy of pagetables, so you may not want to call it
487 /// every time a region is unmapped. You could instead call it when the system is under memory
488 /// pressure.
489 pub fn compact_subtables(&mut self) {
490 self.root.compact_subtables();
491 }
492
493 /// Returns the physical address of the root table.
494 ///
495 /// This may be used to activate the page table by setting the appropriate TTBRn_ELx if you wish
496 /// to do so yourself rather than by calling [`activate`](Self::activate). Make sure to call
497 /// [`mark_active`](Self::mark_active) after doing so.
498 pub fn root_address(&self) -> PhysicalAddress {
499 self.root.to_physical()
500 }
501
502 /// Returns the ASID of the page table.
503 pub fn asid(&self) -> usize {
504 self.asid
505 }
506
507 /// Marks the page table as active.
508 ///
509 /// This should be called if the page table is manually activated by calling
510 /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
511 /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
512 /// checks to avoid violating break-before-make requirements.
513 ///
514 /// It is called automatically by [`activate`](Self::activate).
515 pub fn mark_active(&self) {
516 self.active_count.fetch_add(1, Ordering::Release);
517 }
518
519 /// Marks the page table as inactive.
520 ///
521 /// This may be called after manually disabling the use of the page table, such as by setting
522 /// the relevant TTBR to a different address.
523 ///
524 /// It is called automatically by [`deactivate`](Self::deactivate).
525 pub fn mark_inactive(&self) {
526 let l = self.active_count.fetch_sub(1, Ordering::Release);
527 if l == 0 {
528 // If the old value was 0, the new value underflowed
529 panic!("Underflow in active count.");
530 }
531 }
532}
533
534impl<T: Translation> Drop for Mapping<T> {
535 fn drop(&mut self) {
536 if self.active() {
537 panic!("Dropping active page table mapping!");
538 }
539 }
540}
541
#[cfg(test)]
mod tests {
    #[cfg(feature = "alloc")]
    use self::idmap::IdTranslation;
    #[cfg(feature = "alloc")]
    use super::*;

    // `Mapping::new` panics when a non-zero ASID is given for a translation regime which doesn't
    // support ASIDs; the EL2 regime is one such.
    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el2_asid() {
        Mapping::new(IdTranslation, 1, 1, TranslationRegime::El2, VaRange::Lower);
    }

    // As above, but for the EL3 translation regime.
    #[cfg(feature = "alloc")]
    #[test]
    #[should_panic]
    fn no_el3_asid() {
        Mapping::new(IdTranslation, 1, 1, TranslationRegime::El3, VaRange::Lower);
    }
}