// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! A library to manipulate AArch64 VMSA page tables.
//!
//! Currently it supports:
//!   - stage 1 and stage 2 page tables
//!   - 4 KiB pages
//!   - EL3, NS-EL2, NS-EL2&0 and NS-EL1&0 translation regimes
//!   - 64-bit descriptors
//!
//! Full support is provided for identity mapping ([`IdMap`](idmap::IdMap)) and linear mapping
//! ([`LinearMap`](linearmap::LinearMap)). If you want to use a different mapping scheme, you must
//! provide an implementation of the [`Translation`] trait and then use [`Mapping`] directly.
//!
//! # Example
//!
//! ```no_run
//! # #[cfg(feature = "alloc")] {
//! use aarch64_paging::{
//!     idmap::IdMap,
//!     descriptor::El1Attributes,
//!     paging::{MemoryRegion, El1And0},
//! };
//!
//! const ASID: usize = 1;
//! const ROOT_LEVEL: usize = 1;
//! const NORMAL_CACHEABLE: El1Attributes = El1Attributes::ATTRIBUTE_INDEX_1.union(El1Attributes::INNER_SHAREABLE);
//!
//! // Create a new EL1 page table with identity mapping.
//! let mut idmap = IdMap::with_asid(ASID, ROOT_LEVEL, El1And0);
//! // Map a 2 MiB region of memory as read-write.
//! idmap.map_range(
//!     &MemoryRegion::new(0x80200000, 0x80400000),
//!     NORMAL_CACHEABLE | El1Attributes::NON_GLOBAL | El1Attributes::VALID | El1Attributes::ACCESSED,
//! ).unwrap();
//! // SAFETY: Everything the program uses is within the 2 MiB region mapped above.
//! unsafe {
//!     // Set `TTBR0_EL1` to activate the page table.
//!     idmap.activate();
//! }
//! # }
//! ```

#![no_std]
#![deny(clippy::undocumented_unsafe_blocks)]
#![deny(unsafe_op_in_unsafe_fn)]
#![cfg_attr(docsrs, feature(doc_cfg))]

pub mod descriptor;
#[cfg(feature = "alloc")]
pub mod idmap;
#[cfg(feature = "alloc")]
pub mod linearmap;
pub mod mair;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod target;

#[cfg(any(test, feature = "alloc"))]
extern crate alloc;

#[cfg(target_arch = "aarch64")]
use core::arch::asm;
use core::sync::atomic::{AtomicUsize, Ordering};
use descriptor::{
    Descriptor, DescriptorBits, PagingAttributes, PhysicalAddress, UpdatableDescriptor,
    VirtualAddress,
};
use paging::{Constraints, MemoryRegion, RootTable, Translation, TranslationRegime, VaRange};
use thiserror::Error;

/// An error attempting to map some range in the page table.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MapError {
    /// The address requested to be mapped was out of the range supported by the page table
    /// configuration.
    #[error("Virtual address {0} out of range")]
    AddressRange(VirtualAddress),
    /// The address requested to be mapped was not valid for the mapping in use.
    #[error("Invalid virtual address {0} for mapping")]
    InvalidVirtualAddress(VirtualAddress),
    /// The end of the memory region is before the start.
    #[error("End of memory region {0} is before start.")]
    RegionBackwards(MemoryRegion),
    /// There was an error while updating a page table entry.
    #[error("Error updating page table entry {0:?}")]
    PteUpdateFault(DescriptorBits),
    /// The requested flags are not supported for this mapping.
    #[error("Flags {0:#x} unsupported for mapping.")]
    InvalidFlags(usize),
    /// Updating the range would violate break-before-make rules because the mapping is live.
    #[error("Cannot remap region {0} while translation is live.")]
    BreakBeforeMakeViolation(MemoryRegion),
}

/// Manages a level 1 page table and associated state.
///
/// Mappings should be added with [`map_range`](Self::map_range) before calling
/// [`activate`](Self::activate) to start using the new page table. To make changes which may
/// require break-before-make semantics you must first call [`deactivate`](Self::deactivate) to
/// switch back to a previous static page table, and then `activate` again after making the desired
/// changes.
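///
/// # Example
///
/// A minimal sketch of that lifecycle (not compiled here), assuming a user-supplied
/// [`Translation`] implementation called `MyTranslation`, a hypothetical regime `MyRegime`
/// whose `Asid` and `VaRange` types are `()`, and suitable `region`, `pa`, `flags` and
/// `Constraints::empty()` values; all of these names are assumptions made for illustration.
///
/// ```ignore
/// let mut mapping = Mapping::new(MyTranslation::new(), 1, MyRegime);
/// // Add mappings while the table is still inactive.
/// mapping.map_range(&region, pa, flags, Constraints::empty())?;
/// // SAFETY: The mapping covers all memory which the program is using.
/// let previous_ttbr = unsafe { mapping.activate() };
/// // ...
/// // Changes which may need break-before-make must be made with the table inactive again.
/// // SAFETY: The previous page table still maps everything the program is using.
/// unsafe { mapping.deactivate(previous_ttbr) };
/// ```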
#[derive(Debug)]
pub struct Mapping<T: Translation<R::Attributes>, R: TranslationRegime> {
    root: RootTable<R, T>,
    asid: R::Asid,
    active_count: AtomicUsize,
}

/// Issues an inner-shareable data synchronization barrier (DSB) followed by an instruction
/// synchronization barrier (ISB) so that execution does not proceed until all TLB maintenance is
/// completed.
fn wait_for_tlb_maintenance() {
    // SAFETY: Barriers have no side effects that are observable by the program.
    #[cfg(target_arch = "aarch64")]
    unsafe {
        asm!("dsb ish", "isb", options(preserves_flags, nostack));
    }
}

impl<T: Translation<R::Attributes>, R: TranslationRegime<Asid = (), VaRange = ()>> Mapping<T, R> {
    /// Creates a new page table with the given root level and translation mapping.
    pub fn new(translation: T, rootlevel: usize, regime: R) -> Self {
        Self::new_impl(RootTable::new(translation, rootlevel, regime), ())
    }
}

impl<T: Translation<R::Attributes>, R: TranslationRegime<Asid = usize, VaRange = VaRange>>
    Mapping<T, R>
{
    /// Creates a new page table with the given ASID, root level and translation mapping.
    pub fn with_asid_and_va_range(
        translation: T,
        asid: usize,
        rootlevel: usize,
        regime: R,
        va_range: VaRange,
    ) -> Self {
        Self::new_impl(
            RootTable::with_va_range(translation, rootlevel, regime, va_range),
            asid,
        )
    }
}

impl<T: Translation<R::Attributes>, R: TranslationRegime> Mapping<T, R> {
    fn new_impl(root: RootTable<R, T>, asid: R::Asid) -> Self {
        Self {
            root,
            asid,
            active_count: AtomicUsize::new(0),
        }
    }

    /// Returns a reference to the translation used for this page table.
    pub fn translation(&self) -> &T {
        self.root.translation()
    }

    /// Returns whether this mapping is currently active.
    pub fn active(&self) -> bool {
        self.active_count.load(Ordering::Acquire) != 0
    }

    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// This is a function of the chosen root level.
    pub fn size(&self) -> usize {
        self.root.size()
    }

    /// Activates the page table by programming the physical address of the root page table into
    /// `TTBRn_ELx`, along with the provided ASID. The previous value of `TTBRn_ELx` is returned so
    /// that it may later be restored by passing it to [`deactivate`](Self::deactivate).
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the page table doesn't unmap any memory which the program is
    /// using, or introduce aliases which break Rust's aliasing rules. The page table must not be
    /// dropped while it is still active on any CPU.
    pub unsafe fn activate(&self) -> usize {
        #[allow(unused_mut, unused_assignments)]
        let mut previous_ttbr = usize::MAX;

        // Mark the page tables as active before actually activating them, to avoid a race
        // condition where a CPU observing the counter at zero might assume that the page tables
        // are not active yet, while they have already been loaded into the TTBR of another CPU.
        self.mark_active();

        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that self.root_address() returns a valid physical address of a page
        // table, and dropping the mapping while it is still active panics, so `TTBRn_ELx` cannot
        // be left pointing at a page table which has been freed.
        unsafe {
            // Ensure that all page table updates, as well as the increment of the active counter,
            // are visible to all observers before proceeding.
            asm!("dmb ishst", "isb", options(preserves_flags));
            previous_ttbr =
                R::activate(self.root_address(), self.asid, self.root.va_range_or_unit());
        }
        previous_ttbr
    }

    /// Deactivates the page table by setting `TTBRn_ELx` to the provided value and invalidating
    /// the TLB for this page table's configured ASID. The provided TTBR value should be the value
    /// returned by the preceding [`activate`](Self::activate) call.
    ///
    /// In test builds or builds that do not target aarch64, the `TTBRn_ELx` access is omitted.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the previous page table which this is switching back to doesn't
    /// unmap any memory which the program is using.
    pub unsafe fn deactivate(&self, #[allow(unused)] previous_ttbr: usize) {
        assert!(self.active());

        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: This just restores the previously saved value of `TTBRn_ELx`, which must have
        // been valid.
        unsafe {
            R::deactivate(previous_ttbr, self.asid, self.root.va_range_or_unit());
        }
        self.mark_inactive();
    }

    /// Checks whether the given range can be mapped or updated while the translation is live,
    /// without violating architectural break-before-make (BBM) requirements.
    fn check_range_bbm<F>(&self, range: &MemoryRegion, updater: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        self.root.visit_range(
            range,
            &mut |mr: &MemoryRegion, d: &Descriptor<R::Attributes>, level: usize| {
                let err = MapError::BreakBeforeMakeViolation(mr.clone());
                let mut desc = UpdatableDescriptor::clone_from(d, level);

                updater(mr, &mut desc)
                    .and_then(|_| {
                        if d.is_valid() && !mr.is_block(level) && d.flags() != desc.flags() {
                            // Cannot split a live block mapping
                            Err(())
                        } else {
                            Ok(())
                        }
                    })
                    .or(Err(err))
            },
        )
    }

    /// Invalidates `range` in the TLBs, so that permission changes are guaranteed to have taken
    /// effect by the time the function returns.
    fn invalidate_range(&self, range: &MemoryRegion) {
        if self.active() {
            // If the mapping is active, no modifications are permitted that add or remove paging
            // levels. This means it is not necessary to iterate over the entire range at page
            // granularity, as invalidating a 2 MiB block mapping or larger only requires a single
            // TLBI call.
            // If the mapping is not active, it was either never activated, or has previously been
            // deactivated, at which point TLB invalidation would have occurred, and so no TLB
            // maintenance is needed.
            self.root
                .visit_range(
                    range,
                    &mut |mr: &MemoryRegion, _: &Descriptor<R::Attributes>, _: usize| {
                        R::invalidate_va(mr.start());
                        Ok(())
                    },
                )
                .unwrap();

            wait_for_tlb_maintenance();
        }
    }

    /// Maps the given range of virtual addresses to the corresponding range of physical addresses
    /// starting at `pa`, with the given flags, taking the given constraints into account.
    ///
    /// To unmap a range, pass `flags` which don't contain the [`PagingAttributes::VALID`] bit.
    /// In this case `pa` is ignored.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains [`PagingAttributes::VALID`], otherwise the entries remain invalid.
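    ///
    /// # Example
    ///
    /// A sketch of mapping a 2 MiB region and then unmapping it again, not compiled here:
    /// the `mapping` value, the `NORMAL_CACHEABLE` constant, `El1Attributes::empty()` and
    /// `Constraints::empty()` are all assumptions made for illustration.
    ///
    /// ```ignore
    /// let region = MemoryRegion::new(0x80200000, 0x80400000);
    /// // Map the region to physical addresses starting at 0x4000_0000.
    /// mapping.map_range(
    ///     &region,
    ///     PhysicalAddress(0x4000_0000),
    ///     NORMAL_CACHEABLE | El1Attributes::VALID | El1Attributes::ACCESSED,
    ///     Constraints::empty(),
    /// )?;
    /// // Unmap it again by writing entries without the VALID bit; `pa` is ignored in this case.
    /// mapping.map_range(&region, PhysicalAddress(0), El1Attributes::empty(), Constraints::empty())?;
    /// ```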
    ///
    /// # Errors
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::InvalidFlags`] if the `flags` argument has unsupported attributes set.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: R::Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if self.active() {
            let c = |mr: &MemoryRegion, d: &mut UpdatableDescriptor<R::Attributes>| {
                let mask = !(paging::granularity_at_level(d.level()) - 1);
                let pa = (mr.start() - range.start() + pa.0) & mask;
                let flags = if d.level() == 3 {
                    flags | R::Attributes::TABLE_OR_PAGE
                } else {
                    flags
                };
                d.set(PhysicalAddress(pa), flags)
            };
            self.check_range_bbm(range, &c)?;
        }
        self.root.map_range(range, pa, flags, constraints)?;
        self.invalidate_range(range);
        Ok(())
    }

    /// Applies the provided updater function to a number of PTEs corresponding to a given memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - An `UpdatableDescriptor`, which includes a mutable reference to the page table descriptor
    ///   that permits modifications and the level of the translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active.
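    ///
    /// # Example
    ///
    /// A sketch of an updater which rewrites the attributes of an identity-mapped region,
    /// modelled on the updater used internally by [`map_range`](Self::map_range). It is not
    /// compiled here: `NEW_FLAGS`, the `El1Attributes` names and the `.0` field access on
    /// the virtual address are assumptions made for illustration.
    ///
    /// ```ignore
    /// mapping.modify_range(&MemoryRegion::new(0x80200000, 0x80400000), &|mr, d| {
    ///     // Page descriptors (level 3) additionally need the TABLE_OR_PAGE bit.
    ///     let flags = if d.level() == 3 {
    ///         NEW_FLAGS | El1Attributes::TABLE_OR_PAGE
    ///     } else {
    ///         NEW_FLAGS
    ///     };
    ///     // Identity mapping, so the output address is the start of the region itself.
    ///     d.set(PhysicalAddress(mr.start().0), flags)
    /// })?;
    /// ```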
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        if self.active() {
            self.check_range_bbm(range, f)?;
        }

        // modify_range() might fail halfway, in which case its Err() result will be returned
        // directly, and no barrier will be issued. The purpose of the barrier is to ensure that
        // the new state is visible to all observers before proceeding, but in case of a failure,
        // what that new state entails is uncertain anyway, and so there is no point in
        // synchronizing it.
        if self.root.modify_range(range, f, self.active())? && self.active() {
            wait_for_tlb_maintenance();
        }
        Ok(())
    }

    /// Applies the provided function to a number of PTEs corresponding to a given memory range.
    ///
    /// The virtual address range passed to the callback function may be expanded compared to the
    /// `range` parameter, due to alignment to block boundaries.
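    ///
    /// # Example
    ///
    /// A sketch (not compiled here) which counts the valid entries covering a region; the
    /// `mapping` value is an assumed, already configured [`Mapping`].
    ///
    /// ```ignore
    /// let mut valid_entries = 0;
    /// mapping.walk_range(
    ///     &MemoryRegion::new(0x80200000, 0x80400000),
    ///     &mut |_mr, descriptor, _level| {
    ///         if descriptor.is_valid() {
    ///             valid_entries += 1;
    ///         }
    ///         Ok(())
    ///     },
    /// )?;
    /// ```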
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor<R::Attributes>, usize) -> Result<(), ()>,
    {
        self.root.walk_range(range, f)
    }

    /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
    /// freeing the subtable.
    ///
    /// This requires walking the whole hierarchy of pagetables, so you may not want to call it
    /// every time a region is unmapped. You could instead call it when the system is under memory
    /// pressure.
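    ///
    /// # Example
    ///
    /// A sketch (not compiled here): unmap a region by rewriting its entries without the
    /// `VALID` bit, then reclaim any subtables which are now completely empty. `EMPTY_FLAGS`,
    /// `region` and `Constraints::empty()` are assumptions made for illustration.
    ///
    /// ```ignore
    /// mapping.map_range(&region, PhysicalAddress(0), EMPTY_FLAGS, Constraints::empty())?;
    /// mapping.compact_subtables();
    /// ```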
    pub fn compact_subtables(&mut self) {
        self.root.compact_subtables();
    }

    /// Returns the physical address of the root table.
    ///
    /// This may be used to activate the page table by setting the appropriate `TTBRn_ELx` if you
    /// wish to do so yourself rather than by calling [`activate`](Self::activate). Make sure to
    /// call [`mark_active`](Self::mark_active) after doing so.
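    ///
    /// # Example
    ///
    /// A sketch (not compiled here) of manual activation; `write_ttbr0_el1` is a hypothetical
    /// helper which programs `TTBR0_EL1` with the given root address and ASID.
    ///
    /// ```ignore
    /// let root = mapping.root_address();
    /// // SAFETY: The page table maps all memory which the program is using.
    /// unsafe { write_ttbr0_el1(root, mapping.asid()) };
    /// // Record that the table is now live, so that later calls to `map_range` and
    /// // `modify_range` perform their break-before-make checks.
    /// mapping.mark_active();
    /// ```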
    pub fn root_address(&self) -> PhysicalAddress {
        self.root.to_physical()
    }

    /// Returns the ASID of the page table.
    pub fn asid(&self) -> R::Asid {
        self.asid
    }

    /// Marks the page table as active.
    ///
    /// This should be called if the page table is manually activated by calling
    /// [`root_address`](Self::root_address) and setting some TTBR with it. This will cause
    /// [`map_range`](Self::map_range) and [`modify_range`](Self::modify_range) to perform extra
    /// checks to avoid violating break-before-make requirements.
    ///
    /// It is called automatically by [`activate`](Self::activate).
    pub fn mark_active(&self) {
        self.active_count.fetch_add(1, Ordering::Release);
    }

    /// Marks the page table as inactive.
    ///
    /// This may be called after manually disabling the use of the page table, such as by setting
    /// the relevant TTBR to a different address.
    ///
    /// It is called automatically by [`deactivate`](Self::deactivate).
    pub fn mark_inactive(&self) {
        let l = self.active_count.fetch_sub(1, Ordering::Release);
        if l == 0 {
            // If the old value was 0, the new value underflowed
            panic!("Underflow in active count.");
        }
    }
}

impl<T: Translation<R::Attributes>, R: TranslationRegime> Drop for Mapping<T, R> {
    fn drop(&mut self) {
        if self.active() {
            panic!("Dropping active page table mapping!");
        }
    }
}