// Copyright 2022 The aarch64-paging Authors.
// This project is dual-licensed under Apache 2.0 and MIT terms.
// See LICENSE-APACHE and LICENSE-MIT for details.

//! Generic aarch64 page table manipulation functionality which doesn't assume anything about how
//! addresses are mapped.

use crate::MapError;
use crate::descriptor::{
    Descriptor, El1Attributes, El23Attributes, PagingAttributes, PhysicalAddress, Stage2Attributes,
    UpdatableDescriptor, VirtualAddress,
};

use crate::paging::private::IntoVaRange;
#[cfg(feature = "alloc")]
use alloc::alloc::{Layout, alloc_zeroed, dealloc, handle_alloc_error};
use bitflags::{Flags, bitflags};
#[cfg(all(not(test), target_arch = "aarch64"))]
use core::arch::asm;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
use core::ops::Range;
use core::ptr::NonNull;

const PAGE_SHIFT: usize = 12;

/// The pagetable level at which all entries are page mappings.
pub const LEAF_LEVEL: usize = 3;

/// The page size in bytes assumed by this library, 4 KiB.
pub const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

/// The number of address bits resolved in one level of page table lookup. This is a function of the
/// page size.
pub const BITS_PER_LEVEL: usize = PAGE_SHIFT - 3;

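// A hedged compile-time sanity check (illustrative only, not part of the original code): with
// the 4 KiB granule assumed above, each level of lookup resolves 9 bits of the address, so
// every page table holds 512 descriptors.
const _: () = assert!(BITS_PER_LEVEL == 9);
const _: () = assert!(PAGE_SIZE == 4096);
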
/// Which virtual address range a page table is for, i.e. which TTBR register to use for it.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum VaRange {
    /// The page table covers the bottom of the virtual address space (starting at address 0), so
    /// will be used with `TTBR0`.
    Lower,
    /// The page table covers the top of the virtual address space (ending at address
    /// 0xffff_ffff_ffff_ffff), so will be used with `TTBR1`.
    Upper,
}

/// Which translation regime a page table is for.
///
/// Note that these methods are not intended to be called directly, but rather through [`crate::Mapping`].
pub trait TranslationRegime: Copy + Clone + Debug + Eq + PartialEq + Send + Sync + 'static {
    type Attributes: PagingAttributes;

    /// The type of the ASID, or the unit type (`()`) if the translation regime does not support ASID.
    type Asid: Copy + Clone + Debug + Eq + PartialEq + Send + Sync + 'static;

    /// The type of the VA range, or the unit type (`()`) if the translation regime does not support the upper VA range.
    type VaRange: private::IntoVaRange
        + Copy
        + Clone
        + Debug
        + Eq
        + PartialEq
        + Send
        + Sync
        + 'static;

    /// Invalidates the translation for the given virtual address from the Translation Lookaside Buffer (TLB).
    fn invalidate_va(va: VirtualAddress);

    /// Activates the page table.
    ///
    /// # Safety
    ///
    /// See `Mapping::activate`.
    unsafe fn activate(
        root_pa: PhysicalAddress,
        asid: Self::Asid,
        va_range: Self::VaRange,
    ) -> usize;

    /// Deactivates the page table.
    ///
    /// # Safety
    ///
    /// See `Mapping::deactivate`.
    unsafe fn deactivate(previous_ttbr: usize, asid: Self::Asid, va_range: Self::VaRange);
}

mod private {
    use crate::paging::VaRange;

    pub trait IntoVaRange {
        fn into_va_range(self) -> VaRange;
    }

    impl IntoVaRange for VaRange {
        fn into_va_range(self) -> VaRange {
            self
        }
    }

    impl IntoVaRange for () {
        fn into_va_range(self) -> VaRange {
            VaRange::Lower
        }
    }
}

/// Non-secure EL1&0, stage 1 translation regime.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct El1And0;

impl TranslationRegime for El1And0 {
    type Attributes = El1Attributes;

    type Asid = usize;
    type VaRange = VaRange;

    fn invalidate_va(va: VirtualAddress) {
        #[allow(unused)]
        let va = va.0 >> 12;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: TLBI maintenance has no side effects that are observable by the
        // program.
        unsafe {
            asm!(
                "tlbi vaae1is, {va}",
                va = in(reg) va,
                options(preserves_flags, nostack),
            );
        }
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn activate(root_pa: PhysicalAddress, asid: usize, va_range: VaRange) -> usize {
        let mut previous_ttbr = usize::MAX;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that `root_pa` is a valid physical address of a page table,
        // and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
        unsafe {
            match va_range {
                VaRange::Lower => asm!(
                    "mrs   {previous_ttbr}, ttbr0_el1",
                    "msr   ttbr0_el1, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) root_pa.0 | (asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "mrs   {previous_ttbr}, ttbr1_el1",
                    "msr   ttbr1_el1, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) root_pa.0 | (asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
            }
        }
        previous_ttbr
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn deactivate(previous_ttbr: usize, asid: usize, va_range: VaRange) {
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: This just restores the previously saved value of `TTBRn_ELx`, which must have
        // been valid.
        unsafe {
            match va_range {
                VaRange::Lower => asm!(
                    "msr   ttbr0_el1, {ttbrval}",
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) asid << 48,
                    ttbrval = in(reg) previous_ttbr,
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "msr   ttbr1_el1, {ttbrval}",
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) asid << 48,
                    ttbrval = in(reg) previous_ttbr,
                    options(preserves_flags),
                ),
            }
        }
    }
}

/// Non-secure EL2&0, with VHE translation regime.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct El2And0;

impl TranslationRegime for El2And0 {
    type Attributes = El1Attributes;

    type Asid = usize;
    type VaRange = VaRange;

    fn invalidate_va(va: VirtualAddress) {
        #[allow(unused)]
        let va = va.0 >> 12;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: TLBI maintenance has no side effects that are observable by the
        // program.
        unsafe {
            asm!(
                "tlbi vae2is, {va}",
                va = in(reg) va,
                options(preserves_flags, nostack),
            );
        }
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn activate(root_pa: PhysicalAddress, asid: usize, va_range: VaRange) -> usize {
        let mut previous_ttbr = usize::MAX;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that `root_pa` is a valid physical address of a page table,
        // and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
        unsafe {
            match va_range {
                VaRange::Lower => asm!(
                    "mrs   {previous_ttbr}, ttbr0_el2",
                    "msr   ttbr0_el2, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) root_pa.0 | (asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "mrs   {previous_ttbr}, s3_4_c2_c0_1", // ttbr1_el2
                    "msr   s3_4_c2_c0_1, {ttbrval}",
                    "isb",
                    ttbrval = in(reg) root_pa.0 | (asid << 48),
                    previous_ttbr = out(reg) previous_ttbr,
                    options(preserves_flags),
                ),
            }
        }
        previous_ttbr
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn deactivate(previous_ttbr: usize, asid: usize, va_range: VaRange) {
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: This just restores the previously saved value of `TTBRn_ELx`, which must have
        // been valid.
        unsafe {
            match va_range {
                VaRange::Lower => asm!(
                    "msr   ttbr0_el2, {ttbrval}",
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) asid << 48,
                    ttbrval = in(reg) previous_ttbr,
                    options(preserves_flags),
                ),
                VaRange::Upper => asm!(
                    "msr   s3_4_c2_c0_1, {ttbrval}", // ttbr1_el2
                    "isb",
                    "tlbi  aside1, {asid}",
                    "dsb   nsh",
                    "isb",
                    asid = in(reg) asid << 48,
                    ttbrval = in(reg) previous_ttbr,
                    options(preserves_flags),
                ),
            }
        }
    }
}

/// Non-secure EL2 translation regime.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct El2;

impl TranslationRegime for El2 {
    type Attributes = El23Attributes;

    type Asid = ();
    type VaRange = ();

    fn invalidate_va(va: VirtualAddress) {
        #[allow(unused)]
        let va = va.0 >> 12;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: TLBI maintenance has no side effects that are observable by the
        // program.
        unsafe {
            asm!(
                "tlbi vae2is, {va}",
                va = in(reg) va,
                options(preserves_flags, nostack),
            );
        }
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn activate(root_pa: PhysicalAddress, asid: (), va_range: ()) -> usize {
        let mut previous_ttbr = usize::MAX;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that `root_pa` is a valid physical address of a page table,
        // and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
        unsafe {
            asm!(
                "mrs   {previous_ttbr}, ttbr0_el2",
                "msr   ttbr0_el2, {ttbrval}",
                "isb",
                ttbrval = in(reg) root_pa.0,
                previous_ttbr = out(reg) previous_ttbr,
                options(preserves_flags),
            );
        }
        previous_ttbr
    }

    unsafe fn deactivate(_previous_ttbr: usize, _asid: (), _va_range: ()) {
        panic!("EL2 page table can't safely be deactivated.");
    }
}

/// Secure EL3 translation regime.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct El3;

impl TranslationRegime for El3 {
    type Attributes = El23Attributes;

    type Asid = ();
    type VaRange = ();

    fn invalidate_va(va: VirtualAddress) {
        #[allow(unused)]
        let va = va.0 >> 12;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: TLBI maintenance has no side effects that are observable by the
        // program.
        unsafe {
            asm!(
                "tlbi vae3is, {va}",
                va = in(reg) va,
                options(preserves_flags, nostack),
            );
        }
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn activate(root_pa: PhysicalAddress, asid: (), va_range: ()) -> usize {
        let mut previous_ttbr = usize::MAX;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that `root_pa` is a valid physical address of a page table,
        // and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
        unsafe {
            asm!(
                "mrs   {previous_ttbr}, ttbr0_el3",
                "msr   ttbr0_el3, {ttbrval}",
                "isb",
                ttbrval = in(reg) root_pa.0,
                previous_ttbr = out(reg) previous_ttbr,
                options(preserves_flags),
            );
        }
        previous_ttbr
    }

    unsafe fn deactivate(_previous_ttbr: usize, _asid: (), _va_range: ()) {
        panic!("EL3 page table can't safely be deactivated.");
    }
}

/// Non-secure Stage 2 translation regime.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Stage2;

impl TranslationRegime for Stage2 {
    type Attributes = Stage2Attributes;

    type Asid = ();
    type VaRange = ();

    fn invalidate_va(va: VirtualAddress) {
        #[allow(unused)]
        let va = va.0 >> 12;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: TLBI maintenance has no side effects that are observable by the
        // program.
        unsafe {
            asm!(
                "tlbi ipas2e1is, {va}",
                va = in(reg) va,
                options(preserves_flags, nostack),
            );
        }
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn activate(root_pa: PhysicalAddress, asid: (), va_range: ()) -> usize {
        let mut previous_ttbr = usize::MAX;
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: We trust that `root_pa` is a valid physical address of a page table,
        // and the `Drop` implementation will reset `TTBRn_ELx` before it becomes invalid.
        unsafe {
            asm!(
                "mrs   {previous_ttbr}, vttbr_el2",
                "msr   vttbr_el2, {ttbrval}",
                "isb",
                ttbrval = in(reg) root_pa.0,
                previous_ttbr = out(reg) previous_ttbr,
                options(preserves_flags),
            );
        }
        previous_ttbr
    }

    #[allow(
        unused_mut,
        unused_assignments,
        unused_variables,
        reason = "used only on aarch64"
    )]
    unsafe fn deactivate(previous_ttbr: usize, asid: (), va_range: ()) {
        #[cfg(all(not(test), target_arch = "aarch64"))]
        // SAFETY: This just restores the previously saved value of `TTBRn_ELx`, which must have
        // been valid.
        unsafe {
            asm!(
                // For Stage 2, we invalidate using the current VTTBR (which has our VMID),
                // then restore the previous VTTBR.
                "tlbi  vmalls12e1",
                "dsb   nsh",
                "isb",
                "msr   vttbr_el2, {ttbrval}",
                "isb",
                ttbrval = in(reg) previous_ttbr,
                options(preserves_flags),
            );
        }
    }
}

/// A range of virtual addresses which may be mapped in a page table.
#[derive(Clone, Eq, PartialEq)]
pub struct MemoryRegion(Range<VirtualAddress>);

/// Returns the size in bytes of the address space covered by a single entry in the page table at
/// the given level.
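///
/// For example, with the 4 KiB granule this is 4 KiB at the leaf level (3), 2 MiB at level 2 and
/// 1 GiB at level 1.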
pub(crate) fn granularity_at_level(level: usize) -> usize {
    PAGE_SIZE << ((LEAF_LEVEL - level) * BITS_PER_LEVEL)
}

/// An implementation of this trait needs to be provided to the mapping routines, so that the
/// physical addresses used in the page tables can be converted into virtual addresses that can be
/// used to access their contents from the code.
pub trait Translation<A: PagingAttributes> {
    /// Allocates a zeroed page, which is already mapped, to be used for a new subtable of some
    /// pagetable. Returns both a pointer to the page and its physical address.
    fn allocate_table(&mut self) -> (NonNull<PageTable<A>>, PhysicalAddress);
    /// Deallocates the page which was previously allocated by [`allocate_table`](Self::allocate_table).
    ///
    /// # Safety
    ///
    /// The memory must have been allocated by `allocate_table` on the same `Translation`, and not
    /// yet deallocated.
    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable<A>>);

    /// Given the physical address of a subtable, returns the virtual address at which it is mapped.
    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable<A>>;
}

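// A minimal sketch of a `Translation` implementation (hypothetical, added for illustration;
// `ExampleIdTranslation` is not part of the crate): in an identity-mapped environment, the heap
// address of each table can double as its "physical" address.
#[cfg(all(test, feature = "alloc"))]
#[allow(dead_code)]
struct ExampleIdTranslation;

#[cfg(all(test, feature = "alloc"))]
impl<A: PagingAttributes> Translation<A> for ExampleIdTranslation {
    fn allocate_table(&mut self) -> (NonNull<PageTable<A>>, PhysicalAddress) {
        let table = PageTable::new();
        // Identity mapping: the table's virtual address is also its physical address.
        (table, PhysicalAddress(table.as_ptr() as usize))
    }

    unsafe fn deallocate_table(&mut self, page_table: NonNull<PageTable<A>>) {
        // SAFETY: Our caller promises that `page_table` was returned by `allocate_table`, which
        // allocated it with the global allocator via `PageTable::new`.
        unsafe { deallocate(page_table) };
    }

    fn physical_to_virtual(&self, pa: PhysicalAddress) -> NonNull<PageTable<A>> {
        NonNull::new(pa.0 as *mut PageTable<A>).expect("Invalid physical address")
    }
}
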
impl MemoryRegion {
    /// Constructs a new `MemoryRegion` for the given range of virtual addresses.
    ///
    /// The start is inclusive and the end is exclusive. Both will be aligned to the [`PAGE_SIZE`],
    /// with the start being rounded down and the end being rounded up.
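    ///
    /// # Example
    ///
    /// A sketch of the rounding behaviour described above:
    ///
    /// ```
    /// use aarch64_paging::paging::MemoryRegion;
    ///
    /// // 0x1234 is rounded down to 0x1000 and 0x2f00 is rounded up to 0x3000.
    /// let region = MemoryRegion::new(0x1234, 0x2f00);
    /// assert_eq!(region.len(), 0x2000);
    /// ```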
    pub const fn new(start: usize, end: usize) -> MemoryRegion {
        MemoryRegion(
            VirtualAddress(align_down(start, PAGE_SIZE))..VirtualAddress(align_up(end, PAGE_SIZE)),
        )
    }

    /// Returns the first virtual address of the memory range.
    pub const fn start(&self) -> VirtualAddress {
        self.0.start
    }

    /// Returns the first virtual address after the memory range.
    pub const fn end(&self) -> VirtualAddress {
        self.0.end
    }

    /// Returns the length of the memory region in bytes.
    pub const fn len(&self) -> usize {
        self.0.end.0 - self.0.start.0
    }

    /// Returns whether the memory region contains exactly 0 bytes.
    pub const fn is_empty(&self) -> bool {
        self.0.start.0 == self.0.end.0
    }

    fn split(&self, level: usize) -> ChunkedIterator<'_> {
        ChunkedIterator {
            range: self,
            granularity: granularity_at_level(level),
            start: self.0.start.0,
        }
    }

    /// Returns whether this region can be mapped at `level` using block mappings only.
    pub(crate) fn is_block(&self, level: usize) -> bool {
        let gran = granularity_at_level(level);
        (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
    }
}

impl From<Range<VirtualAddress>> for MemoryRegion {
    fn from(range: Range<VirtualAddress>) -> Self {
        Self::new(range.start.0, range.end.0)
    }
}

impl Display for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}

impl Debug for MemoryRegion {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        Display::fmt(self, f)
    }
}

bitflags! {
    /// Constraints on page table mappings
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    pub struct Constraints: usize {
        /// Block mappings are not permitted, only page mappings
        const NO_BLOCK_MAPPINGS    = 1 << 0;
        /// Use of the contiguous hint is not permitted
        const NO_CONTIGUOUS_HINT   = 1 << 1;
    }
}

/// A complete hierarchy of page tables including all levels.
pub struct RootTable<R: TranslationRegime, T: Translation<R::Attributes>> {
    table: PageTableWithLevel<T, R::Attributes>,
    translation: T,
    pa: PhysicalAddress,
    va_range: R::VaRange,
    _regime: PhantomData<R>,
}

impl<R: TranslationRegime<VaRange = ()>, T: Translation<R::Attributes>> RootTable<R, T> {
    /// Creates a new page table starting at the given root level.
    ///
    /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not
    /// currently supported by this library. The value of `TCR_EL1.T0SZ` must be set appropriately
    /// to match.
    pub fn new(translation: T, level: usize, regime: R) -> Self {
        Self::new_impl(translation, level, regime, ())
    }
}

impl<R: TranslationRegime<VaRange = VaRange>, T: Translation<R::Attributes>> RootTable<R, T> {
    /// Creates a new page table starting at the given root level.
    ///
    /// The level must be between 0 and 3; level -1 (for 52-bit addresses with LPA2) is not
    /// currently supported by this library. The value of `TCR_EL1.T0SZ` must be set appropriately
    /// to match.
    pub fn with_va_range(translation: T, level: usize, regime: R, va_range: VaRange) -> Self {
        Self::new_impl(translation, level, regime, va_range)
    }

    /// Returns the virtual address range for which this table is intended.
    ///
    /// This affects which TTBR register is used.
    pub fn va_range(&self) -> VaRange {
        self.va_range
    }
}

impl<R: TranslationRegime, T: Translation<R::Attributes>> RootTable<R, T> {
    fn new_impl(mut translation: T, level: usize, _regime: R, va_range: R::VaRange) -> Self {
        if level > LEAF_LEVEL {
            panic!("Invalid root table level {}.", level);
        }
        let (table, pa) = PageTableWithLevel::new(&mut translation, level);
        RootTable {
            table,
            translation,
            pa,
            va_range,
            _regime: PhantomData,
        }
    }

    /// Returns the size in bytes of the virtual address space which can be mapped in this page
    /// table.
    ///
    /// This is a function of the chosen root level.
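    ///
    /// For example, a root level of 1 covers 512 GiB of virtual address space, and a root level
    /// of 0 covers 256 TiB (the full 48-bit address space).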
    pub fn size(&self) -> usize {
        granularity_at_level(self.table.level) << BITS_PER_LEVEL
    }

    /// Recursively maps a range into the pagetable hierarchy starting at the root level, mapping
    /// the pages to the corresponding physical address range starting at `pa`. Block and page
    /// entries will be written to, but will only be mapped if `flags` contains [`PagingAttributes::VALID`].
    ///
    /// To unmap a range, pass `flags` which don't contain the [`PagingAttributes::VALID`] bit. In this case
    /// the `pa` is ignored.
    ///
    /// Returns an error if the virtual address range is out of the range covered by the pagetable,
    /// or if the `flags` argument has unsupported attributes set.
    pub fn map_range(
        &mut self,
        range: &MemoryRegion,
        pa: PhysicalAddress,
        flags: R::Attributes,
        constraints: Constraints,
    ) -> Result<(), MapError> {
        if flags.contains(R::Attributes::TABLE_OR_PAGE) {
            return Err(MapError::InvalidFlags(flags.bits()));
        }
        self.verify_region(range)?;
        self.table
            .map_range(&mut self.translation, range, pa, flags, constraints);
        Ok(())
    }

    /// Returns the physical address of the root table in memory.
    pub fn to_physical(&self) -> PhysicalAddress {
        self.pa
    }

    /// Returns a reference to the translation used for this page table.
    pub fn translation(&self) -> &T {
        &self.translation
    }

    /// Applies the provided updater function to the page table descriptors covering a given
    /// memory range.
    ///
    /// This may involve splitting block entries if the provided range is not currently mapped
    /// down to its precise boundaries. For visiting all the descriptors covering a memory range
    /// without potential splitting (and no descriptor updates), use
    /// [`walk_range`](Self::walk_range) instead.
    ///
    /// The updater function receives the following arguments:
    ///
    /// - The virtual address range mapped by each page table descriptor. A new descriptor will
    ///   have been allocated before the invocation of the updater function if a page table split
    ///   was needed.
    /// - An `UpdatableDescriptor`, which includes a mutable reference to the page table descriptor
    ///   that permits modifications and the level of a translation table the descriptor belongs to.
    ///
    /// The updater function should return:
    ///
    /// - `Ok` to continue updating the remaining entries.
    /// - `Err` to signal an error and stop updating the remaining entries.
    ///
    /// This should generally only be called while the page table is not active. In particular, any
    /// change that may require break-before-make per the architecture must be made while the page
    /// table is inactive. Mapping a previously unmapped memory range may be done while the page
    /// table is active. This function writes block and page entries, but only maps them if `flags`
    /// contains [`PagingAttributes::VALID`], otherwise the entries remain invalid.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the updater function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    ///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
    /// and modifying those would violate architectural break-before-make (BBM) requirements.
    pub(crate) fn modify_range<F>(
        &mut self,
        range: &MemoryRegion,
        f: &F,
        live: bool,
    ) -> Result<bool, MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<R::Attributes>) -> Result<(), ()> + ?Sized,
    {
        self.verify_region(range)?;
        self.table
            .modify_range::<F, R>(&mut self.translation, range, f, live)
    }

    pub(crate) fn va_range_or_unit(&self) -> R::VaRange {
        self.va_range
    }

    /// Applies the provided callback function to the page table descriptors covering a given
    /// memory range.
    ///
    /// The callback function receives the following arguments:
    ///
    /// - The range covered by the current step in the walk. This is always a subrange of `range`
    ///   even when the descriptor covers a region that exceeds it.
    /// - The page table descriptor itself.
    /// - The level of a translation table the descriptor belongs to.
    ///
    /// The callback function should return:
    ///
    /// - `Ok` to continue visiting the remaining entries.
    /// - `Err` to signal an error and stop visiting the remaining entries.
    ///
    /// # Errors
    ///
    /// Returns [`MapError::PteUpdateFault`] if the callback function returns an error.
    ///
    /// Returns [`MapError::RegionBackwards`] if the range is backwards.
    ///
    /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
    /// largest virtual address covered by the page table given its root level.
    pub fn walk_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor<R::Attributes>, usize) -> Result<(), ()>,
    {
        self.visit_range(range, &mut |mr, desc, level| {
            f(mr, desc, level).map_err(|_| MapError::PteUpdateFault(desc.bits()))
        })
    }

    /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
    /// freeing the subtable.
    ///
    /// This requires walking the whole hierarchy of pagetables, so you may not want to call it
    /// every time a region is unmapped. You could instead call it when the system is under memory
    /// pressure.
    pub fn compact_subtables(&mut self) {
        self.table.compact_subtables(&mut self.translation);
    }

    // Private version of `walk_range` using a closure that returns MapError on error
    pub(crate) fn visit_range<F>(&self, range: &MemoryRegion, f: &mut F) -> Result<(), MapError>
    where
        F: FnMut(&MemoryRegion, &Descriptor<R::Attributes>, usize) -> Result<(), MapError>,
    {
        self.verify_region(range)?;
        self.table.visit_range(&self.translation, range, f)
    }

    /// Returns the level of mapping used for the given virtual address:
    /// - `None` if it is unmapped
    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
    /// - `Some(level)` if it is mapped as a block at `level`
    #[cfg(all(test, feature = "alloc"))]
    pub(crate) fn mapping_level(&self, va: VirtualAddress) -> Option<usize> {
        self.table.mapping_level(&self.translation, va)
    }

    /// Checks whether the region is within range of the page table.
    fn verify_region(&self, region: &MemoryRegion) -> Result<(), MapError> {
        if region.end() < region.start() {
            return Err(MapError::RegionBackwards(region.clone()));
        }
        match self.va_range.into_va_range() {
            VaRange::Lower => {
                if (region.start().0 as isize) < 0 {
                    return Err(MapError::AddressRange(region.start()));
                } else if region.end().0 > self.size() {
                    return Err(MapError::AddressRange(region.end()));
                }
            }
            VaRange::Upper => {
                if region.start().0 as isize >= 0
                    || (region.start().0 as isize).unsigned_abs() > self.size()
                {
                    return Err(MapError::AddressRange(region.start()));
                }
            }
        }
        Ok(())
    }
}

impl<R: TranslationRegime, T: Translation<R::Attributes>> Debug for RootTable<R, T> {
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        writeln!(
            f,
            "RootTable {{ pa: {}, translation_regime: {:?}, va_range: {:?}, level: {}, table:",
            self.pa, PhantomData::<R>, self.va_range, self.table.level
        )?;
        self.table.fmt_indented(f, &self.translation, 0)?;
        write!(f, "}}")
    }
}

impl<R: TranslationRegime, T: Translation<R::Attributes>> Drop for RootTable<R, T> {
    fn drop(&mut self) {
        // SAFETY: We created the table in `RootTable::new` by calling `PageTableWithLevel::new`
        // with `self.translation`. Subtables were similarly created by
        // `PageTableWithLevel::split_entry` calling `PageTableWithLevel::new` with the same
        // translation.
        unsafe { self.table.free(&mut self.translation) }
    }
}

struct ChunkedIterator<'a> {
    range: &'a MemoryRegion,
    granularity: usize,
    start: usize,
}

impl Iterator for ChunkedIterator<'_> {
    type Item = MemoryRegion;

    fn next(&mut self) -> Option<MemoryRegion> {
        if !self.range.0.contains(&VirtualAddress(self.start)) {
            return None;
        }
        let end = self
            .range
            .0
            .end
            .0
            .min((self.start | (self.granularity - 1)) + 1);
        let c = MemoryRegion::new(self.start, end);
        self.start = end;
        Some(c)
    }
}

/// Smart pointer which owns a [`PageTable`] and knows what level it is at. This allows it to
/// implement methods to walk the page table hierarchy which require knowing the starting level.
#[derive(Debug)]
pub(crate) struct PageTableWithLevel<T: Translation<A>, A: PagingAttributes> {
    table: NonNull<PageTable<A>>,
    level: usize,
    _translation: PhantomData<T>,
}

// SAFETY: The underlying PageTable is process-wide and can be safely accessed from any thread
// with appropriate synchronization. This type manages ownership for the raw pointer.
unsafe impl<T: Translation<A> + Send, A: PagingAttributes> Send for PageTableWithLevel<T, A> {}

// SAFETY: &Self only allows reading from the page table, which is safe to do from any thread.
unsafe impl<T: Translation<A> + Sync, A: PagingAttributes> Sync for PageTableWithLevel<T, A> {}

impl<T: Translation<A>, A: PagingAttributes> PageTableWithLevel<T, A> {
    /// Allocates a new, zeroed, appropriately-aligned page table with the given translation,
    /// returning both a pointer to it and its physical address.
    fn new(translation: &mut T, level: usize) -> (Self, PhysicalAddress) {
        assert!(level <= LEAF_LEVEL);
        let (table, pa) = translation.allocate_table();
        (
            // Safe because the pointer has been allocated with the appropriate layout, and the
            // memory is zeroed which is valid initialisation for a PageTable.
            Self::from_pointer(table, level),
            pa,
        )
    }

    pub(crate) fn from_pointer(table: NonNull<PageTable<A>>, level: usize) -> Self {
        Self {
            table,
            level,
            _translation: PhantomData,
        }
    }

    /// Returns a reference to the descriptor corresponding to a given virtual address.
    fn get_entry(&self, va: VirtualAddress) -> &Descriptor<A> {
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        // SAFETY: We know that the pointer is properly aligned, dereferenceable and initialised,
        // and nothing else can mutate the page table while we hold a reference to the
        // PageTableWithLevel (assuming it is not currently active).
        let table = unsafe { self.table.as_ref() };
        &table.entries[index]
    }

    /// Returns a mutable reference to the descriptor corresponding to a given virtual address.
    fn get_entry_mut(&mut self, va: VirtualAddress) -> &mut Descriptor<A> {
        let shift = PAGE_SHIFT + (LEAF_LEVEL - self.level) * BITS_PER_LEVEL;
        let index = (va.0 >> shift) % (1 << BITS_PER_LEVEL);
        // SAFETY: We know that the pointer is properly aligned, dereferenceable and initialised, and
        // nothing else can access the page table while we hold a mutable reference to the
        // PageTableWithLevel (assuming it is not currently active).
        let table = unsafe { self.table.as_mut() };
        &mut table.entries[index]
    }

    /// Converts the descriptor in `entry` from a block mapping to a table mapping of
    /// the same range with the same attributes.
    fn split_entry(
        translation: &mut T,
        chunk: &MemoryRegion,
        entry: &mut Descriptor<A>,
        level: usize,
    ) -> Self {
        let granularity = granularity_at_level(level);
        let (mut subtable, subtable_pa) = Self::new(translation, level + 1);
        let old_flags = entry.flags();
        let old_pa = entry.output_address();
        if !old_flags.contains(A::TABLE_OR_PAGE) && (!old_flags.is_empty() || old_pa.0 != 0) {
            // `old` was a block entry, so we need to split it.
            // Recreate the entire block in the newly added table.
            let a = align_down(chunk.0.start.0, granularity);
            let b = align_up(chunk.0.end.0, granularity);
            subtable.map_range(
                translation,
                &MemoryRegion::new(a, b),
                old_pa,
                old_flags,
                Constraints::empty(),
            );
        }
        // If `old` was not a block entry, a newly zeroed page will be added to the hierarchy,
        // which might be live in this case. We rely on the release semantics of the set() below to
        // ensure that all observers that see the new entry will also see the zeroed contents.
        entry.set(subtable_pa, A::TABLE_OR_PAGE | A::VALID);
        subtable
    }

    /// Maps the given virtual address range in this pagetable to the corresponding physical
    /// address range starting at the given `pa`, recursing into any subtables as necessary. To map
    /// block and page entries, [`PagingAttributes::VALID`] must be set in `flags`.
    ///
    /// If `flags` doesn't contain [`PagingAttributes::VALID`] then the `pa` is ignored.
    ///
    /// Assumes that the entire range is within the range covered by this pagetable.
    ///
    /// Panics if the `translation` doesn't provide a corresponding physical address for some
    /// virtual address within the range, as there is no way to roll back to a safe state so this
    /// should be checked by the caller beforehand.
    fn map_range(
        &mut self,
        translation: &mut T,
        range: &MemoryRegion,
        mut pa: PhysicalAddress,
        flags: A,
        constraints: Constraints,
    ) {
        let level = self.level;
        let granularity = granularity_at_level(level);

        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);

            if level == LEAF_LEVEL {
                if flags.contains(A::VALID) {
                    // Put down a page mapping.
                    entry.set(pa, flags | A::TABLE_OR_PAGE);
                } else {
                    // Put down an invalid entry.
                    entry.set(PhysicalAddress(0), flags);
                }
            } else if !entry.is_table_or_page()
                && entry.flags() == flags
                && entry.output_address().0 == pa.0 - chunk.0.start.0 % granularity
            {
                // There is no need to split up a block mapping if it already maps the desired `pa`
                // with the desired `flags`. So do nothing in this case.
            } else if chunk.is_block(level)
                && !entry.is_table_or_page()
                && is_aligned(pa.0, granularity)
                && !constraints.contains(Constraints::NO_BLOCK_MAPPINGS)
                && level > 0
            {
                // Rather than leak the entire subhierarchy, only put down
                // a block mapping if the region is not already covered by
                // a table mapping.
                if flags.contains(A::VALID) {
                    entry.set(pa, flags);
                } else {
                    entry.set(PhysicalAddress(0), flags);
                }
            } else if chunk.is_block(level)
                && let Some(mut subtable) = entry.subtable(translation, level)
                && !flags.contains(A::VALID)
            {
                // There is a subtable but we can remove it. To avoid break-before-make violations
                // this is only allowed if the new mapping is not valid, i.e. we are unmapping the
                // memory.
                entry.set(PhysicalAddress(0), flags);

                // SAFETY: The subtable was created with the same translation by
                // `PageTableWithLevel::new`, and is no longer referenced by this table. We don't
                // reuse subtables so there must not be any other references to it.
                unsafe {
                    subtable.free(translation);
                }
            } else {
                let mut subtable = entry
                    .subtable(translation, level)
                    .unwrap_or_else(|| Self::split_entry(translation, &chunk, entry, level));
                subtable.map_range(translation, &chunk, pa, flags, constraints);
            }
            pa.0 += chunk.len();
        }
    }

    fn fmt_indented(
        &self,
        f: &mut Formatter,
        translation: &T,
        indentation: usize,
    ) -> Result<(), fmt::Error> {
        const WIDTH: usize = 3;
        // SAFETY: We know that the pointer is aligned, initialised and dereferenceable, and the
        // PageTable won't be mutated while we are using it.
        let table = unsafe { self.table.as_ref() };

        let mut i = 0;
        while i < table.entries.len() {
            if let Some(subtable) = table.entries[i].subtable(translation, self.level) {
                writeln!(
                    f,
                    "{:indentation$}{: <WIDTH$}    : {:?}",
                    "", i, table.entries[i],
                )?;
                subtable.fmt_indented(f, translation, indentation + 2)?;
                i += 1;
            } else {
                let first_contiguous = i;
                let first_entry = table.entries[i].bits();
                let granularity = granularity_at_level(self.level);
                while i < table.entries.len()
                    && (table.entries[i].bits() == first_entry
                        || (first_entry != 0
                            && table.entries[i].bits()
                                == first_entry + granularity * (i - first_contiguous)))
                {
                    i += 1;
                }
                if i - 1 == first_contiguous {
                    write!(f, "{:indentation$}{: <WIDTH$}    : ", "", first_contiguous)?;
                } else {
                    write!(
                        f,
                        "{:indentation$}{: <WIDTH$}-{: <WIDTH$}: ",
                        "",
                        first_contiguous,
                        i - 1,
                    )?;
                }
                if first_entry == 0 {
                    writeln!(f, "0")?;
                } else {
                    writeln!(f, "{:?}", Descriptor::<A>::new(first_entry))?;
                }
            }
        }
        Ok(())
    }

    /// Frees the memory used by this pagetable and all subtables. It is not valid to access the
    /// page table after this.
    ///
    /// # Safety
    ///
    /// The table and all its subtables must have been created by `PageTableWithLevel::new` with the
    /// same `translation`.
    unsafe fn free(&mut self, translation: &mut T) {
        // SAFETY: We know that the pointer is aligned, initialised and dereferenceable, and the
        // PageTable won't be mutated while we are freeing it.
        let table = unsafe { self.table.as_ref() };
        for entry in &table.entries {
            if let Some(mut subtable) = entry.subtable(translation, self.level) {
                // SAFETY: Our caller promised that all our subtables were created by
                // `PageTableWithLevel::new` with the same `translation`.
                unsafe {
                    subtable.free(translation);
                }
            }
        }
        // SAFETY: Our caller promised that the table was created by `PageTableWithLevel::new` with
        // `translation`, which then allocated it by calling `allocate_table` on `translation`.
        unsafe {
            // Actually free the memory used by the `PageTable`.
            translation.deallocate_table(self.table);
        }
    }

    /// Modifies a range of page table entries by applying a function to each page table entry.
    /// If the range is not aligned to block boundaries, block descriptors will be split up.
    fn modify_range<F, R: TranslationRegime<Attributes = A>>(
        &mut self,
        translation: &mut T,
        range: &MemoryRegion,
        f: &F,
        live: bool,
    ) -> Result<bool, MapError>
    where
        F: Fn(&MemoryRegion, &mut UpdatableDescriptor<A>) -> Result<(), ()> + ?Sized,
    {
        let mut modified = false;
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry_mut(chunk.0.start);
            if let Some(mut subtable) = entry.subtable(translation, level).or_else(|| {
                if !chunk.is_block(level) {
                    // The current chunk is not aligned to the block size at this level
                    // Split it before recursing to the next level
                    Some(Self::split_entry(translation, &chunk, entry, level))
                } else {
                    None
                }
            }) {
                modified |= subtable.modify_range::<F, R>(translation, &chunk, f, live)?;
            } else {
                let bits = entry.bits();
                let mut desc = UpdatableDescriptor::new(entry, level, live);
                f(&chunk, &mut desc).map_err(|_| MapError::PteUpdateFault(bits))?;

                if live && desc.updated() {
                    // Live descriptor was updated so TLB maintenance is needed
                    R::invalidate_va(chunk.start());
                    modified = true;
                }
            }
        }
        Ok(modified)
    }

    /// Walks a range of page table entries and passes each one to a caller-provided function.
    /// If the function returns an error, the walk is terminated and the error value is passed on.
    fn visit_range<F, E>(&self, translation: &T, range: &MemoryRegion, f: &mut F) -> Result<(), E>
    where
        F: FnMut(&MemoryRegion, &Descriptor<A>, usize) -> Result<(), E>,
    {
        let level = self.level;
        for chunk in range.split(level) {
            let entry = self.get_entry(chunk.0.start);
            if let Some(subtable) = entry.subtable(translation, level) {
                subtable.visit_range(translation, &chunk, f)?;
            } else {
                f(&chunk, entry, level)?;
            }
        }
        Ok(())
    }

    /// Looks for subtables whose entries are all empty and replaces them with a single empty entry,
    /// freeing the subtable.
    ///
    /// Returns true if this table is now entirely empty.
    pub fn compact_subtables(&mut self, translation: &mut T) -> bool {
        // SAFETY: We know that the pointer is aligned, initialised and dereferenceable, and the
        // PageTable won't be mutated while we are using it.
        let table = unsafe { self.table.as_mut() };

        let mut all_empty = true;
        for entry in &mut table.entries {
            if let Some(mut subtable) = entry.subtable(translation, self.level)
                && subtable.compact_subtables(translation)
            {
                entry.set(PhysicalAddress(0), A::default());

                // SAFETY: The subtable was created with the same translation by
                // `PageTableWithLevel::new`, and is no longer referenced by this table. We don't
                // reuse subtables so there must not be any other references to it.
                unsafe {
                    subtable.free(translation);
                }
            }
            if entry.bits() != 0 {
                all_empty = false;
            }
        }
        all_empty
    }

    /// Returns the level of mapping used for the given virtual address:
    /// - `None` if it is unmapped
    /// - `Some(LEAF_LEVEL)` if it is mapped as a single page
    /// - `Some(level)` if it is mapped as a block at `level`
    #[cfg(all(test, feature = "alloc"))]
    pub(crate) fn mapping_level(&self, translation: &T, va: VirtualAddress) -> Option<usize> {
        let entry = self.get_entry(va);
        if let Some(subtable) = entry.subtable(translation, self.level) {
            subtable.mapping_level(translation, va)
        } else if entry.is_valid() {
            Some(self.level)
        } else {
            None
        }
    }
}

/// A single level of a page table.
#[repr(C, align(4096))]
pub struct PageTable<A: PagingAttributes> {
    entries: [Descriptor<A>; 1 << BITS_PER_LEVEL],
}

impl<A: PagingAttributes> PageTable<A> {
    /// An empty (i.e. zeroed) page table. This may be useful for initialising statics.
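    ///
    /// For example (a hedged sketch; `El1Attributes` is just one possible attribute type, and a
    /// static used as a live table would also need interior mutability):
    ///
    /// ```ignore
    /// static ROOT_TABLE: PageTable<El1Attributes> = PageTable::EMPTY;
    /// ```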
    pub const EMPTY: Self = Self {
        entries: [Descriptor::EMPTY; 1 << BITS_PER_LEVEL],
    };

    /// Allocates a new zeroed, appropriately-aligned pagetable on the heap using the global
    /// allocator and returns a pointer to it.
    #[cfg(feature = "alloc")]
    pub fn new() -> NonNull<Self> {
        // SAFETY: Zeroed memory is a valid initialisation for a PageTable.
        unsafe { allocate_zeroed() }
    }

    /// Writes the in-memory representation of the page table to the byte slice referenced by `page`.
    ///
    /// Returns `Ok(())` on success, or `Err(())` if the size of the byte slice is not equal to the
    /// size of a page table.
    pub fn write_to(&self, page: &mut [u8]) -> Result<(), ()> {
        if page.len() != self.entries.len() * size_of::<Descriptor<A>>() {
            return Err(());
        }
        for (chunk, desc) in page
            .chunks_exact_mut(size_of::<Descriptor<A>>())
            .zip(self.entries.iter())
        {
            chunk.copy_from_slice(&desc.bits().to_le_bytes());
        }
        Ok(())
    }
}

impl<A: PagingAttributes> Default for PageTable<A> {
    fn default() -> Self {
        Self::EMPTY
    }
}

/// Allocates appropriately aligned heap space for a `T` and zeroes it.
///
/// # Safety
///
/// It must be valid to initialise the type `T` by simply zeroing its memory.
#[cfg(feature = "alloc")]
unsafe fn allocate_zeroed<T>() -> NonNull<T> {
    let layout = Layout::new::<T>();
    assert_ne!(layout.size(), 0);
    // SAFETY: We just checked that the layout has non-zero size.
    let pointer = unsafe { alloc_zeroed(layout) };
    if pointer.is_null() {
        handle_alloc_error(layout);
    }
    // SAFETY: We just checked that the pointer is non-null.
    unsafe { NonNull::new_unchecked(pointer as *mut T) }
}

/// Deallocates the heap space for a `T` which was previously allocated by `allocate_zeroed`.
///
/// # Safety
///
/// The memory must have been allocated by the global allocator, with the layout for `T`, and not
/// yet deallocated.
#[cfg(feature = "alloc")]
pub(crate) unsafe fn deallocate<T>(ptr: NonNull<T>) {
    let layout = Layout::new::<T>();
    // SAFETY: We delegate the safety requirements to our caller.
    unsafe {
        dealloc(ptr.as_ptr() as *mut u8, layout);
    }
}

const fn align_down(value: usize, alignment: usize) -> usize {
    value & !(alignment - 1)
}

const fn align_up(value: usize, alignment: usize) -> usize {
    ((value - 1) | (alignment - 1)) + 1
}

pub(crate) const fn is_aligned(value: usize, alignment: usize) -> bool {
    value & (alignment - 1) == 0
}
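
// Hedged compile-time illustrations of the alignment helpers above (added, not part of the
// original code). They rely on `alignment` being a power of two, as all callers in this module
// ensure.
const _: () = assert!(align_down(0x1234, PAGE_SIZE) == 0x1000);
const _: () = assert!(align_up(0x1234, PAGE_SIZE) == 0x2000);
const _: () = assert!(is_aligned(0x2000, PAGE_SIZE));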
1322
1323#[cfg(test)]
1324mod tests {
1325    use super::*;
1326    #[cfg(feature = "alloc")]
1327    use crate::target::TargetAllocator;
1328    #[cfg(feature = "alloc")]
1329    use alloc::{format, string::ToString, vec, vec::Vec};
1330
1331    #[cfg(feature = "alloc")]
1332    #[test]
1333    fn display_memory_region() {
1334        let region = MemoryRegion::new(0x1234, 0x56789);
1335        assert_eq!(
1336            &region.to_string(),
1337            "0x0000000000001000..0x0000000000057000"
1338        );
1339        assert_eq!(
1340            &format!("{:?}", region),
1341            "0x0000000000001000..0x0000000000057000"
1342        );
1343    }
1344
1345    #[test]
1346    fn subtract_virtual_address() {
1347        let low = VirtualAddress(0x12);
1348        let high = VirtualAddress(0x1234);
1349        assert_eq!(high - low, 0x1222);
1350    }
1351
1352    #[cfg(debug_assertions)]
1353    #[test]
1354    #[should_panic]
1355    fn subtract_virtual_address_overflow() {
1356        let low = VirtualAddress(0x12);
1357        let high = VirtualAddress(0x1234);
1358
1359        // This would overflow, so should panic.
1360        let _ = low - high;
1361    }
1362
1363    #[test]
1364    fn add_virtual_address() {
1365        assert_eq!(VirtualAddress(0x1234) + 0x42, VirtualAddress(0x1276));
1366    }
1367
1368    #[test]
1369    fn subtract_physical_address() {
1370        let low = PhysicalAddress(0x12);
1371        let high = PhysicalAddress(0x1234);
1372        assert_eq!(high - low, 0x1222);
1373    }
1374
1375    #[cfg(debug_assertions)]
1376    #[test]
1377    #[should_panic]
1378    fn subtract_physical_address_overflow() {
1379        let low = PhysicalAddress(0x12);
1380        let high = PhysicalAddress(0x1234);
1381
1382        // This would overflow, so should panic.
1383        let _ = low - high;
1384    }
1385
1386    #[test]
1387    fn add_physical_address() {
1388        assert_eq!(PhysicalAddress(0x1234) + 0x42, PhysicalAddress(0x1276));
1389    }

    #[test]
    fn invalid_descriptor() {
        let desc = Descriptor::<El1Attributes>::new(0usize);
        assert!(!desc.is_valid());
        assert!(!desc.flags().contains(El1Attributes::VALID));
    }

    #[test]
    fn set_descriptor() {
        const PHYSICAL_ADDRESS: usize = 0x12340000;
        let mut desc = Descriptor::<El1Attributes>::new(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(PHYSICAL_ADDRESS),
            El1Attributes::TABLE_OR_PAGE
                | El1Attributes::USER
                | El1Attributes::SWFLAG_1
                | El1Attributes::VALID,
        );
        assert!(desc.is_valid());
        assert_eq!(
            desc.flags(),
            El1Attributes::TABLE_OR_PAGE
                | El1Attributes::USER
                | El1Attributes::SWFLAG_1
                | El1Attributes::VALID
        );
        assert_eq!(desc.output_address(), PhysicalAddress(PHYSICAL_ADDRESS));
    }

    #[test]
    fn modify_descriptor_flags() {
        let mut desc = Descriptor::<El1Attributes>::new(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            El1Attributes::TABLE_OR_PAGE | El1Attributes::USER | El1Attributes::SWFLAG_1,
        );
        // Set DBM and SWFLAG_3, and clear VALID and SWFLAG_1.
        UpdatableDescriptor::new(&mut desc, 3, true)
            .modify_flags(
                El1Attributes::DBM | El1Attributes::SWFLAG_3,
                El1Attributes::VALID | El1Attributes::SWFLAG_1,
            )
            .unwrap();
        assert!(!desc.is_valid());
        assert_eq!(
            desc.flags(),
            El1Attributes::TABLE_OR_PAGE
                | El1Attributes::USER
                | El1Attributes::SWFLAG_3
                | El1Attributes::DBM
        );
    }

    #[test]
    #[should_panic]
    fn modify_descriptor_table_or_page_flag() {
        let mut desc = Descriptor::<El1Attributes>::new(0usize);
        assert!(!desc.is_valid());
        desc.set(
            PhysicalAddress(0x12340000),
            El1Attributes::TABLE_OR_PAGE | El1Attributes::USER | El1Attributes::SWFLAG_1,
        );
        // Changing the TABLE_OR_PAGE flag is not allowed, so this should
        // panic.
        UpdatableDescriptor::new(&mut desc, 3, false)
            .modify_flags(El1Attributes::VALID, El1Attributes::TABLE_OR_PAGE)
            .unwrap();
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn unaligned_chunks() {
        // Splitting at level 2 (`LEAF_LEVEL - 1`) breaks the region at 2 MiB
        // (512-page) boundaries.
        let region = MemoryRegion::new(0x0000_2000, 0x0020_5000);
        let chunks = region.split(LEAF_LEVEL - 1).collect::<Vec<_>>();
        assert_eq!(
            chunks,
            vec![
                MemoryRegion::new(0x0000_2000, 0x0020_0000),
                MemoryRegion::new(0x0020_0000, 0x0020_5000),
            ]
        );
    }

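    // Background for the raw bit patterns below (per the Armv8-A VMSAv8-64
    // descriptor format): bit 0 is the valid bit, and bit 1 distinguishes a
    // table or page descriptor (1) from a block descriptor (0), so only the
    // 0b11 pattern is reported as a table or page.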
    #[test]
    fn table_or_page() {
        // Invalid.
        assert!(!Descriptor::<El1Attributes>::new(0b00).is_table_or_page());
        assert!(!Descriptor::<El1Attributes>::new(0b10).is_table_or_page());

        // Block mapping.
        assert!(!Descriptor::<El1Attributes>::new(0b01).is_table_or_page());

        // Table or page.
        assert!(Descriptor::<El1Attributes>::new(0b11).is_table_or_page());
    }

    #[test]
    fn table_or_page_unknown_bits() {
        // Some RES0 and IGNORED bits that we set for the sake of the test.
        const UNKNOWN: usize = 1 << 50 | 1 << 52;

        // Invalid.
        assert!(!Descriptor::<El1Attributes>::new(UNKNOWN | 0b00).is_table_or_page());
        assert!(!Descriptor::<El1Attributes>::new(UNKNOWN | 0b10).is_table_or_page());

        // Block mapping.
        assert!(!Descriptor::<El1Attributes>::new(UNKNOWN | 0b01).is_table_or_page());

        // Table or page.
        assert!(Descriptor::<El1Attributes>::new(UNKNOWN | 0b11).is_table_or_page());
    }

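    // In the `Debug` dumps checked below, indentation reflects the page table
    // level, and runs of identical entries are collapsed into index ranges
    // (e.g. `0  -511: 0` for 512 empty entries).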
    #[cfg(feature = "alloc")]
    #[test]
    fn debug_roottable_empty() {
        let table = RootTable::with_va_range(TargetAllocator::new(0), 1, El1And0, VaRange::Lower);
        assert_eq!(
            format!("{table:?}"),
"RootTable { pa: 0x0000000000000000, translation_regime: PhantomData<aarch64_paging::paging::El1And0>, va_range: Lower, level: 1, table:
0  -511: 0
}"
        );
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn debug_roottable_contiguous() {
        let mut table =
            RootTable::with_va_range(TargetAllocator::new(0), 1, El1And0, VaRange::Lower);
        table
            .map_range(
                &MemoryRegion::new(PAGE_SIZE * 3, PAGE_SIZE * 6),
                PhysicalAddress(PAGE_SIZE * 3),
                El1Attributes::VALID | El1Attributes::NON_GLOBAL,
                Constraints::empty(),
            )
            .unwrap();
        table
            .map_range(
                &MemoryRegion::new(PAGE_SIZE * 6, PAGE_SIZE * 7),
                PhysicalAddress(PAGE_SIZE * 6),
                El1Attributes::VALID | El1Attributes::READ_ONLY,
                Constraints::empty(),
            )
            .unwrap();
        table
            .map_range(
                &MemoryRegion::new(PAGE_SIZE * 8, PAGE_SIZE * 9),
                PhysicalAddress(PAGE_SIZE * 8),
                El1Attributes::VALID | El1Attributes::READ_ONLY,
                Constraints::empty(),
            )
            .unwrap();
        assert_eq!(
            format!("{table:?}"),
"RootTable { pa: 0x0000000000000000, translation_regime: PhantomData<aarch64_paging::paging::El1And0>, va_range: Lower, level: 1, table:
0      : 0x00000000001003 (0x0000000000001000, El1Attributes(VALID | TABLE_OR_PAGE))
  0      : 0x00000000002003 (0x0000000000002000, El1Attributes(VALID | TABLE_OR_PAGE))
    0  -2  : 0\n    3  -5  : 0x00000000003803 (0x0000000000003000, El1Attributes(VALID | TABLE_OR_PAGE | NON_GLOBAL))
    6      : 0x00000000006083 (0x0000000000006000, El1Attributes(VALID | TABLE_OR_PAGE | READ_ONLY))
    7      : 0
    8      : 0x00000000008083 (0x0000000000008000, El1Attributes(VALID | TABLE_OR_PAGE | READ_ONLY))
    9  -511: 0
  1  -511: 0
1  -511: 0
}"
        );
    }

    #[cfg(feature = "alloc")]
    #[test]
    fn debug_roottable_contiguous_block() {
        let mut table =
            RootTable::with_va_range(TargetAllocator::new(0), 1, El1And0, VaRange::Lower);
        const BLOCK_SIZE: usize = PAGE_SIZE * 512;
        table
            .map_range(
                &MemoryRegion::new(BLOCK_SIZE * 3, BLOCK_SIZE * 6),
                PhysicalAddress(BLOCK_SIZE * 3),
                El1Attributes::VALID | El1Attributes::NON_GLOBAL,
                Constraints::empty(),
            )
            .unwrap();
        table
            .map_range(
                &MemoryRegion::new(BLOCK_SIZE * 6, BLOCK_SIZE * 7),
                PhysicalAddress(BLOCK_SIZE * 6),
                El1Attributes::VALID | El1Attributes::READ_ONLY,
                Constraints::empty(),
            )
            .unwrap();
        table
            .map_range(
                &MemoryRegion::new(BLOCK_SIZE * 8, BLOCK_SIZE * 9),
                PhysicalAddress(BLOCK_SIZE * 8),
                El1Attributes::VALID | El1Attributes::READ_ONLY,
                Constraints::empty(),
            )
            .unwrap();
        assert_eq!(
            format!("{table:?}"),
"RootTable { pa: 0x0000000000000000, translation_regime: PhantomData<aarch64_paging::paging::El1And0>, va_range: Lower, level: 1, table:
0      : 0x00000000001003 (0x0000000000001000, El1Attributes(VALID | TABLE_OR_PAGE))
  0  -2  : 0
  3  -5  : 0x00000000600801 (0x0000000000600000, El1Attributes(VALID | NON_GLOBAL))
  6      : 0x00000000c00081 (0x0000000000c00000, El1Attributes(VALID | READ_ONLY))
  7      : 0
  8      : 0x00000001000081 (0x0000000001000000, El1Attributes(VALID | READ_ONLY))
  9  -511: 0
1  -511: 0
}"
        );
    }
}