page_table_multiarch/
lib.rs

#![cfg_attr(not(test), no_std)]
#![cfg_attr(doc, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

#[macro_use]
extern crate log;

mod arch;
mod bits64;

use core::{fmt::Debug, marker::PhantomData};

use memory_addr::{MemoryAddr, PhysAddr, VirtAddr};

pub use self::arch::*;
pub use self::bits64::PageTable64;

#[doc(no_inline)]
pub use page_table_entry::{GenericPTE, MappingFlags};

/// The error type for page table operation failures.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum PagingError {
    /// Cannot allocate memory.
    NoMemory,
    /// The address is not aligned to the page size.
    NotAligned,
    /// The mapping is not present.
    NotMapped,
    /// The mapping is already present.
    AlreadyMapped,
    /// The page table entry represents a huge page, but the target physical
    /// frame is 4K in size.
    MappedToHugePage,
}

/// The specialized `Result` type for page table operations.
pub type PagingResult<T = ()> = Result<T, PagingError>;
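
// Illustrative caller-side helper (not part of the crate API; the function name
// is made up): shows how a specific `PagingError` variant can be matched out of
// a `PagingResult`.
#[allow(dead_code)]
fn is_already_mapped(result: PagingResult) -> bool {
    matches!(result, Err(PagingError::AlreadyMapped))
}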

/// The **architecture-dependent** metadata that must be provided for
/// [`PageTable64`].
pub trait PagingMetaData: Sync + Send {
    /// The number of levels of the hardware page table.
    const LEVELS: usize;
    /// The maximum number of bits of physical address.
    const PA_MAX_BITS: usize;
    /// The maximum number of bits of virtual address.
    const VA_MAX_BITS: usize;

    /// The maximum physical address.
    const PA_MAX_ADDR: usize = (1 << Self::PA_MAX_BITS) - 1;

    /// The type of virtual addresses to be translated in this page table.
    ///
    /// This associated type allows more flexible use of page table structs like
    /// [`PageTable64`], for example, to implement EPTs (extended page tables).
    type VirtAddr: MemoryAddr;
    // (The `MemoryAddr` bound ensures it can be converted from/to `usize` and is
    // trivially copyable.)

    /// Whether a given physical address is valid.
    #[inline]
    fn paddr_is_valid(paddr: usize) -> bool {
        paddr <= Self::PA_MAX_ADDR // default
    }

    /// Whether a given virtual address is valid.
    #[inline]
    fn vaddr_is_valid(vaddr: usize) -> bool {
        // default: the top bits must be sign-extended
        let top_mask = usize::MAX << (Self::VA_MAX_BITS - 1);
        (vaddr & top_mask) == 0 || (vaddr & top_mask) == top_mask
    }

    /// Flushes the TLB.
    ///
    /// If `vaddr` is [`None`], flushes the entire TLB. Otherwise, flushes the TLB
    /// entry at the given virtual address.
    fn flush_tlb(vaddr: Option<Self::VirtAddr>);
}
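
// A minimal sketch (hypothetical, not part of this crate) of a `PagingMetaData`
// implementation for a 4-level page table with 48-bit virtual and 52-bit
// physical addresses. `PA_MAX_ADDR`, `paddr_is_valid` and `vaddr_is_valid` keep
// their default implementations above; only `flush_tlb` must be written, and it
// is stubbed out here.
#[allow(dead_code)]
struct ExampleMetaData;

impl PagingMetaData for ExampleMetaData {
    const LEVELS: usize = 4;
    const PA_MAX_BITS: usize = 52;
    const VA_MAX_BITS: usize = 48;
    type VirtAddr = memory_addr::VirtAddr;

    fn flush_tlb(_vaddr: Option<Self::VirtAddr>) {
        // A real port would issue the architecture's TLB-invalidate instruction
        // here (e.g. `invlpg` on x86_64 or `tlbi` on AArch64); this sketch is a
        // no-op.
    }
}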

/// The low-level **OS-dependent** helpers that must be provided for
/// [`PageTable64`].
pub trait PagingHandler: Sized {
    /// Request to allocate a 4K-sized physical frame.
    fn alloc_frame() -> Option<PhysAddr>;
    /// Request to free an allocated physical frame.
    fn dealloc_frame(paddr: PhysAddr);
    /// Returns a virtual address that maps to the given physical address.
    ///
    /// Used to access the physical memory directly in the page table
    /// implementation.
    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr;
}
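
// A minimal sketch (hypothetical, not part of this crate) of a `PagingHandler`
// for an OS that direct-maps all physical memory at a fixed offset. The
// allocator hooks are stubs; a real kernel would forward them to its frame
// allocator. The address conversions assume `memory_addr`'s `From<usize>` and
// `as_usize` helpers.
#[allow(dead_code)]
struct ExampleHandler;

impl PagingHandler for ExampleHandler {
    fn alloc_frame() -> Option<PhysAddr> {
        // Returning `None` signals allocation failure (cf. `PagingError::NoMemory`);
        // a real implementation would hand out a 4K frame from the kernel's
        // frame allocator.
        None
    }

    fn dealloc_frame(_paddr: PhysAddr) {
        // Give the frame back to the kernel's frame allocator.
    }

    fn phys_to_virt(paddr: PhysAddr) -> VirtAddr {
        // Hypothetical base of the kernel's linear mapping of physical memory;
        // the value is illustrative only.
        const PHYS_OFFSET: usize = 0xffff_8000_0000_0000;
        VirtAddr::from(paddr.as_usize() + PHYS_OFFSET)
    }
}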

/// The page sizes supported by the hardware page table.
#[repr(usize)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum PageSize {
    /// Size of 4 kilobytes (2<sup>12</sup> bytes).
    Size4K = 0x1000,
    /// Size of 2 megabytes (2<sup>21</sup> bytes).
    Size2M = 0x20_0000,
    /// Size of 1 gigabyte (2<sup>30</sup> bytes).
    Size1G = 0x4000_0000,
}

impl PageSize {
    /// Whether this page size is considered huge (larger than 4K).
    pub const fn is_huge(self) -> bool {
        matches!(self, Self::Size1G | Self::Size2M)
    }

    /// Checks whether a given address or size is aligned to the page size.
    pub const fn is_aligned(self, addr_or_size: usize) -> bool {
        memory_addr::is_aligned(addr_or_size, self as usize)
    }

    /// Returns the offset of the address within a page of this size.
    pub const fn align_offset(self, addr: usize) -> usize {
        memory_addr::align_offset(addr, self as usize)
    }
}

impl From<PageSize> for usize {
    #[inline]
    fn from(size: PageSize) -> usize {
        size as usize
    }
}
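
// Illustrative use of `PageSize` (the concrete addresses are made up): huge-page
// mappings require both the address and the mapped size to be aligned to the
// page size, and `align_offset` extracts the in-page offset.
#[allow(dead_code)]
fn page_size_demo() {
    let size = PageSize::Size2M;
    assert!(size.is_huge());
    assert!(size.is_aligned(0x20_0000)); // exactly one 2M page
    assert_eq!(size.align_offset(0x20_1234), 0x1234); // offset within the 2M page
    assert_eq!(usize::from(size), 0x20_0000);
}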

/// This type indicates that the mapping of a virtual address has been changed.
///
/// The caller can call [`TlbFlush::flush`] to flush TLB entries related to
/// the given virtual address, or call [`TlbFlush::ignore`] if it knows the
/// TLB will be flushed later.
#[must_use]
pub struct TlbFlush<M: PagingMetaData>(M::VirtAddr, PhantomData<M>);

impl<M: PagingMetaData> TlbFlush<M> {
    pub(crate) const fn new(vaddr: M::VirtAddr) -> Self {
        Self(vaddr, PhantomData)
    }

    /// Don't flush the TLB and silence the “must be used” warning.
    pub fn ignore(self) {}

    /// Flush the TLB entry for the given virtual address to ensure the mapping
    /// changes take effect.
    pub fn flush(self) {
        M::flush_tlb(Some(self.0))
    }
}
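
// Illustrative caller-side pattern (hypothetical function, not crate API):
// because `TlbFlush` is `#[must_use]`, the caller has to decide explicitly
// whether to flush the single entry now or defer to a later full flush.
#[allow(dead_code)]
fn apply_tlb_flush<M: PagingMetaData>(flush: TlbFlush<M>, will_flush_all_later: bool) {
    if will_flush_all_later {
        // A full TLB flush is planned later (e.g. via `TlbFlushAll`), so the
        // per-address flush can be skipped.
        flush.ignore();
    } else {
        // Flush the TLB entry for the remapped address right away.
        flush.flush();
    }
}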

/// This type indicates that the page table mappings have been changed.
///
/// The caller can call [`TlbFlushAll::flush_all`] to flush the entire TLB, or call
/// [`TlbFlushAll::ignore`] if it knows the TLB will be flushed later.
#[must_use]
pub struct TlbFlushAll<M: PagingMetaData>(PhantomData<M>);

impl<M: PagingMetaData> TlbFlushAll<M> {
    pub(crate) const fn new() -> Self {
        Self(PhantomData)
    }

    /// Don't flush the TLB and silence the “must be used” warning.
    pub fn ignore(self) {}

    /// Flush the entire TLB.
    pub fn flush_all(self) {
        M::flush_tlb(None)
    }
}
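
// Illustrative counterpart for bulk updates (hypothetical caller code): when
// many mappings change at once, each per-address `TlbFlush` can be ignored and
// a single `TlbFlushAll` issued at the end.
#[allow(dead_code)]
fn flush_after_batch<M: PagingMetaData>(
    per_address: impl IntoIterator<Item = TlbFlush<M>>,
    all: TlbFlushAll<M>,
) {
    for flush in per_address {
        flush.ignore(); // covered by the full flush below
    }
    all.flush_all(); // equivalent to `M::flush_tlb(None)`
}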