page_walker/format.rs
//! This module provides the [`PageFormat`] struct that is used to describe the page table
//! hierarchy.

use core::ops::Range;
use crate::level::PageLevel;
use crate::walker::PteType;
use num_traits::{FromPrimitive, PrimInt, Unsigned};

/// Describes the page format of the page table hierarchy as well as the mask of bits in a PTE
/// that refer to the actual physical address rather than to PTE metadata.
#[derive(Clone, Debug)]
pub struct PageFormat<'a, PTE>
where
    PTE: FromPrimitive + PrimInt + Unsigned,
{
    /// Describes the page table hierarchy as a slice of [`crate::level::PageLevel`] structs that
    /// each describe a single level in this hierarchy, where the level at index zero is the leaf
    /// node and the last page level is the root.
    pub levels: &'a [PageLevel<PTE>],

    /// The mask of bits that refer to an actual physical address and are not used for PTE
    /// metadata.
    pub physical_mask: PTE,
}
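
// As a purely illustrative example (this crate does not prescribe any particular layout): for a
// typical x86-64 4-level configuration, `levels` would hold four `PageLevel` entries ordered from
// the 4 KiB leaf level at index zero up to the root level, and `physical_mask` would be something
// like 0x000f_ffff_ffff_f000, keeping the physical address bits 12..=51 of a PTE while dropping
// the metadata bits.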

impl<'a, PTE> PageFormat<'a, PTE>
where
    PTE: FromPrimitive + PrimInt + Unsigned,
{
    /// Calculates the full virtual address mask by computing the mask covered by each page level
    /// and taking the largest one. This is used by the [`PageFormat::sign_extend`] method to
    /// determine the sign bit.
    pub fn virtual_mask(&self) -> usize {
        self.levels
            .iter()
            .map(|level| level.mask() | (level.page_size() - 1))
            .max()
            .unwrap()
    }
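
    // As a worked example for `virtual_mask` (illustrative only; this crate does not mandate this
    // layout): with x86-64 4-level paging the root level indexes bits 39..=47 and each of its
    // entries spans 2^39 bytes, so `level.mask() | (level.page_size() - 1)` evaluates to
    // 0x0000_ffff_ffff_ffff, i.e. the mask of a 48-bit virtual address space.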

    /// Sign extends a given virtual address by extending the sign bit into the unused upper bits
    /// of the virtual address.
    pub fn sign_extend(&self, address: usize) -> usize {
        let sign_bit = 1 << (self.virtual_mask().trailing_ones() - 1);

        if address & sign_bit == sign_bit {
            // The sign bit is set: OR the inverted virtual mask into the address to set the
            // unused upper bits, i.e. to sign extend the address.
            !self.virtual_mask() | address
        } else {
            address
        }
    }
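
    // For instance (again assuming a 48-bit virtual address space, so a virtual mask of
    // 0x0000_ffff_ffff_ffff): the sign bit is bit 47, so 0x0000_8000_0000_0000 sign extends to
    // 0xffff_8000_0000_0000, while 0x0000_7fff_ffff_ffff is returned unchanged.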

    /// This is a recursive helper function used to traverse the page table hierarchy for a given
    /// virtual address range and the given physical address of the page table at the current
    /// page table level. It invokes the appropriate user callbacks in
    /// [`crate::walker::PageWalker`] while traversing the page tables.
    fn do_walk<PageWalker, Error>(
        &self,
        phys_addr: PTE,
        mut index: usize,
        range: Range<usize>,
        walker: &mut PageWalker,
    ) -> Result<(), Error>
    where
        PageWalker: crate::walker::PageWalker<PTE, Error>,
    {
        // Ensure that the level index is valid.
        if index >= self.levels.len() {
            index = self.levels.len() - 1;
        }

        let level = &self.levels[index];

        // Split up the range by page boundaries, such that we get one sub-range per page, clamped
        // to the original range. For instance, the range 0x0000..0x1fff spans two 4K pages, so
        // this iterator would yield 0x0000..0x0fff and 0x1000..0x1fff. We also make sure that the
        // page ranges are sign extended where appropriate. In addition, calculate the PTE index
        // for each sub-range.
        let page_ranges = (level.pte_index(range.start)..=level.pte_index(range.end))
            .scan(self.sign_extend(range.start), |state, pte_index| {
                let page_range = *state..level.end(*state).min(range.end);
                *state = self.sign_extend(level.end(*state) + 1);

                Some((pte_index, page_range))
            });

        for (pte_index, page_range) in page_ranges {
            // Use the PTE index for this page range to index into the page table and read the
            // corresponding PTE.
            let offset: PTE = PTE::from_usize(pte_index * core::mem::size_of::<PTE>()).unwrap();
            let pte = walker.read_pte(phys_addr + offset)?;

            // Determine whether the PTE refers to a page or a page table: it is a page if we are
            // at a leaf page table or if the PTE refers to a huge page. Otherwise, it is a page
            // table.
            let page_type = match index == 0 || level.is_huge_page(pte) {
                true => PteType::Page(index),
                _ => PteType::PageTable(index),
            };

            // Invoke the user callback to handle this PTE.
            walker.handle_pte(page_type, page_range.clone(), &pte)?;

            // Invoke the user callback to handle a PTE hole, i.e. a PTE that is not marked as
            // present.
            if !level.is_present(pte) {
                walker.handle_pte_hole(index, page_range.clone(), &pte)?;
            }

            // If the PTE refers to a page rather than a page table, then we are done with this
            // PTE and can move on to the next one.
            if index == 0 || level.is_huge_page(pte) {
                continue;
            }

            // At this point we are dealing with a page table. Extract the physical address of the
            // next-level page table from the current PTE and recurse into it.
            let next_phys_addr = pte & self.physical_mask;
            self.do_walk(next_phys_addr, index - 1, page_range.clone(), walker)?;

            // Provide an opportunity for the user to handle the PTE of the page table after
            // recursion, e.g. to free the page table.
            walker.handle_post_pte(index, page_range, &pte)?;
        }

        Ok(())
    }
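
    // To illustrate the traversal (assuming a hypothetical two-level format with 4 KiB leaf
    // pages, all entries present and no huge pages): walking 0x0000..0x1fff from the root first
    // invokes `handle_pte` for the single root-level PTE covering the range, recurses into the
    // leaf table, invokes `handle_pte` for each of the two leaf PTEs, and finally invokes
    // `handle_post_pte` for the root-level PTE.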

    /// Recursively traverses the page table hierarchy for a given virtual address range, starting
    /// from the given physical address of the root page table of the hierarchy. It invokes the
    /// appropriate user callbacks in [`crate::walker::PageWalker`] while traversing the page
    /// tables.
    pub fn walk<PageWalker, Error>(
        &self,
        phys_addr: PTE,
        range: Range<usize>,
        walker: &mut PageWalker,
    ) -> Result<(), Error>
    where
        PageWalker: crate::walker::PageWalker<PTE, Error>,
    {
        self.do_walk(phys_addr, self.levels.len() - 1, range, walker)
    }
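
    // A typical invocation (hypothetical names, for illustration only) might look like
    // `format.walk(root_table_phys_addr, 0x0000..0xffff_ffff_ffff, &mut walker)?`, where `walker`
    // is some type implementing `crate::walker::PageWalker`.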

    /// This is a recursive helper function used to traverse the page table hierarchy for a given
    /// virtual address range and the given physical address of the page table at the current
    /// page table level. It invokes the appropriate user callbacks in
    /// [`crate::walker::PageWalkerMut`] while traversing the page tables.
    fn do_walk_mut<PageWalkerMut, Error>(
        &self,
        phys_addr: PTE,
        mut index: usize,
        range: Range<usize>,
        walker: &mut PageWalkerMut,
    ) -> Result<(), Error>
    where
        PageWalkerMut: crate::walker::PageWalkerMut<PTE, Error>,
    {
        // Ensure that the level index is valid.
        if index >= self.levels.len() {
            index = self.levels.len() - 1;
        }

        let level = &self.levels[index];

        // Split up the range by page boundaries, such that we get one sub-range per page, clamped
        // to the original range. For instance, the range 0x0000..0x1fff spans two 4K pages, so
        // this iterator would yield 0x0000..0x0fff and 0x1000..0x1fff. We also make sure that the
        // page ranges are sign extended where appropriate. In addition, calculate the PTE index
        // for each sub-range.
        let page_ranges = (level.pte_index(range.start)..=level.pte_index(range.end))
            .scan(self.sign_extend(range.start), |state, pte_index| {
                let page_range = *state..level.end(*state).min(range.end);
                *state = self.sign_extend(level.end(*state) + 1);

                Some((pte_index, page_range))
            });

        for (pte_index, page_range) in page_ranges {
            // Use the PTE index for this page range to index into the page table and read the
            // corresponding PTE.
            let offset: PTE = PTE::from_usize(pte_index * core::mem::size_of::<PTE>()).unwrap();
            let mut pte = walker.read_pte(phys_addr + offset)?;

            // Determine whether the PTE refers to a page or a page table: it is a page if we are
            // at a leaf page table or if the PTE refers to a huge page. Otherwise, it is a page
            // table.
            let page_type = match index == 0 || level.is_huge_page(pte) {
                true => PteType::Page(index),
                _ => PteType::PageTable(index),
            };

            // Invoke the user callback to handle this PTE.
            walker.handle_pte(page_type, page_range.clone(), &mut pte)?;

            // Invoke the user callback to handle a PTE hole, i.e. a PTE that is not marked as
            // present.
            if !level.is_present(pte) {
                walker.handle_pte_hole(index, page_range.clone(), &mut pte)?;
            }

            // Write back the PTE, as the user callbacks may have modified it.
            walker.write_pte(phys_addr + offset, pte)?;

            // If the PTE refers to a page rather than a page table, then we are done with this
            // PTE and can move on to the next one.
            if index == 0 || level.is_huge_page(pte) {
                continue;
            }

            // At this point we are dealing with a page table. Extract the physical address of the
            // next-level page table from the current PTE and recurse into it.
            let next_phys_addr = pte & self.physical_mask;
            self.do_walk_mut(next_phys_addr, index - 1, page_range.clone(), walker)?;

            // Provide an opportunity for the user to handle the PTE of the page table after
            // recursion, e.g. to free the page table, and write back any modifications.
            walker.handle_post_pte(index, page_range, &mut pte)?;
            walker.write_pte(phys_addr + offset, pte)?;
        }

        Ok(())
    }
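
    // As a hypothetical example of a mutable walk (not prescribed by this crate): a
    // `crate::walker::PageWalkerMut` implementation may modify the metadata bits of `pte` in its
    // `handle_pte` callback, e.g. to change access permissions, and the modified value is then
    // written back to the page table by `write_pte` in `do_walk_mut`.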

    /// Recursively traverses the page table hierarchy for a given virtual address range, starting
    /// from the given physical address of the root page table of the hierarchy. It invokes the
    /// appropriate user callbacks in [`crate::walker::PageWalkerMut`] while traversing the page
    /// tables.
    pub fn walk_mut<PageWalkerMut, Error>(
        &self,
        phys_addr: PTE,
        range: Range<usize>,
        walker: &mut PageWalkerMut,
    ) -> Result<(), Error>
    where
        PageWalkerMut: crate::walker::PageWalkerMut<PTE, Error>,
    {
        self.do_walk_mut(phys_addr, self.levels.len() - 1, range, walker)
    }
}
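
// The test below is purely illustrative: it mirrors the range splitting performed in `do_walk`
// and `do_walk_mut` for an assumed leaf level with 4 KiB pages whose PTE index is taken from
// address bits 12..=20 (a typical x86-64 layout; this crate does not mandate it). The `pte_index`
// and `end` closures are hypothetical stand-ins for `PageLevel::pte_index` and `PageLevel::end`,
// and sign extension is omitted since the example addresses lie far below any plausible sign bit.
#[cfg(test)]
mod page_range_splitting_example {
    #[test]
    fn splits_a_range_by_page_boundaries() {
        const PAGE_SIZE: usize = 0x1000;

        // Hypothetical stand-ins for the `PageLevel` helpers used by `do_walk`.
        let pte_index = |addr: usize| (addr >> 12) & 0x1ff;
        let end = |addr: usize| (addr & !(PAGE_SIZE - 1)) + (PAGE_SIZE - 1);

        let range = 0x0000..0x1fff;

        // This mirrors the `scan` in `do_walk`: one (PTE index, sub-range) pair per page.
        let mut page_ranges = (pte_index(range.start)..=pte_index(range.end))
            .scan(range.start, |state, pte_index| {
                let page_range = *state..end(*state).min(range.end);
                *state = end(*state) + 1;

                Some((pte_index, page_range))
            });

        // The range 0x0000..0x1fff spans two 4 KiB pages.
        assert_eq!(page_ranges.next(), Some((0, 0x0000..0x0fff)));
        assert_eq!(page_ranges.next(), Some((1, 0x1000..0x1fff)));
        assert_eq!(page_ranges.next(), None);
    }
}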