1use core::{
2 alloc::Layout,
3 marker::PhantomData,
4 ptr::{slice_from_raw_parts, slice_from_raw_parts_mut, NonNull},
5};
6
7use log::trace;
8
9use crate::{
10 align::*,
11 err::{PagingError, PagingResult},
12 iter::TableIter,
13 page_table_entry::Pte,
14 Access, MapConfig, PTEArch, PTEGeneric, PTEInfo,
15};
16
/// A lightweight, copyable handle to one page table located at a physical
/// address. Holds no ownership of the table memory: freeing is explicit via
/// `release`, and copies may alias the same table.
#[derive(Clone, Copy)]
pub struct PageTableRef<'a, P: PTEArch> {
    // Physical address of the table's first entry.
    addr: usize,
    // Cached walk geometry (level, entries per table, shift amounts).
    walk: PageWalk,
    // Ties the handle to the architecture `P` without storing any data.
    _marker: PhantomData<&'a P>,
}
27
impl<'a, P: PTEArch> PageTableRef<'a, P> {
    /// Allocates a zeroed root table at the architecture's top level
    /// (`P::level()`).
    pub fn create_empty(access: &mut impl Access) -> PagingResult<Self> {
        Self::new_with_level(P::level(), access)
    }

    /// Allocates a zeroed table and returns a handle to it at `level`.
    ///
    /// # Panics
    ///
    /// Panics if `level` is `0` — level 0 has no table representation.
    pub fn new_with_level(level: usize, access: &mut impl Access) -> PagingResult<Self> {
        assert!(level > 0);
        // SAFETY: `alloc_table` hands back a freshly allocated, zero-filled
        // page owned exclusively by this new table.
        let addr = unsafe { Self::alloc_table(access)? };
        Ok(PageTableRef::from_addr(addr, level))
    }

    /// Wraps an existing table at physical address `addr` as a `level`-level
    /// table. No allocation or validation is performed.
    pub fn from_addr(addr: usize, level: usize) -> Self {
        let walk = PageWalk::new(P::page_size(), level);

        Self {
            addr,
            walk,
            _marker: PhantomData,
        }
    }

    /// This table's level (1 is the leaf level).
    pub fn level(&self) -> usize {
        self.walk.level
    }

    /// Physical address of the table.
    pub fn paddr(&self) -> usize {
        self.addr
    }

    /// Maps `[config.vaddr, config.vaddr + size)` to
    /// `[config.paddr, config.paddr + size)`. With `allow_block` set, block
    /// (huge) entries are used wherever alignment permits.
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::map_region_with_handle`].
    pub unsafe fn map_region(
        &mut self,
        config: MapConfig,
        size: usize,
        allow_block: bool,
        access: &mut impl Access,
    ) -> PagingResult<()> {
        self.map_region_with_handle(
            config,
            size,
            allow_block,
            access,
            // Turbofish pins the closure type so `Option<&impl Fn>` resolves
            // even though no callback is supplied.
            None::<fn(*const u8)>.as_ref(),
        )
    }

    /// Like [`Self::map_region`], but invokes `on_page_mapped` after each
    /// entry is written.
    ///
    /// Returns [`PagingError::NotAligned`] when either address is not
    /// page-aligned, and propagates allocation failures from intermediate
    /// table creation.
    ///
    /// # Safety
    ///
    /// The caller must guarantee the physical range is valid to map and that
    /// changing translations for this virtual range cannot invalidate live
    /// references. `size` is presumably a multiple of the page size — a
    /// trailing partial page would make `size -= map_size` underflow; TODO
    /// confirm callers uphold this.
    pub unsafe fn map_region_with_handle(
        &mut self,
        cfg: MapConfig,
        size: usize,
        allow_huge: bool,
        access: &mut impl Access,
        on_page_mapped: Option<&impl Fn(*const u8)>,
    ) -> PagingResult {
        let vaddr = cfg.vaddr;
        let paddr = cfg.paddr;

        // Both ends of the mapping must be page-aligned before any entry
        // is written.
        if !vaddr.is_aligned_to(P::page_size()) {
            return Err(PagingError::NotAligned("vaddr"));
        }

        if !paddr.is_aligned_to(P::page_size()) {
            return Err(PagingError::NotAligned("paddr"));
        }

        let mut size = size;
        trace!(
            "map_region: [{:#x}, {:#x}) -> [{:#x}, {:#x}) {:?}",
            vaddr as usize,
            vaddr as usize + size,
            paddr,
            paddr + size,
            cfg.setting,
        );

        let mut map_cfg = cfg;

        while size > 0 {
            // Deepest level whose entry span both addresses (and the
            // remaining size) can accommodate; 1 forces leaf pages. Both
            // vaddr and paddr must agree, hence the `min`.
            let level_deepth = if allow_huge {
                self.walk
                    .detect_align_level(map_cfg.vaddr, size)
                    .min(self.walk.detect_align_level(map_cfg.paddr as _, size))
            } else {
                1
            };
            self.get_entry_or_create(&map_cfg, level_deepth, access)?;

            let map_size = self.walk.copy_with_level(level_deepth).level_entry_size();

            if let Some(f) = on_page_mapped {
                // NOTE(review): this passes the region's original start
                // address on every iteration, not the address just mapped —
                // possibly intended to be `map_cfg.vaddr`; confirm against
                // callers before changing.
                f(vaddr);
            }
            map_cfg.vaddr = unsafe { map_cfg.vaddr.add(map_size) };
            map_cfg.paddr += map_size;
            size -= map_size;
        }
        Ok(())
    }

    /// Read-only view of this table's entries through the access layer's
    /// linear virtual mapping.
    pub fn as_slice(&self, access: &impl Access) -> &'a [Pte<P>] {
        // SAFETY: assumes `addr + va_offset` is the live, table_size-entry
        // table this handle was created for — TODO confirm no aliasing
        // mutable view is held concurrently.
        unsafe {
            &*slice_from_raw_parts(
                (self.addr + access.va_offset()) as *const Pte<P>,
                self.walk.table_size,
            )
        }
    }

    /// Returns the sub-table behind entry `idx`, allocating and linking a
    /// new one (with `map_cfg.setting`) if the entry is not yet valid.
    unsafe fn sub_table_or_create(
        &mut self,
        idx: usize,
        map_cfg: &MapConfig,
        access: &mut impl Access,
    ) -> PagingResult<PageTableRef<'a, P>> {
        let mut pte = self.get_pte(idx, access);
        let sub_level = self.level() - 1;

        if pte.valid() {
            Ok(Self::from_addr(pte.paddr, sub_level))
        } else {
            let table = Self::new_with_level(sub_level, access)?;
            let ptr = table.addr;
            pte.is_valid = true;
            pte.paddr = ptr;
            // A table descriptor, not a block mapping.
            pte.is_block = false;
            pte.setting = map_cfg.setting;

            let s = self.as_slice_mut(access);
            s[idx] = P::new_pte(pte);

            Ok(table)
        }
    }

    /// Walks from this table down to `level`, creating intermediate tables
    /// as needed, and writes the final entry for `map_cfg.vaddr`.
    /// Entries above level 1 are written as block mappings.
    unsafe fn get_entry_or_create(
        &mut self,
        map_cfg: &MapConfig,
        level: usize,
        access: &mut impl Access,
    ) -> PagingResult<()> {
        // Handles are Copy, so walking with a by-value cursor is cheap.
        let mut table = *self;
        while table.level() > 0 {
            let idx = table.index_of_table(map_cfg.vaddr);
            if table.level() == level {
                table.as_slice_mut(access)[idx] =
                    P::new_pte(PTEGeneric::new(map_cfg.paddr, level > 1, map_cfg.setting));
                return Ok(());
            }
            table = table.sub_table_or_create(idx, map_cfg, access)?;
        }
        // Unreachable for level >= 1; kept as a defensive error.
        Err(PagingError::NotAligned("vaddr"))
    }

    /// Frees every sub-table reachable from this table, then the table
    /// itself. The handle must not be used afterwards.
    pub fn release(&mut self, access: &mut impl Access) {
        self._release(0usize as _, access);
        // SAFETY: the table was allocated with this exact layout in
        // `alloc_table`; after `_release` no child still references it.
        unsafe {
            access.dealloc(
                self.addr.to_virt(access),
                Layout::from_size_align_unchecked(P::page_size(), P::page_size()),
            );
        }
    }

    /// Recursively frees the sub-tables below this one. Block entries are
    /// skipped (they own no table), and leaf tables (level 1) have nothing
    /// to free. Returns `None` if a sub-table lookup fails, aborting the
    /// remaining teardown.
    fn _release(&mut self, start_vaddr: *const u8, access: &mut impl Access) -> Option<()> {
        let start_vaddr_usize: usize = start_vaddr as _;
        let entries = self.as_slice(access);

        if self.level() == 1 {
            return Some(());
        }

        for (i, entry) in entries.iter().enumerate() {
            // Virtual address covered by entry `i`; only used for recursion
            // bookkeeping here.
            let vaddr_usize = start_vaddr_usize + i * self.entry_size();
            let vaddr = vaddr_usize as _;
            let pte = entry.read();

            if pte.valid() {
                let is_block = pte.is_block;

                // NOTE(review): `self.level() > 1` is already guaranteed by
                // the early return above; the extra check is redundant but
                // harmless.
                if self.level() > 1 && !is_block {
                    let mut table_ref = self.next_table(i, access)?;
                    // Free grandchildren before freeing the child table.
                    table_ref._release(vaddr, access)?;

                    // SAFETY: the child table was allocated with this layout
                    // and all of its own children were just released.
                    unsafe {
                        access.dealloc(
                            pte.paddr.to_virt(access),
                            Layout::from_size_align_unchecked(P::page_size(), P::page_size()),
                        );
                    }
                }
            }
        }
        Some(())
    }

    /// Sub-table behind entry `idx`, or `None` when the entry is a block
    /// mapping or invalid.
    fn next_table(&self, idx: usize, access: &impl Access) -> Option<Self> {
        let pte = self.get_pte(idx, access);
        if pte.is_block {
            return None;
        }

        if pte.valid() {
            Some(Self::from_addr(pte.paddr, self.level() - 1))
        } else {
            None
        }
    }

    /// Index of the entry covering `vaddr` at this table's level.
    fn index_of_table(&self, vaddr: *const u8) -> usize {
        self.walk.index_of_table(vaddr)
    }

    /// Memory span covered by a single entry of this table.
    pub fn entry_size(&self) -> usize {
        self.walk.level_entry_size()
    }

    /// Number of entries per table.
    pub fn table_size(&self) -> usize {
        self.walk.table_size
    }

    /// Mutable raw-word view of the table; entries are written as the raw
    /// `usize` values produced by `P::new_pte`.
    fn as_slice_mut(&mut self, access: &impl Access) -> &'a mut [usize] {
        // SAFETY: same mapping assumption as `as_slice`; `&mut self` keeps
        // this handle from creating overlapping views, but aliasing copies
        // of the handle are possible — TODO confirm callers avoid that.
        unsafe {
            &mut *slice_from_raw_parts_mut(
                (self.addr + access.va_offset()) as *mut usize,
                self.walk.table_size,
            )
        }
    }

    /// Reads and decodes entry `idx` into its generic form.
    fn get_pte(&self, idx: usize, access: &impl Access) -> PTEGeneric {
        let s = self.as_slice(access);
        s[idx].read()
    }

    /// Allocates one zeroed page and returns its *physical* address
    /// (allocator-returned virtual address minus `va_offset`).
    ///
    /// # Safety
    ///
    /// `P::page_size()` must be a valid (non-zero, power-of-two) layout size
    /// and alignment.
    unsafe fn alloc_table(access: &mut impl Access) -> PagingResult<usize> {
        let page_size = P::page_size();
        let layout = Layout::from_size_align_unchecked(page_size, page_size);
        if let Some(addr) = access.alloc(layout) {
            // New tables must start with every entry invalid.
            addr.write_bytes(0, page_size);
            Ok(addr.as_ptr() as usize - access.va_offset())
        } else {
            Err(PagingError::NoMemory)
        }
    }

    /// Iterates over the entries reachable from this table, starting at
    /// virtual address 0.
    pub fn iter_all<A: Access>(&self, access: &'a A) -> impl Iterator<Item = PTEInfo> + 'a {
        TableIter::new(0 as _, *self, access)
    }
}
302
/// Returns `floor(log2(value))`.
///
/// # Panics
///
/// Panics if `value` is zero.
const fn log2(value: usize) -> usize {
    assert!(value > 0, "Value must be positive and non-zero");
    // floor(log2(v)) is the index of the highest set bit; this replaces the
    // previous shift loop and its redundant 512/4096 special cases.
    (usize::BITS - 1 - value.leading_zeros()) as usize
}
322
/// Conversion from a physical address to a virtual pointer using the linear
/// offset supplied by the access layer.
pub trait PVConvert {
    /// Returns a non-null virtual pointer for this physical address,
    /// computed as `phys + access.va_offset()`.
    fn to_virt<T>(&self, access: &impl Access) -> NonNull<T>;
}
326
327impl PVConvert for usize {
328 fn to_virt<T>(&self, access: &impl Access) -> NonNull<T> {
329 unsafe { NonNull::new_unchecked((self + access.va_offset()) as *mut u8) }.cast()
330 }
331}
332
/// Precomputed geometry for walking a multi-level page table.
#[derive(Debug, Clone, Copy)]
pub struct PageWalk {
    // Level this walk operates at (1 = leaf level).
    level: usize,
    // Number of entries per table (page_size / size_of::<usize>()).
    table_size: usize,
    // log2(table_size): bits of vaddr consumed per level.
    table_size_pow: usize,
    // log2(page_size): bits of vaddr consumed by the page offset.
    page_size_pow: usize,
}
340
341impl PageWalk {
342 fn new(page_size: usize, level: usize) -> Self {
343 let table_size = page_size / size_of::<usize>();
344 let table_size_pow = log2(table_size);
345 let page_size_pow = log2(page_size);
346
347 Self {
348 table_size,
349 table_size_pow,
350 page_size_pow,
351 level,
352 }
353 }
354
355 fn copy_with_level(&self, level: usize) -> Self {
356 let mut c = *self;
357 c.level = level;
358 c
359 }
360
361 fn level_entry_size_shift(&self) -> usize {
362 self.page_size_pow + (self.level - 1) * self.table_size_pow
363 }
364
365 fn index_of_table(&self, vaddr: *const u8) -> usize {
366 (vaddr as usize >> self.level_entry_size_shift()) & (self.table_size - 1)
367 }
368
369 fn level_entry_size(&self) -> usize {
370 1 << self.level_entry_size_shift()
371 }
372
373 fn detect_align_level(&self, vaddr: *const u8, size: usize) -> usize {
374 for level in (0..self.level).rev() {
375 let level_size = self.copy_with_level(level).level_entry_size();
376 if vaddr as usize % level_size == 0 && size >= level_size {
377 return level;
378 }
379 }
380 1
381 }
382}
383
#[cfg(test)]
mod test {
    use super::*;

    const MB: usize = 1024 * 1024;
    const GB: usize = 1024 * MB;

    // `log2` on the two sizes the paging code actually feeds it.
    #[test]
    fn test_log2() {
        for (input, expected) in [(512, 9), (4096, 12)] {
            assert_eq!(log2(input), expected);
        }
    }

    // Entry span per level for 4 KiB pages: 4K, 2M, 1G, 512G.
    #[test]
    fn test_level_entry_memory_size() {
        for (i, want) in [4096, 2 * MB, GB, 512 * GB].into_iter().enumerate() {
            assert_eq!(PageWalk::new(4096, i + 1).level_entry_size(), want);
        }
    }

    // Entry index selection at each level of a 4-level, 4 KiB-page walk.
    #[test]
    fn test_idx_of_table() {
        let leaf = PageWalk::new(4096, 1);
        for (i, va) in [0usize, 0x1000, 0x2000].into_iter().enumerate() {
            assert_eq!(leaf.index_of_table(va as _), i);
        }

        let l2 = PageWalk::new(4096, 2);
        assert_eq!(l2.index_of_table(0 as _), 0);
        assert_eq!(l2.index_of_table((2 * MB) as _), 1);

        assert_eq!(PageWalk::new(4096, 3).index_of_table(GB as _), 1);
        assert_eq!(PageWalk::new(4096, 4).index_of_table((512 * GB) as _), 1);
    }

    // Alignment detection picks the deepest level both the address and the
    // remaining size allow.
    #[test]
    fn test_detect_align() {
        let size = 4 * GB;
        let walk = PageWalk::new(0x1000, 4);

        let cases = [
            (0x1000usize, 1usize),
            (0x1000 * 512, 2),
            (0x1000 * 512 * 512, 3),
            (2 * GB, 3),
        ];
        for (vaddr, expected) in cases {
            assert_eq!(walk.detect_align_level(vaddr as _, size), expected);
        }
    }
}