use core::ops::{Deref, DerefMut};

use crate::{
    FrameAllocator, PageTableEntry, PagingError, PagingResult, PhysAddr, TableMeta, VirtAddr,
    frame::Frame,
    map::{MapConfig, MapRecursiveConfig, UnmapConfig, UnmapRecursiveConfig},
    walk::{PageTableWalker, WalkConfig},
};

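/// An owning page table handle.
///
/// Wraps a [`PageTableRef`] and frees its page-table frames on drop; all
/// mapping, unmapping, and translation operations are available through
/// `Deref`/`DerefMut` to [`PageTableRef`].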
pub struct PageTable<T: TableMeta, A: FrameAllocator> {
    inner: PageTableRef<T, A>,
}

impl<T: TableMeta, A: FrameAllocator> PageTable<T, A> {
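    /// Number of valid address bits for this paging layout, as reported by
    /// `Frame::<T, A>::PT_VALID_BITS`.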
    pub const VALID_BITS: usize = Frame::<T, A>::PT_VALID_BITS;

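    /// Creates a new page table, allocating a root frame from `allocator`.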
    pub fn new(allocator: A) -> PagingResult<Self> {
        let inner = unsafe { PageTableRef::new(allocator) }?;
        Ok(Self { inner })
    }

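    /// Returns the number of valid address bits (same as [`Self::VALID_BITS`]).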
    pub fn valid_bits(&self) -> usize {
        Frame::<T, A>::PT_VALID_BITS
    }
}

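/// Frees the table's frames via [`PageTableRef::deallocate`] when the owning
/// handle goes out of scope.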
impl<T: TableMeta, A: FrameAllocator> Drop for PageTable<T, A> {
    fn drop(&mut self) {
        unsafe {
            self.deallocate();
        }
    }
}

impl<T: TableMeta, A: FrameAllocator> Deref for PageTable<T, A> {
    type Target = PageTableRef<T, A>;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T: TableMeta, A: FrameAllocator> DerefMut for PageTable<T, A> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

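/// A non-owning, copyable handle to a page table rooted at `root`.
///
/// Dropping a `PageTableRef` does not free any frames; call
/// [`Self::deallocate`] or [`Self::destroy`] explicitly when the table is no
/// longer needed.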
#[derive(Clone, Copy)]
pub struct PageTableRef<T: TableMeta, A: FrameAllocator> {
    pub root: Frame<T, A>,
}

impl<T: TableMeta, A: FrameAllocator> core::fmt::Debug for PageTableRef<T, A>
where
    T::P: core::fmt::Debug,
{
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("PageTable")
            .field("root_paddr", &format_args!("{:#x}", self.root.paddr.raw()))
            .field("table_levels", &T::LEVEL_BITS.len())
            .field("max_block_level", &T::MAX_BLOCK_LEVEL)
            .field("page_size", &format_args!("{:#x}", T::PAGE_SIZE))
            .finish()
    }
}

impl<T: TableMeta, A: FrameAllocator> PageTableRef<T, A> {
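    /// Allocates a fresh root frame from `allocator`.
    ///
    /// # Safety
    ///
    /// The returned handle has no destructor; the caller is responsible for
    /// eventually releasing the table with [`Self::deallocate`] or
    /// [`Self::destroy`].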
    pub unsafe fn new(allocator: A) -> PagingResult<Self> {
        let root = Frame::new(allocator)?;
        Ok(Self { root })
    }

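    /// Wraps an existing root table located at `paddr` without allocating.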
    pub fn from_paddr(paddr: PhysAddr, allocator: A) -> Self {
        let root = Frame::from_paddr(paddr, allocator);
        Self { root }
    }

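    /// Maps the virtual range `[config.vaddr, config.vaddr + config.size)` to
    /// the physical range starting at `config.paddr`.
    ///
    /// The request is rejected if the size is zero, either address is not
    /// page aligned, or either range would overflow the address space.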
    pub fn map(&mut self, config: &MapConfig) -> PagingResult {
        self.validate_map_config(config)?;

        if config.vaddr.raw().checked_add(config.size).is_none()
            || config.paddr.raw().checked_add(config.size).is_none()
        {
            return Err(PagingError::address_overflow(
                "Virtual or physical address overflow",
            ));
        }

        self.root.map_range_recursive(MapRecursiveConfig {
            start_vaddr: config.vaddr,
            start_paddr: config.paddr,
            end_vaddr: config.vaddr + config.size,
            level: Frame::<T, A>::PT_LEVEL,
            allow_huge: config.allow_huge,
            flush: config.flush,
            pte_template: config.pte,
        })?;

        Ok(())
    }

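    /// Unmaps `size` bytes starting at `start_vaddr`, with flushing enabled.
    ///
    /// Use [`Self::unmap_with_config`] to control flushing explicitly.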
    pub fn unmap(&mut self, start_vaddr: VirtAddr, size: usize) -> PagingResult<()> {
        self.validate_unmap_params(start_vaddr, size)?;

        let end_vaddr: VirtAddr = match start_vaddr.raw().checked_add(size) {
            Some(end) => VirtAddr::new(end),
            None => {
                return Err(PagingError::address_overflow(
                    "Virtual address overflow in unmap",
                ));
            }
        };

        self.root.unmap_range_recursive(UnmapRecursiveConfig {
            start_vaddr,
            end_vaddr,
            level: Frame::<T, A>::PT_LEVEL,
            flush: true,
        })?;

        Ok(())
    }

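    /// Like [`Self::unmap`], but takes an [`UnmapConfig`] so the caller can
    /// decide whether to flush.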
    pub fn unmap_with_config(&mut self, config: &UnmapConfig) -> PagingResult<()> {
        self.validate_unmap_params(config.start_vaddr, config.size)?;

        let end_vaddr = match config.start_vaddr.raw().checked_add(config.size) {
            Some(end) => VirtAddr::new(end),
            None => {
                return Err(PagingError::address_overflow(
                    "Virtual address overflow in unmap_with_config",
                ));
            }
        };

        self.root.unmap_range_recursive(UnmapRecursiveConfig {
            start_vaddr: config.start_vaddr,
            end_vaddr,
            level: Frame::<T, A>::PT_LEVEL,
            flush: config.flush,
        })?;

        Ok(())
    }

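    /// Checks that an unmap request is non-empty and page aligned.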
    fn validate_unmap_params(&self, start_vaddr: VirtAddr, size: usize) -> PagingResult<()> {
        if size == 0 {
            return Err(PagingError::invalid_size("Size cannot be zero in unmap"));
        }

        if !start_vaddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Start virtual address not page aligned in unmap",
            ));
        }

        if !size.is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Size not page aligned in unmap",
            ));
        }

        Ok(())
    }

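    /// Returns a walker over every entry in the configured range, including
    /// invalid ones.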
    pub fn walk_all(&self, config: WalkConfig) -> PageTableWalker<'_, T, A> {
        PageTableWalker::new(self, config)
    }

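    /// Iterates over the valid entries in `[start_vaddr, end_vaddr)`.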
    pub fn walk(
        &self,
        start_vaddr: VirtAddr,
        end_vaddr: VirtAddr,
    ) -> impl Iterator<Item = crate::walk::PteInfo<T::P>> + '_ {
        let config = WalkConfig {
            start_vaddr,
            end_vaddr,
        };
        PageTableWalker::new(self, config).filter(|p| p.pte.to_config(false).valid)
    }

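    /// Iterates over every valid final (leaf) mapping in the whole table.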
    pub fn walk_valid(&self) -> impl Iterator<Item = crate::walk::PteInfo<T::P>> + '_ {
        self.walk(0.into(), usize::MAX.into()).filter(|p| {
            let config = p.pte.to_config(false);
            config.valid && p.is_final_mapping
        })
    }

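    /// Checks that a map request is non-empty and page aligned.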
    fn validate_map_config(&self, config: &MapConfig) -> PagingResult {
        if config.size == 0 {
            return Err(PagingError::invalid_size("Size cannot be zero"));
        }

        if !config.vaddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Virtual address not page aligned",
            ));
        }

        if !config.paddr.raw().is_multiple_of(T::PAGE_SIZE) {
            return Err(PagingError::alignment_error(
                "Physical address not page aligned",
            ));
        }

        Ok(())
    }

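    /// Page size in bytes for this table layout.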
    pub const fn page_size() -> usize {
        T::PAGE_SIZE
    }

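    /// Number of page-table levels in this layout.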
    pub const fn table_levels() -> usize {
        T::LEVEL_BITS.len()
    }

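    /// Number of valid address bits, as reported by
    /// `Frame::<T, A>::PT_VALID_BITS`.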
    pub const fn valid_bits() -> usize {
        Frame::<T, A>::PT_VALID_BITS
    }

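    /// Consumes the handle and recursively frees the table's frames, starting
    /// from the root.
    ///
    /// # Safety
    ///
    /// The caller must ensure the table is no longer in use (e.g. not
    /// installed as the active page table) and that no other copies of this
    /// handle are used afterwards.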
    pub unsafe fn destroy(mut self) {
        self.root.deallocate_recursive(Frame::<T, A>::PT_LEVEL);
    }

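    /// Recursively frees the table's frames in place (same as
    /// [`Self::destroy`], but keeps the handle).
    ///
    /// # Safety
    ///
    /// Same requirements as [`Self::destroy`]; the handle must not be used to
    /// map or translate after this call.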
    pub unsafe fn deallocate(&mut self) {
        self.root.deallocate_recursive(Frame::<T, A>::PT_LEVEL);
    }

    pub fn deallocate_range(&mut self, start_vaddr: VirtAddr, end_vaddr: VirtAddr) -> PagingResult {
        if start_vaddr >= end_vaddr {
            return Err(PagingError::invalid_range(
                "Start address must be less than end address",
            ));
        }

        Ok(())
    }

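    /// Translates `vaddr` to its physical address, also returning the page
    /// table entry that resolved it.
    ///
    /// Huge (block) mappings are handled by adding the offset within the
    /// block at the level where the translation terminated.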
    pub fn translate(&self, vaddr: VirtAddr) -> PagingResult<(PhysAddr, T::P)> {
        let (pte, level) = self
            .root
            .translate_recursive_with_level(vaddr, Frame::<T, A>::PT_LEVEL)?;

        let pte_config = pte.to_config(level > 1);

        let (phys_addr, _) = if pte_config.huge {
            let level_size = Frame::<T, A>::level_size(level);
            let offset_in_page = vaddr.raw() % level_size;
            (
                PhysAddr::new(pte_config.paddr.raw() + offset_in_page),
                level_size,
            )
        } else {
            let offset_in_page = vaddr.raw() % T::PAGE_SIZE;
            (
                PhysAddr::new(pte_config.paddr.raw() + offset_in_page),
                T::PAGE_SIZE,
            )
        };

        Ok((phys_addr, pte))
    }

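    /// Translates `vaddr`, returning only the physical address.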
    pub fn translate_phys(&self, vaddr: VirtAddr) -> PagingResult<PhysAddr> {
        let (p, _) = self.translate(vaddr)?;
        Ok(p)
    }

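    /// Returns `true` if `vaddr` currently translates to a physical address.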
    pub fn is_mapped(&self, vaddr: VirtAddr) -> bool {
        self.translate(vaddr).is_ok()
    }

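    /// Physical address of the root table frame.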
    pub fn root_paddr(&self) -> crate::PhysAddr {
        self.root.paddr
    }
}