page_table_generic/
table.rs

use core::{
    alloc::Layout,
    marker::PhantomData,
    ptr::{slice_from_raw_parts, slice_from_raw_parts_mut},
};

use num_align::*;

use super::{
    Access, PTEGeneric, PTEInfo, PagingError, PagingResult, PhysAddr, TableGeneric, VirtAddr,
    iter::TableIter,
};

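/// Parameters describing a single [`PageTableRef::map`] request.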
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct MapConfig<P: PTEGeneric> {
    pub vaddr: VirtAddr,
    pub paddr: PhysAddr,
    pub size: usize,
    pub pte: P,
    pub allow_huge: bool,
    pub flush: bool,
}

impl<P: PTEGeneric> MapConfig<P> {
    pub fn new(
        vaddr: VirtAddr,
        paddr: PhysAddr,
        size: usize,
        pte: P,
        allow_huge: bool,
        flush: bool,
    ) -> Self {
        Self {
            vaddr,
            paddr,
            size,
            pte,
            allow_huge,
            flush,
        }
    }
}

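/// Internal cursor tracking the current virtual/physical position while
/// [`PageTableRef::map`] walks a region.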
#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct _MapConfig<P: PTEGeneric> {
    pub vaddr: VirtAddr,
    pub paddr: PhysAddr,
    pub pte: P,
}

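/// A copyable reference to a page table located at a physical address.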
#[derive(Clone, Copy)]
pub struct PageTableRef<'a, T: TableGeneric> {
    addr: PhysAddr,
    walk: PageWalk<T>,
    _marker: PhantomData<&'a T>,
}

impl<'a, T: TableGeneric> PageTableRef<'a, T> {
    /// Creates a new, empty top-level page table and returns a reference to it.
    #[inline(always)]
    pub fn create_empty(access: &mut impl Access) -> PagingResult<Self> {
        Self::new_with_level(T::LEVEL, access)
    }

    /// Allocates a new page table and returns a reference to it.
    ///
    /// `level` is the level of this table; levels start at 1 (the leaf level)
    /// and count upward.
    #[inline(always)]
    pub fn new_with_level(level: usize, access: &mut impl Access) -> PagingResult<Self> {
        assert!(level > 0);
        let addr = unsafe { Self::alloc_table(access)? };
        Ok(PageTableRef::from_addr(addr, level))
    }

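    /// Wraps an existing root table located at `addr`.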
    #[inline(always)]
    pub fn root_from_addr(addr: PhysAddr) -> Self {
        PageTableRef::from_addr(addr, T::LEVEL)
    }

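    /// Wraps the table located at `addr` at the given `level`.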
    #[inline(always)]
    pub fn from_addr(addr: PhysAddr, level: usize) -> Self {
        let walk = PageWalk::new(level);
        Self {
            addr,
            walk,
            _marker: PhantomData,
        }
    }

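    /// Largest block (huge-page) size this table can map, capped at
    /// `T::MAX_BLOCK_LEVEL`.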
    pub fn max_block_size(&self) -> usize {
        let max = PageWalk::<T>::new(T::MAX_BLOCK_LEVEL);
        self.walk.level_entry_size().min(max.level_entry_size())
    }

    /// Map a contiguous virtual memory region to a contiguous physical memory
    /// region with the given mapping `flags`.
    ///
    /// The virtual and physical memory regions start at `vaddr` and `paddr`
    /// respectively. The region size is `size`. The addresses and `size` must
    /// be aligned to 4K, otherwise it returns [`Err(PagingError::NotAligned)`].
    ///
    /// When `allow_huge` is true, it will try to map the region with huge pages
    /// if possible. Otherwise, it will map the region with 4K pages.
    ///
    /// [`Err(PagingError::NotAligned)`]: PagingError::NotAligned
    ///
    /// # Safety
    /// The caller must ensure that the physical address is valid.
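    ///
    /// # Example
    ///
    /// A minimal sketch, assuming hypothetical `Sv39Table: TableGeneric` and
    /// `Heap: Access` implementations and a caller-built `pte` carrying the
    /// desired access flags:
    ///
    /// ```ignore
    /// let mut access = Heap::new();
    /// let mut table = PageTableRef::<Sv39Table>::create_empty(&mut access)?;
    /// let cfg = MapConfig::new(
    ///     0xffff_ffc0_8000_0000_usize.into(), // vaddr
    ///     0x8000_0000_usize.into(),           // paddr
    ///     2 * MB,                             // size (2 MiB)
    ///     pte,
    ///     true,  // allow_huge: use a 2 MiB block if alignment permits
    ///     false, // flush: skip per-page TLB flushes
    /// );
    /// unsafe { table.map(cfg, &mut access)? };
    /// ```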
    pub unsafe fn map(
        &mut self,
        config: MapConfig<T::PTE>,
        access: &mut impl Access,
    ) -> PagingResult {
        let vaddr = config.vaddr;
        let paddr = config.paddr;

        if !vaddr.raw().is_aligned_to(T::PAGE_SIZE) {
            return Err(PagingError::NotAligned("vaddr"));
        }

        if !paddr.raw().is_aligned_to(T::PAGE_SIZE) {
            return Err(PagingError::NotAligned("paddr"));
        }

        let mut size = config.size;

        let mut map_cfg = _MapConfig {
            vaddr,
            paddr,
            pte: config.pte,
        };

        while size > 0 {
            let level_depth = if config.allow_huge {
                let v_align = self.walk.detect_align_level(map_cfg.vaddr.raw(), size);
                let p_align = self.walk.detect_align_level(map_cfg.paddr.raw(), size);
                v_align.min(p_align).min(T::MAX_BLOCK_LEVEL)
            } else {
                1
            };
            unsafe { self.get_entry_or_create(map_cfg, level_depth, access)? };

            let map_size = self.walk.copy_with_level(level_depth).level_entry_size();

            if config.flush {
                T::flush(Some(map_cfg.vaddr));
            }
            map_cfg.vaddr += map_size;
            map_cfg.paddr += map_size;
            size -= map_size;
        }
        Ok(())
    }

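    /// Returns an iterator over the page-table entries reachable from this
    /// table, starting at virtual address 0.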
    pub fn iter_all<A: Access>(&self, access: &'a A) -> impl Iterator<Item = PTEInfo<T::PTE>> + 'a {
        TableIter::new(0 as _, *self, access)
    }

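    /// Releases this table and all sub-tables, returning their backing pages
    /// to the allocator.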
    pub fn release(&mut self, access: &mut impl Access) {
        self._release(0.into(), access);
        unsafe {
            access.dealloc(self.addr, Self::pte_layout());
        }
    }

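    /// Layout of one table page: `PAGE_SIZE` bytes with `PAGE_SIZE` alignment.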
    fn pte_layout() -> Layout {
        unsafe { Layout::from_size_align_unchecked(T::PAGE_SIZE, T::PAGE_SIZE) }
    }

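    /// Recursively frees the pages backing every sub-table below this one.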
    fn _release(&mut self, start_vaddr: VirtAddr, access: &mut impl Access) -> Option<()> {
        let start_vaddr_usize: usize = start_vaddr.raw();
        let entries = self.as_slice(access);

        if self.level() == 1 {
            return Some(());
        }

        for (i, &pte) in entries.iter().enumerate() {
            let vaddr_usize = start_vaddr_usize + i * self.entry_size();
            let vaddr = vaddr_usize.into();

            if pte.valid() {
                let is_block = pte.is_huge();

                if self.level() > 1 && !is_block {
                    let mut table_ref = self.next_table(i, access)?;
                    table_ref._release(vaddr, access)?;

                    unsafe {
                        access.dealloc(pte.paddr(), Self::pte_layout());
                    }
                }
            }
        }
        Some(())
    }

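    /// Walks down to `level`, creating intermediate tables as needed, and
    /// writes the final entry.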
    unsafe fn get_entry_or_create(
        &mut self,
        map_cfg: _MapConfig<T::PTE>,
        level: usize,
        access: &mut impl Access,
    ) -> PagingResult<()> {
        let mut table = *self;
        while table.level() > 0 {
            let idx = table.index_of_table(map_cfg.vaddr);
            if table.level() == level {
                let mut pte: <T as TableGeneric>::PTE = map_cfg.pte;
                pte.set_paddr(map_cfg.paddr);
                pte.set_valid(true);
                // pte.set_is_leaf(level > 1);
                if level > 1 {
                    pte.set_is_huge(true);
                }

                table.as_slice_mut(access)[idx] = pte;
                return Ok(());
            }
            table = unsafe { table.sub_table_or_create(idx, map_cfg, access)? };
        }
        Err(PagingError::NotAligned("vaddr"))
    }

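    /// Returns the sub-table referenced by entry `idx`, allocating a fresh
    /// table if the entry is not yet valid.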
    unsafe fn sub_table_or_create(
        &mut self,
        idx: usize,
        map_cfg: _MapConfig<T::PTE>,
        access: &mut impl Access,
    ) -> PagingResult<PageTableRef<'a, T>> {
        let mut pte = self.get_pte(idx, access);
        let sub_level = self.level() - 1;

        if pte.valid() {
            Ok(Self::from_addr(pte.paddr(), sub_level))
        } else {
            pte = map_cfg.pte;
            let table = Self::new_with_level(sub_level, access)?;
            let ptr = table.addr;
            pte.set_valid(true);
            pte.set_paddr(ptr);
            pte.set_is_huge(false);

            let s = self.as_slice_mut(access);
            s[idx] = pte;

            Ok(table)
        }
    }

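    /// Returns the next-level table for entry `idx`, or `None` if the entry
    /// is invalid or a huge-page leaf.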
    fn next_table(&self, idx: usize, access: &impl Access) -> Option<Self> {
        let pte = self.get_pte(idx, access);
        if pte.is_huge() {
            return None;
        }
        if pte.valid() {
            Some(Self::from_addr(pte.paddr(), self.level() - 1))
        } else {
            None
        }
    }

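    /// Views this table's entries as a slice of `T::TABLE_LEN` PTEs.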
    pub fn as_slice(&self, access: &impl Access) -> &'a [T::PTE] {
        unsafe { &*slice_from_raw_parts(access.phys_to_mut(self.addr).cast(), T::TABLE_LEN) }
    }

    fn as_slice_mut(&mut self, access: &impl Access) -> &'a mut [T::PTE] {
        unsafe {
            &mut *slice_from_raw_parts_mut(access.phys_to_mut(self.addr).cast(), T::TABLE_LEN)
        }
    }

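    /// The level of this table (1 is the leaf level).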
    pub fn level(&self) -> usize {
        self.walk.level
    }

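    /// Physical address of the page backing this table.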
    pub fn paddr(&self) -> PhysAddr {
        self.addr
    }

    fn get_pte(&self, idx: usize, access: &impl Access) -> T::PTE {
        let s = self.as_slice(access);
        s[idx]
    }

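    /// Allocates one zero-initialized page to back a new table.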
    #[inline(always)]
    unsafe fn alloc_table(access: &mut impl Access) -> PagingResult<PhysAddr> {
        let page_size = T::PAGE_SIZE;
        let layout = unsafe { Layout::from_size_align_unchecked(page_size, page_size) };
        if let Some(addr) = unsafe { access.alloc(layout) } {
            unsafe { access.phys_to_mut(addr).write_bytes(0, page_size) };
            Ok(addr)
        } else {
            Err(PagingError::NoMemory)
        }
    }

    fn index_of_table(&self, vaddr: VirtAddr) -> usize {
        self.walk.index_of_table(vaddr)
    }

    /// The memory size covered by one entry of this table.
    pub fn entry_size(&self) -> usize {
        self.walk.level_entry_size()
    }
}

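/// Floor of the base-2 logarithm, with fast paths for common table sizes.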
const fn log2(value: usize) -> usize {
    assert!(value > 0, "Value must be positive and non-zero");
    match value {
        512 => 9,
        4096 => 12,
        _ => {
            let mut v = value;
            let mut result = 0;

            // Find the position of the highest set bit.
            while v > 1 {
                v >>= 1; // shift right by one bit
                result += 1;
            }

            result
        }
    }
}

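/// Index and size arithmetic for one level of a page-table walk.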
#[derive(Debug, Clone, Copy)]
struct PageWalk<T: TableGeneric> {
    level: usize,
    _mark: PhantomData<T>,
}

impl<T: TableGeneric> PageWalk<T> {
    fn new(level: usize) -> Self {
        Self {
            level,
            _mark: PhantomData,
        }
    }

    const fn table_len_pow() -> usize {
        log2(Self::table_len())
    }

    const fn page_size_pow() -> usize {
        log2(T::PAGE_SIZE)
    }

    const fn table_len() -> usize {
        T::TABLE_LEN
    }

    fn copy_with_level(&self, level: usize) -> Self {
        let mut c = *self;
        c.level = level;
        c
    }

    fn level_entry_size_shift(&self) -> usize {
        Self::page_size_pow() + (self.level - 1) * Self::table_len_pow()
    }

    fn index_of_table(&self, vaddr: VirtAddr) -> usize {
        (vaddr.raw() >> self.level_entry_size_shift()) & (Self::table_len() - 1)
    }

    fn level_entry_size(&self) -> usize {
        1 << self.level_entry_size_shift()
    }

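    /// Highest level whose entry size both divides `addr` and fits within
    /// `size`.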
    fn detect_align_level(&self, addr: usize, size: usize) -> usize {
        for level in (1..self.level + 1).rev() {
            let level_size = self.copy_with_level(level).level_entry_size();
            if addr % level_size == 0 && size >= level_size {
                return level;
            }
        }
        1
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::{GB, MB};

    #[test]
    fn test_log2() {
        assert_eq!(log2(512), 9);
        assert_eq!(log2(4096), 12);
    }

    #[derive(Clone, Copy)]
    struct TestTable;
    impl TableGeneric for TestTable {
        type PTE = TestPTE;

        fn flush(_vaddr: Option<crate::VirtAddr>) {
            todo!()
        }
    }

    #[derive(Clone, Copy, Debug)]
    #[repr(transparent)]
    struct TestPTE(usize);
    impl PTEGeneric for TestPTE {
        fn valid(&self) -> bool {
            todo!()
        }

        fn set_valid(&mut self, _valid: bool) {
            todo!()
        }

        fn is_huge(&self) -> bool {
            todo!()
        }

        fn set_is_huge(&mut self, _is_block: bool) {
            todo!()
        }

        fn paddr(&self) -> PhysAddr {
            todo!()
        }

        fn set_paddr(&mut self, _paddr: PhysAddr) {
            todo!()
        }
    }

    type Walk = PageWalk<TestTable>;

    #[test]
    fn test_level_entry_memory_size() {
        assert_eq!(Walk::new(1).level_entry_size(), 0x1000);
        assert_eq!(Walk::new(2).level_entry_size(), 2 * MB);
        assert_eq!(Walk::new(3).level_entry_size(), GB);
        assert_eq!(Walk::new(4).level_entry_size(), 512 * GB);
    }

    #[test]
    fn test_idx_of_table() {
        let w = Walk::new(1);
        assert_eq!(w.index_of_table(0.into()), 0);
        assert_eq!(w.index_of_table(0x1000.into()), 1);
        assert_eq!(w.index_of_table(0x2000.into()), 2);

        let w = Walk::new(2);
        assert_eq!(w.index_of_table(0.into()), 0);
        assert_eq!(w.index_of_table((2 * MB).into()), 1);

        let w = Walk::new(3);
        assert_eq!(w.index_of_table(GB.into()), 1);

        let w = Walk::new(4);
        assert_eq!(w.index_of_table((512 * GB).into()), 1);
    }

    #[test]
    fn test_idx_of_table2() {
        let w = Walk::new(3);
        assert_eq!(w.index_of_table(0xffff_ffc0_8000_0000.into()), 0x102);
    }

    #[test]
    fn test_detect_align() {
        let s = 4 * GB;

        let w = Walk::new(4);
        assert_eq!(w.detect_align_level(0x1000 as _, s), 1);

        assert_eq!(w.detect_align_level((0x1000 * 512) as _, s), 2);

        assert_eq!(w.detect_align_level((0x1000 * 512 * 512) as _, s), 3);

        assert_eq!(w.detect_align_level((2 * GB) as _, s), 3);
    }
}