// sparreal_kernel/mem/mmu/table.rs
use page_table_generic::PagingError;
2use spin::Mutex;
3
4#[cfg(target_os = "none")]
5use crate::mem::ALLOCATOR;
6use crate::{
7 hal_al::mmu::MapConfig,
8 irq::NoIrqGuard,
9 mem::{
10 Phys, PhysAddr, VirtAddr,
11 mmu::{AccessSetting, CacheSetting, HeapGuard},
12 },
13 platform,
14};
// The kernel's global page table slot, guarded by a spinlock.
// `None` until `set_kernal_table` installs a table during boot.
static KERNEL_TABLE: Mutex<Option<PageTable>> = Mutex::new(None);
16
17pub(crate) fn set_kernal_table(table: PageTable) {
18 let g = NoIrqGuard::new();
19 let mut guard = KERNEL_TABLE.lock();
20 if guard.is_some() {
21 panic!("kernel table is already set");
22 }
23 platform::mmu::set_kernel_table(table.raw);
24 *guard = Some(table);
25 drop(g);
26}
27
28pub fn replace_kernel_table(new: PageTable) -> Option<PageTable> {
29 let g = NoIrqGuard::new();
30 let mut guard = KERNEL_TABLE.lock();
31 let current = platform::mmu::get_kernel_table();
32 platform::mmu::set_kernel_table(new.raw);
33 let mut old = guard.replace(new);
34 if old.is_none() {
35 old = Some(unsafe { PageTable::raw_to_own(current) });
36 }
37 drop(g);
38 old
39}
40
41pub fn new_table() -> Result<PageTable, PagingError> {
42 let mut g = ALLOCATOR.lock_heap32();
43 let mut access = HeapGuard(g);
44 let raw = platform::mmu::new_table(&mut access)?;
45 Ok(unsafe { PageTable::raw_to_own(raw) })
46}
47
48pub fn with_kernel_table<F, R>(f: F) -> R
49where
50 F: FnOnce(&mut PageTable) -> R,
51{
52 let g = NoIrqGuard::new();
53 let mut guard = KERNEL_TABLE.lock();
54 if let Some(ref mut table) = *guard {
55 let r = f(table);
56 drop(g);
57 r
58 } else {
59 panic!("kernel table is not initialized");
60 }
61}
62
/// Owning handle over a hardware page table.
///
/// Dropping a `PageTable` releases the underlying table through the
/// platform layer, so at most one handle should exist per raw table
/// (see `raw_to_own`).
pub struct PageTable {
    // HAL-level reference (id + physical address); passed by value to the
    // platform layer in `map`/`drop`.
    raw: crate::hal_al::mmu::PageTableRef,
}
66
67impl PageTable {
68 pub(crate) unsafe fn raw_to_own(raw: crate::hal_al::mmu::PageTableRef) -> Self {
69 Self { raw }
70 }
71
72 pub fn id(&self) -> usize {
73 self.raw.id
74 }
75
76 pub fn addr(&self) -> Phys<u8> {
77 self.raw.addr
78 }
79
80 pub fn map(&mut self, config: &MapConfig) -> Result<(), PagingError> {
81 let mut g = ALLOCATOR.lock_heap32();
82 let mut access = HeapGuard(g);
83 platform::mmu::table_map(self.raw, &mut access, config)
84 }
85}
86
87impl Drop for PageTable {
88 fn drop(&mut self) {
89 let mut g = ALLOCATOR.lock_heap32();
90 let mut access = HeapGuard(g);
91 platform::mmu::release_table(self.raw, &mut access);
92 }
93}