sparreal_kernel/mem/mmu/
mod.rs

use core::{
    alloc::Layout,
    ffi::CStr,
    ops::Range,
    ptr::NonNull,
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};

use super::{Phys, PhysAddr, PhysCRange, Virt, once::OnceStatic, region::boot_regions};
pub use arrayvec::ArrayVec;
use buddy_system_allocator::Heap;
use memory_addr::MemoryAddr;
use page_table_generic::{Access, PagingError};
use ranges_ext::RangeInfo;
use spin::MutexGuard;

pub use crate::hal_al::mmu::{AccessSetting, CacheSetting};
use crate::{
    globals::{self, cpu_inited, global_val},
    hal_al::mmu::{MapConfig, PageTableRef},
    io::print::*,
    mem::TMP_PAGE_ALLOC_ADDR,
    platform::{self, mmu::page_size},
    println,
};

#[cfg(target_os = "none")]
use crate::mem::ALLOCATOR;

pub mod table;

// pub use paging::init_table;
// pub use paging::iomap;

// pub const LINER_OFFSET: usize = 0xffff_f000_0000_0000;
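/// Base virtual address of the kernel's linear mapping of physical memory
/// (`va = pa + LINER_OFFSET`); see `HeapGuard` and the second mapping pass in
/// `new_table_with_access`.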
pub const LINER_OFFSET: usize = 0xffff_9000_0000_0000;
static TEXT_OFFSET: OnceStatic<usize> = OnceStatic::new(0);

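/// Offset between the kernel image's virtual (link) addresses and its physical
/// load address; `TEXT_OFFSET` is initialized outside this module.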
pub fn get_text_va_offset() -> usize {
    *TEXT_OFFSET.get_ref()
}

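/// Create a temporary boot page table and install it as the kernel table.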
pub(crate) fn init_with_tmp_table() {
    println!("init tmp page table...");
    let table = new_boot_table().unwrap();
    platform::mmu::set_kernel_table(table);
}

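/// Build the final kernel page table using the global heap, install it, then
/// return the temporary page-table area (from `TMP_PAGE_ALLOC_ADDR` to the end
/// of main memory) to the heap through the linear mapping.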
pub(crate) fn init() {
    println!("init page table...");

    let table_ref = new_table().unwrap();
    let tb = unsafe { table::PageTable::raw_to_own(table_ref) };
    table::set_kernal_table(tb);

    unsafe {
        let start = TMP_PAGE_ALLOC_ADDR;
        let end = global_val().main_memory.end.raw();
        let len = end - start;
        let start = (start + LINER_OFFSET) as *mut u8;
        let ram = core::slice::from_raw_parts_mut(start, len);

        ALLOCATOR.add_to_heap(ram);
        println!(
            "expand heap [{:#x}, {:#x})",
            start as usize,
            start as usize + len
        );
    }
}

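/// Early frame allocator for the boot page table; at this stage physical and
/// virtual addresses coincide, so no offset is applied.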
struct PageHeap(Heap<32>);

impl page_table_generic::Access for PageHeap {
    unsafe fn alloc(&mut self, layout: Layout) -> Option<page_table_generic::PhysAddr> {
        self.0
            .alloc(layout)
            .ok()
            .map(|ptr| (ptr.as_ptr() as usize).into())
    }

    unsafe fn dealloc(&mut self, ptr: page_table_generic::PhysAddr, layout: Layout) {
        self.0
            .dealloc(unsafe { NonNull::new_unchecked(ptr.raw() as _) }, layout);
    }

    fn phys_to_mut(&self, phys: page_table_generic::PhysAddr) -> *mut u8 {
        phys.raw() as *mut u8
    }
}

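/// A physical memory region known at boot, together with the attributes used
/// when mapping it.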
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct BootRegion {
    // Link address
    pub range: PhysCRange,
    pub name: *const u8,
    pub access: AccessSetting,
    pub cache: CacheSetting,
    pub kind: BootMemoryKind,
}

unsafe impl Send for BootRegion {}

impl BootRegion {
    pub fn new(
        range: Range<PhysAddr>,
        name: &'static CStr,
        access: AccessSetting,
        cache: CacheSetting,
        kind: BootMemoryKind,
    ) -> Self {
        Self {
            range: range.into(),
            name: name.as_ptr() as _,
            access,
            cache,
            kind,
        }
    }

    pub fn new_with_len(
        start: PhysAddr,
        len: usize,
        name: &'static CStr,
        access: AccessSetting,
        cache: CacheSetting,
        kind: BootMemoryKind,
    ) -> Self {
        Self::new(start..start + len, name, access, cache, kind)
    }

    pub fn name(&self) -> &'static str {
        unsafe { CStr::from_ptr(self.name as _).to_str().unwrap() }
    }

    // pub fn va_offset(&self) -> usize {
    //     match self.kind {
    //         RegionKind::Stack => {
    //             if cpu_inited() {
    //                 self.kind.va_offset()
    //             } else {
    //                 // cpu0
    //                 STACK_BOTTOM - self.range.start.raw()
    //             }
    //         }
    //         _ => self.kind.va_offset(),
    //     }
    // }
}

impl RangeInfo for BootRegion {
    type Kind = KindWrapper;

    type Type = PhysAddr;

    fn range(&self) -> Range<Self::Type> {
        self.range.to_range()
    }

    fn kind(&self) -> Self::Kind {
        KindWrapper {
            kind: self.kind,
            cache: self.cache,
            access: self.access,
        }
    }

    fn overwritable(&self) -> bool {
        self.kind == BootMemoryKind::Ram
    }

    fn clone_with_range(&self, range: Range<Self::Type>) -> Self {
        Self {
            range: range.into(),
            name: self.name,
            access: self.access,
            cache: self.cache,
            kind: self.kind,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct KindWrapper {
    kind: BootMemoryKind,
    cache: CacheSetting,
    access: AccessSetting,
}

#[repr(u8)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BootMemoryKind {
    /// Kernel image; mapped at the kernel-image VA offset.
    KImage,
    Reserved,
    Ram,
    Mmio,
}

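// Convert a kernel virtual address back to its physical address: addresses in
// the kernel-image window are translated with the text offset, everything else
// with the linear-mapping offset.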
impl<T> From<Virt<T>> for Phys<T> {
    fn from(value: Virt<T>) -> Self {
        let v = value.raw();
        if (0xffff_8000_0000_1000..0xffff_9000_0000_0000).contains(&v) {
            Phys::new(v - get_text_va_offset())
        } else {
            Phys::new(v - LINER_OFFSET)
        }
    }
}

const MB: usize = 1024 * 1024;

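/// Build the boot page table using a temporary frame allocator carved out of
/// the upper half of main memory (its start is recorded in
/// `TMP_PAGE_ALLOC_ADDR`).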
fn new_boot_table() -> Result<PageTableRef, &'static str> {
    let mut access = PageHeap(Heap::empty());
    let main_mem = global_val().main_memory.clone();

    let tmp_end = main_mem.end;
    let tmp_size = tmp_end - main_mem.start.align_up(MB);
    let tmp_pt = (main_mem.end - tmp_size / 2).raw();

    unsafe { super::TMP_PAGE_ALLOC_ADDR = tmp_pt };

    println!("page table allocator {:#x}, {:#x}", tmp_pt, tmp_end.raw());
    unsafe { access.0.add_to_heap(tmp_pt, tmp_end.raw()) };
    new_table_with_access(&mut access)
}

fn new_table() -> Result<PageTableRef, &'static str> {
    let g = ALLOCATOR.lock_heap32();
    let mut access = HeapGuard(g);
    new_table_with_access(&mut access)
}

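/// Create a new page table with the given frame allocator and map all boot
/// regions into it: kernel-image regions at their image offset first, then
/// every region into the linear window.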
fn new_table_with_access(access: &mut dyn Access) -> Result<PageTableRef, &'static str> {
    let table = platform::mmu::new_table(access).unwrap();

    println!("map boot regions...");
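    // First pass: map kernel-image regions at the kernel image VA offset.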
    for region in platform::boot_regions() {
        let offset = match region.kind {
            BootMemoryKind::KImage => platform::mmu::kimage_va_offset(),
            _ => continue,
        };

        let pa_start = region.range.start.align_down(page_size());
        let va_start: Virt<u8> = (pa_start + offset).raw().into();
        let pa_end = region.range.end.align_up(page_size());

        let size = pa_end - pa_start;
        println!(
            "  [{:<16}] [{:#x}, {:#x}) -> [{:#x}, {:#x}),\t{:?},\t{:?}",
            region.name(),
            va_start.raw(),
            va_start.raw() + size,
            pa_start.raw(),
            pa_start.raw() + size,
            region.access,
            region.cache
        );

        if let Err(e) = platform::mmu::table_map(
            table,
            access,
            &MapConfig {
                name: region.name(),
                va_start,
                pa_start,
                size,
                access: region.access,
                cache: region.cache,
            },
        ) {
            println!("map error: {e:?}");
        }
    }
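    // Second pass: map every boot region into the linear window at LINER_OFFSET.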
    for region in platform::boot_regions() {
        let offset = LINER_OFFSET;

        let pa_start = region.range.start.align_down(page_size());
        let va_start: Virt<u8> = (pa_start + offset).raw().into();
        let pa_end = region.range.end.align_up(page_size());

        let size = pa_end - pa_start;
        println!(
            "  [{:<16}] [{:#x}, {:#x}) -> [{:#x}, {:#x}),\t{:?},\t{:?}",
            region.name(),
            va_start.raw(),
            va_start.raw() + size,
            pa_start.raw(),
            pa_start.raw() + size,
            region.access,
            region.cache
        );

        if let Err(e) = platform::mmu::table_map(
            table,
            access,
            &MapConfig {
                name: region.name(),
                va_start,
                pa_start,
                size,
                access: region.access,
                cache: region.cache,
            },
        ) {
            println!("map error: {e:?}");
        }
    }

    println!("Table: {table:?}");

    Ok(table)
}

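/// Frame allocator backed by the locked global heap; heap pointers live in the
/// linear window, so physical addresses are derived by subtracting (alloc) or
/// adding (dealloc/phys_to_mut) `LINER_OFFSET`.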
struct HeapGuard<'a>(MutexGuard<'a, Heap<32>>);

impl Access for HeapGuard<'_> {
    unsafe fn alloc(&mut self, layout: Layout) -> Option<page_table_generic::PhysAddr> {
        self.0
            .alloc(layout)
            .ok()
            .map(|ptr| (ptr.as_ptr() as usize - LINER_OFFSET).into())
    }

    unsafe fn dealloc(&mut self, ptr: page_table_generic::PhysAddr, layout: Layout) {
        self.0.dealloc(
            unsafe { NonNull::new_unchecked((ptr.raw() + LINER_OFFSET) as _) },
            layout,
        );
    }

    fn phys_to_mut(&self, phys: page_table_generic::PhysAddr) -> *mut u8 {
        (phys.raw() + LINER_OFFSET) as *mut u8
    }
}

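/// Map a region described by `config` into the active kernel page table.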
pub(crate) fn map(config: &MapConfig) -> Result<(), PagingError> {
    table::with_kernel_table(|t| t.map(config))
}