use core::{
    alloc::Layout,
    ffi::CStr,
    ops::Range,
    ptr::NonNull,
    sync::atomic::{AtomicBool, AtomicUsize, Ordering},
};

use super::{Phys, PhysAddr, PhysCRange, Virt, once::OnceStatic, region::boot_regions};
pub use arrayvec::ArrayVec;
use buddy_system_allocator::Heap;
use memory_addr::MemoryAddr;
use page_table_generic::{Access, PagingError};
use spin::MutexGuard;

pub use crate::hal_al::mmu::{AccessSetting, CacheSetting};
use crate::{
    globals::{self, cpu_inited, global_val},
    hal_al::mmu::{MapConfig, PageTableRef},
    io::print::*,
    mem::TMP_PAGE_ALLOC_ADDR,
    platform::{self, mmu::page_size},
    println,
};

#[cfg(target_os = "none")]
use crate::mem::ALLOCATOR;

pub mod table;

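/// Base virtual address of the linear mapping: physical address `pa` is
/// mapped at virtual address `pa + LINER_OFFSET`.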
pub const LINER_OFFSET: usize = 0xffff_9000_0000_0000;
static TEXT_OFFSET: OnceStatic<usize> = OnceStatic::new(0);

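/// Returns the offset between the kernel image's virtual and physical
/// addresses, as recorded in [`TEXT_OFFSET`].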
pub fn get_text_va_offset() -> usize {
    *TEXT_OFFSET.get_ref()
}

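/// Builds a temporary page table, allocating from the tail of main memory,
/// and installs it as the kernel table.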
pub(crate) fn init_with_tmp_table() {
    println!("init tmp page table...");
    let table = new_boot_table().unwrap();
    platform::mmu::set_kernel_table(table);
}

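/// Builds the final kernel page table from the global heap, installs it, and
/// then returns the memory that backed the temporary table allocator to the
/// heap.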
pub(crate) fn init() {
    println!("init page table...");

    let table_ref = new_table().unwrap();
    let tb = unsafe { table::PageTable::raw_to_own(table_ref) };
    table::set_kernal_table(tb);

    unsafe {
        let start = TMP_PAGE_ALLOC_ADDR;
        let end = global_val().main_memory.end.raw();
        let len = end - start;
        let start = (start + LINER_OFFSET) as *mut u8;
        let ram = core::slice::from_raw_parts_mut(start, len);

        ALLOCATOR.add_to_heap(ram);
        println!(
            "expand heap [{:#x}, {:#x})",
            start as usize,
            start as usize + len
        );
    }
}

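/// Page-table frame allocator used before the final table is installed;
/// physical addresses are used directly as pointers (identity mapping).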
struct PageHeap(Heap<32>);

impl page_table_generic::Access for PageHeap {
    unsafe fn alloc(&mut self, layout: Layout) -> Option<page_table_generic::PhysAddr> {
        self.0
            .alloc(layout)
            .ok()
            .map(|ptr| (ptr.as_ptr() as usize).into())
    }

    unsafe fn dealloc(&mut self, ptr: page_table_generic::PhysAddr, layout: Layout) {
        self.0
            .dealloc(unsafe { NonNull::new_unchecked(ptr.raw() as _) }, layout);
    }

    fn phys_to_mut(&self, phys: page_table_generic::PhysAddr) -> *mut u8 {
        phys.raw() as *mut u8
    }
}

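/// A physical memory region that the boot page table must map; `name` points
/// to a `'static` C string (see [`BootRegion::new`] and [`BootRegion::name`]).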
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct BootRegion {
    pub range: PhysCRange,
    pub name: *const u8,
    pub access: AccessSetting,
    pub cache: CacheSetting,
    pub kind: BootMemoryKind,
}

unsafe impl Send for BootRegion {}

impl BootRegion {
    pub fn new(
        range: Range<PhysAddr>,
        name: &'static CStr,
        access: AccessSetting,
        cache: CacheSetting,
        kind: BootMemoryKind,
    ) -> Self {
        Self {
            range: range.into(),
            name: name.as_ptr() as _,
            access,
            cache,
            kind,
        }
    }

    pub fn new_with_len(
        start: PhysAddr,
        len: usize,
        name: &'static CStr,
        access: AccessSetting,
        cache: CacheSetting,
        kind: BootMemoryKind,
    ) -> Self {
        Self::new(start..start + len, name, access, cache, kind)
    }

    pub fn name(&self) -> &'static str {
        unsafe { CStr::from_ptr(self.name as _).to_str().unwrap() }
    }
}

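/// The kind of memory a [`BootRegion`] describes.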
#[repr(u8)]
#[derive(Debug, Clone, Copy)]
pub enum BootMemoryKind {
    KImage,
    Reserved,
    Ram,
    Mmio,
}

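// VA -> PA: addresses inside the kernel-image window are translated with the
// text offset; everything else is assumed to live in the linear mapping.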
impl<T> From<Virt<T>> for Phys<T> {
    fn from(value: Virt<T>) -> Self {
        let v = value.raw();
        if (0xffff_8000_0000_1000..0xffff_9000_0000_0000).contains(&v) {
            Phys::new(v - get_text_va_offset())
        } else {
            Phys::new(v - LINER_OFFSET)
        }
    }
}

const MB: usize = 1024 * 1024;

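/// Creates the boot page table. The allocator is fed from the upper half of
/// the free part of main memory; its start is published as
/// `TMP_PAGE_ALLOC_ADDR` so [`init`] can later hand the region to the heap.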
fn new_boot_table() -> Result<PageTableRef, &'static str> {
    let mut access = PageHeap(Heap::empty());
    let main_mem = global_val().main_memory.clone();

    let tmp_end = main_mem.end;
    let tmp_size = tmp_end - main_mem.start.align_up(MB);
    let tmp_pt = (main_mem.end - tmp_size / 2).raw();

    unsafe { super::TMP_PAGE_ALLOC_ADDR = tmp_pt };

    println!("page table allocator [{:#x}, {:#x})", tmp_pt, tmp_end.raw());
    unsafe { access.0.add_to_heap(tmp_pt, tmp_end.raw()) };
    new_table_with_access(&mut access)
}

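/// Creates the kernel page table, allocating from the global heap.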
fn new_table() -> Result<PageTableRef, &'static str> {
    let g = ALLOCATOR.lock_heap32();
    let mut access = HeapGuard(g);
    new_table_with_access(&mut access)
}

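/// Creates a page table and maps every boot region into it: `KImage` regions
/// are mapped at the kernel image offset, all others at `LINER_OFFSET`.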
fn new_table_with_access(access: &mut dyn Access) -> Result<PageTableRef, &'static str> {
    let table = platform::mmu::new_table(access).unwrap();

    println!("map boot regions...");

    for region in platform::boot_regions() {
        let offset = match region.kind {
            BootMemoryKind::KImage => platform::mmu::kimage_va_offset(),
            _ => LINER_OFFSET,
        };

        let pa_start = region.range.start.align_down(page_size());
        let va_start: Virt<u8> = (pa_start + offset).raw().into();
        let pa_end = region.range.end.align_up(page_size());

        let size = pa_end - pa_start;
        println!(
            " [{:<16}] [{:#x}, {:#x}) -> [{:#x}, {:#x}),\t{:?},\t{:?}",
            region.name(),
            va_start.raw(),
            va_start.raw() + size,
            pa_start.raw(),
            pa_start.raw() + size,
            region.access,
            region.cache
        );

        if let Err(e) = platform::mmu::table_map(
            table,
            access,
            &MapConfig {
                name: region.name(),
                va_start,
                pa_start,
                size,
                access: region.access,
                cache: region.cache,
            },
        ) {
            println!("map error: {e:?}");
        }
    }

    println!("Table: {table:?}");

    Ok(table)
}

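/// Page-table frame allocator backed by the global heap. The heap returns
/// linear-mapping virtual addresses, so values are translated by
/// `LINER_OFFSET` in both directions.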
struct HeapGuard<'a>(MutexGuard<'a, Heap<32>>);

impl Access for HeapGuard<'_> {
    unsafe fn alloc(&mut self, layout: Layout) -> Option<page_table_generic::PhysAddr> {
        self.0
            .alloc(layout)
            .ok()
            .map(|ptr| (ptr.as_ptr() as usize - LINER_OFFSET).into())
    }

    unsafe fn dealloc(&mut self, ptr: page_table_generic::PhysAddr, layout: Layout) {
        self.0.dealloc(
            unsafe { NonNull::new_unchecked((ptr.raw() + LINER_OFFSET) as _) },
            layout,
        );
    }

    fn phys_to_mut(&self, phys: page_table_generic::PhysAddr) -> *mut u8 {
        (phys.raw() + LINER_OFFSET) as *mut u8
    }
}

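/// Maps `config` into the current kernel page table.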
pub(crate) fn map(config: &MapConfig) -> Result<(), PagingError> {
    table::with_kernel_table(|t| t.map(config))
}