#![allow(unused)]

use core::{
    alloc::GlobalAlloc,
    ops::Range,
    ptr::{NonNull, null_mut, slice_from_raw_parts_mut},
    sync::atomic::{AtomicUsize, Ordering},
};

use buddy_system_allocator::Heap;
use log::debug;
use page_table_generic::PagingError;
use spin::{Mutex, Once};

use crate::{
    globals::global_val,
    hal_al::mmu::MapConfig,
    irq::NoIrqGuard,
    mem::{
        mmu::{AccessSetting, BootMemoryKind, BootRegion, CacheSetting, LINER_OFFSET},
        once::OnceStatic,
    },
    platform::{self, kstack_size},
    println,
};

pub use crate::platform::page_size;

mod addr;
mod cache;
pub mod mmu;
pub mod once;
pub mod region;
pub use addr::*;

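/// Global kernel heap allocator, split into a 32-bit-addressable heap and a
/// 64-bit heap so that DMA-limited allocations can be served from low memory.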
#[cfg(target_os = "none")]
#[global_allocator]
static ALLOCATOR: KAllocator = KAllocator {
    heap32: Mutex::new(Heap::empty()),
    heap64: Mutex::new(Heap::empty()),
};

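/// Physical address recorded by early boot code; when non-zero, `init` ends
/// the initial heap region at this address instead of at the end of main
/// memory.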
static mut TMP_PAGE_ALLOC_ADDR: usize = 0;

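/// Allocates memory whose physical address satisfies `dma_mask`: masks of at
/// most `u32::MAX` are served only from the low (32-bit) heap. On hosted
/// builds (`not(target_os = "none")`) the mask is ignored and the system
/// allocator is used instead.
///
/// # Safety
///
/// Callers must uphold the usual [`core::alloc::GlobalAlloc::alloc`] contract;
/// in particular, `layout` must have a non-zero size.
///
/// A minimal usage sketch (the buffer size and alignment below are made up for
/// illustration):
///
/// ```ignore
/// use core::alloc::Layout;
///
/// // Request 512 bytes that a 32-bit DMA engine can address.
/// let layout = Layout::from_size_align(512, 64).unwrap();
/// let ptr = unsafe { alloc_with_mask(layout, u32::MAX as u64) };
/// assert!(!ptr.is_null());
/// ```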
pub unsafe fn alloc_with_mask(layout: core::alloc::Layout, dma_mask: u64) -> *mut u8 {
    #[cfg(target_os = "none")]
    {
        unsafe { ALLOCATOR.alloc_with_mask(layout, dma_mask) }
    }
    #[cfg(not(target_os = "none"))]
    {
        let _ = dma_mask;
        unsafe { alloc::alloc::alloc(layout) }
    }
}

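/// Kernel allocator backed by two buddy-system heaps: `heap32` holds memory
/// that is physically addressable with 32 bits, `heap64` holds everything
/// else.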
pub struct KAllocator {
    heap32: Mutex<Heap<32>>,
    heap64: Mutex<Heap<64>>,
}

impl KAllocator {
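    /// Drops any previously registered memory and re-initializes the matching
    /// heap with `memory` as its only region.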
    pub fn reset(&self, memory: &mut [u8]) {
        let range = memory.as_mut_ptr_range();
        let start = range.start as usize;
        let end = range.end as usize;
        let len = memory.len();

        {
            let mut heap32 = self.heap32.lock();
            *heap32 = Heap::empty();
        }
        {
            let mut heap64 = self.heap64.lock();
            *heap64 = Heap::empty();
        }

        if Self::range_within_u32(start, end) {
            let mut heap32 = self.heap32.lock();
            unsafe { heap32.init(start, len) };
        } else {
            let mut heap64 = self.heap64.lock();
            unsafe { heap64.init(start, len) };
        }
    }

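    /// Adds `memory` to the 32-bit heap if its whole physical range lies below
    /// 4 GiB, otherwise to the 64-bit heap.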
    pub fn add_to_heap(&self, memory: &mut [u8]) {
        let range = memory.as_mut_ptr_range();
        let start = range.start as usize;
        let end = range.end as usize;

        if Self::range_within_u32(start, end) {
            let mut heap32 = self.heap32.lock();
            unsafe { heap32.add_to_heap(start, end) };
        } else {
            let mut heap64 = self.heap64.lock();
            unsafe { heap64.add_to_heap(start, end) };
        }
    }

    pub(crate) fn lock_heap32(&self) -> spin::MutexGuard<'_, Heap<32>> {
        self.heap32.lock()
    }

    pub(crate) fn lock_heap64(&self) -> spin::MutexGuard<'_, Heap<64>> {
        self.heap64.lock()
    }

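    /// Allocates with interrupts masked. DMA masks within 32 bits are served
    /// only from `heap32`; wider masks prefer `heap64` and fall back to
    /// `heap32`. Returns a null pointer on failure.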
    pub(crate) unsafe fn alloc_with_mask(
        &self,
        layout: core::alloc::Layout,
        dma_mask: u64,
    ) -> *mut u8 {
        let guard = NoIrqGuard::new();
        let result = if dma_mask <= u32::MAX as u64 {
            Self::try_alloc(&self.heap32, layout)
        } else {
            Self::try_alloc(&self.heap64, layout).or_else(|| Self::try_alloc(&self.heap32, layout))
        };
        drop(guard);

        result.map_or(null_mut(), |ptr| ptr.as_ptr())
    }

    #[inline]
    fn try_alloc<const BITS: usize>(
        heap: &Mutex<Heap<BITS>>,
        layout: core::alloc::Layout,
    ) -> Option<NonNull<u8>> {
        let mut guard = heap.lock();
        guard.alloc(layout).ok()
    }

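    /// Returns `true` if the physical addresses backing `[start, end)` all lie
    /// within the 32-bit address space.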
    #[inline]
    fn range_within_u32(start: usize, end: usize) -> bool {
        if start >= end {
            return false;
        }

        let last = end - 1;

        let ps = PhysAddr::from(VirtAddr::from(start));
        let pe = PhysAddr::from(VirtAddr::from(last));

        let limit = PhysAddr::from(u32::MAX as usize);
        ps <= limit && pe <= limit
    }

    #[inline]
    fn ptr_in_32bit(ptr: *mut u8) -> bool {
        let phys = PhysAddr::from(VirtAddr::from(ptr as usize));
        phys <= PhysAddr::from(u32::MAX as usize)
    }
}

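// Regular allocations prefer the 64-bit heap and fall back to the 32-bit heap,
// keeping low memory available for DMA-constrained callers; frees are routed
// back by the physical address of the pointer.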
unsafe impl GlobalAlloc for KAllocator {
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        let guard = NoIrqGuard::new();
        let result =
            Self::try_alloc(&self.heap64, layout).or_else(|| Self::try_alloc(&self.heap32, layout));
        drop(guard);

        result.map_or(null_mut(), |ptr| ptr.as_ptr())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
        let guard = NoIrqGuard::new();
        let nn = unsafe { NonNull::new_unchecked(ptr) };

        if Self::ptr_in_32bit(ptr) {
            self.heap32.lock().dealloc(nn, layout);
        } else {
            self.heap64.lock().dealloc(nn, layout);
        }
        drop(guard);
    }
}

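/// Brings up the kernel heap: builds the temporary MMU mapping, seeds the heap
/// with main memory (clamped below `TMP_PAGE_ALLOC_ADDR` if set), finishes MMU
/// initialization, then adds the remaining RAM regions and initializes the
/// cache layer.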
pub(crate) fn init() {
    let range = global_val().main_memory.clone();
    mmu::init_with_tmp_table();

    let mut start = VirtAddr::from(range.start.raw() + LINER_OFFSET);
    let mut end = VirtAddr::from(range.end.raw() + LINER_OFFSET);

    unsafe {
        if TMP_PAGE_ALLOC_ADDR != 0 {
            end = VirtAddr::from(TMP_PAGE_ALLOC_ADDR + LINER_OFFSET);
        }
    }

    println!("heap add memory [{}, {})", start, end);
    #[cfg(target_os = "none")]
    ALLOCATOR.add_to_heap(unsafe { &mut *slice_from_raw_parts_mut(start.into(), end - start) });

    println!("heap initialized");

    mmu::init();

    add_all_ram();

    cache::init();
}

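/// Adds every boot RAM region except main memory (already added by `init`) to
/// the heap, skipping regions smaller than 2 MiB.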
fn add_all_ram() {
    let main = global_val().main_memory.clone();

    for region in platform::boot_regions() {
        if !matches!(region.kind, BootMemoryKind::Ram) {
            continue;
        }

        if region.range.to_range().contains(&main.start) {
            continue;
        }

        let start = VirtAddr::from(region.range.start.raw() + LINER_OFFSET);
        let end = VirtAddr::from(region.range.end.raw() + LINER_OFFSET);
        let len = end - start;

        if len < 2 * 1024 * 1024 {
            println!(
                "skip small ram region [{:#x}, {:#x})",
                start.raw(),
                end.raw()
            );
            continue;
        }

        println!("Heap add memory [{}, {})", start, end);
        #[cfg(target_os = "none")]
        ALLOCATOR.add_to_heap(unsafe { &mut *slice_from_raw_parts_mut(start.into(), len) });
    }
}

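/// Picks the main memory region from the boot memory map: subtracts all
/// non-RAM regions from each RAM region, then selects the lowest-addressed
/// leftover range that is at least 16 MiB.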
pub(crate) fn find_main_memory() -> Option<BootRegion> {
    let mut ram_regions = heapless::Vec::<_, 32>::new();
    let mut non_ram_regions = heapless::Vec::<_, 32>::new();

    for r in platform::boot_regions() {
        if matches!(r.kind, BootMemoryKind::Ram) {
            ram_regions.push(r).ok()?;
        } else {
            non_ram_regions.push(r).ok()?;
        }
    }

    let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

    for ram in &ram_regions {
        let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
        current_ranges.push(ram.range).ok()?;

        for non_ram in &non_ram_regions {
            let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

            for current_range in &current_ranges {
                let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                if overlap_start < overlap_end {
                    if current_range.start.raw() < overlap_start {
                        new_ranges
                            .push(PhysCRange {
                                start: current_range.start,
                                end: PhysAddr::new(overlap_start),
                            })
                            .ok()?;
                    }
                    if overlap_end < current_range.end.raw() {
                        new_ranges
                            .push(PhysCRange {
                                start: PhysAddr::new(overlap_end),
                                end: current_range.end,
                            })
                            .ok()?;
                    }
                } else {
                    new_ranges.push(*current_range).ok()?;
                }
            }
            current_ranges = new_ranges;
        }

        for range in current_ranges {
            available_regions.push(range).ok()?;
        }
    }

    const MIN_SIZE: usize = 16 * 1024 * 1024;
    let mut best_region: Option<PhysCRange> = None;

    for region in &available_regions {
        let size = region.end.raw() - region.start.raw();
        if size >= MIN_SIZE {
            match best_region {
                None => best_region = Some(*region),
                Some(current_best) => {
                    if region.start.raw() < current_best.start.raw() {
                        best_region = Some(*region);
                    }
                }
            }
        }
    }

    if let Some(main_range) = best_region {
        println!(
            "Selected main memory: {:?}, size: {}MB",
            main_range,
            (main_range.end.raw() - main_range.start.raw()) / (1024 * 1024)
        );

        let first_ram = ram_regions.first()?;
        Some(BootRegion {
            range: main_range,
            name: c"main memory".as_ptr() as _,
            access: first_ram.access,
            cache: first_ram.cache,
            kind: BootMemoryKind::Ram,
        })
    } else {
        println!("no suitable main memory region found (>= 16MB)");
        None
    }
}

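/// Maps a region into the kernel page tables according to `config`.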
pub fn map(config: &MapConfig) -> Result<(), PagingError> {
    mmu::map(config)
}

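/// Maps `size` bytes of MMIO starting at `paddr` as device memory at its
/// linear-map virtual address and returns that address as a pointer. An
/// already-mapped range is accepted silently; any other mapping error panics.
///
/// A minimal usage sketch (the base address below is hypothetical):
///
/// ```ignore
/// let base = iomap(PhysAddr::new(0x0900_0000), 0x1000);
/// // Volatile-read the first byte-wide register of the device.
/// let reg0 = unsafe { base.as_ptr().read_volatile() };
/// ```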
pub fn iomap(paddr: PhysAddr, size: usize) -> NonNull<u8> {
    let vaddr = VirtAddr::from(paddr.raw() + LINER_OFFSET);
    match mmu::map(&MapConfig {
        name: "iomap",
        va_start: vaddr,
        pa_start: paddr,
        size,
        access: AccessSetting::ReadWrite,
        cache: CacheSetting::Device,
    }) {
        Ok(_) => {}
        Err(e) => match e {
            PagingError::AlreadyMapped => {}
            _ => panic!("iomap failed: {:?}", e),
        },
    }

    let ptr: *mut u8 = vaddr.into();
    unsafe { NonNull::new_unchecked(ptr) }
}

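// The tests below exercise the region carve-out and selection logic used by
// `find_main_memory`. The carve-out loop is repeated inline against fixed
// region lists built with `create_test_region`, since the tests do not go
// through `platform::boot_regions()`.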
#[cfg(test)]
mod tests {
    use super::*;
    use crate::hal_al::mmu::{AccessSetting, CacheSetting};
    use core::ffi::CStr;

    fn create_test_region(
        start: usize,
        end: usize,
        name: &'static str,
        kind: BootMemoryKind,
    ) -> BootRegion {
        BootRegion {
            range: PhysCRange {
                start: PhysAddr::new(start),
                end: PhysAddr::new(end),
            },
            name: name.as_ptr(),
            access: AccessSetting::Read | AccessSetting::Write,
            cache: CacheSetting::Normal,
            kind,
        }
    }

    fn mock_boot_regions(regions: &[BootRegion]) -> impl Iterator<Item = BootRegion> + '_ {
        regions.iter().copied()
    }

    #[test]
    fn test_find_main_memory_simple_case() {
        let regions = [
            create_test_region(0x40000000, 0x60000000, "ram", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 1);
        assert_eq!(available_regions[0].start.raw(), 0x40000000);
        assert_eq!(available_regions[0].end.raw(), 0x60000000);

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let size = available_regions[0].end.raw() - available_regions[0].start.raw();
        assert!(size >= MIN_SIZE);
    }

    #[test]
    fn test_find_main_memory_with_overlap() {
        let regions = [
            create_test_region(0x40000000, 0x60000000, "ram", BootMemoryKind::Ram),
            create_test_region(0x45000000, 0x46000000, "reserved", BootMemoryKind::Reserved),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 2);

        let region1 = available_regions
            .iter()
            .find(|r| r.start.raw() == 0x40000000)
            .unwrap();
        assert_eq!(region1.end.raw(), 0x45000000);

        let region2 = available_regions
            .iter()
            .find(|r| r.start.raw() == 0x46000000)
            .unwrap();
        assert_eq!(region2.end.raw(), 0x60000000);

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            assert!(size >= MIN_SIZE);
        }
    }

    #[test]
    fn test_find_main_memory_multiple_overlaps() {
        let regions = [
            create_test_region(0x40000000, 0x80000000, "ram", BootMemoryKind::Ram),
            create_test_region(
                0x45000000,
                0x46000000,
                "reserved1",
                BootMemoryKind::Reserved,
            ),
            create_test_region(
                0x50000000,
                0x52000000,
                "reserved2",
                BootMemoryKind::Reserved,
            ),
            create_test_region(0x70000000, 0x71000000, "kimage", BootMemoryKind::KImage),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 4);

        let expected_regions = [
            (0x40000000, 0x45000000),
            (0x46000000, 0x50000000),
            (0x52000000, 0x70000000),
            (0x71000000, 0x80000000),
        ];

        for (start, end) in expected_regions {
            let region = available_regions
                .iter()
                .find(|r| r.start.raw() == start)
                .unwrap();
            assert_eq!(region.end.raw(), end);
        }
    }

    #[test]
    fn test_find_main_memory_select_lowest_address() {
        let regions = [
            create_test_region(0x80000000, 0x90000000, "ram1", BootMemoryKind::Ram),
            create_test_region(0x40000000, 0x50000000, "ram2", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<BootRegion, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            available_regions.push(ram.range).unwrap();
        }

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let mut best_region: Option<PhysCRange> = None;

        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            if size >= MIN_SIZE {
                match best_region {
                    None => best_region = Some(*region),
                    Some(current_best) => {
                        if region.start.raw() < current_best.start.raw() {
                            best_region = Some(*region);
                        }
                    }
                }
            }
        }

        assert!(best_region.is_some());
        let selected = best_region.unwrap();
        assert_eq!(selected.start.raw(), 0x40000000);
        assert_eq!(selected.end.raw(), 0x50000000);
    }

    #[test]
    fn test_find_main_memory_no_suitable_region() {
        let regions = [
            create_test_region(0x40000000, 0x40800000, "ram1", BootMemoryKind::Ram),
            create_test_region(0x50000000, 0x50400000, "ram2", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let non_ram_regions = heapless::Vec::<BootRegion, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            available_regions.push(ram.range).unwrap();
        }

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let mut best_region: Option<PhysCRange> = None;

        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            if size >= MIN_SIZE {
                match best_region {
                    None => best_region = Some(*region),
                    Some(current_best) => {
                        if region.start.raw() < current_best.start.raw() {
                            best_region = Some(*region);
                        }
                    }
                }
            }
        }

        assert!(best_region.is_none());
    }

    #[test]
    fn test_find_main_memory_edge_case_exact_overlap() {
        let regions = [
            create_test_region(0x40000000, 0x50000000, "ram", BootMemoryKind::Ram),
            create_test_region(0x40000000, 0x50000000, "reserved", BootMemoryKind::Reserved),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 0);
    }
}