#![allow(unused)]

use core::{
    alloc::GlobalAlloc,
    ops::Range,
    ptr::{NonNull, null_mut, slice_from_raw_parts_mut},
    sync::atomic::{AtomicUsize, Ordering},
};

use buddy_system_allocator::Heap;
use log::debug;
use page_table_generic::PagingError;
use spin::{Mutex, Once};

use crate::{
    globals::global_val,
    hal_al::mmu::MapConfig,
    irq::NoIrqGuard,
    mem::{
        mmu::{AccessSetting, BootMemoryKind, BootRegion, CacheSetting, LINER_OFFSET},
        once::OnceStatic,
    },
    platform::{self, kstack_size},
    println,
};

pub use crate::platform::page_size;

mod addr;
mod cache;
pub mod mmu;
pub mod once;
pub mod region;
pub use addr::*;

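// The kernel heap is split into two buddy allocators: `heap32` manages memory
// whose physical addresses fit below 4 GiB (reachable by devices with a
// 32-bit DMA mask), while `heap64` manages everything else. Regular
// allocations prefer `heap64` and fall back to `heap32`, keeping low memory
// available for DMA allocations where possible.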
#[cfg(target_os = "none")]
#[global_allocator]
static ALLOCATOR: KAllocator = KAllocator {
    heap32: Mutex::new(Heap::empty()),
    heap64: Mutex::new(Heap::empty()),
};

static mut TMP_PAGE_ALLOC_ADDR: usize = 0;

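/// Allocates memory that a device with the given `dma_mask` can address.
///
/// On bare metal (`target_os = "none"`), a mask of at most `u32::MAX` is
/// served only from the 32-bit heap; on hosted targets the mask is ignored
/// and the request is forwarded to the system allocator. Returns a null
/// pointer on failure.
///
/// # Safety
/// `layout` must describe a non-zero size, as required by the underlying
/// allocators.
///
/// # Example (illustrative sketch)
/// ```ignore
/// use core::alloc::Layout;
/// // A 4 KiB buffer for a device limited to 32-bit DMA addresses.
/// let layout = Layout::from_size_align(4096, 4096).unwrap();
/// let ptr = unsafe { alloc_with_mask(layout, u32::MAX as u64) };
/// assert!(!ptr.is_null());
/// ```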
pub unsafe fn alloc_with_mask(layout: core::alloc::Layout, dma_mask: u64) -> *mut u8 {
    #[cfg(target_os = "none")]
    {
        unsafe { ALLOCATOR.alloc_with_mask(layout, dma_mask) }
    }
    #[cfg(not(target_os = "none"))]
    {
        let _ = dma_mask;
        unsafe { alloc::alloc::alloc(layout) }
    }
}

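/// Kernel heap allocator backed by two buddy heaps: `heap32` for memory whose
/// physical addresses fit in 32 bits, `heap64` for the rest.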
pub struct KAllocator {
    heap32: Mutex<Heap<32>>,
    heap64: Mutex<Heap<64>>,
}

impl KAllocator {
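    /// Empties both heaps, then re-initializes whichever heap matches
    /// `memory`'s physical address range with the given slice.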
    pub fn reset(&self, memory: &mut [u8]) {
        let range = memory.as_mut_ptr_range();
        let start = range.start as usize;
        let end = range.end as usize;
        let len = memory.len();

        {
            let mut heap32 = self.heap32.lock();
            *heap32 = Heap::empty();
        }
        {
            let mut heap64 = self.heap64.lock();
            *heap64 = Heap::empty();
        }

        if Self::range_within_u32(start, end) {
            let mut heap32 = self.heap32.lock();
            unsafe { heap32.init(start, len) };
        } else {
            let mut heap64 = self.heap64.lock();
            unsafe { heap64.init(start, len) };
        }
    }

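    /// Adds `memory` to whichever heap matches its physical address range.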
    pub fn add_to_heap(&self, memory: &mut [u8]) {
        let range = memory.as_mut_ptr_range();
        let start = range.start as usize;
        let end = range.end as usize;

        if Self::range_within_u32(start, end) {
            let mut heap32 = self.heap32.lock();
            unsafe { heap32.add_to_heap(start, end) };
        } else {
            let mut heap64 = self.heap64.lock();
            unsafe { heap64.add_to_heap(start, end) };
        }
    }

    pub(crate) fn lock_heap32(&self) -> spin::MutexGuard<'_, Heap<32>> {
        self.heap32.lock()
    }

    pub(crate) fn lock_heap64(&self) -> spin::MutexGuard<'_, Heap<64>> {
        self.heap64.lock()
    }

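    /// Allocation respecting `dma_mask`: a mask within 32 bits is served only
    /// from `heap32`; wider masks try `heap64` first and fall back to
    /// `heap32`. Interrupts are masked while the heaps are locked.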
    pub(crate) unsafe fn alloc_with_mask(
        &self,
        layout: core::alloc::Layout,
        dma_mask: u64,
    ) -> *mut u8 {
        let guard = NoIrqGuard::new();
        let result = if dma_mask <= u32::MAX as u64 {
            Self::try_alloc(&self.heap32, layout)
        } else {
            Self::try_alloc(&self.heap64, layout).or_else(|| Self::try_alloc(&self.heap32, layout))
        };
        drop(guard);

        result.map_or(null_mut(), |ptr| ptr.as_ptr())
    }

    #[inline]
    fn try_alloc<const BITS: usize>(
        heap: &Mutex<Heap<BITS>>,
        layout: core::alloc::Layout,
    ) -> Option<NonNull<u8>> {
        let mut guard = heap.lock();
        guard.alloc(layout).ok()
    }

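    /// Returns `true` if the physical addresses backing the virtual range
    /// `[start, end)` lie entirely below 4 GiB.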
    #[inline]
    fn range_within_u32(start: usize, end: usize) -> bool {
        if start >= end {
            return false;
        }

        let last = end - 1;

        let ps = PhysAddr::from(VirtAddr::from(start));
        let pe = PhysAddr::from(VirtAddr::from(last));

        let limit = PhysAddr::from(u32::MAX as usize);
        ps <= limit && pe <= limit
    }

    #[inline]
    fn ptr_in_32bit(ptr: *mut u8) -> bool {
        let phys = PhysAddr::from(VirtAddr::from(ptr as usize));
        phys <= PhysAddr::from(u32::MAX as usize)
    }
}

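// General-purpose allocations prefer `heap64` and fall back to `heap32`;
// `dealloc` routes the pointer back to the heap that owns its physical
// address.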
unsafe impl GlobalAlloc for KAllocator {
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        let guard = NoIrqGuard::new();
        let result =
            Self::try_alloc(&self.heap64, layout).or_else(|| Self::try_alloc(&self.heap32, layout));
        drop(guard);

        result.map_or(null_mut(), |ptr| ptr.as_ptr())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
        let guard = NoIrqGuard::new();
        let nn = unsafe { NonNull::new_unchecked(ptr) };

        if Self::ptr_in_32bit(ptr) {
            self.heap32.lock().dealloc(nn, layout);
        } else {
            self.heap64.lock().dealloc(nn, layout);
        }
        drop(guard);
    }
}

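/// Initializes the memory subsystem: sets up the temporary page table, seeds
/// the kernel heap with the main memory region (truncated if temporary page
/// allocations consumed its tail), completes MMU initialization, adds the
/// remaining RAM regions, and initializes the cache layer.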
pub(crate) fn init() {
    let range = global_val().main_memory.clone();
    mmu::init_with_tmp_table();

    let start = VirtAddr::from(range.start.raw() + LINER_OFFSET);
    let mut end = VirtAddr::from(range.end.raw() + LINER_OFFSET);

    unsafe {
        if TMP_PAGE_ALLOC_ADDR != 0 {
            end = VirtAddr::from(TMP_PAGE_ALLOC_ADDR + LINER_OFFSET);
        }
    }

    println!("heap add memory [{}, {})", start, end);
    #[cfg(target_os = "none")]
    ALLOCATOR.add_to_heap(unsafe { &mut *slice_from_raw_parts_mut(start.into(), end - start) });

    println!("heap initialized");

    mmu::init();

    add_all_ram();

    cache::init();
}

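/// Adds every boot RAM region to the heap, skipping the region that contains
/// the start of main memory (that one is added in `init`).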
fn add_all_ram() {
    let main = global_val().main_memory.clone();

    for region in platform::boot_regions() {
        if !matches!(region.kind, BootMemoryKind::Ram) {
            continue;
        }

        if region.range.to_range().contains(&main.start) {
            continue;
        }

        let start = VirtAddr::from(region.range.start.raw() + LINER_OFFSET);
        let end = VirtAddr::from(region.range.end.raw() + LINER_OFFSET);
        let len = end - start;

        println!("Heap add memory [{}, {})", start, end);
        #[cfg(target_os = "none")]
        ALLOCATOR.add_to_heap(unsafe { &mut *slice_from_raw_parts_mut(start.into(), len) });
    }
}

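/// Selects the main memory region from the boot memory map: non-RAM regions
/// (reserved ranges, the kernel image, ...) are carved out of every RAM
/// region, and the lowest-addressed remaining range of at least 16 MiB is
/// chosen. Returns `None` if no such range exists.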
pub(crate) fn find_main_memory() -> Option<BootRegion> {
    let mut ram_regions = heapless::Vec::<_, 32>::new();
    let mut non_ram_regions = heapless::Vec::<_, 32>::new();

    for r in platform::boot_regions() {
        if matches!(r.kind, BootMemoryKind::Ram) {
            ram_regions.push(r).ok()?;
        } else {
            non_ram_regions.push(r).ok()?;
        }
    }

    let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

    for ram in &ram_regions {
        let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
        current_ranges.push(ram.range).ok()?;

        for non_ram in &non_ram_regions {
            let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

            for current_range in &current_ranges {
                let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                if overlap_start < overlap_end {
                    if current_range.start.raw() < overlap_start {
                        new_ranges
                            .push(PhysCRange {
                                start: current_range.start,
                                end: PhysAddr::new(overlap_start),
                            })
                            .ok()?;
                    }
                    if overlap_end < current_range.end.raw() {
                        new_ranges
                            .push(PhysCRange {
                                start: PhysAddr::new(overlap_end),
                                end: current_range.end,
                            })
                            .ok()?;
                    }
                } else {
                    new_ranges.push(*current_range).ok()?;
                }
            }
            current_ranges = new_ranges;
        }

        for range in current_ranges {
            available_regions.push(range).ok()?;
        }
    }

    const MIN_SIZE: usize = 16 * 1024 * 1024;
    let mut best_region: Option<PhysCRange> = None;

    for region in &available_regions {
        let size = region.end.raw() - region.start.raw();
        if size >= MIN_SIZE {
            match best_region {
                None => best_region = Some(*region),
                Some(current_best) => {
                    if region.start.raw() < current_best.start.raw() {
                        best_region = Some(*region);
                    }
                }
            }
        }
    }

    if let Some(main_range) = best_region {
        println!(
            "Selected main memory: {:?}, size: {}MB",
            main_range,
            (main_range.end.raw() - main_range.start.raw()) / (1024 * 1024)
        );

        let first_ram = ram_regions.first()?;
        Some(BootRegion {
            range: main_range,
            name: c"main memory".as_ptr() as _,
            access: first_ram.access,
            cache: first_ram.cache,
            kind: BootMemoryKind::Ram,
        })
    } else {
        println!("no suitable main memory region found (>= 16MB)");
        None
    }
}

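/// Maps a region in the kernel page table according to `config`.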
pub fn map(config: &MapConfig) -> Result<(), PagingError> {
    mmu::map(config)
}

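/// Maps `size` bytes of device memory at `paddr` into the linear-offset
/// region and returns the corresponding virtual pointer. A region that is
/// already mapped is reused; any other mapping error panics.
///
/// # Example (illustrative sketch; the device address is made up)
/// ```ignore
/// let uart = iomap(PhysAddr::new(0x0900_0000), 0x1000);
/// unsafe { uart.as_ptr().write_volatile(b'A') };
/// ```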
pub fn iomap(paddr: PhysAddr, size: usize) -> NonNull<u8> {
    let vaddr = VirtAddr::from(paddr.raw() + LINER_OFFSET);
    match mmu::map(&MapConfig {
        name: "iomap",
        va_start: vaddr,
        pa_start: paddr,
        size,
        access: AccessSetting::ReadWrite,
        cache: CacheSetting::Device,
    }) {
        Ok(_) => {}
        Err(e) => match e {
            PagingError::AlreadyMapped => {}
            _ => panic!("iomap failed: {:?}", e),
        },
    }

    let ptr: *mut u8 = vaddr.into();
    unsafe { NonNull::new_unchecked(ptr) }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::hal_al::mmu::{AccessSetting, CacheSetting};
    use core::ffi::CStr;

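    /// Builds a `BootRegion` covering `[start, end)` for the tests below.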
    fn create_test_region(
        start: usize,
        end: usize,
        name: &'static str,
        kind: BootMemoryKind,
    ) -> BootRegion {
        BootRegion {
            range: PhysCRange {
                start: PhysAddr::new(start),
                end: PhysAddr::new(end),
            },
            name: name.as_ptr(),
            access: AccessSetting::Read | AccessSetting::Write,
            cache: CacheSetting::Normal,
            kind,
        }
    }

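    /// Stand-in for `platform::boot_regions()` over a fixed slice of regions.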
    fn mock_boot_regions(regions: &[BootRegion]) -> impl Iterator<Item = BootRegion> + '_ {
        regions.iter().copied()
    }

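    // The tests below inline the range-carving and selection logic of
    // `find_main_memory`, because that function reads the memory map directly
    // from `platform::boot_regions()` and cannot be fed mock regions.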
    #[test]
    fn test_find_main_memory_simple_case() {
        let regions = [
            create_test_region(0x40000000, 0x60000000, "ram", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 1);
        assert_eq!(available_regions[0].start.raw(), 0x40000000);
        assert_eq!(available_regions[0].end.raw(), 0x60000000);

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let size = available_regions[0].end.raw() - available_regions[0].start.raw();
        assert!(size >= MIN_SIZE);
    }

    #[test]
    fn test_find_main_memory_with_overlap() {
        let regions = [
            create_test_region(0x40000000, 0x60000000, "ram", BootMemoryKind::Ram),
            create_test_region(0x45000000, 0x46000000, "reserved", BootMemoryKind::Reserved),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 2);

        let region1 = available_regions
            .iter()
            .find(|r| r.start.raw() == 0x40000000)
            .unwrap();
        assert_eq!(region1.end.raw(), 0x45000000);

        let region2 = available_regions
            .iter()
            .find(|r| r.start.raw() == 0x46000000)
            .unwrap();
        assert_eq!(region2.end.raw(), 0x60000000);

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            assert!(size >= MIN_SIZE);
        }
    }

    #[test]
    fn test_find_main_memory_multiple_overlaps() {
        let regions = [
            create_test_region(0x40000000, 0x80000000, "ram", BootMemoryKind::Ram),
            create_test_region(
                0x45000000,
                0x46000000,
                "reserved1",
                BootMemoryKind::Reserved,
            ),
            create_test_region(
                0x50000000,
                0x52000000,
                "reserved2",
                BootMemoryKind::Reserved,
            ),
            create_test_region(0x70000000, 0x71000000, "kimage", BootMemoryKind::KImage),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 4);

        let expected_regions = [
            (0x40000000, 0x45000000),
            (0x46000000, 0x50000000),
            (0x52000000, 0x70000000),
            (0x71000000, 0x80000000),
        ];

        for (start, end) in expected_regions {
            let region = available_regions
                .iter()
                .find(|r| r.start.raw() == start)
                .unwrap();
            assert_eq!(region.end.raw(), end);
        }
    }

    #[test]
    fn test_find_main_memory_select_lowest_address() {
        let regions = [
            create_test_region(0x80000000, 0x90000000, "ram1", BootMemoryKind::Ram),
            create_test_region(0x40000000, 0x50000000, "ram2", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<BootRegion, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            available_regions.push(ram.range).unwrap();
        }

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let mut best_region: Option<PhysCRange> = None;

        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            if size >= MIN_SIZE {
                match best_region {
                    None => best_region = Some(*region),
                    Some(current_best) => {
                        if region.start.raw() < current_best.start.raw() {
                            best_region = Some(*region);
                        }
                    }
                }
            }
        }

        assert!(best_region.is_some());
        let selected = best_region.unwrap();
        assert_eq!(selected.start.raw(), 0x40000000);
        assert_eq!(selected.end.raw(), 0x50000000);
    }

    #[test]
    fn test_find_main_memory_no_suitable_region() {
        let regions = [
            create_test_region(0x40000000, 0x40800000, "ram1", BootMemoryKind::Ram),
            create_test_region(0x50000000, 0x50400000, "ram2", BootMemoryKind::Ram),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let non_ram_regions = heapless::Vec::<BootRegion, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            available_regions.push(ram.range).unwrap();
        }

        const MIN_SIZE: usize = 16 * 1024 * 1024;
        let mut best_region: Option<PhysCRange> = None;

        for region in &available_regions {
            let size = region.end.raw() - region.start.raw();
            if size >= MIN_SIZE {
                match best_region {
                    None => best_region = Some(*region),
                    Some(current_best) => {
                        if region.start.raw() < current_best.start.raw() {
                            best_region = Some(*region);
                        }
                    }
                }
            }
        }

        assert!(best_region.is_none());
    }

    #[test]
    fn test_find_main_memory_edge_case_exact_overlap() {
        let regions = [
            create_test_region(0x40000000, 0x50000000, "ram", BootMemoryKind::Ram),
            create_test_region(0x40000000, 0x50000000, "reserved", BootMemoryKind::Reserved),
        ];

        let mut ram_regions = heapless::Vec::<_, 32>::new();
        let mut non_ram_regions = heapless::Vec::<_, 32>::new();

        for r in mock_boot_regions(&regions) {
            if matches!(r.kind, BootMemoryKind::Ram) {
                ram_regions.push(r).unwrap();
            } else {
                non_ram_regions.push(r).unwrap();
            }
        }

        let mut available_regions = heapless::Vec::<PhysCRange, 64>::new();

        for ram in &ram_regions {
            let mut current_ranges = heapless::Vec::<PhysCRange, 32>::new();
            current_ranges.push(ram.range).unwrap();

            for non_ram in &non_ram_regions {
                let mut new_ranges = heapless::Vec::<PhysCRange, 32>::new();

                for current_range in &current_ranges {
                    let overlap_start = current_range.start.raw().max(non_ram.range.start.raw());
                    let overlap_end = current_range.end.raw().min(non_ram.range.end.raw());

                    if overlap_start < overlap_end {
                        if current_range.start.raw() < overlap_start {
                            new_ranges
                                .push(PhysCRange {
                                    start: current_range.start,
                                    end: PhysAddr::new(overlap_start),
                                })
                                .unwrap();
                        }
                        if overlap_end < current_range.end.raw() {
                            new_ranges
                                .push(PhysCRange {
                                    start: PhysAddr::new(overlap_end),
                                    end: current_range.end,
                                })
                                .unwrap();
                        }
                    } else {
                        new_ranges.push(*current_range).unwrap();
                    }
                }
                current_ranges = new_ranges;
            }

            for range in current_ranges {
                available_regions.push(range).unwrap();
            }
        }

        assert_eq!(available_regions.len(), 0);
    }
}