sparreal_kernel/os/mem/allocator.rs

use core::{
    alloc::GlobalAlloc,
    ptr::{NonNull, null_mut},
};

use buddy_system_allocator::Heap;
use page_table_generic::FrameAllocator;
use spin::Mutex;

use crate::{
    hal::al::memory::page_size,
    os::{
        irq::NoIrqGuard,
        mem::{
            __va,
            address::{PhysAddr, VirtAddr},
        },
    },
};

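/// On bare-metal builds this instance is registered as the Rust global
/// allocator, so every kernel heap allocation goes through it.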
#[cfg(target_os = "none")]
#[global_allocator]
pub(super) static KERNEL_MEMORY_ALLOCATOR: KernelMemoryAllocator = KernelMemoryAllocator::new();

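/// Layout of a single page frame: size and alignment both equal the hardware
/// page size.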
fn page_frame_layout() -> core::alloc::Layout {
    core::alloc::Layout::from_size_align(page_size(), page_size()).unwrap()
}

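/// Page-frame allocator handed to `page_table_generic`. Frames are taken from
/// the 32-bit heap, so their physical addresses always fit in 32 bits.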
#[derive(Clone, Copy)]
pub struct KernelAllocator;

impl FrameAllocator for KernelAllocator {
    fn alloc_frame(&self) -> Option<page_table_generic::PhysAddr> {
        kernel_memory_allocator()
            .lock_heap32()
            .alloc(page_frame_layout())
            .ok()
            .map(|nn| {
                // The heap hands out kernel virtual pointers; convert the
                // frame back to its physical address for the page tables.
                let virt = VirtAddr::from(nn);
                let phys: PhysAddr = virt.into();
                phys.raw().into()
            })
    }

    fn dealloc_frame(&self, frame: page_table_generic::PhysAddr) {
        // Map the physical frame back to its kernel virtual address before
        // returning it to the heap.
        let phys = PhysAddr::new(frame.raw());
        let virt: VirtAddr = __va(phys);
        let ptr = virt.as_mut_ptr();
        let nn = unsafe { NonNull::new_unchecked(ptr) };
        kernel_memory_allocator()
            .lock_heap32()
            .dealloc(nn, page_frame_layout());
    }

    fn phys_to_virt(&self, paddr: page_table_generic::PhysAddr) -> *mut u8 {
        let phys = PhysAddr::new(paddr.raw());
        let virt: VirtAddr = __va(phys);
        virt.as_mut_ptr()
    }
}

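/// Returns the kernel-wide memory allocator.
///
/// On bare metal this is the statically registered global allocator. On hosted
/// builds an instance is lazily created, leaked, and published through an
/// atomic pointer; a thread that loses the publication race uses the winner's
/// instance and abandons its own leaked copy.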
pub fn kernel_memory_allocator() -> &'static KernelMemoryAllocator {
    #[cfg(target_os = "none")]
    {
        &KERNEL_MEMORY_ALLOCATOR
    }
    #[cfg(not(target_os = "none"))]
    {
        use core::sync::atomic::{AtomicPtr, Ordering};

        use alloc::boxed::Box;

        static EMPTY_ALLOCATOR: AtomicPtr<KernelMemoryAllocator> =
            AtomicPtr::new(core::ptr::null_mut());

        let ptr = EMPTY_ALLOCATOR.load(Ordering::Acquire);
        if ptr.is_null() {
            let allocator = Box::leak(Box::new(KernelMemoryAllocator::new()));
            match EMPTY_ALLOCATOR.compare_exchange(
                core::ptr::null_mut(),
                allocator,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => &*allocator,
                Err(existing) => unsafe { &*existing },
            }
        } else {
            unsafe { &*ptr }
        }
    }
}

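/// Kernel heap split into two buddy allocators: `low_address_heap` manages
/// memory whose physical addresses fit in 32 bits, `high_address_heap`
/// manages everything above.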
pub struct KernelMemoryAllocator {
    low_address_heap: Mutex<Heap<32>>,
    high_address_heap: Mutex<Heap<64>>,
}

impl KernelMemoryAllocator {
    pub const fn new() -> Self {
        Self {
            low_address_heap: Mutex::new(Heap::empty()),
            high_address_heap: Mutex::new(Heap::empty()),
        }
    }

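    /// Adds a region of usable memory to the allocator.
    ///
    /// The region goes to the 32-bit heap when its whole physical address range
    /// fits below 4 GiB, and to the 64-bit heap otherwise (a region straddling
    /// the boundary is placed entirely in the 64-bit heap).
    ///
    /// A minimal usage sketch, assuming early boot code has already mapped the
    /// memory and owns an exclusive slice over it (the slice here is
    /// hypothetical):
    ///
    /// ```ignore
    /// let region: &'static mut [u8] = /* usable memory reported by firmware */;
    /// kernel_memory_allocator().add_memory_region(region);
    /// ```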
    pub fn add_memory_region(&self, memory: &mut [u8]) {
        let range = memory.as_mut_ptr_range();
        let start = range.start as usize;
        let end = range.end as usize;

        if Self::address_range_fits_in_32bit(start, end) {
            let mut heap32 = self.low_address_heap.lock();
            unsafe { heap32.add_to_heap(start, end) };
        } else {
            let mut heap64 = self.high_address_heap.lock();
            unsafe { heap64.add_to_heap(start, end) };
        }
    }

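    /// Locks the 32-bit heap directly. The page-frame allocator above draws its
    /// frames from this heap.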
    pub(crate) fn lock_heap32(&self) -> spin::MutexGuard<'_, Heap<32>> {
        self.low_address_heap.lock()
    }

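    /// Attempts an allocation from a single heap, returning `None` if that heap
    /// cannot satisfy the layout.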
    #[inline]
    fn try_alloc<const BITS: usize>(
        heap: &Mutex<Heap<BITS>>,
        layout: core::alloc::Layout,
    ) -> Option<NonNull<u8>> {
        let mut guard = heap.lock();
        guard.alloc(layout).ok()
    }

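    /// Returns `true` when both the first and last byte of `[start, end)`
    /// translate to physical addresses no greater than `u32::MAX`; empty or
    /// inverted ranges return `false`.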
    #[inline]
    fn address_range_fits_in_32bit(start: usize, end: usize) -> bool {
        if start >= end {
            return false;
        }

        let last = end - 1;

        let ps = PhysAddr::from(VirtAddr::from(start));
        let pe = PhysAddr::from(VirtAddr::from(last));

        let limit = PhysAddr::from(u32::MAX as usize);
        ps <= limit && pe <= limit
    }

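    /// Returns `true` when the pointer's physical address fits in 32 bits;
    /// `dealloc` uses this to pick the heap a block is returned to.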
    #[inline]
    fn pointer_fits_in_32bit(ptr: *mut u8) -> bool {
        let phys = PhysAddr::from(VirtAddr::from(ptr as usize));
        phys <= PhysAddr::from(u32::MAX as usize)
    }
}

impl Default for KernelMemoryAllocator {
    fn default() -> Self {
        Self::new()
    }
}

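// `alloc` tries the 64-bit heap first and only falls back to the 32-bit heap
// when that fails; `dealloc` routes a block back by its physical address. A
// `NoIrqGuard` is held for the duration of every heap operation.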
unsafe impl GlobalAlloc for KernelMemoryAllocator {
    unsafe fn alloc(&self, layout: core::alloc::Layout) -> *mut u8 {
        let guard = NoIrqGuard::new();
        let result = Self::try_alloc(&self.high_address_heap, layout)
            .or_else(|| Self::try_alloc(&self.low_address_heap, layout));
        drop(guard);

        result.map_or(null_mut(), |ptr| ptr.as_ptr())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: core::alloc::Layout) {
        let guard = NoIrqGuard::new();
        let nn = unsafe { NonNull::new_unchecked(ptr) };

        if Self::pointer_fits_in_32bit(ptr) {
            self.low_address_heap.lock().dealloc(nn, layout);
        } else {
            self.high_address_heap.lock().dealloc(nn, layout);
        }
        drop(guard);
    }
}