use crate::buddy::{BuddyPageAllocator, DEFAULT_MAX_ORDER};
use crate::{AllocError, AllocResult, BaseAllocator, PageAllocator};

#[cfg(feature = "log")]
use log::{debug, warn};

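/// Maximum number of buddy blocks that a single composite allocation may be
/// assembled from.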
const MAX_PARTS_PER_ALLOC: usize = 8;

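/// Maximum number of composite allocations that can be tracked at once.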
const MAX_COMPOSITE_ALLOCS: usize = 16;

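/// Bookkeeping for one composite allocation: the base address handed to the
/// caller plus the buddy blocks that back it.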
#[derive(Clone, Copy, Debug)]
struct CompositeBlockInfo {
    /// Base address returned to the caller (the lowest part address).
    base_addr: usize,
    /// Number of valid entries in `parts`.
    part_count: u8,
    /// `(address, buddy order)` of each part backing this allocation.
    parts: [(usize, u32); MAX_PARTS_PER_ALLOC],
}

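/// Fixed-capacity table of outstanding composite allocations.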
struct CompositeBlockTracker {
    blocks: [Option<CompositeBlockInfo>; MAX_COMPOSITE_ALLOCS],
    count: usize,
}

impl CompositeBlockTracker {
    const fn new() -> Self {
        Self {
            blocks: [None; MAX_COMPOSITE_ALLOCS],
            count: 0,
        }
    }

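    /// Records a composite allocation. Returns `false` if the tracker is
    /// full, in which case the caller is responsible for rolling back.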
    fn insert(&mut self, base_addr: usize, parts: &[(usize, u32)], part_count: usize) -> bool {
        if self.count >= MAX_COMPOSITE_ALLOCS {
            return false;
        }

        let mut info = CompositeBlockInfo {
            base_addr,
            part_count: part_count as u8,
            parts: [(0, 0); MAX_PARTS_PER_ALLOC],
        };

        info.parts[..part_count].copy_from_slice(&parts[..part_count]);

        self.blocks[self.count] = Some(info);
        self.count += 1;
        true
    }

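    /// Looks up a composite allocation by its base address.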
    fn find(&self, base_addr: usize) -> Option<CompositeBlockInfo> {
        for i in 0..self.count {
            if let Some(info) = self.blocks[i] {
                if info.base_addr == base_addr {
                    return Some(info);
                }
            }
        }
        None
    }

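    /// Removes the entry for `base_addr` via swap-remove (the last entry
    /// fills the hole, so ordering is not preserved). Returns `true` if an
    /// entry was found.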
    fn remove(&mut self, base_addr: usize) -> bool {
        for i in 0..self.count {
            if let Some(info) = self.blocks[i] {
                if info.base_addr == base_addr {
                    if i < self.count - 1 {
                        self.blocks[i] = self.blocks[self.count - 1];
                    }
                    self.blocks[self.count - 1] = None;
                    self.count -= 1;
                    return true;
                }
            }
        }
        false
    }
}

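/// A page allocator that layers "composite" allocations on top of a buddy
/// allocator. When no single buddy block can satisfy a request, it stitches
/// together physically adjacent free blocks of smaller orders, hands the
/// caller one contiguous region, and remembers the parts so they can be
/// returned to the buddy allocator on deallocation.
///
/// A minimal usage sketch (addresses and sizes are placeholders):
///
/// ```ignore
/// let mut allocator = CompositePageAllocator::<4096>::new();
/// allocator.init(region_start, region_size);
/// let addr = allocator.alloc_pages(4, 4096)?;
/// allocator.dealloc_pages(addr, 4);
/// ```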
pub struct CompositePageAllocator<const PAGE_SIZE: usize = { crate::DEFAULT_PAGE_SIZE }> {
    buddy: BuddyPageAllocator<PAGE_SIZE>,
    composite_tracker: CompositeBlockTracker,
}

impl<const PAGE_SIZE: usize> CompositePageAllocator<PAGE_SIZE> {
    /// Creates an empty allocator; memory is added later via
    /// [`BaseAllocator::init`] or [`BaseAllocator::add_memory`].
    pub const fn new() -> Self {
        Self {
            buddy: BuddyPageAllocator::<PAGE_SIZE>::new(),
            composite_tracker: CompositeBlockTracker::new(),
        }
    }

    /// Sets the address translator used by the underlying buddy allocator.
    pub fn set_addr_translator(&mut self, translator: &'static dyn crate::AddrTranslator) {
        self.buddy.set_addr_translator(translator);
    }

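    /// Allocates pages from low memory by delegating to the underlying buddy
    /// allocator.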
    pub fn alloc_pages_lowmem(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        self.buddy.alloc_pages_lowmem(num_pages, alignment)
    }

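    /// Fallback for requests the buddy allocator cannot satisfy with a single
    /// block: scans the free lists from the largest order down and greedily
    /// collects free blocks that are physically adjacent to the run gathered
    /// so far, extending it at either end, up to `MAX_PARTS_PER_ALLOC` parts.
    /// Once enough pages are covered, every part is reserved with
    /// `alloc_pages_at` (rolling back all parts on any failure) and the
    /// result is recorded in the composite tracker.
    ///
    /// Returns the base address of the combined region, or `None` if no
    /// suitable combination exists.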
    fn try_combine_contiguous_blocks(
        &mut self,
        num_pages: usize,
        alignment: usize,
    ) -> Option<usize> {
        let mut remaining_pages = num_pages;
        let mut contiguous_blocks: [(usize, u32); MAX_PARTS_PER_ALLOC] =
            [(0, 0); MAX_PARTS_PER_ALLOC];
        let mut block_count = 0;
        let mut min_addr = usize::MAX;
        let mut max_addr = 0;

        // Visit the largest orders first so the composite region is built
        // from as few parts as possible.
        for order in (0..=DEFAULT_MAX_ORDER).rev() {
            let block_pages = 1usize << order;

            if remaining_pages == 0 || block_count >= MAX_PARTS_PER_ALLOC {
                break;
            }

            for zone_id in 0..self.buddy.get_zone_count() {
                if let Some(blocks) = self.buddy.get_free_blocks_by_order(zone_id, order as u32) {
                    for block in blocks {
                        if block_count >= MAX_PARTS_PER_ALLOC {
                            break;
                        }

                        let block_start = block.addr;
                        let block_end = block_start + block_pages * PAGE_SIZE;

                        if !crate::is_aligned(block_start, alignment) {
                            continue;
                        }

                        if block_count == 0 {
                            // First part: seed the contiguous run.
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            min_addr = block_start;
                            max_addr = block_end;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        } else if block_end == min_addr {
                            // Block ends exactly where the run starts: prepend.
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            min_addr = block_start;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        } else if block_start == max_addr {
                            // Block starts exactly where the run ends: append.
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            max_addr = block_end;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        }

                        if remaining_pages == 0 {
                            break;
                        }
                    }
                }

                if remaining_pages == 0 {
                    break;
                }
            }
        }

        // Enough adjacent pages were found: reserve each part, rolling back
        // everything on the first failure.
        if remaining_pages == 0 {
            let mut parts = [(0usize, 0u32); MAX_PARTS_PER_ALLOC];

            for i in 0..block_count {
                let (addr, order) = contiguous_blocks[i];
                let block_pages = 1usize << order;

                debug!(
                    "Block {}: addr={:#x}, order={}, pages={}, size={} MB",
                    i,
                    addr,
                    order,
                    block_pages,
                    (block_pages * PAGE_SIZE) / (1024 * 1024)
                );

                if let Err(_e) = self.buddy.alloc_pages_at(addr, block_pages, alignment) {
                    warn!("Contiguous block allocation failed at {i}, rolling back");
                    #[allow(clippy::needless_range_loop)]
                    for j in 0..i {
                        let (dealloc_addr, dealloc_order) = parts[j];
                        let dealloc_pages = 1usize << dealloc_order;
                        self.buddy.dealloc_pages(dealloc_addr, dealloc_pages);
                    }
                    return None;
                }

                parts[i] = (addr, order);
            }

            let actual_pages: usize = parts[..block_count]
                .iter()
                .map(|(_, order)| 1usize << *order as usize)
                .sum();
            debug_assert!(
                actual_pages >= num_pages,
                "Allocated pages {actual_pages} < requested pages {num_pages}"
            );

            if !self
                .composite_tracker
                .insert(min_addr, &parts[..block_count], block_count)
            {
                warn!("Composite tracker full, rolling back allocation");
                #[allow(clippy::needless_range_loop)]
                for j in 0..block_count {
                    let (dealloc_addr, dealloc_order) = parts[j];
                    let dealloc_pages = 1usize << dealloc_order;
                    self.buddy.dealloc_pages(dealloc_addr, dealloc_pages);
                }
                return None;
            }

            debug!(
                "Contiguous block allocation succeeded: base_addr={min_addr:#x}, \
                 pages={num_pages}, parts={block_count}, actual_pages={actual_pages}"
            );

            return Some(min_addr);
        }

        None
    }

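    /// Dumps buddy-allocator diagnostics after a failed allocation; compiled
    /// to a no-op unless the `tracking` feature is enabled.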
    #[cfg(feature = "tracking")]
    fn print_alloc_failure_stats(&self, num_pages: usize, alignment: usize) {
        self.buddy.print_alloc_failure_stats(num_pages, alignment);
    }

    #[cfg(not(feature = "tracking"))]
    fn print_alloc_failure_stats(&self, _num_pages: usize, _alignment: usize) {}
}

impl<const PAGE_SIZE: usize> PageAllocator for CompositePageAllocator<PAGE_SIZE> {
    const PAGE_SIZE: usize = PAGE_SIZE;

    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        if num_pages == 0 {
            return Err(AllocError::InvalidParam);
        }

        // The buddy allocator deals in power-of-two block sizes, so round the
        // request up (`next_power_of_two` is the identity on values that are
        // already powers of two).
        let buddy_pages = num_pages.next_power_of_two();

        match self.buddy.alloc_pages(buddy_pages, alignment) {
            Ok(addr) => Ok(addr),
            Err(_) => {
                debug!(
                    "Standard allocation failed, trying contiguous block combination for {num_pages} pages"
                );
                if let Some(addr) = self.try_combine_contiguous_blocks(num_pages, alignment) {
                    return Ok(addr);
                }
                self.print_alloc_failure_stats(num_pages, alignment);
                Err(AllocError::NoMemory)
            }
        }
    }

    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        if num_pages == 0 {
            return;
        }

        if let Some(info) = self.composite_tracker.find(pos) {
            debug!(
                "Deallocating composite block: base_addr={:#x}, part_count={}",
                pos, info.part_count
            );

            // Return each buddy block that backs this composite allocation.
            for i in 0..info.part_count as usize {
                let (addr, order) = info.parts[i];
                let pages = 1usize << order;
                debug!(" Part {i}: addr={addr:#x}, order={order}, pages={pages}");
                self.buddy.dealloc_pages(addr, pages);
            }

            self.composite_tracker.remove(pos);
        } else {
            // Non-composite allocations were rounded up to a power of two in
            // `alloc_pages`, so mirror that rounding here.
            self.buddy.dealloc_pages(pos, num_pages.next_power_of_two());
        }
    }

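    /// Fixed-address allocation is delegated straight to the buddy allocator;
    /// composite combination is not attempted for such requests.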
    fn alloc_pages_at(
        &mut self,
        base: usize,
        num_pages: usize,
        alignment: usize,
    ) -> AllocResult<usize> {
        self.buddy.alloc_pages_at(base, num_pages, alignment)
    }

    fn total_pages(&self) -> usize {
        self.buddy.total_pages()
    }

    fn used_pages(&self) -> usize {
        self.buddy.used_pages()
    }

    fn available_pages(&self) -> usize {
        self.buddy.available_pages()
    }
}

impl<const PAGE_SIZE: usize> CompositePageAllocator<PAGE_SIZE> {
    /// Returns allocation statistics collected by the underlying buddy
    /// allocator (only available with the `tracking` feature).
    #[cfg(feature = "tracking")]
    pub fn get_buddy_stats(&self) -> crate::buddy::BuddyStats {
        self.buddy.get_stats()
    }
}

impl<const PAGE_SIZE: usize> BaseAllocator for CompositePageAllocator<PAGE_SIZE> {
    fn init(&mut self, start: usize, size: usize) {
        self.buddy.init(start, size);
    }

    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult<()> {
        self.buddy.add_memory(start, size)
    }
}

impl<const PAGE_SIZE: usize> crate::slab::PageAllocatorForSlab
    for CompositePageAllocator<PAGE_SIZE>
{
    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        <Self as PageAllocator>::alloc_pages(self, num_pages, alignment)
    }

    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        <Self as PageAllocator>::dealloc_pages(self, pos, num_pages)
    }
}

impl<const PAGE_SIZE: usize> Default for CompositePageAllocator<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_composite_block_tracker() {
        let mut tracker = CompositeBlockTracker::new();

        let parts = [(0x1000, 3), (0x2000, 2)];
        assert!(tracker.insert(0x1000, &parts, 2));
        assert_eq!(tracker.count, 1);

        let info = tracker.find(0x1000);
        assert!(info.is_some());
        let info = info.unwrap();
        assert_eq!(info.base_addr, 0x1000);
        assert_eq!(info.part_count, 2);
        assert_eq!(info.parts[0], (0x1000, 3));
        assert_eq!(info.parts[1], (0x2000, 2));

        assert!(tracker.remove(0x1000));
        assert_eq!(tracker.count, 0);
        assert!(tracker.find(0x1000).is_none());
    }

    #[test]
    fn test_composite_block_tracker_capacity() {
        let mut tracker = CompositeBlockTracker::new();

        for i in 0..MAX_COMPOSITE_ALLOCS {
            let parts = [(0x1000 + i * 0x1000, 3)];
            assert!(tracker.insert(0x1000 + i * 0x1000, &parts, 1));
        }

        assert_eq!(tracker.count, MAX_COMPOSITE_ALLOCS);

        let parts = [(0x10000, 3)];
        assert!(!tracker.insert(0x10000, &parts, 1));

        assert!(tracker.remove(0x1000));
        assert!(tracker.insert(0x10000, &parts, 1));
    }

    #[test]
    fn test_composite_tracker_find_nonexistent() {
        let tracker = CompositeBlockTracker::new();
        assert!(tracker.find(0x1000).is_none());
    }

    #[test]
    fn test_composite_tracker_remove_nonexistent() {
        let mut tracker = CompositeBlockTracker::new();
        assert!(!tracker.remove(0x1000));
    }

    #[test]
    fn test_composite_tracker_multiple_blocks() {
        let mut tracker = CompositeBlockTracker::new();

        let parts1 = [(0x1000, 3), (0x2000, 2)];
        let parts2 = [(0x3000, 4), (0x4000, 3), (0x5000, 2)];

        tracker.insert(0x1000, &parts1, 2);
        tracker.insert(0x3000, &parts2, 3);

        assert_eq!(tracker.count, 2);

        let info1 = tracker.find(0x1000);
        assert!(info1.is_some());
        assert_eq!(info1.unwrap().part_count, 2);

        let info2 = tracker.find(0x3000);
        assert!(info2.is_some());
        assert_eq!(info2.unwrap().part_count, 3);
    }
}

#[cfg(test)]
mod unit_tests {
    use super::*;
    use alloc::alloc::{alloc, dealloc};
    use core::alloc::Layout;

    const TEST_HEAP_SIZE: usize = 16 * 1024 * 1024;
    const TEST_PAGE_SIZE: usize = 0x1000;

    fn alloc_test_heap(size: usize) -> (*mut u8, Layout) {
        let layout = Layout::from_size_align(size, TEST_PAGE_SIZE).unwrap();
        let ptr = unsafe { alloc(layout) };
        assert!(!ptr.is_null());
        (ptr, layout)
    }

    fn dealloc_test_heap(ptr: *mut u8, layout: Layout) {
        unsafe { dealloc(ptr, layout) };
    }

    #[test]
    fn test_composite_allocator_basic() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        let addr1 = allocator.alloc_pages(1, TEST_PAGE_SIZE).unwrap();
        let addr2 = allocator.alloc_pages(4, TEST_PAGE_SIZE).unwrap();

        assert!(addr1 >= heap_addr && addr1 < heap_addr + TEST_HEAP_SIZE);
        assert!(addr2 >= heap_addr && addr2 < heap_addr + TEST_HEAP_SIZE);

        allocator.dealloc_pages(addr1, 1);
        allocator.dealloc_pages(addr2, 4);

        dealloc_test_heap(heap_ptr, heap_layout);
    }

    #[test]
    fn test_composite_allocator_alignment() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        let addr = allocator.alloc_pages(1, TEST_PAGE_SIZE * 4).unwrap();
        assert_eq!(addr & (TEST_PAGE_SIZE * 4 - 1), 0);

        allocator.dealloc_pages(addr, 1);
        dealloc_test_heap(heap_ptr, heap_layout);
    }
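
    // Added check (not part of the original suite): a non-power-of-two
    // request is rounded up internally by `alloc_pages`, and the matching
    // `dealloc_pages` applies the same rounding, so the pair should
    // round-trip cleanly.
    #[test]
    fn test_composite_allocator_non_power_of_two() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        // Three pages are backed by a four-page buddy block.
        let addr = allocator.alloc_pages(3, TEST_PAGE_SIZE).unwrap();
        assert!(addr >= heap_addr && addr < heap_addr + TEST_HEAP_SIZE);

        allocator.dealloc_pages(addr, 3);
        dealloc_test_heap(heap_ptr, heap_layout);
    }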
}