//! Page allocator with contiguous block combination support.
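//!
//! # Example (sketch)
//!
//! A minimal usage sketch; the crate and module paths below are assumed from the
//! surrounding project layout, and `heap_start`/`heap_size` are placeholders for a
//! writable memory region owned by the caller:
//!
//! ```ignore
//! use buddy_slab_allocator::page_allocator::CompositePageAllocator;
//! use buddy_slab_allocator::{BaseAllocator, PageAllocator};
//!
//! // 4 KiB pages; the const generic defaults to `DEFAULT_PAGE_SIZE` if omitted.
//! let mut allocator = CompositePageAllocator::<0x1000>::new();
//! allocator.init(heap_start, heap_size);
//!
//! let addr = allocator.alloc_pages(4, 0x1000).unwrap();
//! allocator.dealloc_pages(addr, 4);
//! ```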

use crate::buddy::{BuddyPageAllocator, DEFAULT_MAX_ORDER};
use crate::{AllocError, AllocResult, BaseAllocator, PageAllocator};

#[cfg(feature = "log")]
use log::{debug, warn};

/// Maximum number of buddy blocks in a single contiguous allocation
const MAX_PARTS_PER_ALLOC: usize = 8;

/// Maximum number of concurrent composite allocations tracked
const MAX_COMPOSITE_ALLOCS: usize = 16;

/// Metadata for a composite allocation (multiple buddy blocks combined)
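///
/// For example (illustrative), a 12-page composite allocation could consist of an
/// order-3 part (8 pages) followed by a contiguous order-2 part (4 pages); the
/// tracker would then hold `part_count = 2` and
/// `parts[..2] == [(base_addr, 3), (base_addr + 8 * PAGE_SIZE, 2)]`.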
#[derive(Clone, Copy, Debug)]
struct CompositeBlockInfo {
    /// Base address of the composite allocation
    base_addr: usize,
    /// Number of parts in this composite allocation
    part_count: u8,
    /// Individual parts: (address, order)
    parts: [(usize, u32); MAX_PARTS_PER_ALLOC],
}

/// Tracker for composite allocations, backed by a fixed-size array
struct CompositeBlockTracker {
    /// Array to store composite block metadata
    blocks: [Option<CompositeBlockInfo>; MAX_COMPOSITE_ALLOCS],
    /// Number of active composite allocations
    count: usize,
}

impl CompositeBlockTracker {
    const fn new() -> Self {
        Self {
            blocks: [None; MAX_COMPOSITE_ALLOCS],
            count: 0,
        }
    }

    /// Insert a new composite block
    ///
    /// Returns false if the tracker is full
    fn insert(&mut self, base_addr: usize, parts: &[(usize, u32)], part_count: usize) -> bool {
        if self.count >= MAX_COMPOSITE_ALLOCS {
            return false;
        }

        let mut info = CompositeBlockInfo {
            base_addr,
            part_count: part_count as u8,
            parts: [(0, 0); MAX_PARTS_PER_ALLOC],
        };

        info.parts[..part_count].copy_from_slice(&parts[..part_count]);

        self.blocks[self.count] = Some(info);
        self.count += 1;
        true
    }

    /// Find a composite block by its base address
    fn find(&self, base_addr: usize) -> Option<CompositeBlockInfo> {
        for i in 0..self.count {
            if let Some(info) = self.blocks[i] {
                if info.base_addr == base_addr {
                    return Some(info);
                }
            }
        }
        None
    }

    /// Remove a composite block by its base address
    ///
    /// Returns true if the block was found and removed
    fn remove(&mut self, base_addr: usize) -> bool {
        for i in 0..self.count {
            if let Some(info) = self.blocks[i] {
                if info.base_addr == base_addr {
                    // Move the last element to this position to keep the array compact
                    if i < self.count - 1 {
                        self.blocks[i] = self.blocks[self.count - 1];
                    }
                    self.blocks[self.count - 1] = None;
                    self.count -= 1;
                    return true;
                }
            }
        }
        false
    }
}

/// Page allocator that wraps a buddy allocator and, when a single buddy block
/// cannot satisfy a request, combines multiple contiguous buddy blocks into one
/// composite allocation.
pub struct CompositePageAllocator<const PAGE_SIZE: usize = { crate::DEFAULT_PAGE_SIZE }> {
    /// Underlying buddy allocator for standard allocations
    buddy: BuddyPageAllocator<PAGE_SIZE>,
    /// Tracker for composite allocations (multiple buddy blocks combined)
    composite_tracker: CompositeBlockTracker,
}

impl<const PAGE_SIZE: usize> CompositePageAllocator<PAGE_SIZE> {
    /// Create a new page allocator with contiguous block support
    pub const fn new() -> Self {
        Self {
            buddy: BuddyPageAllocator::<PAGE_SIZE>::new(),
            composite_tracker: CompositeBlockTracker::new(),
        }
    }

    /// Set the address translator so that the underlying buddy allocator can
    /// reason about physical address ranges (e.g. low-memory regions).
    pub fn set_addr_translator(&mut self, translator: &'static dyn crate::AddrTranslator) {
        self.buddy.set_addr_translator(translator);
    }

    /// Allocate low-memory pages (physical address < 4GiB).
    /// This is a thin wrapper over the buddy allocator's lowmem allocation.
    pub fn alloc_pages_lowmem(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        self.buddy.alloc_pages_lowmem(num_pages, alignment)
    }

    /// Try to find and allocate contiguous small blocks from the buddy free lists.
    ///
    /// This method searches the buddy free lists for contiguous blocks that can
    /// satisfy the allocation request. It relies on the free lists being sorted by
    /// address to check contiguity efficiently.
    ///
    /// # Algorithm
    /// 1. Iterate through free lists from largest to smallest blocks
    /// 2. For each block, check whether it is contiguous with the blocks collected so far
    /// 3. Collect contiguous blocks until enough pages are covered
    /// 4. If successful, allocate all collected blocks
    ///
    /// # Returns
    /// Base address (lowest address) of the combined region if enough contiguous
    /// blocks were found and allocated, otherwise `None`
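    ///
    /// # Example (illustrative)
    ///
    /// With 4 KiB pages, a request for 12 pages cannot be served by a single free
    /// block if the largest free block holds 8 pages. If the free lists contain an
    /// order-3 block at `0x10_0000` and an order-2 block at `0x10_8000` (the 4-page
    /// block starts exactly where the 8-page block ends), both are collected as
    /// parts `[(0x10_0000, 3), (0x10_8000, 2)]`, allocated via `alloc_pages_at`,
    /// recorded in the composite tracker, and `0x10_0000` is returned as the base
    /// address. The addresses are purely illustrative.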
    fn try_combine_contiguous_blocks(
        &mut self,
        num_pages: usize,
        alignment: usize,
    ) -> Option<usize> {
        let mut remaining_pages = num_pages;
        let mut contiguous_blocks: [(usize, u32); MAX_PARTS_PER_ALLOC] =
            [(0, 0); MAX_PARTS_PER_ALLOC];
        let mut block_count = 0;
        let mut min_addr = usize::MAX;
        let mut max_addr = 0;

        // Iterate from largest to smallest blocks
        for order in (0..=DEFAULT_MAX_ORDER).rev() {
            let block_pages = 1usize << order;

            if remaining_pages == 0 || block_count >= MAX_PARTS_PER_ALLOC {
                break;
            }

            // Get free blocks of this order from all zones
            for zone_id in 0..self.buddy.get_zone_count() {
                if let Some(blocks) = self.buddy.get_free_blocks_by_order(zone_id, order as u32) {
                    // Iterate through sorted free blocks
                    for block in blocks {
                        if block_count >= MAX_PARTS_PER_ALLOC {
                            break;
                        }

                        let block_start = block.addr;
                        let block_end = block_start + block_pages * PAGE_SIZE;

                        // Check alignment requirement
                        if !crate::is_aligned(block_start, alignment) {
                            continue;
                        }

                        // Check contiguity with existing blocks
                        if block_count == 0 {
                            // First block - just record it
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            min_addr = block_start;
                            max_addr = block_end;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        } else if block_end == min_addr {
                            // Block is to the left, update min_addr
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            min_addr = block_start;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        } else if block_start == max_addr {
                            // Block is to the right, update max_addr
                            contiguous_blocks[block_count] = (block_start, order as u32);
                            max_addr = block_end;
                            block_count += 1;
                            remaining_pages -= block_pages.min(remaining_pages);
                        }

                        if remaining_pages == 0 {
                            break;
                        }
                    }
                }

                if remaining_pages == 0 {
                    break;
                }
            }
        }

        // If we found enough contiguous pages, allocate them
        if remaining_pages == 0 {
            let mut parts = [(0usize, 0u32); MAX_PARTS_PER_ALLOC];

            // Allocate all contiguous blocks
            for i in 0..block_count {
                let (addr, order) = contiguous_blocks[i];
                let block_pages = 1usize << order;

                debug!(
                    "Block {}: addr={:#x}, order={}, pages={}, size={} MB",
                    i,
                    addr,
                    order,
                    block_pages,
                    (block_pages * PAGE_SIZE) / (1024 * 1024)
                );

                // Allocate this specific block
                if let Err(_e) = self.buddy.alloc_pages_at(addr, block_pages, alignment) {
                    // Allocation failed, rollback
                    warn!("Contiguous block allocation failed at {i}, rolling back");
                    #[allow(clippy::needless_range_loop)]
                    for j in 0..i {
                        let (dealloc_addr, dealloc_order) = parts[j];
                        let dealloc_pages = 1usize << dealloc_order;
                        self.buddy.dealloc_pages(dealloc_addr, dealloc_pages);
                    }
                    return None;
                }

                parts[i] = (addr, order);
            }

            // Assertion: allocated pages must be >= requested pages
            let actual_pages: usize = parts[..block_count]
                .iter()
                .map(|(_, order)| 1usize << *order as usize)
                .sum();
            debug_assert!(
                actual_pages >= num_pages,
                "Allocated pages {actual_pages} < requested pages {num_pages}"
            );

            // Save metadata to tracker for proper deallocation
            if !self
                .composite_tracker
                .insert(min_addr, &parts[..block_count], block_count)
            {
                // Tracker is full, rollback and fail
                warn!("Composite tracker full, rolling back allocation");
                #[allow(clippy::needless_range_loop)]
                for j in 0..block_count {
                    let (dealloc_addr, dealloc_order) = parts[j];
                    let dealloc_pages = 1usize << dealloc_order;
                    self.buddy.dealloc_pages(dealloc_addr, dealloc_pages);
                }
                return None;
            }

            debug!("Contiguous block allocation succeeded: base_addr={min_addr:#x}, pages={num_pages}, parts={block_count}, actual_pages={actual_pages}");

            return Some(min_addr);
        }

        None
    }

    /// Print detailed statistics when allocation fails.
    ///
    /// This function delegates to the buddy allocator's detailed statistics reporter.
    #[cfg(feature = "tracking")]
    fn print_alloc_failure_stats(&self, num_pages: usize, alignment: usize) {
        self.buddy.print_alloc_failure_stats(num_pages, alignment);
    }

    #[cfg(not(feature = "tracking"))]
    fn print_alloc_failure_stats(&self, _num_pages: usize, _alignment: usize) {
        // No-op when tracking is disabled
    }
}

impl<const PAGE_SIZE: usize> PageAllocator for CompositePageAllocator<PAGE_SIZE> {
    const PAGE_SIZE: usize = PAGE_SIZE;

    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        if num_pages == 0 {
            return Err(AllocError::InvalidParam);
        }

        // Buddy allocations are power-of-two sized; round up (a no-op when
        // `num_pages` is already a power of two).
        let buddy_pages = num_pages.next_power_of_two();

        // Try to allocate from buddy system first
        let base_addr = match self.buddy.alloc_pages(buddy_pages, alignment) {
            Ok(addr) => addr,
            Err(_) => {
                // Standard allocation failed, try contiguous block combination
                debug!(
                    "Standard allocation failed, trying contiguous block combination for {num_pages} pages"
                );
                if let Some(addr) = self.try_combine_contiguous_blocks(num_pages, alignment) {
                    return Ok(addr);
                }
                self.print_alloc_failure_stats(num_pages, alignment);
                return Err(AllocError::NoMemory);
            }
        };

        Ok(base_addr)
    }

    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        if num_pages == 0 {
            return;
        }

        // Check if this is a composite allocation
        if let Some(info) = self.composite_tracker.find(pos) {
            // Composite block: deallocate each part separately
            debug!(
                "Deallocating composite block: base_addr={:#x}, part_count={}",
                pos, info.part_count
            );

            for i in 0..info.part_count as usize {
                let (addr, order) = info.parts[i];
                let pages = 1usize << order;
                debug!("  Part {i}: addr={addr:#x}, order={order}, pages={pages}");
                self.buddy.dealloc_pages(addr, pages);
            }

            // Remove from tracker
            self.composite_tracker.remove(pos);
        } else {
            // Regular buddy block: apply the same power-of-two rounding used at
            // allocation time so the entire buddy block is returned to the buddy
            // allocator.
            self.buddy.dealloc_pages(pos, num_pages.next_power_of_two());
        }
    }

    /// Allocate contiguous memory pages at a specific address.
    ///
    /// Delegates to the buddy allocator.
    fn alloc_pages_at(
        &mut self,
        base: usize,
        num_pages: usize,
        alignment: usize,
    ) -> AllocResult<usize> {
        self.buddy.alloc_pages_at(base, num_pages, alignment)
    }

    /// Return total number of memory pages.
    fn total_pages(&self) -> usize {
        self.buddy.total_pages()
    }

    /// Return number of allocated memory pages.
    fn used_pages(&self) -> usize {
        self.buddy.used_pages()
    }

    /// Return number of available memory pages.
    fn available_pages(&self) -> usize {
        self.buddy.available_pages()
    }
}

impl<const PAGE_SIZE: usize> CompositePageAllocator<PAGE_SIZE> {
    /// Get buddy allocator statistics
    #[cfg(feature = "tracking")]
    pub fn get_buddy_stats(&self) -> crate::buddy::BuddyStats {
        self.buddy.get_stats()
    }
}

impl<const PAGE_SIZE: usize> BaseAllocator for CompositePageAllocator<PAGE_SIZE> {
    /// Initialize the allocator with a free memory region.
    fn init(&mut self, start: usize, size: usize) {
        self.buddy.init(start, size);
    }

    /// Add a free memory region to the allocator.
    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult<()> {
        self.buddy.add_memory(start, size)
    }
}

// Implement PageAllocatorForSlab for CompositePageAllocator
impl<const PAGE_SIZE: usize> crate::slab::PageAllocatorForSlab
    for CompositePageAllocator<PAGE_SIZE>
{
    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        <Self as PageAllocator>::alloc_pages(self, num_pages, alignment)
    }

    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        <Self as PageAllocator>::dealloc_pages(self, pos, num_pages)
    }
}

impl<const PAGE_SIZE: usize> Default for CompositePageAllocator<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_composite_block_tracker() {
        let mut tracker = CompositeBlockTracker::new();

        // Test insertion
        let parts = [(0x1000, 3), (0x2000, 2)];
        assert!(tracker.insert(0x1000, &parts, 2));
        assert_eq!(tracker.count, 1);

        // Test finding
        let info = tracker.find(0x1000);
        assert!(info.is_some());
        let info = info.unwrap();
        assert_eq!(info.base_addr, 0x1000);
        assert_eq!(info.part_count, 2);
        assert_eq!(info.parts[0], (0x1000, 3));
        assert_eq!(info.parts[1], (0x2000, 2));

        // Test removal
        assert!(tracker.remove(0x1000));
        assert_eq!(tracker.count, 0);
        assert!(tracker.find(0x1000).is_none());
    }

    #[test]
    fn test_composite_block_tracker_capacity() {
        let mut tracker = CompositeBlockTracker::new();

        // Fill the tracker to capacity
        for i in 0..MAX_COMPOSITE_ALLOCS {
            let parts = [(0x1000 + i * 0x1000, 3)];
            assert!(tracker.insert(0x1000 + i * 0x1000, &parts, 1));
        }

        assert_eq!(tracker.count, MAX_COMPOSITE_ALLOCS);

        // Try to insert one more - should fail
        let parts = [(0x10000, 3)];
        assert!(!tracker.insert(0x10000, &parts, 1));

        // Remove one and verify we can insert again
        assert!(tracker.remove(0x1000));
        assert!(tracker.insert(0x10000, &parts, 1));
    }

    #[test]
    fn test_composite_tracker_find_nonexistent() {
        let tracker = CompositeBlockTracker::new();
        assert!(tracker.find(0x1000).is_none());
    }

    #[test]
    fn test_composite_tracker_remove_nonexistent() {
        let mut tracker = CompositeBlockTracker::new();
        assert!(!tracker.remove(0x1000));
    }

    #[test]
    fn test_composite_tracker_multiple_blocks() {
        let mut tracker = CompositeBlockTracker::new();

        let parts1 = [(0x1000, 3), (0x2000, 2)];
        let parts2 = [(0x3000, 4), (0x4000, 3), (0x5000, 2)];

        tracker.insert(0x1000, &parts1, 2);
        tracker.insert(0x3000, &parts2, 3);

        assert_eq!(tracker.count, 2);

        let info1 = tracker.find(0x1000);
        assert!(info1.is_some());
        assert_eq!(info1.unwrap().part_count, 2);

        let info2 = tracker.find(0x3000);
        assert!(info2.is_some());
        assert_eq!(info2.unwrap().part_count, 3);
    }
}

#[cfg(test)]
mod unit_tests {
    use super::*;
    use alloc::alloc::{alloc, dealloc};
    use core::alloc::Layout;

    const TEST_HEAP_SIZE: usize = 16 * 1024 * 1024;
    const TEST_PAGE_SIZE: usize = 0x1000;

    fn alloc_test_heap(size: usize) -> (*mut u8, Layout) {
        let layout = Layout::from_size_align(size, TEST_PAGE_SIZE).unwrap();
        let ptr = unsafe { alloc(layout) };
        assert!(!ptr.is_null());
        (ptr, layout)
    }

    fn dealloc_test_heap(ptr: *mut u8, layout: Layout) {
        unsafe { dealloc(ptr, layout) };
    }

    #[test]
    fn test_composite_allocator_basic() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        let addr1 = allocator.alloc_pages(1, TEST_PAGE_SIZE).unwrap();
        let addr2 = allocator.alloc_pages(4, TEST_PAGE_SIZE).unwrap();

        assert!(addr1 >= heap_addr && addr1 < heap_addr + TEST_HEAP_SIZE);
        assert!(addr2 >= heap_addr && addr2 < heap_addr + TEST_HEAP_SIZE);

        allocator.dealloc_pages(addr1, 1);
        allocator.dealloc_pages(addr2, 4);

        dealloc_test_heap(heap_ptr, heap_layout);
    }

    #[test]
    fn test_composite_allocator_alignment() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        let addr = allocator.alloc_pages(1, TEST_PAGE_SIZE * 4).unwrap();
        assert_eq!(addr & (TEST_PAGE_SIZE * 4 - 1), 0);

        allocator.dealloc_pages(addr, 1);
        dealloc_test_heap(heap_ptr, heap_layout);
    }
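
    // Additional sketch: exercises the non-power-of-two path of `alloc_pages`,
    // which rounds the request up to the next power of two before asking the
    // buddy allocator. Assumes the same test-heap setup as the tests above.
    #[test]
    fn test_composite_allocator_non_power_of_two() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);
        let heap_addr = heap_ptr as usize;

        let mut allocator = CompositePageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_addr, TEST_HEAP_SIZE);

        // A 3-page request is rounded up to a 4-page buddy block internally.
        let addr = allocator.alloc_pages(3, TEST_PAGE_SIZE).unwrap();
        assert!(addr >= heap_addr && addr < heap_addr + TEST_HEAP_SIZE);

        // Deallocation applies the same rounding, freeing the whole 4-page block.
        allocator.dealloc_pages(addr, 3);

        dealloc_test_heap(heap_ptr, heap_layout);
    }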
}