// buddy_slab_allocator/buddy/buddy_allocator.rs
1//! Multi-zone buddy allocator using global node pool
2//!
3//! Provides buddy allocator with support for multiple memory zones and
4//! a single shared global node pool for all zones and orders.
5
6use crate::{AllocError, AllocResult, BaseAllocator, PageAllocator};
7
8#[cfg(feature = "log")]
9use log::{debug, error, info, warn};
10
11#[cfg(feature = "tracking")]
12use super::ZoneInfo;
13
14use super::{buddy_block::MAX_ZONES, buddy_set::BuddySet, global_node_pool::GlobalNodePool};
15
/// Pages reserved at the start of the bootstrap region for the global node pool.
const NODE_POOL_PAGES: usize = 10;
/// Below this many free nodes the pool is considered near exhaustion and
/// `maybe_expand_node_pool` tries to grow it.
const NODE_POOL_LOW_WATER_NODES: usize = 128;
/// Pages requested from a zone each time the node pool is expanded.
const NODE_POOL_EXPAND_PAGES: usize = 5;
/// Threshold for low-memory (DMA32-like) physical addresses: 4GiB.
const LOWMEM_PHYS_THRESHOLD: usize = 1usize << 32;
21
22#[cfg(feature = "tracking")]
23use super::stats::{BuddyStats, MemoryStatsReporter};
24
/// Buddy page allocator with multi-zone support and global node pool
///
/// The `global_node_pool` stores linked-list nodes (ListNode<BuddyBlock>),
/// which are used to construct the free lists in each BuddySet.
/// Memory pages themselves are tracked by BuddyBlock values, not by this pool.
pub struct BuddyPageAllocator<const PAGE_SIZE: usize = { crate::DEFAULT_PAGE_SIZE }> {
    // Fixed-capacity zone table; only the first `num_zones` entries are live.
    zones: [BuddySet<PAGE_SIZE>; MAX_ZONES],
    // Number of initialized entries in `zones`.
    num_zones: usize,
    // Shared pool of free-list nodes used by every zone and every order.
    global_node_pool: GlobalNodePool,
    // Aggregated statistics across all zones, refreshed on alloc/dealloc paths.
    #[cfg(feature = "tracking")]
    stats: BuddyStats,
    /// Optional address translator used to reason about physical addresses.
    addr_translator: Option<&'static dyn crate::AddrTranslator>,
}
39
40impl<const PAGE_SIZE: usize> BuddyPageAllocator<PAGE_SIZE> {
41    pub const fn new() -> Self {
42        Self {
43            zones: [const { BuddySet::<PAGE_SIZE>::empty() }; MAX_ZONES],
44            num_zones: 0,
45            global_node_pool: GlobalNodePool::new(),
46            #[cfg(feature = "tracking")]
47            stats: BuddyStats::new(),
48            addr_translator: None,
49        }
50    }
51
52    /// Set the address translator so that the allocator can reason about
53    /// physical address ranges (e.g. low-memory regions below 4GiB).
54    pub fn set_addr_translator(&mut self, translator: &'static dyn crate::AddrTranslator) {
55        self.addr_translator = Some(translator);
56    }
57
58    /// Initialize the global node pool and bootstrap with initial memory region
59    pub fn init(&mut self, base_addr: usize, size: usize) {
60        self.bootstrap(base_addr, size);
61    }
62
63    /// Bootstrap allocator with initial memory region
64    pub fn bootstrap(&mut self, base_addr: usize, size: usize) {
65        if self.num_zones >= MAX_ZONES {
66            panic!("Cannot bootstrap: maximum zones reached");
67        }
68
69        // Reserve a small region at the beginning for the global node pool
70        let node_region_size = NODE_POOL_PAGES * PAGE_SIZE;
71        if size <= node_region_size {
72            panic!("Cannot bootstrap: region too small for node pool");
73        }
74        let node_region_start = base_addr;
75
76        self.global_node_pool
77            .init(node_region_start, node_region_size);
78
79        let zone_base = base_addr + node_region_size;
80        let zone_size = size - node_region_size;
81
82        self.zones[0] = BuddySet::new(zone_base, zone_size, 0);
83        self.zones[0].init(&mut self.global_node_pool, zone_base, zone_size);
84        // Mark whether this zone contains any low (<4G) physical memory, if translator is set.
85        if let Some(translator) = self.addr_translator {
86            if let Some(pa_start) = translator.virt_to_phys(zone_base) {
87                if pa_start < LOWMEM_PHYS_THRESHOLD {
88                    self.zones[0].is_lowmem = true;
89                }
90            }
91        }
92        self.num_zones = 1;
93
94        #[cfg(feature = "tracking")]
95        self.update_stats();
96    }
97
98    #[cfg(feature = "tracking")]
99    pub fn get_stats(&self) -> BuddyStats {
100        self.stats
101    }
102
103    /// Get global node pool statistics
104    pub fn get_node_pool_stats(&self) -> super::global_node_pool::GlobalPoolStats {
105        self.global_node_pool.get_stats()
106    }
107
108    /// Get number of zones in the allocator
109    pub fn get_zone_count(&self) -> usize {
110        self.num_zones
111    }
112
113    /// Allocate contiguous low-memory pages (physical address < 4GiB).
114    ///
115    /// This API is intended for DMA32-like use cases. It only considers zones
116    /// that are marked as `is_lowmem`, and after allocation it performs a
117    /// strict physical boundary check to ensure that both the start and end
118    /// physical addresses are below the 4GiB threshold.
119    pub fn alloc_pages_lowmem(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
120        if num_pages == 0 {
121            return Err(AllocError::InvalidParam);
122        }
123
124        let translator = self.addr_translator.ok_or(AllocError::InvalidParam)?;
125
126        // Try to expand node pool if we are close to exhaustion
127        self.maybe_expand_node_pool();
128
129        let size_bytes = num_pages * PAGE_SIZE;
130
131        for i in 0..self.num_zones {
132            if !self.zones[i].is_lowmem {
133                continue;
134            }
135
136            match self.zones[i].alloc_pages(&mut self.global_node_pool, num_pages, alignment) {
137                Ok(addr) => {
138                    let start_va = addr;
139                    let end_va = addr + size_bytes - 1;
140
141                    // Ensure both start and end physical addresses are below the
142                    // low-memory threshold to avoid crossing the 4GiB boundary.
143                    if let (Some(pa_start), Some(pa_end)) = (
144                        translator.virt_to_phys(start_va),
145                        translator.virt_to_phys(end_va),
146                    ) {
147                        if pa_start < LOWMEM_PHYS_THRESHOLD && pa_end < LOWMEM_PHYS_THRESHOLD {
148                            #[cfg(feature = "tracking")]
149                            self.update_stats();
150                            return Ok(addr);
151                        }
152                    }
153
154                    // Boundary check failed: roll back this allocation and
155                    // continue searching for another suitable block.
156                    self.zones[i].dealloc_pages(&mut self.global_node_pool, addr, num_pages);
157                }
158                Err(_) => {
159                    continue;
160                }
161            }
162        }
163
164        info!(
165            "buddy allocator: Low-memory allocation failure: {} Byte, align {}",
166            num_pages * PAGE_SIZE,
167            alignment
168        );
169        Err(AllocError::NoMemory)
170    }
171
172    /// Get free blocks of a specific order from a zone
173    /// Returns None if zone doesn't exist
174    pub fn get_free_blocks_by_order(
175        &self,
176        zone_id: usize,
177        order: u32,
178    ) -> Option<super::pooled_list::PooledListIter<'_>> {
179        if zone_id >= self.num_zones {
180            return None;
181        }
182        Some(self.zones[zone_id].get_free_blocks_by_order(&self.global_node_pool, order))
183    }
184
185    /// Update aggregated statistics from all zones
186    #[cfg(feature = "tracking")]
187    fn update_stats(&mut self) {
188        let mut total_stats = BuddyStats::new();
189
190        for i in 0..self.num_zones {
191            let zone_stats = self.zones[i].get_stats();
192            total_stats.add(&zone_stats);
193        }
194
195        self.stats = total_stats;
196    }
197
198    /// Ensure the global node pool has enough free nodes.
199    ///
200    /// When the number of free nodes falls below a low-water mark,
201    /// try to reserve a few pages from any available zone and add them as a new node region.
202    fn maybe_expand_node_pool(&mut self) {
203        let free_nodes = self.global_node_pool.free_node_count();
204        if free_nodes >= NODE_POOL_LOW_WATER_NODES {
205            return;
206        }
207        if self.num_zones == 0 {
208            error!("buddy allocator: no zones available to expand node pool");
209            return;
210        }
211
212        let expand_pages = NODE_POOL_EXPAND_PAGES;
213        let expand_size = expand_pages * PAGE_SIZE;
214
215        // Try all zones to allocate memory for node pool expansion
216        for i in 0..self.num_zones {
217            match self.zones[i].alloc_pages(&mut self.global_node_pool, expand_pages, PAGE_SIZE) {
218                Ok(addr) => {
219                    debug!(
220                        "buddy allocator: expanding node pool from zone {}: free_nodes={} region=[{:#x}, {:#x})",
221                        i,
222                        free_nodes,
223                        addr,
224                        addr + expand_size
225                    );
226                    self.global_node_pool.add_region(addr, expand_size);
227                    return;
228                }
229                Err(_) => {
230                    continue;
231                }
232            }
233        }
234
235        warn!(
236            "buddy allocator: failed to expand node pool at low water: free_nodes={} tried {} zones",
237            free_nodes,
238            self.num_zones
239        );
240    }
241
242    /// Add a new memory region as a new zone
243    pub fn add_memory_region(&mut self, start: usize, size: usize) -> AllocResult<()> {
244        if self.num_zones >= MAX_ZONES {
245            error!("buddy allocator: Cannot add region: maximum zones ({MAX_ZONES}) reached");
246            return Err(AllocError::NoMemory);
247        }
248
249        // Align to page boundaries
250        let aligned_start = start & !(PAGE_SIZE - 1);
251        let end = start + size;
252        let aligned_end = (end + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
253        let aligned_size = aligned_end - aligned_start;
254
255        if aligned_size == 0 || aligned_size < PAGE_SIZE {
256            warn!("buddy allocator: Aligned size is too small: {aligned_size:#x}, skipping region");
257            return Err(AllocError::InvalidParam);
258        }
259
260        // Check for overlap with existing zones
261        for i in 0..self.num_zones {
262            let zone = &self.zones[i];
263            if !(aligned_end <= zone.base_addr || aligned_start >= zone.end_addr) {
264                error!(
265                    "buddy allocator: Region [{:#x}, {:#x}) overlaps with zone {} [{:#x}, {:#x})",
266                    aligned_start, aligned_end, i, zone.base_addr, zone.end_addr
267                );
268                return Err(AllocError::MemoryOverlap);
269            }
270        }
271
272        let zone_id = self.num_zones;
273        self.zones[zone_id] = BuddySet::new(aligned_start, aligned_size, zone_id);
274        self.zones[zone_id].init(&mut self.global_node_pool, aligned_start, aligned_size);
275        // Mark whether this zone contains any low (<4G) physical memory, if translator is set.
276        if let Some(translator) = self.addr_translator {
277            if let Some(pa_start) = translator.virt_to_phys(aligned_start) {
278                if pa_start < LOWMEM_PHYS_THRESHOLD {
279                    self.zones[zone_id].is_lowmem = true;
280                }
281            }
282        }
283        self.num_zones += 1;
284
285        // Print all zone information after successfully adding a new memory region
286        #[cfg(feature = "tracking")]
287        self.print_zone_info();
288
289        Ok(())
290    }
291
292    /// Find the zone that contains the given address
293    pub fn find_zone_for_addr(&self, addr: usize) -> Option<usize> {
294        (0..self.num_zones).find(|&i| self.zones[i].addr_in_zone(addr))
295    }
296
297    /// Print detailed allocation failure statistics
298    ///
299    /// This method is public to allow CompositePageAllocator to call it
300    /// when allocation fails, providing detailed per-zone failure information.
301    #[cfg(feature = "tracking")]
302    pub fn print_alloc_failure_stats(&self, num_pages: usize, alignment: usize) {
303        let mut zone_infos: [Option<ZoneInfo>; MAX_ZONES] = [None; MAX_ZONES];
304        let mut zone_stats: [Option<BuddyStats>; MAX_ZONES] = [None; MAX_ZONES];
305
306        for i in 0..self.num_zones {
307            zone_infos[i] = Some(self.zones[i].zone_info());
308            zone_stats[i] = Some(self.zones[i].get_stats());
309        }
310
311        // Create slices from the initialized elements
312        let zone_infos_slice: &[ZoneInfo] = unsafe {
313            core::slice::from_raw_parts(zone_infos.as_ptr() as *const ZoneInfo, self.num_zones)
314        };
315        let zone_stats_slice: &[BuddyStats] = unsafe {
316            core::slice::from_raw_parts(zone_stats.as_ptr() as *const BuddyStats, self.num_zones)
317        };
318
319        MemoryStatsReporter::print_alloc_failure_stats(
320            PAGE_SIZE,
321            self.num_zones,
322            &self.stats,
323            zone_infos_slice,
324            zone_stats_slice,
325            num_pages,
326            alignment,
327        );
328    }
329
330    /// Print all zone information and block distribution
331    pub fn print_zone_info(&self) {
332        info!("========== Buddy Allocator Zones Info ==========");
333        info!("Total zones: {}", self.num_zones);
334        info!("Page size: {PAGE_SIZE:#x} ({PAGE_SIZE})");
335        info!("");
336
337        for i in 0..self.num_zones {
338            let zone = &self.zones[i];
339            let _zone_info = zone.zone_info();
340            info!("Zone {i}:");
341            info!(
342                "  Address range: [{:#x}, {:#x})",
343                _zone_info.start_addr, _zone_info.end_addr
344            );
345            info!("  Total pages: {}", _zone_info.total_pages);
346            info!(
347                "  Total size: {:#x} ({} MB)",
348                _zone_info.total_pages * PAGE_SIZE,
349                (_zone_info.total_pages * PAGE_SIZE) / (1024 * 1024)
350            );
351            info!("  Free blocks distribution:");
352
353            // Print block distribution for each order
354            for order in 0..=zone.max_order() {
355                let block_count = zone.get_order_block_count(order);
356                if block_count > 0 {
357                    let _block_size = (1 << order) * PAGE_SIZE;
358                    info!(
359                        "    Order {}: {} blocks (size {} bytes each, total {:#x})",
360                        order,
361                        block_count,
362                        _block_size,
363                        block_count * _block_size
364                    );
365                }
366            }
367            info!("");
368        }
369
370        info!("Global node pool stats:");
371        let _pool_stats = self.global_node_pool.get_stats();
372        info!("  Total allocations: {}", _pool_stats.total_allocations);
373        info!("  Free nodes: {}", _pool_stats.free_nodes);
374        info!("==============================================");
375    }
376
377    #[cfg(not(feature = "tracking"))]
378    pub fn print_alloc_failure_stats(&self, _num_pages: usize, _alignment: usize) {
379        // No-op when tracking is disabled
380    }
381}
382
383impl<const PAGE_SIZE: usize> crate::slab::PageAllocatorForSlab for BuddyPageAllocator<PAGE_SIZE> {
384    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
385        <Self as PageAllocator>::alloc_pages(self, num_pages, alignment)
386    }
387
388    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
389        <Self as PageAllocator>::dealloc_pages(self, pos, num_pages);
390    }
391}
392
impl<const PAGE_SIZE: usize> Default for BuddyPageAllocator<PAGE_SIZE> {
    /// Equivalent to [`BuddyPageAllocator::new`]: an empty allocator that
    /// still needs `init`/`bootstrap` before use.
    fn default() -> Self {
        Self::new()
    }
}
398
impl<const PAGE_SIZE: usize> BaseAllocator for BuddyPageAllocator<PAGE_SIZE> {
    /// Initialize with the first memory region: reserves node-pool pages and
    /// creates zone 0. Panics on invalid input — see `bootstrap`.
    fn init(&mut self, start: usize, size: usize) {
        self.bootstrap(start, size);
    }

    /// Add another region as a new zone, then refresh aggregate statistics.
    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult<()> {
        self.add_memory_region(start, size)?;
        #[cfg(feature = "tracking")]
        self.update_stats();
        Ok(())
    }
}
411
412impl<const PAGE_SIZE: usize> PageAllocator for BuddyPageAllocator<PAGE_SIZE> {
413    const PAGE_SIZE: usize = PAGE_SIZE;
414
415    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
416        // Try to expand node pool if we are close to exhaustion
417        self.maybe_expand_node_pool();
418
419        // First, prefer zones that are NOT marked as low-memory so that
420        // low-memory regions can be reserved for special use (e.g. DMA32).
421        for i in 0..self.num_zones {
422            if self.zones[i].is_lowmem {
423                continue;
424            }
425            match self.zones[i].alloc_pages(&mut self.global_node_pool, num_pages, alignment) {
426                Ok(addr) => {
427                    #[cfg(feature = "tracking")]
428                    self.update_stats();
429                    return Ok(addr);
430                }
431                Err(_) => {
432                    continue;
433                }
434            }
435        }
436
437        // Then, fall back to zones that are marked as low-memory.
438        for i in 0..self.num_zones {
439            if !self.zones[i].is_lowmem {
440                continue;
441            }
442            match self.zones[i].alloc_pages(&mut self.global_node_pool, num_pages, alignment) {
443                Ok(addr) => {
444                    #[cfg(feature = "tracking")]
445                    self.update_stats();
446                    return Ok(addr);
447                }
448                Err(_) => {
449                    continue;
450                }
451            }
452        }
453
454        debug!(
455            "buddy allocator: Allocation failure: {} Byte, align {}",
456            num_pages * PAGE_SIZE,
457            alignment
458        );
459        Err(AllocError::NoMemory)
460    }
461
462    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
463        self.maybe_expand_node_pool();
464        if let Some(zone_idx) = self.find_zone_for_addr(pos) {
465            self.zones[zone_idx].dealloc_pages(&mut self.global_node_pool, pos, num_pages);
466            #[cfg(feature = "tracking")]
467            self.update_stats();
468        } else {
469            warn!("buddy allocator: Dealloc pages at {pos:#x}: address not in any zone");
470        }
471    }
472
473    fn alloc_pages_at(
474        &mut self,
475        base: usize,
476        num_pages: usize,
477        alignment: usize,
478    ) -> AllocResult<usize> {
479        // Try to expand node pool if we are close to exhaustion
480        self.maybe_expand_node_pool();
481
482        if let Some(zone_idx) = self.find_zone_for_addr(base) {
483            match self.zones[zone_idx].alloc_pages_at(
484                &mut self.global_node_pool,
485                base,
486                num_pages,
487                alignment,
488            ) {
489                Ok(addr) => {
490                    #[cfg(feature = "tracking")]
491                    self.update_stats();
492                    Ok(addr)
493                }
494                Err(e) => Err(e),
495            }
496        } else {
497            warn!("buddy allocator: alloc_pages_at: address {base:#x} not in any zone");
498            Err(AllocError::InvalidParam)
499        }
500    }
501
502    fn total_pages(&self) -> usize {
503        #[cfg(feature = "tracking")]
504        return self.stats.total_pages;
505        #[cfg(not(feature = "tracking"))]
506        return 0;
507    }
508
509    fn used_pages(&self) -> usize {
510        #[cfg(feature = "tracking")]
511        return self.stats.used_pages;
512        #[cfg(not(feature = "tracking"))]
513        return 0;
514    }
515
516    fn available_pages(&self) -> usize {
517        #[cfg(feature = "tracking")]
518        return self.stats.free_pages;
519        #[cfg(not(feature = "tracking"))]
520        return 0;
521    }
522}
523
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::alloc::{alloc, dealloc};
    use core::alloc::Layout;

    const TEST_HEAP_SIZE: usize = 16 * 1024 * 1024;
    const TEST_PAGE_SIZE: usize = 0x1000;

    // Allocate a page-aligned heap for the allocator under test.
    fn alloc_test_heap(size: usize) -> (*mut u8, Layout) {
        let layout = Layout::from_size_align(size, TEST_PAGE_SIZE).unwrap();
        // SAFETY: `layout` has non-zero size and a valid power-of-two align.
        let ptr = unsafe { alloc(layout) };
        assert!(!ptr.is_null());
        (ptr, layout)
    }

    fn dealloc_test_heap(ptr: *mut u8, layout: Layout) {
        // SAFETY: `ptr` came from `alloc` with this exact `layout`.
        unsafe { dealloc(ptr, layout) };
    }

    #[test]
    fn test_buddy_allocator_init() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);

        let mut allocator = BuddyPageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_ptr as usize, TEST_HEAP_SIZE);

        // A single-page allocation must succeed and be returnable.
        let page = allocator.alloc_pages(1, TEST_PAGE_SIZE).unwrap();
        allocator.dealloc_pages(page, 1);

        dealloc_test_heap(heap_ptr, heap_layout);
    }

    #[test]
    fn test_buddy_allocator_multi_pages() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);

        let mut allocator = BuddyPageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_ptr as usize, TEST_HEAP_SIZE);

        // Two live allocations must not alias each other.
        let first = allocator.alloc_pages(4, TEST_PAGE_SIZE).unwrap();
        let second = allocator.alloc_pages(8, TEST_PAGE_SIZE).unwrap();
        assert_ne!(first, second);

        allocator.dealloc_pages(first, 4);
        allocator.dealloc_pages(second, 8);

        dealloc_test_heap(heap_ptr, heap_layout);
    }

    #[test]
    fn test_buddy_allocator_alignment() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);

        let mut allocator = BuddyPageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_ptr as usize, TEST_HEAP_SIZE);

        // Request 4-page alignment and verify the result honors it.
        let align = TEST_PAGE_SIZE * 4;
        let page = allocator.alloc_pages(1, align).unwrap();
        assert_eq!(page % align, 0);

        allocator.dealloc_pages(page, 1);
        dealloc_test_heap(heap_ptr, heap_layout);
    }

    #[test]
    fn test_buddy_allocator_zone_count() {
        let (heap_ptr, heap_layout) = alloc_test_heap(TEST_HEAP_SIZE);

        let mut allocator = BuddyPageAllocator::<TEST_PAGE_SIZE>::new();
        allocator.init(heap_ptr as usize, TEST_HEAP_SIZE);

        // Bootstrapping creates exactly one zone.
        assert_eq!(allocator.get_zone_count(), 1);

        dealloc_test_heap(heap_ptr, heap_layout);
    }
}