buddy_slab_allocator/buddy/buddy_set.rs

//! Single-zone buddy allocator using global node pool
//!
//! Implements the core buddy system for a single memory zone using
//! pooled linked lists that draw nodes from a shared global pool.

use crate::{AllocError, AllocResult};

#[cfg(feature = "log")]
use log::{error, warn};

use super::{
    buddy_block::{BuddyBlock, ZoneInfo, DEFAULT_MAX_ORDER},
    global_node_pool::GlobalNodePool,
    pooled_list::PooledLinkedList,
};

/// A buddy set representing a single memory zone.
///
/// Uses pooled linked lists backed by a shared global node pool for
/// efficient memory usage; all zones draw from the same pool.
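///
/// # Example
///
/// A minimal usage sketch (not compiled; the `GlobalNodePool` constructor,
/// the 16 MiB zone at `0x8000_0000`, and 4 KiB pages are all illustrative
/// assumptions):
///
/// ```ignore
/// let mut pool = GlobalNodePool::new();
/// let mut zone: BuddySet = BuddySet::empty();
/// zone.init(&mut pool, 0x8000_0000, 16 * 1024 * 1024);
///
/// // Allocate four pages with page alignment, then return them.
/// let addr = zone.alloc_pages(&mut pool, 4, 4096).unwrap();
/// zone.dealloc_pages(&mut pool, addr, 4);
/// ```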
pub struct BuddySet<const PAGE_SIZE: usize = { crate::DEFAULT_PAGE_SIZE }> {
    pub(crate) base_addr: usize,
    pub(crate) end_addr: usize,
    total_pages: usize,
    zone_id: usize,
    /// Whether this zone contains any memory that can map to low (<4G) physical addresses.
    pub(crate) is_lowmem: bool,
    /// Free lists for each order
    free_lists: [PooledLinkedList; DEFAULT_MAX_ORDER + 1],
}

impl<const PAGE_SIZE: usize> BuddySet<PAGE_SIZE> {
    /// Create a new buddy set for a zone (uninitialized, must call init())
    pub const fn new(base_addr: usize, size: usize, zone_id: usize) -> Self {
        Self {
            base_addr,
            end_addr: base_addr + size,
            total_pages: size / PAGE_SIZE,
            zone_id,
            is_lowmem: false,
            free_lists: [const { PooledLinkedList::new() }; DEFAULT_MAX_ORDER + 1],
        }
    }

    /// Create an empty buddy set
    pub const fn empty() -> Self {
        Self::new(0, 0, 0)
    }

    /// Maximum supported block order for this zone
    pub const fn max_order(&self) -> usize {
        DEFAULT_MAX_ORDER
    }

    /// Add a block to the appropriate list for its order
    fn add_block_to_order(
        &mut self,
        pool: &mut GlobalNodePool,
        order: usize,
        block: BuddyBlock,
    ) -> bool {
        if order > DEFAULT_MAX_ORDER {
            error!(
                "zone {}: Order {} exceeds maximum order {}",
                self.zone_id, order, DEFAULT_MAX_ORDER
            );
            return false;
        }

        self.free_lists[order].insert_sorted(pool, block)
    }

    /// Find a block with the given address in the free list for its order.
    ///
    /// Returns `(node_idx, prev_idx)` on success, so callers can remove the
    /// node in O(1) via `remove_with_prev`.
    fn find_block_in_order(
        &self,
        pool: &GlobalNodePool,
        order: usize,
        addr: usize,
    ) -> Option<(usize, Option<usize>)> {
        if order > DEFAULT_MAX_ORDER {
            return None;
        }
        self.free_lists[order].find_by_addr(pool, addr)
    }

    /// Remove a block from its list
    #[allow(dead_code)]
    fn remove_block_from_order(
        &mut self,
        pool: &mut GlobalNodePool,
        order: usize,
        node_idx: usize,
    ) -> bool {
        if order > DEFAULT_MAX_ORDER {
            error!(
                "zone {}: Order {} exceeds maximum order {}",
                self.zone_id, order, DEFAULT_MAX_ORDER
            );
            return false;
        }
        self.free_lists[order].remove(pool, node_idx)
    }

    /// Check if an address belongs to this zone
    pub fn addr_in_zone(&self, addr: usize) -> bool {
        addr >= self.base_addr && addr < self.end_addr
    }

    /// Get zone information
    pub fn zone_info(&self) -> ZoneInfo {
        ZoneInfo {
            start_addr: self.base_addr,
            end_addr: self.end_addr,
            total_pages: self.total_pages,
            zone_id: self.zone_id,
        }
    }

    /// Initialize the buddy set with a memory region.
    ///
    /// Note that the base is rounded *down* and the end *up* to page
    /// boundaries, so the zone may cover slightly more than
    /// `[base_addr, base_addr + size)`; callers should pass page-aligned regions.
    pub fn init(&mut self, pool: &mut GlobalNodePool, base_addr: usize, size: usize) {
        let aligned_base = base_addr & !(PAGE_SIZE - 1);
        let end = base_addr + size;
        let aligned_end = (end + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
        let aligned_size = aligned_end - aligned_base;

        if aligned_size < PAGE_SIZE {
            panic!("Aligned size is too small: {:#x}", aligned_size);
        }

        self.base_addr = aligned_base;
        self.end_addr = aligned_end;
        self.total_pages = aligned_size / PAGE_SIZE;

        // Reset free lists
        for list in &mut self.free_lists {
            list.clear(pool);
        }

        self.init_free_blocks(pool);
    }

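    /// Carve the zone into maximal, naturally aligned power-of-two blocks.
    ///
    /// Worked example (illustrative): a 7-page zone starting at PFN 1 is
    /// carved into a 1-page block at PFN 1, a 2-page block at PFN 2, and a
    /// 4-page block at PFN 4, so each block's start PFN is a multiple of
    /// its page count, as the buddy system requires.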
    fn init_free_blocks(&mut self, pool: &mut GlobalNodePool) {
        let mut remaining_pages = self.total_pages;
        let mut current_addr = self.base_addr;

        while remaining_pages > 0 {
            // Find the maximum order constrained by:
            // 1. Remaining pages (can't allocate more than what's left)
            // 2. Address alignment (block address must be aligned to block_size)
            // 3. Maximum supported order

            // Maximum order based on remaining pages
            let max_order_by_pages = if remaining_pages.is_power_of_two() {
                remaining_pages.trailing_zeros() as usize
            } else {
                // For non-power-of-2, use the highest set bit position
                (remaining_pages.next_power_of_two() >> 1).trailing_zeros() as usize
            };

            // Maximum order based on address alignment:
            // for each order, check if current_addr is aligned to (2^order * PAGE_SIZE)
            let mut max_order_by_alignment = 0;
            for test_order in 0..=self.max_order() {
                let block_size = (1 << test_order) * PAGE_SIZE;
                if current_addr % block_size == 0 {
                    max_order_by_alignment = test_order;
                } else {
                    break;
                }
            }

            // Take the minimum of all constraints
            let order = max_order_by_pages
                .min(max_order_by_alignment)
                .min(self.max_order());

            let block_pages = 1 << order;
            let block_size = block_pages * PAGE_SIZE;

            // Verify alignment: address must be an exact multiple of block_size
            assert!(
                current_addr & (block_size - 1) == 0,
                "Block address {current_addr:#x} not aligned to block size {block_size:#x} (order {order})"
            );

            // Construct and add the block
            let block = BuddyBlock {
                order,
                addr: current_addr,
            };

            if !self.add_block_to_order(pool, order, block) {
                error!(
                    "zone {}: Failed to add block during init: addr={:#x}, order={}, remaining_pages={}",
                    self.zone_id, current_addr, order, remaining_pages
                );
                // Critical failure during initialization
                panic!("Failed to initialize buddy system");
            }

            current_addr += block_size;
            remaining_pages -= block_pages;
        }
    }

    /// Allocate pages using the buddy system.
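    ///
    /// `num_pages` is rounded up to the next power of two and the smallest
    /// free block satisfying both the size and `alignment` is popped,
    /// splitting larger blocks as needed. For example (illustrative), a
    /// request for 3 pages is served from an order-2 (4-page) block; the
    /// surplus page is not returned to the free lists until the whole
    /// block is freed.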
    pub fn alloc_pages(
        &mut self,
        pool: &mut GlobalNodePool,
        num_pages: usize,
        alignment: usize,
    ) -> AllocResult<usize> {
        if num_pages == 0 {
            return Err(AllocError::InvalidParam);
        }

        // Alignment must be a non-zero power of two; otherwise the
        // trailing_zeros-based order computation below is meaningless.
        if alignment == 0 || !alignment.is_power_of_two() {
            return Err(AllocError::InvalidParam);
        }

        // Find the required order (round up to the next power of 2)
        let required_order = if num_pages.is_power_of_two() {
            num_pages.trailing_zeros() as usize
        } else {
            num_pages.next_power_of_two().trailing_zeros() as usize
        };

        if required_order > self.max_order() {
            error!(
                "required order: {}, max order: {}",
                required_order,
                self.max_order()
            );
            return Err(AllocError::NoMemory);
        }

        // A block of order >= align_order is naturally aligned to `alignment`.
        let align_pages = alignment.div_ceil(PAGE_SIZE);
        let align_order = align_pages.trailing_zeros() as usize;
        let order_needed = required_order.max(align_order);

        // Try to find a block of the required order or higher
        for order in order_needed..=self.max_order() {
            if !self.free_lists[order].is_empty() {
                let mut block = self.free_lists[order].pop_front(pool).unwrap();

                // Split down to the required order
                while block.order > order_needed {
                    block.order -= 1;
                    let split_size = (1 << block.order) * PAGE_SIZE;
                    let buddy_addr = block.addr + split_size;

                    // Push the second half back to the free list (sorted!)
                    let success = self.add_block_to_order(
                        pool,
                        block.order,
                        BuddyBlock {
                            order: block.order,
                            addr: buddy_addr,
                        },
                    );
                    if !success {
                        warn!(
                            "Failed to push buddy block to free list during split at order {}",
                            block.order
                        );
                        // Undo this split: put the re-merged parent block back
                        // (with its order field matching the list it joins).
                        self.add_block_to_order(
                            pool,
                            block.order + 1,
                            BuddyBlock {
                                order: block.order + 1,
                                addr: block.addr,
                            },
                        );
                        return Err(AllocError::NoMemory);
                    }
                }

                // Verify alignment requirement
                assert!(
                    block.addr % alignment == 0,
                    "Allocated address {:#x} is not aligned to {:#x} bytes",
                    block.addr,
                    alignment
                );

                return Ok(block.addr);
            }
        }

        Err(AllocError::NoMemory)
    }

    /// Deallocate pages back to the buddy system with automatic merging.
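    ///
    /// Buddy arithmetic sketch (addresses illustrative, 4 KiB pages): for a
    /// free order-0 block at `0x5000`, the buddy is `0x5000 ^ 0x1000 = 0x4000`;
    /// if that buddy is also free, the pair merges into an order-1 block at
    /// `0x5000 & 0x4000 = 0x4000`, and merging retries one order up.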
    pub fn dealloc_pages(&mut self, pool: &mut GlobalNodePool, addr: usize, num_pages: usize) {
        if num_pages == 0 {
            warn!("zone {}: Trying to deallocate 0 pages", self.zone_id);
            return;
        }

        // Validate that the address belongs to this zone
        if !self.addr_in_zone(addr) {
            error!(
                "zone {}: Address {:#x} not in zone [{:#x}, {:#x})",
                self.zone_id, addr, self.base_addr, self.end_addr
            );
            return;
        }

        // Check page alignment before deriving the PFN, since the division
        // below would silently truncate a misaligned address.
        if addr & (PAGE_SIZE - 1) != 0 {
            error!(
                "zone {}: Attempt to free page at non-page-aligned address {:#x}",
                self.zone_id, addr
            );
            return;
        }

        // Calculate the order for this deallocation.
        // Handle non-power-of-2 sizes by rounding up (same as alloc_pages).
        let mut order = if num_pages.is_power_of_two() {
            num_pages.trailing_zeros() as usize
        } else {
            num_pages.next_power_of_two().trailing_zeros() as usize
        };

        if order > DEFAULT_MAX_ORDER {
            error!(
                "zone {}: Order {} exceeds maximum supported order {}",
                self.zone_id, order, DEFAULT_MAX_ORDER
            );
            return;
        }

        // Convert the address to a PFN (Page Frame Number)
        let pfn = addr / PAGE_SIZE;

        // Check block alignment using the PFN
        if pfn & ((1 << order) - 1) != 0 {
            error!(
                "zone {}: Page PFN {} is not properly aligned for order {} (needs alignment to {} pages)",
                self.zone_id, pfn, order, 1 << order
            );
            return;
        }

        // Integrated double-free detection and buddy merging
        let initial_order = order;
        let size = (1 << initial_order) * PAGE_SIZE;

        // 1. Descendant check: is any part of the block being freed already free?
        // This handles cases where a large block is freed but contains already-free sub-blocks.
        for i in 0..initial_order {
            if self.free_lists[i].has_block_in_range(pool, addr, addr + size) {
                warn!(
                    "zone {}: Double free (descendant) detected at order {} in range [{:#x}, {:#x})",
                    self.zone_id, i, addr, addr + size
                );
                return;
            }
        }

        // 2. Ancestor check and merging loop.
        // We check from initial_order up to max_order; for each i, we check
        // whether the naturally aligned base at that order is in free_lists[i].
        let mut current_addr = addr;
        let mut merging = true;

        for i in initial_order..=self.max_order() {
            let block_size = (1 << i) * PAGE_SIZE;
            let current_base = current_addr & !(block_size - 1);

            // Double-free detection (ancestor check): is this block or its parent already free?
            if self.find_block_in_order(pool, i, current_base).is_some() {
                warn!(
                    "zone {}: Double free detected at addr {:#x} (found at order {})",
                    self.zone_id, addr, i
                );
                return;
            }

            // Buddy merging
            if merging && i < self.max_order() {
                let buddy_addr = current_base ^ block_size;
                if self.addr_in_zone(buddy_addr) {
                    if let Some((node_idx, prev_idx)) =
                        self.find_block_in_order(pool, i, buddy_addr)
                    {
                        // Buddy found: remove it and continue merging at the next order.
                        self.free_lists[i].remove_with_prev(pool, node_idx, prev_idx);
                        // The buddies differ only in the block_size bit, so the
                        // AND yields the lower of the two addresses (the merged base).
                        current_addr = current_base & buddy_addr;
                        order = i + 1;
                        continue;
                    }
                }
                // No buddy found or out of zone: stop merging but keep checking for double frees.
                merging = false;
            }
        }

        // Add the final merged block to the appropriate free list (sorted!)
        let final_addr = current_addr;
        let block = BuddyBlock {
            order,
            addr: final_addr,
        };

        if !self.add_block_to_order(pool, order, block) {
            error!(
                "zone {}: Failed to push block to free list: addr={:#x}, order={}",
                self.zone_id, final_addr, order
            );
        }
    }

    /// Get statistics for this zone
    #[cfg(feature = "tracking")]
    pub fn get_stats(&self) -> super::stats::BuddyStats {
        let mut stats = super::stats::BuddyStats::new();
        stats.total_pages = self.total_pages;

        for order in 0..=DEFAULT_MAX_ORDER {
            let block_count = self.free_lists[order].len();
            stats.free_pages_by_order[order] = block_count;
            stats.free_pages += block_count * (1 << order);
        }

        stats.used_pages = stats.total_pages.saturating_sub(stats.free_pages);
        stats
    }

    /// Get free blocks of a specific order as an iterator.
    ///
    /// Panics if `order` exceeds `DEFAULT_MAX_ORDER`.
    pub fn get_free_blocks_by_order<'a>(
        &'a self,
        pool: &'a GlobalNodePool,
        order: u32,
    ) -> super::pooled_list::PooledListIter<'a> {
        self.free_lists[order as usize].iter(pool)
    }

    /// Get the number of free blocks in a specific order
    pub fn get_order_block_count(&self, order: usize) -> usize {
        if order <= DEFAULT_MAX_ORDER {
            self.free_lists[order].len()
        } else {
            0
        }
    }

    /// Allocate pages at a specific address.
    ///
    /// If the requested range is entirely covered by a free block, it is
    /// allocated; if that block is larger than the request, it is split and
    /// the unused pieces are returned to the free lists.
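    ///
    /// ```ignore
    /// // Illustrative sketch: reserve two pages at a fixed address inside
    /// // an initialized zone (addresses and sizes are assumptions).
    /// let addr = zone.alloc_pages_at(&mut pool, 0x8010_0000, 2, 4096)?;
    /// assert_eq!(addr, 0x8010_0000);
    /// ```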
    pub fn alloc_pages_at(
        &mut self,
        pool: &mut GlobalNodePool,
        base: usize,
        num_pages: usize,
        alignment: usize,
    ) -> AllocResult<usize> {
        if num_pages == 0 {
            return Err(AllocError::InvalidParam);
        }

        // Check that the address belongs to this zone
        if !self.addr_in_zone(base) {
            error!(
                "zone {}: Address {:#x} not in zone [{:#x}, {:#x})",
                self.zone_id, base, self.base_addr, self.end_addr
            );
            return Err(AllocError::InvalidParam);
        }

        // Check page alignment
        if base & (PAGE_SIZE - 1) != 0 {
            error!(
                "zone {}: Address {:#x} is not page-aligned",
                self.zone_id, base
            );
            return Err(AllocError::InvalidParam);
        }

        // Check the caller's alignment requirement (reject 0 to avoid a
        // division by zero in the modulo below)
        if alignment == 0 || base % alignment != 0 {
            error!(
                "zone {}: Address {:#x} is not aligned to {:#x}",
                self.zone_id, base, alignment
            );
            return Err(AllocError::InvalidParam);
        }

        // Check that the range fits in the zone
        let size = num_pages * PAGE_SIZE;
        if base + size > self.end_addr {
            error!(
                "zone {}: Allocation range [{:#x}, {:#x}) exceeds zone end {:#x}",
                self.zone_id,
                base,
                base + size,
                self.end_addr
            );
            return Err(AllocError::InvalidParam);
        }

        // Calculate the required order (round up to the next power of 2 if needed)
        let required_order = if num_pages.is_power_of_two() {
            num_pages.trailing_zeros() as usize
        } else {
            num_pages.next_power_of_two().trailing_zeros() as usize
        };

        // The allocated block must be naturally aligned to its size
        let pfn = base / PAGE_SIZE;
        let aligned_pfn = pfn & !((1 << required_order) - 1);

        // Check that the requested base is properly aligned for its size
        if aligned_pfn != pfn {
            error!(
                "zone {}: Base address {:#x} (PFN {}) is not aligned for {} pages",
                self.zone_id,
                base,
                pfn,
                1 << required_order
            );
            return Err(AllocError::InvalidParam);
        }

        // Try to find a free block that contains this address,
        // starting from the required order and going up to larger blocks
        for order in required_order..=self.max_order() {
            let block_pfn = pfn & !((1 << order) - 1);
            let block_addr = block_pfn * PAGE_SIZE;

            // Check whether a free block of this order contains the request
            if let Some((node_idx, prev_idx)) = self.find_block_in_order(pool, order, block_addr) {
                // Verify the node's recorded data before committing
                {
                    let node = pool.get_node(node_idx).unwrap();
                    if node.data.order != order || node.data.addr != block_addr {
                        continue;
                    }
                }

                // Remove this block from the free list using prev_idx for O(1) deletion
                self.free_lists[order].remove_with_prev(pool, node_idx, prev_idx);

                // We now hold a block containing [base, base + size);
                // split it down, keeping the half that contains the request.
                let mut current_block = BuddyBlock {
                    order,
                    addr: block_addr,
                };

                while current_block.order > required_order {
                    current_block.order -= 1;
                    let split_size = (1 << current_block.order) * PAGE_SIZE;
                    let upper_addr = current_block.addr + split_size;

                    // `base` is aligned to 2^required_order pages, so the
                    // request lies entirely within one half: keep that half
                    // and return the other to the free list.
                    let free_addr = if base >= upper_addr {
                        let lower_addr = current_block.addr;
                        current_block.addr = upper_addr;
                        lower_addr
                    } else {
                        upper_addr
                    };

                    let success = self.add_block_to_order(
                        pool,
                        current_block.order,
                        BuddyBlock {
                            order: current_block.order,
                            addr: free_addr,
                        },
                    );
                    if !success {
                        error!(
                            "zone {}: Failed to return buddy block during split",
                            self.zone_id
                        );
                        // Best effort: re-merge this split and put the parent back
                        self.add_block_to_order(
                            pool,
                            current_block.order + 1,
                            BuddyBlock {
                                order: current_block.order + 1,
                                addr: current_block.addr.min(free_addr),
                            },
                        );
                        return Err(AllocError::NoMemory);
                    }
                }

                // Verify we ended up with the correct block
                assert!(
                    current_block.addr == base,
                    "zone {}: Final block address {:#x} doesn't match requested {:#x}",
                    self.zone_id,
                    current_block.addr,
                    base
                );
                assert!(
                    current_block.order == required_order,
                    "zone {}: Final block order {} doesn't match required {}",
                    self.zone_id,
                    current_block.order,
                    required_order
                );

                return Ok(base);
            }
        }

        // No free block found that contains the requested address
        error!(
            "zone {}: No free block found for address {:#x} ({} pages)",
            self.zone_id, base, num_pages
        );
        Err(AllocError::NoMemory)
    }
}

impl Default for BuddySet {
    fn default() -> Self {
        Self::empty()
    }
}