buddy_slab_allocator/buddy/mod.rs

//! Buddy page allocator — page-metadata-based with intrusive free lists.
//!
//! The allocator manages one or more contiguous virtual address ranges ("sections").
//! Each section stores its own [`BuddySection`] descriptor and [`PageMeta`] array
//! in the caller-provided region prefix, enabling O(1) free-list operations
//! without any dynamic allocation.
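//!
//! A minimal end-to-end sketch (hypothetical: it assumes the crate is consumed
//! as `buddy_slab_allocator` and uses a plain heap buffer where a kernel would
//! hand over a reserved memory window):
//!
//! ```ignore
//! use buddy_slab_allocator::buddy::BuddyAllocator;
//!
//! // 1 MiB of backing memory; the region prefix is consumed by metadata.
//! let mut region = vec![0u8; 1 << 20];
//!
//! let mut buddy = BuddyAllocator::<0x1000>::new();
//! // SAFETY: `region` is writable and outlives every use of `buddy` below.
//! unsafe { buddy.init(&mut region).unwrap() };
//!
//! let addr = buddy.alloc_pages(4, 0).unwrap(); // four contiguous pages
//! buddy.dealloc_pages(addr, 4);
//! ```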

pub mod page_meta;

pub use page_meta::{PFN_NONE, PageFlags, PageMeta};

use core::ptr;

use crate::error::{AllocError, AllocResult};
use crate::{align_up, eii, is_aligned};
use page_meta::{free_list_push, free_list_remove};

/// Maximum buddy order. With 4 KiB pages this gives 2^20 × 4 KiB = 4 GiB blocks.
pub const MAX_ORDER: usize = 20;

/// DMA32 zone upper bound (4 GiB physical).
const DMA32_LIMIT: usize = 0x1_0000_0000;

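/// Shrink a raw region to its largest sub-range that starts and ends on a
/// `granule` boundary, returning `(aligned_start, aligned_size)`. Returns
/// `None` when `granule` is not a power of two or nothing usable remains.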
fn normalize_region(
    region_start: usize,
    region_size: usize,
    granule: usize,
) -> Option<(usize, usize)> {
    if region_size == 0 || !granule.is_power_of_two() {
        return None;
    }
    let region_end = region_start.checked_add(region_size)?;
    let usable_start = align_up(region_start, granule);
    let usable_end = region_end & !(granule - 1);
    if usable_end <= usable_start {
        return None;
    }
    Some((usable_start, usable_end - usable_start))
}

pub(crate) struct RegionLayout {
    pub(crate) section_start: usize,
    pub(crate) meta_start: usize,
    pub(crate) managed_heap_start: usize,
    pub(crate) managed_heap_size: usize,
}

pub(crate) struct SectionInitSpec {
    pub(crate) region_start: usize,
    pub(crate) region_size: usize,
    pub(crate) section_ptr: *mut BuddySection,
    pub(crate) meta_ptr: *mut u8,
    pub(crate) meta_size: usize,
    pub(crate) heap_start: usize,
    pub(crate) heap_size: usize,
}

/// Public read-only summary of a managed section.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ManagedSection {
    pub start: usize,
    pub size: usize,
    pub free_pages: usize,
    pub total_pages: usize,
}

/// Per-region buddy state stored in the region prefix.
#[repr(C)]
pub(crate) struct BuddySection {
    pub(crate) next: *mut BuddySection,
    pub(crate) region_start: usize,
    pub(crate) region_size: usize,
    pub(crate) meta: *mut PageMeta,
    pub(crate) max_pages: usize,
    pub(crate) heap_start: usize,
    pub(crate) heap_size: usize,
    pub(crate) free_lists: [u32; MAX_ORDER + 1],
    pub(crate) free_pages: usize,
    pub(crate) total_pages: usize,
}

impl BuddySection {
    const fn metadata_align() -> usize {
        let section_align = core::mem::align_of::<BuddySection>();
        let meta_align = core::mem::align_of::<PageMeta>();
        if section_align > meta_align {
            section_align
        } else {
            meta_align
        }
    }

    fn metadata_layout_for_pages(pages: usize) -> Option<(usize, usize)> {
        let meta_offset = align_up(
            core::mem::size_of::<BuddySection>(),
            core::mem::align_of::<PageMeta>(),
        );
        let page_meta_size = pages.checked_mul(core::mem::size_of::<PageMeta>())?;
        let meta_size = meta_offset.checked_add(page_meta_size)?;
        Some((meta_offset, meta_size))
    }

    fn available_heap_pages<const PAGE_SIZE: usize>(
        region_end: usize,
        section_start: usize,
        meta_size: usize,
        heap_align: usize,
    ) -> Option<usize> {
        let managed_heap_start = align_up(section_start.checked_add(meta_size)?, heap_align);
        if managed_heap_start > region_end {
            return Some(0);
        }
        Some((region_end - managed_heap_start) / PAGE_SIZE)
    }

    fn can_manage_pages<const PAGE_SIZE: usize>(
        region_end: usize,
        section_start: usize,
        pages: usize,
        heap_align: usize,
    ) -> bool {
        let Some((_, meta_size)) = Self::metadata_layout_for_pages(pages) else {
            return false;
        };
        let Some(available_pages) = Self::available_heap_pages::<PAGE_SIZE>(
            region_end,
            section_start,
            meta_size,
            heap_align,
        ) else {
            return false;
        };
        available_pages >= pages
    }

    pub(crate) fn compute_region_layout_with_heap_align<const PAGE_SIZE: usize>(
        region_start: usize,
        region_size: usize,
        heap_align: usize,
    ) -> Option<RegionLayout> {
        if region_size == 0 || !PAGE_SIZE.is_power_of_two() || !heap_align.is_power_of_two() {
            return None;
        }

        let region_end = region_start.checked_add(region_size)?;
        let section_start = align_up(region_start, Self::metadata_align());
        if section_start >= region_end {
            return None;
        }

        let heap_search_start = align_up(
            section_start.checked_add(core::mem::size_of::<BuddySection>())?,
            PAGE_SIZE,
        );
        let max_pages = if heap_search_start >= region_end {
            0
        } else {
            (region_end - heap_search_start) / PAGE_SIZE
        };

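        // The heap size and the metadata that describes it depend on each
        // other: each managed page costs one `PageMeta` entry, which shrinks
        // the space left for the heap. `can_manage_pages` is monotonically
        // non-increasing in `pages`, so binary-search for the largest count
        // that still fits.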
        let mut low = 0usize;
        let mut high = max_pages;
        while low < high {
            let mid = low + (high - low).div_ceil(2);
            if Self::can_manage_pages::<PAGE_SIZE>(region_end, section_start, mid, heap_align) {
                low = mid;
            } else {
                high = mid - 1;
            }
        }

        if low == 0 {
            return None;
        }

        let (meta_offset, meta_size) = Self::metadata_layout_for_pages(low)?;
        let meta_start = section_start.checked_add(meta_offset)?;
        let managed_heap_start = align_up(section_start.checked_add(meta_size)?, heap_align);
        let managed_heap_size = low.checked_mul(PAGE_SIZE)?;

        Some(RegionLayout {
            section_start,
            meta_start,
            managed_heap_start,
            managed_heap_size,
        })
    }

    fn compute_region_layout<const PAGE_SIZE: usize>(
        region_start: usize,
        region_size: usize,
    ) -> Option<RegionLayout> {
        Self::compute_region_layout_with_heap_align::<PAGE_SIZE>(
            region_start,
            region_size,
            PAGE_SIZE,
        )
    }

    unsafe fn init_at<const PAGE_SIZE: usize>(
        section_ptr: *mut BuddySection,
        region_start: usize,
        region_size: usize,
        meta_ptr: *mut u8,
        meta_size: usize,
        heap_start: usize,
        heap_size: usize,
    ) -> AllocResult {
        unsafe {
            if !PAGE_SIZE.is_power_of_two() {
                return Err(AllocError::InvalidParam);
            }
            if !is_aligned(heap_start, PAGE_SIZE) || heap_size == 0 {
                return Err(AllocError::InvalidParam);
            }

            let total_pages = heap_size / PAGE_SIZE;
            let required = BuddyAllocator::<PAGE_SIZE>::required_meta_size(heap_size);
            if meta_size < required {
                return Err(AllocError::InvalidParam);
            }

            let meta = meta_ptr as *mut PageMeta;
            for i in 0..total_pages {
                meta.add(i).write(PageMeta::new());
            }

            section_ptr.write(BuddySection {
                next: ptr::null_mut(),
                region_start,
                region_size,
                meta,
                max_pages: total_pages,
                heap_start,
                heap_size,
                free_lists: [PFN_NONE; MAX_ORDER + 1],
                free_pages: 0,
                total_pages,
            });

            let section = &mut *section_ptr;
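            // Seed the free lists by greedily carving the heap into the
            // largest naturally aligned blocks: at each pfn, pick the highest
            // order whose block starts aligned and fits in the remaining pages.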
            let mut pfn: usize = 0;
            while pfn < total_pages {
                let mut order = MAX_ORDER;
                loop {
                    let block_pages = 1usize << order;
                    if block_pages <= total_pages - pfn && (pfn & (block_pages - 1)) == 0 {
                        break;
                    }
                    if order == 0 {
                        break;
                    }
                    order -= 1;
                }
                let block_pages = 1usize << order;
                let m = &mut *section.meta.add(pfn);
                m.flags = PageFlags::Free;
                m.order = order as u8;
                free_list_push(section.meta, &mut section.free_lists, pfn as u32, order);
                section.free_pages += block_pages;
                pfn += block_pages;
            }

            Ok(())
        }
    }

    #[inline]
    fn contains_heap_addr(&self, addr: usize) -> bool {
        addr >= self.heap_start && addr < self.heap_start + self.heap_size
    }

    #[inline]
    fn summary(&self) -> ManagedSection {
        ManagedSection {
            start: self.heap_start,
            size: self.heap_size,
            free_pages: self.free_pages,
            total_pages: self.total_pages,
        }
    }
}

/// Page-metadata-based buddy allocator.
///
/// `PAGE_SIZE` must be a power of two (commonly 0x1000 = 4 KiB).
pub struct BuddyAllocator<const PAGE_SIZE: usize = 0x1000> {
    sections_head: *mut BuddySection,
    sections_tail: *mut BuddySection,
    section_count: usize,
}

// SAFETY: The allocator is designed to be wrapped in a SpinMutex.
// All section pointers point into caller-provided regions whose lifetime is managed externally.
unsafe impl<const PAGE_SIZE: usize> Send for BuddyAllocator<PAGE_SIZE> {}

impl<const PAGE_SIZE: usize> BuddyAllocator<PAGE_SIZE> {
    /// Calculate the metadata-region size (in bytes) required for `heap_size` bytes.
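    ///
    /// For example, with the default 4 KiB pages a 16 MiB heap holds 4096
    /// pages, so the metadata region needs `4096 * size_of::<PageMeta>()`
    /// bytes. This covers only the `PageMeta` array; the `BuddySection`
    /// header that also lives in the region prefix is accounted separately.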
    pub const fn required_meta_size(heap_size: usize) -> usize {
        let pages = heap_size / PAGE_SIZE;
        pages * core::mem::size_of::<PageMeta>()
    }

    /// Create an uninitialised allocator. Call [`init`](Self::init) before use.
    pub const fn new() -> Self {
        Self {
            sections_head: ptr::null_mut(),
            sections_tail: ptr::null_mut(),
            section_count: 0,
        }
    }
}

impl<const PAGE_SIZE: usize> Default for BuddyAllocator<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const PAGE_SIZE: usize> BuddyAllocator<PAGE_SIZE> {
    pub(crate) fn reset(&mut self) {
        self.sections_head = ptr::null_mut();
        self.sections_tail = ptr::null_mut();
        self.section_count = 0;
    }

    /// Initialise the allocator over the first section.
    ///
    /// # Safety
    /// - `region` must be writable and remain valid for the lifetime of this allocator.
    /// - Bytes consumed by metadata become unavailable for allocation.
    pub unsafe fn init(&mut self, region: &mut [u8]) -> AllocResult {
        unsafe {
            self.reset();
            self.add_region(region)
        }
    }

    /// Add a new managed region after initialisation.
    ///
    /// # Safety
    /// - `region` must be writable and remain valid for the lifetime of this allocator.
    /// - The region must not overlap any existing managed region.
    pub unsafe fn add_region(&mut self, region: &mut [u8]) -> AllocResult {
        unsafe {
            let region_start = region.as_mut_ptr() as usize;
            let region_size = region.len();
            let (region_start, region_size) =
                normalize_region(region_start, region_size, PAGE_SIZE)
                    .ok_or(AllocError::InvalidParam)?;
            let layout =
                BuddySection::compute_region_layout::<PAGE_SIZE>(region_start, region_size)
                    .ok_or(AllocError::InvalidParam)?;
            self.add_region_raw(SectionInitSpec {
                region_start,
                region_size,
                section_ptr: layout.section_start as *mut BuddySection,
                meta_ptr: layout.meta_start as *mut u8,
                meta_size: Self::required_meta_size(layout.managed_heap_size),
                heap_start: layout.managed_heap_start,
                heap_size: layout.managed_heap_size,
            })
        }
    }

    pub(crate) unsafe fn add_region_raw(&mut self, spec: SectionInitSpec) -> AllocResult {
        unsafe {
            let region_size = spec.region_size;
            let region_end = spec
                .region_start
                .checked_add(region_size)
                .ok_or(AllocError::InvalidParam)?;
            let heap_end = spec
                .heap_start
                .checked_add(spec.heap_size)
                .ok_or(AllocError::InvalidParam)?;
            if heap_end > region_end {
                return Err(AllocError::InvalidParam);
            }

            let mut section = self.sections_head;
            while !section.is_null() {
                let existing = &*section;
                let existing_end = existing
                    .region_start
                    .checked_add(existing.region_size)
                    .ok_or(AllocError::InvalidParam)?;
                if spec.region_start < existing_end && existing.region_start < region_end {
                    return Err(AllocError::MemoryOverlap);
                }
                section = existing.next;
            }

            BuddySection::init_at::<PAGE_SIZE>(
                spec.section_ptr,
                spec.region_start,
                spec.region_size,
                spec.meta_ptr,
                spec.meta_size,
                spec.heap_start,
                spec.heap_size,
            )?;

            if self.sections_head.is_null() {
                self.sections_head = spec.section_ptr;
            } else {
                (*self.sections_tail).next = spec.section_ptr;
            }
            self.sections_tail = spec.section_ptr;
            self.section_count += 1;

            log::debug!(
                "BuddyAllocator: add section region {:#x}+{:#x}, heap {:#x}..{:#x}, {} pages",
                spec.region_start,
                spec.region_size,
                spec.heap_start,
                heap_end,
                spec.heap_size / PAGE_SIZE,
            );

            Ok(())
        }
    }

    /// Number of managed sections.
    pub fn section_count(&self) -> usize {
        self.section_count
    }

    /// Read-only summary for a managed section by registration order.
    pub fn section(&self, index: usize) -> Option<ManagedSection> {
        let mut current = self.sections_head;
        let mut i = 0usize;
        while !current.is_null() {
            if i == index {
                return Some(unsafe { (&*current).summary() });
            }
            current = unsafe { (*current).next };
            i += 1;
        }
        None
    }

    /// Total number of pages managed across all sections.
    pub fn total_pages(&self) -> usize {
        let mut total = 0usize;
        let mut current = self.sections_head;
        while !current.is_null() {
            total += unsafe { (*current).total_pages };
            current = unsafe { (*current).next };
        }
        total
    }

    /// Total managed heap bytes across all sections.
    ///
    /// This counts only bytes in allocatable heaps, excluding region-prefix metadata.
    pub fn managed_bytes(&self) -> usize {
        let mut total = 0usize;
        let mut current = self.sections_head;
        while !current.is_null() {
            total += unsafe { (*current).heap_size };
            current = unsafe { (*current).next };
        }
        total
    }

    /// Number of currently free pages across all sections.
    pub fn free_pages(&self) -> usize {
        let mut total = 0usize;
        let mut current = self.sections_head;
        while !current.is_null() {
            total += unsafe { (*current).free_pages };
            current = unsafe { (*current).next };
        }
        total
    }

    /// Allocated backend bytes across all sections.
    ///
    /// This is computed as managed heap bytes minus currently free page bytes.
    /// It reflects page-level occupancy, so it includes slab pages, alignment
    /// amplification, and internal fragmentation.
    pub fn allocated_bytes(&self) -> usize {
        self.managed_bytes()
            .saturating_sub(self.free_pages().saturating_mul(PAGE_SIZE))
    }

    /// Allocate `count` contiguous pages, returning the virtual address.
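    ///
    /// `count` is rounded up to the next power of two (its buddy order), and an
    /// `align` of 0 defaults to `PAGE_SIZE`. A usage sketch (hypothetical
    /// `buddy` initialised as in the module docs):
    ///
    /// ```ignore
    /// // Three pages round up to an order-2 (four-page) block.
    /// let a = buddy.alloc_pages(3, 0)?;
    /// // One page, placed on a 64 KiB boundary.
    /// let b = buddy.alloc_pages(1, 0x1_0000)?;
    /// ```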
    pub fn alloc_pages(&mut self, count: usize, align: usize) -> AllocResult<usize> {
        if count == 0 {
            return Err(AllocError::InvalidParam);
        }
        let align = if align == 0 { PAGE_SIZE } else { align };
        if !align.is_power_of_two() || align < PAGE_SIZE {
            return Err(AllocError::InvalidParam);
        }

        let order = count.next_power_of_two().trailing_zeros() as usize;
        if order > MAX_ORDER {
            return Err(AllocError::InvalidParam);
        }

        let mut section = self.sections_head;
        while !section.is_null() {
            if let Ok(addr) =
                unsafe { Self::alloc_from_section_aligned(&mut *section, order, align) }
            {
                return Ok(addr);
            }
            section = unsafe { (*section).next };
        }

        Err(AllocError::NoMemory)
    }

    fn alloc_from_section_aligned(
        section: &mut BuddySection,
        order: usize,
        align: usize,
    ) -> AllocResult<usize> {
        for search_order in order..=MAX_ORDER {
            let mut pfn_u32 = section.free_lists[search_order];
            while pfn_u32 != PFN_NONE {
                let block_pfn = pfn_u32 as usize;
                if let Some(target_pfn) = Self::find_aligned_pfn_in_block(
                    section.heap_start,
                    block_pfn,
                    search_order,
                    order,
                    align,
                ) {
                    unsafe {
                        free_list_remove(
                            section.meta,
                            &mut section.free_lists,
                            pfn_u32,
                            search_order,
                        );
                    }

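                    // Split repeatedly until the requested order is reached,
                    // descending into the half that contains `target_pfn` and
                    // pushing the other half back onto its free list.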
                    let mut current_order = search_order;
                    let mut current_pfn = block_pfn;
                    while current_order > order {
                        current_order -= 1;
                        let left_pfn = current_pfn;
                        let right_pfn = current_pfn + (1 << current_order);
                        let (next_pfn, free_pfn) = if target_pfn >= right_pfn {
                            (right_pfn, left_pfn)
                        } else {
                            (left_pfn, right_pfn)
                        };
                        unsafe {
                            let bm = &mut *section.meta.add(free_pfn);
                            bm.flags = PageFlags::Free;
                            bm.order = current_order as u8;
                            free_list_push(
                                section.meta,
                                &mut section.free_lists,
                                free_pfn as u32,
                                current_order,
                            );
                        }
                        current_pfn = next_pfn;
                    }

                    unsafe {
                        let m = &mut *section.meta.add(current_pfn);
                        m.flags = PageFlags::Allocated;
                        m.order = order as u8;
                    }

                    section.free_pages -= 1 << order;
                    return Ok(section.heap_start + current_pfn * PAGE_SIZE);
                }
                pfn_u32 = unsafe { (*section.meta.add(pfn_u32 as usize)).next };
            }
        }

        Err(AllocError::NoMemory)
    }

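    /// Find the lowest pfn in `[block_pfn, block_pfn + 2^block_order)` where an
    /// order-`alloc_order` sub-block satisfies `align`, or `None` if the block
    /// cannot host one.
    ///
    /// Since the heap itself need not start on an `align` boundary, alignment
    /// is evaluated on `heap_start + pfn * PAGE_SIZE` rather than on the pfn
    /// alone: `offset` is the pfn residue (mod `align_pages`) at which
    /// addresses become aligned. The candidate must also land on a sub-block
    /// boundary relative to `block_pfn` so the split loop can reach it.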
    fn find_aligned_pfn_in_block(
        heap_start: usize,
        block_pfn: usize,
        block_order: usize,
        alloc_order: usize,
        align: usize,
    ) -> Option<usize> {
        let subblock_pages = 1usize << alloc_order;
        let align_pages = align / PAGE_SIZE;
        let heap_page_offset = (heap_start / PAGE_SIZE) & (align_pages - 1);
        let offset = (align_pages - heap_page_offset) & (align_pages - 1);

        let candidate = if align_pages <= subblock_pages {
            if !heap_start.is_multiple_of(align) {
                return None;
            }
            block_pfn
        } else {
            if !offset.is_multiple_of(subblock_pages) {
                return None;
            }
            let rem = block_pfn & (align_pages - 1);
            let delta = (offset + align_pages - rem) & (align_pages - 1);
            block_pfn + delta
        };

        let block_pages = 1usize << block_order;
        let last_start = block_pfn + block_pages - subblock_pages;
        (candidate <= last_start).then_some(candidate)
    }

    /// Allocate pages whose entire *physical* range lies below 4 GiB (the DMA32 zone).
    pub fn alloc_pages_lowmem(&mut self, count: usize, align: usize) -> AllocResult<usize> {
        if count == 0 {
            return Err(AllocError::InvalidParam);
        }
        let align = if align == 0 { PAGE_SIZE } else { align };
        if !align.is_power_of_two() || align < PAGE_SIZE {
            return Err(AllocError::InvalidParam);
        }

        let order = count.next_power_of_two().trailing_zeros() as usize;
        if order > MAX_ORDER {
            return Err(AllocError::InvalidParam);
        }

        let mut section = self.sections_head;
        while !section.is_null() {
            if let Ok(addr) =
                unsafe { Self::alloc_lowmem_from_section(&mut *section, order, align) }
            {
                return Ok(addr);
            }
            section = unsafe { (*section).next };
        }

        Err(AllocError::NoMemory)
    }

    fn alloc_lowmem_from_section(
        section: &mut BuddySection,
        alloc_order: usize,
        align: usize,
    ) -> AllocResult<usize> {
        for search_order in alloc_order..=MAX_ORDER {
            let mut pfn_u32 = section.free_lists[search_order];
            while pfn_u32 != PFN_NONE {
                let block_pfn = pfn_u32 as usize;
                let Some(target_pfn) = Self::find_aligned_pfn_in_block(
                    section.heap_start,
                    block_pfn,
                    search_order,
                    alloc_order,
                    align,
                ) else {
                    pfn_u32 = unsafe { (*section.meta.add(pfn_u32 as usize)).next };
                    continue;
                };
                let addr = section.heap_start + target_pfn * PAGE_SIZE;
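                // Checking only the block's starting physical address assumes
                // the managed range is linearly mapped, i.e. a virtually
                // contiguous block is also physically contiguous.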
                let phys = eii::virt_to_phys(addr);
                let block_bytes = (1usize << alloc_order) * PAGE_SIZE;
                if phys + block_bytes <= DMA32_LIMIT && addr.is_multiple_of(align) {
                    unsafe {
                        free_list_remove(
                            section.meta,
                            &mut section.free_lists,
                            pfn_u32,
                            search_order,
                        );
                    }

                    let mut current_order = search_order;
                    let mut current_pfn = block_pfn;
                    while current_order > alloc_order {
                        current_order -= 1;
                        let left_pfn = current_pfn;
                        let right_pfn = current_pfn + (1 << current_order);
                        let (next_pfn, free_pfn) = if target_pfn >= right_pfn {
                            (right_pfn, left_pfn)
                        } else {
                            (left_pfn, right_pfn)
                        };
                        unsafe {
                            let bm = &mut *section.meta.add(free_pfn);
                            bm.flags = PageFlags::Free;
                            bm.order = current_order as u8;
                            free_list_push(
                                section.meta,
                                &mut section.free_lists,
                                free_pfn as u32,
                                current_order,
                            );
                        }
                        current_pfn = next_pfn;
                    }

                    unsafe {
                        let m = &mut *section.meta.add(current_pfn);
                        m.flags = PageFlags::Allocated;
                        m.order = alloc_order as u8;
                    }
                    section.free_pages -= 1 << alloc_order;
                    return Ok(addr);
                }
                pfn_u32 = unsafe { (*section.meta.add(pfn_u32 as usize)).next };
            }
        }

        Err(AllocError::NoMemory)
    }

    /// Free pages previously obtained via [`alloc_pages`](Self::alloc_pages).
    ///
    /// `addr` must be the exact address returned by alloc. The allocator frees
    /// the full block size recorded in page metadata, which may be larger than
    /// `count` if the original allocation was rounded up for buddy order or alignment.
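    ///
    /// ```ignore
    /// // Hypothetical `buddy` as in the module docs: three pages were rounded
    /// // up to an order-2 (four-page) block, so this returns all four pages.
    /// let addr = buddy.alloc_pages(3, 0)?;
    /// buddy.dealloc_pages(addr, 3);
    /// ```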
    pub fn dealloc_pages(&mut self, addr: usize, count: usize) {
        let Some(section) = self.find_section_by_addr_mut(addr) else {
            debug_assert!(
                false,
                "dealloc_pages called with address outside all sections"
            );
            return;
        };

        debug_assert!(is_aligned(addr, PAGE_SIZE));
        debug_assert!(count > 0);

        let pfn = (addr - section.heap_start) / PAGE_SIZE;
        debug_assert!(pfn < section.max_pages);
        let stored = unsafe { &*section.meta.add(pfn) };
        debug_assert!(
            stored.flags == PageFlags::Allocated || stored.flags == PageFlags::Slab,
            "dealloc_pages called on non-allocated block"
        );

        let expected_order = count.next_power_of_two().trailing_zeros() as usize;
        let order = stored.order as usize;
        debug_assert!(
            expected_order <= order,
            "dealloc_pages count implies larger order than the allocated block"
        );
        Self::dealloc_in_section(section, pfn, order);
    }

    /// Mark the page at `addr` with the given flags (used by slab to tag pages).
    ///
    /// # Safety
    /// The caller must ensure `addr` is valid and properly allocated.
    pub unsafe fn set_page_flags(&mut self, addr: usize, flags: PageFlags) -> AllocResult {
        unsafe {
            let section = self
                .find_section_by_addr_mut(addr)
                .ok_or(AllocError::NotFound)?;
            let pfn = (addr - section.heap_start) / PAGE_SIZE;
            (*section.meta.add(pfn)).flags = flags;
            Ok(())
        }
    }

    /// Read the flags of the page containing `addr`.
    pub fn page_flags(&self, addr: usize) -> AllocResult<PageFlags> {
        let section = self
            .find_section_by_addr(addr)
            .ok_or(AllocError::NotFound)?;
        let pfn = (addr - section.heap_start) / PAGE_SIZE;
        Ok(unsafe { (*section.meta.add(pfn)).flags })
    }

    fn dealloc_in_section(section: &mut BuddySection, mut pfn: usize, mut order: usize) {
        let freed_pages = 1usize << order;

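        // A block's buddy is its pfn with the current order bit flipped.
        // Coalesce while the buddy is in range, free, and of the same order;
        // then push the (possibly merged) block at the final order.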
        while order < MAX_ORDER {
            let buddy_pfn = pfn ^ (1 << order);
            if buddy_pfn >= section.max_pages {
                break;
            }
            let buddy = unsafe { &*section.meta.add(buddy_pfn) };
            if buddy.flags != PageFlags::Free || buddy.order as usize != order {
                break;
            }
            unsafe {
                free_list_remove(
                    section.meta,
                    &mut section.free_lists,
                    buddy_pfn as u32,
                    order,
                );
            }
            pfn = pfn.min(buddy_pfn);
            order += 1;
        }

        unsafe {
            let m = &mut *section.meta.add(pfn);
            m.flags = PageFlags::Free;
            m.order = order as u8;
            free_list_push(section.meta, &mut section.free_lists, pfn as u32, order);
        }
        section.free_pages += freed_pages;
    }

    fn find_section_by_addr(&self, addr: usize) -> Option<&BuddySection> {
        let mut section = self.sections_head;
        while !section.is_null() {
            let current = unsafe { &*section };
            if current.contains_heap_addr(addr) {
                return Some(current);
            }
            section = current.next;
        }
        None
    }

    fn find_section_by_addr_mut(&mut self, addr: usize) -> Option<&mut BuddySection> {
        let mut section = self.sections_head;
        while !section.is_null() {
            let current = unsafe { &mut *section };
            if current.contains_heap_addr(addr) {
                return Some(current);
            }
            section = current.next;
        }
        None
    }
}