buddyalloc/heap.rs

//! A simple heap based on a buddy allocator.  For the theory of buddy
//! allocators, see <https://en.wikipedia.org/wiki/Buddy_memory_allocation>
//!
//! The basic idea is that our heap size is a power of two, and the heap
//! starts out as one giant free block.  When a memory allocation request
//! is received, we round the requested size up to a power of two, and find
//! the smallest available block we can use.  If the smallest free block is
//! too big (more than twice as big as the memory we want to allocate), we
//! split the smallest free block in half recursively until it's the right
//! size.  This simplifies a lot of bookkeeping, because all our block
//! sizes are a power of 2, which makes it easy to have one free list per
//! block size.
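//!
//! As an illustrative walkthrough (numbers chosen here for concreteness):
//! with a 256-byte heap and a 16-byte minimum block, a request for 20 bytes
//! rounds up to 32 bytes.  If only the full 256-byte block is free, we split
//! it into two 128-byte halves, split the lower half into two 64-byte halves,
//! and split that lower half into two 32-byte halves; the lowest 32-byte
//! block is returned, and the unused 128-, 64-, and 32-byte halves go onto
//! their respective free lists.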
use core::alloc::Layout;
use core::cmp::{max, min};
use core::mem::size_of;
use core::ptr::{self, NonNull};
use core::result::Result;

use crate::math::log2;

const MIN_HEAP_ALIGN: usize = 4096;

/// Represents an error for an allocation's size.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AllocationSizeError {
    BadAlignment,
    TooLarge,
}

/// Represents the reason for an allocation error.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AllocationError {
    HeapExhausted,
    InvalidSize(AllocationSizeError),
}

/// An error in the creation of the heap.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum HeapError {
    BadBaseAlignment,
    BadSizeAlignment,
    BadHeapSize,
    MinBlockTooSmall,
}

/// A free block in our heap.  This is actually a header that we store at
/// the start of the block.  We don't store any size information in the
/// header, because we allocate a separate free block list for each block
/// size.
struct FreeBlock {
    /// The next block in the free list, or NULL if this is the final
    /// block.
    next: *mut FreeBlock,
}

impl FreeBlock {
    /// Construct a `FreeBlock` header pointing at `next`.
    const fn new(next: *mut FreeBlock) -> FreeBlock {
        FreeBlock { next }
    }
}

/// The interface to a heap.  This data structure is stored _outside_ the
/// heap somewhere, typically in a static variable, because every single
/// byte of our heap is potentially available for allocation.
///
/// The generic parameter N is the number of free lists (one per block size)
/// the heap manages.  The minimum allocatable block size is
/// `heap_size >> (N - 1)`, i.e. the heap size halved N - 1 times.
///
/// # Usage
/// ```no_run
/// # use buddyalloc::Heap;
/// # use core::{alloc::Layout, ptr::NonNull};
/// // This can be a block of free system memory on your microcontroller.
/// const HEAP_MEM: usize  = 0xFFF0_0000;
/// const HEAP_SIZE: usize = 0x0008_0000;
///
/// let mut heap: Heap<16> = unsafe {
///     Heap::new(NonNull::new(HEAP_MEM as *mut u8).unwrap(), HEAP_SIZE).unwrap()
/// };
/// let mem = heap.allocate(Layout::from_size_align(16, 16).unwrap()).unwrap();
///
/// // Yay! We have a 16-byte block of memory from the heap.
/// ```
///
/// # Usage (static initialization)
/// ```no_run
/// # use buddyalloc::Heap;
/// # use core::{alloc::Layout, ptr::NonNull};
/// const HEAP_MEM: usize  = 0xFFF0_0000;
/// const HEAP_SIZE: usize = 0x0008_0000;
///
/// // You'll want to wrap this heap in a lock abstraction for real-world use.
/// static mut ALLOCATOR: Heap<16> = unsafe {
///     Heap::new_unchecked(HEAP_MEM as *mut u8, HEAP_SIZE)
/// };
///
/// pub fn some_func() {
///   let mem = unsafe {
///     ALLOCATOR.allocate(Layout::from_size_align(16, 16).unwrap()).unwrap()
///   };
///
///   // Yay! We now have a 16-byte block from the heap, with no runtime
///   // initialization required!
/// }
/// ```
#[derive(Debug)]
pub struct Heap<const N: usize> {
    /// The base address of our heap.  This must be aligned on a
    /// `MIN_HEAP_ALIGN` boundary.
    heap_base: *mut u8,

    /// The space available in our heap.  This must be a power of 2.
    heap_size: usize,

    /// The free lists for our heap.  The list at `free_lists[0]` contains
    /// the smallest block size we can allocate, and the list at the end
    /// can only contain a single free block the size of our entire heap,
    /// and only when no memory is allocated.
    free_lists: [*mut FreeBlock; N],

    /// Our minimum block size.  This is calculated based on `heap_size`
    /// and the generic parameter N, and it must be
    /// big enough to contain a `FreeBlock` header object.
    min_block_size: usize,

    /// The log base 2 of our minimum block size.  Cached here so we don't
    /// have to recompute it on every allocation (but we haven't benchmarked
    /// the performance gain).
    min_block_size_log2: u8,
}

// This structure can safely be sent between threads.
unsafe impl<const N: usize> Send for Heap<N> {}

impl<const N: usize> Heap<N> {
    /// Create a new heap. If any parameter is invalid, this will return a [HeapError].
    pub unsafe fn new(heap_base: NonNull<u8>, heap_size: usize) -> Result<Self, HeapError> {
        // Calculate our minimum block size based on the number of free
        // lists we have available.
        let min_block_size = heap_size >> (N - 1);

        // The heap must be aligned on a 4K boundary.
        if heap_base.as_ptr() as usize & (MIN_HEAP_ALIGN - 1) != 0 {
            return Err(HeapError::BadBaseAlignment);
        }

        // The heap must be big enough to contain at least one block.
        if heap_size < min_block_size {
            return Err(HeapError::BadHeapSize);
        }

        // The smallest possible heap block must be big enough to contain
        // the block header.
        if min_block_size < size_of::<FreeBlock>() {
            return Err(HeapError::MinBlockTooSmall);
        }

        // The heap size must be a power of 2.
        if !heap_size.is_power_of_two() {
            return Err(HeapError::BadSizeAlignment);
        }

        // We must have one free list per possible heap block size.
        // FIXME: Can this assertion even be hit?
        // assert_eq!(
        //     min_block_size * (2u32.pow(N as u32 - 1)) as usize,
        //     heap_size
        // );

        // assert!(N > 0);
        Ok(Self::new_unchecked(heap_base.as_ptr(), heap_size))
    }

    /// Create a new heap without checking for parameter validity.
    /// Useful for const heap creation.
    ///
    /// # Safety
    /// `heap_base` must be aligned on a `MIN_HEAP_ALIGN` boundary,
    /// `heap_size` must be a power of 2, and `heap_size >> (N - 1)` must be
    /// greater than or equal to `size_of::<FreeBlock>()`.  Passing in
    /// invalid parameters may do horrible things.
    pub const unsafe fn new_unchecked(heap_base: *mut u8, heap_size: usize) -> Self {
        // Calculate our minimum block size based on the number of free
        // lists we have available.
        let min_block_size = heap_size >> (N - 1);
        let mut free_lists: [*mut FreeBlock; N] = [core::ptr::null_mut(); N];

        // Insert the entire heap into the last free list.
        // See the documentation for `free_lists` - the last entry contains
        // the entire heap iff no memory is allocated.
        free_lists[N - 1] = heap_base as *mut FreeBlock;

        // Store all the info about our heap in our struct.
        Self {
            heap_base,
            heap_size,
            free_lists,
            min_block_size,
            min_block_size_log2: log2(min_block_size),
        }
    }

    /// Figure out what size block we'll need to fulfill an allocation
    /// request.  This is deterministic, and it does not depend on what
    /// we've already allocated.  In particular, it's important to be able
    /// to calculate the same `allocation_size` when freeing memory as we
    /// did when allocating it, or everything will break horribly.
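    ///
    /// Illustrative values (assuming a 256-byte heap with a 16-byte minimum
    /// block, as in the tests below): `allocation_size(20, 8)` is `Ok(32)`,
    /// `allocation_size(16, 64)` is `Ok(64)` because the alignment dominates,
    /// and `allocation_size(512, 8)` is `Err(TooLarge)`.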
    fn allocation_size(&self, mut size: usize, align: usize) -> Result<usize, AllocationSizeError> {
        // Sorry, we don't support weird alignments.
        if !align.is_power_of_two() {
            return Err(AllocationSizeError::BadAlignment);
        }

        // We can't align any more precisely than our heap base alignment
        // without getting much too clever, so don't bother.
        if align > MIN_HEAP_ALIGN {
            return Err(AllocationSizeError::BadAlignment);
        }

        // We're automatically aligned to `size` because of how our heap is
        // sub-divided, but if we need a larger alignment, we can only do
        // it by allocating more memory.
        if align > size {
            size = align;
        }

        // We can't allocate blocks smaller than `min_block_size`.
        size = max(size, self.min_block_size);

        // Round up to the next power of two.
        size = size.next_power_of_two();

        // We can't allocate a block bigger than our heap.
        if size > self.heap_size {
            return Err(AllocationSizeError::TooLarge);
        }

        Ok(size)
    }

    /// The "order" of an allocation is how many times we need to double
    /// `min_block_size` in order to get a large enough block, as well as
    /// the index we use into `free_lists`.
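    ///
    /// With a 16-byte minimum block, for example, a 64-byte allocation has
    /// order 2 (16 << 2 == 64).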
    fn allocation_order(&self, size: usize, align: usize) -> Result<usize, AllocationSizeError> {
        self.allocation_size(size, align)
            .map(|s| (log2(s) - self.min_block_size_log2) as usize)
    }

    /// The size of the blocks we allocate for a given order.
    const fn order_size(&self, order: usize) -> usize {
        1 << (self.min_block_size_log2 as usize + order)
    }

    /// Pop a block off the appropriate free list.
    fn free_list_pop(&mut self, order: usize) -> Option<*mut u8> {
        let candidate = self.free_lists[order];
        if !candidate.is_null() {
            // N.B.: If this is the entry corresponding to the entire heap,
            // the next entry is always going to be NULL, and the block's
            // header may never have been written at all (see `new_unchecked`).
            // Special-case it here so we don't read uninitialized data.
            if order != self.free_lists.len() - 1 {
                self.free_lists[order] = unsafe { (*candidate).next };
            } else {
                self.free_lists[order] = ptr::null_mut();
            }

            Some(candidate as *mut u8)
        } else {
            None
        }
    }

    /// Insert `block` of order `order` onto the appropriate free list.
    unsafe fn free_list_insert(&mut self, order: usize, block: *mut u8) {
        let free_block_ptr = block as *mut FreeBlock;
        *free_block_ptr = FreeBlock::new(self.free_lists[order]);
        self.free_lists[order] = free_block_ptr;
    }

    /// Attempt to remove a block from our free list, returning true on
    /// success, and false if the block wasn't on our free list.  This is
    /// the slowest part of a primitive buddy allocator, because it runs in
    /// O(N) time, where N is the number of free blocks of a given size.
    ///
    /// We could perhaps improve this by keeping our free lists sorted,
    /// because then "nursery generation" allocations would probably tend
    /// to occur at lower addresses and be faster to find or to rule out.
    fn free_list_remove(&mut self, order: usize, block: *mut u8) -> bool {
        let block_ptr = block as *mut FreeBlock;

        // Yuck, list traversals are gross without recursion.  Here,
        // `*checking` is the pointer we want to check, and `checking` is
        // the memory location we found it at, which we'll need if we want
        // to replace the value `*checking` with a new value.
        let mut checking: &mut *mut FreeBlock = &mut self.free_lists[order];

        // Loop until we run out of free blocks.
        while !(*checking).is_null() {
            // Is this the pointer we want to remove from the free list?
            if *checking == block_ptr {
                // Yup, this is the one, so overwrite the value we used to
                // get here with the next one in the sequence.
                *checking = unsafe { (*(*checking)).next };
                return true;
            }

            // Haven't found it yet, so point `checking` at the address
            // containing our `next` field.  (Once again, this is so we'll
            // be able to reach back and overwrite it later if necessary.)
            checking = unsafe { &mut ((*(*checking)).next) };
        }
        false
    }

    /// Split a `block` of order `order` down into a block of order
    /// `order_needed`, placing any unused chunks on the free list.
    ///
    /// # Safety
    /// The block must be owned by this heap, otherwise bad things
    /// will happen.
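    ///
    /// As an example (again assuming a 16-byte minimum block): splitting an
    /// order-4 (256-byte) block down to order 1 pushes the upper 128-, 64-,
    /// and 32-byte halves onto free lists 3, 2, and 1, leaving the original
    /// address as the start of the order-1 block we keep.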
    unsafe fn split_free_block(&mut self, block: *mut u8, mut order: usize, order_needed: usize) {
        // Get the size of our starting block.
        let mut size_to_split = self.order_size(order);

        // Progressively cut our block down to size.
        while order > order_needed {
            // Update our loop counters to describe a block half the size.
            size_to_split >>= 1;
            order -= 1;

            // Insert the "upper half" of the block into the free list.
            let split = block.add(size_to_split);
            self.free_list_insert(order, split);
        }
    }

    /// Given a `block` with the specified `order`, find the "buddy" block,
    /// that is, the other half of the block we originally split it from,
    /// and also the block we could potentially merge it with.
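    ///
    /// For example (with a 16-byte minimum block), the order-1 (32-byte)
    /// block at offset 96 from the heap base has its buddy at offset
    /// 96 ^ 32 == 64, and vice versa; only the block spanning the whole
    /// heap has no buddy.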
    fn buddy(&self, order: usize, block: *mut u8) -> Option<*mut u8> {
        assert!(block >= self.heap_base);

        let relative = unsafe { block.offset_from(self.heap_base) } as usize;
        let size = self.order_size(order);
        if size >= self.heap_size {
            // The main heap itself does not have a buddy.
            None
        } else {
            // Fun: We can find our buddy by xoring the right bit in our
            // offset from the base of the heap.
            Some(unsafe { self.heap_base.add(relative ^ size) })
        }
    }

    /// Allocate a block of memory large enough for `layout.size()` and
    /// aligned to `layout.align()`.  This will return an [`AllocationError`]
    /// if the alignment is greater than `MIN_HEAP_ALIGN`, or if
    /// we can't find enough memory.
    ///
    /// All allocated memory must be passed to `deallocate` with the same
    /// `layout` parameter, or else horrible things will happen.
    pub fn allocate(&mut self, layout: Layout) -> Result<*mut u8, AllocationError> {
        // Figure out which order block we need.
        match self.allocation_order(layout.size(), layout.align()) {
            Ok(order_needed) => {
                // Start with the smallest acceptable block size, and search
                // upwards until we reach blocks the size of the entire heap.
                for order in order_needed..self.free_lists.len() {
                    // Do we have a block of this size?
                    if let Some(block) = self.free_list_pop(order) {
                        // If the block is too big, break it up.  This leaves
                        // the address unchanged, because we always allocate at
                        // the head of a block.
                        if order > order_needed {
                            // SAFETY: The block came from the heap.
                            unsafe { self.split_free_block(block, order, order_needed) };
                        }

                        // We have an allocation, so quit now.
                        return Ok(block);
                    }
                }

                // We couldn't find a large enough block for this allocation.
                Err(AllocationError::HeapExhausted)
            }

            // We can't allocate a block with the specified size and
            // alignment.
            Err(e) => Err(AllocationError::InvalidSize(e)),
        }
    }

    /// Deallocate a block allocated using `allocate`.
    ///
    /// # Safety
    /// `ptr` and `layout` must match what was passed to / returned from `allocate`,
    /// or our heap will be corrupted.
    pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
        let initial_order = self
            .allocation_order(layout.size(), layout.align())
            .expect("Tried to dispose of invalid block");

        // The fun part: When deallocating a block, we also want to check
        // to see if its "buddy" is on the free list.  If the buddy block
        // is also free, we merge them and continue walking up.
        //
        // `block` is the biggest merged block we have so far.
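        //
        // As an illustration (with a 16-byte minimum block): freeing the
        // block at offset 16 while its buddy at offset 0 is free merges
        // them into a 32-byte block at offset 0; if that block's buddy at
        // offset 32 is free too, we merge again into a 64-byte block, and
        // so on up the free lists.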
        let mut block = ptr;
        for order in initial_order..self.free_lists.len() {
            // Would this block have a buddy?
            if let Some(buddy) = self.buddy(order, block) {
                // Is this block's buddy free?
                if self.free_list_remove(order, buddy) {
                    // Merge them!  The lower address of the two is the
                    // newly-merged block.  Then we want to try again.
                    block = min(block, buddy);
                    continue;
                }
            }

            // If we reach here, we didn't find a buddy block of this size,
            // so take what we've got and mark it as free.
            self.free_list_insert(order, block);
            return;
        }
    }
}

#[cfg(test)]
mod test {
    // Use std in tests.
    extern crate std;
    use super::*;

    #[test]
    fn test_allocation_size_and_order() {
        unsafe {
            let heap_size = 256;
            let layout = std::alloc::Layout::from_size_align(heap_size, 4096).unwrap();
            let mem = std::alloc::alloc(layout);
            let heap: Heap<5> = Heap::new(NonNull::new(mem).unwrap(), heap_size).unwrap();

            // Can't align beyond MIN_HEAP_ALIGN.
            assert_eq!(
                Err(AllocationSizeError::BadAlignment),
                heap.allocation_size(256, 8192)
            );

            // Can't align beyond heap_size.
            assert_eq!(
                Err(AllocationSizeError::TooLarge),
                heap.allocation_size(256, 256 * 2)
            );

            // Simple allocations just round up to next block size.
            assert_eq!(Ok(16), heap.allocation_size(0, 1));
            assert_eq!(Ok(16), heap.allocation_size(1, 1));
            assert_eq!(Ok(16), heap.allocation_size(16, 1));
            assert_eq!(Ok(32), heap.allocation_size(17, 1));
            assert_eq!(Ok(32), heap.allocation_size(32, 32));
            assert_eq!(Ok(256), heap.allocation_size(256, 256));

            // Aligned allocations use alignment as block size.
            assert_eq!(Ok(64), heap.allocation_size(16, 64));

            // Block orders.
            assert_eq!(Ok(0), heap.allocation_order(0, 1));
            assert_eq!(Ok(0), heap.allocation_order(1, 1));
            assert_eq!(Ok(0), heap.allocation_order(16, 16));
            assert_eq!(Ok(1), heap.allocation_order(32, 32));
            assert_eq!(Ok(2), heap.allocation_order(64, 64));
            assert_eq!(Ok(3), heap.allocation_order(128, 128));
            assert_eq!(Ok(4), heap.allocation_order(256, 256));
            assert_eq!(
                Err(AllocationSizeError::TooLarge),
                heap.allocation_order(512, 512)
            );

            std::alloc::dealloc(mem, layout);
        }
    }

    #[test]
    fn test_buddy() {
        unsafe {
            let heap_size = 256;
            let layout = std::alloc::Layout::from_size_align(heap_size, 4096).unwrap();
            let mem = std::alloc::alloc(layout);
            let heap: Heap<5> = Heap::new(NonNull::new(mem).unwrap(), heap_size).unwrap();

            let block_16_0 = mem;
            let block_16_1 = mem.offset(16);
            assert_eq!(Some(block_16_1), heap.buddy(0, block_16_0));
            assert_eq!(Some(block_16_0), heap.buddy(0, block_16_1));

            let block_32_0 = mem;
            let block_32_1 = mem.offset(32);
            assert_eq!(Some(block_32_1), heap.buddy(1, block_32_0));
            assert_eq!(Some(block_32_0), heap.buddy(1, block_32_1));

            let block_32_2 = mem.offset(64);
            let block_32_3 = mem.offset(96);
            assert_eq!(Some(block_32_3), heap.buddy(1, block_32_2));
            assert_eq!(Some(block_32_2), heap.buddy(1, block_32_3));

            let block_256_0 = mem;
            assert_eq!(None, heap.buddy(4, block_256_0));

            std::alloc::dealloc(mem, layout);
        }
    }

    #[test]
    fn test_alloc_and_dealloc() {
        unsafe {
            let heap_size = 256;
            let layout = std::alloc::Layout::from_size_align(heap_size, 4096).unwrap();
            let mem = std::alloc::alloc(layout);
            let mut heap: Heap<5> = Heap::new(NonNull::new(mem).unwrap(), heap_size).unwrap();

            let block_16_0 = heap
                .allocate(Layout::from_size_align(8, 8).unwrap())
                .unwrap();
            assert_eq!(mem, block_16_0);

            let bigger_than_heap = heap.allocate(Layout::from_size_align(heap_size, 4096).unwrap());
            assert_eq!(
                Err(AllocationError::InvalidSize(AllocationSizeError::TooLarge)),
                bigger_than_heap
            );

            let bigger_than_free =
                heap.allocate(Layout::from_size_align(heap_size, heap_size).unwrap());
            assert_eq!(Err(AllocationError::HeapExhausted), bigger_than_free);

            let block_16_1 = heap
                .allocate(Layout::from_size_align(8, 8).unwrap())
                .unwrap();
            assert_eq!(mem.offset(16), block_16_1);

            let block_16_2 = heap
                .allocate(Layout::from_size_align(8, 8).unwrap())
                .unwrap();
            assert_eq!(mem.offset(32), block_16_2);

            let block_32_2 = heap
                .allocate(Layout::from_size_align(32, 32).unwrap())
                .unwrap();
            assert_eq!(mem.offset(64), block_32_2);

            let block_16_3 = heap
                .allocate(Layout::from_size_align(8, 8).unwrap())
                .unwrap();
            assert_eq!(mem.offset(48), block_16_3);

            let block_128_1 = heap
                .allocate(Layout::from_size_align(128, 128).unwrap())
                .unwrap();
            assert_eq!(mem.offset(128), block_128_1);

            let too_fragmented = heap.allocate(Layout::from_size_align(64, 64).unwrap());
            assert_eq!(Err(AllocationError::HeapExhausted), too_fragmented);

            heap.deallocate(block_32_2, Layout::from_size_align(32, 32).unwrap());
            heap.deallocate(block_16_0, Layout::from_size_align(8, 8).unwrap());
            heap.deallocate(block_16_3, Layout::from_size_align(8, 8).unwrap());
            heap.deallocate(block_16_1, Layout::from_size_align(8, 8).unwrap());
            heap.deallocate(block_16_2, Layout::from_size_align(8, 8).unwrap());

            let block_128_0 = heap
                .allocate(Layout::from_size_align(128, 128).unwrap())
                .unwrap();
            assert_eq!(mem.offset(0), block_128_0);

            heap.deallocate(block_128_1, Layout::from_size_align(128, 128).unwrap());
            heap.deallocate(block_128_0, Layout::from_size_align(128, 128).unwrap());

            // And allocate the whole heap, just to make sure everything
            // got cleaned up correctly.
            let block_256_0 = heap
                .allocate(Layout::from_size_align(256, 256).unwrap())
                .unwrap();
            assert_eq!(mem.offset(0), block_256_0);

            std::alloc::dealloc(mem, layout);
        }
    }
}