alloc_buddy_simple/heap.rs
//! A simple heap based on a buddy allocator. For the theory of buddy
//! allocators, see https://en.wikipedia.org/wiki/Buddy_memory_allocation
//!
//! The basic idea is that our heap size is a power of two, and the heap
//! starts out as one giant free block. When a memory allocation request
//! is received, we round the requested size up to a power of two, and find
//! the smallest available block we can use. If that block is too big
//! (more than twice as big as the memory we want to allocate), we split it
//! in half recursively until it's the right size. This simplifies a lot
//! of bookkeeping, because all our block sizes are powers of 2, which
//! makes it easy to have one free list per block size.
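//!
//! As a rough worked example (using the same parameters as the tests at
//! the bottom of this file: a 256-byte heap with a 16-byte minimum block
//! size), a request for 50 bytes rounds up to 64 bytes. If the only free
//! block is the whole 256-byte heap, we split it into two 128-byte halves,
//! split the lower half again into two 64-byte halves, hand out the lowest
//! 64-byte block, and push the unused 64-byte and 128-byte halves onto
//! their free lists.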

use core::cmp::{max, min};
use core::mem::size_of;
use core::ptr;

use math::PowersOf2;

const MIN_HEAP_ALIGN: usize = 4096;

/// A free block in our heap. This is actually a header that we store at
/// the start of the block. We don't store any size information in the
/// header, because we keep a separate free list for each block size.
pub struct FreeBlock {
    /// The next block in the free list, or NULL if this is the final
    /// block.
    next: *mut FreeBlock,
}

impl FreeBlock {
    /// Construct a `FreeBlock` header pointing at `next`.
    fn new(next: *mut FreeBlock) -> FreeBlock {
        FreeBlock { next: next }
    }
}

/// The interface to a heap. This data structure is stored _outside_ the
/// heap somewhere, because every single byte of our heap is potentially
/// available for allocation.
pub struct Heap<'a> {
    /// The base address of our heap. This must be aligned on a
    /// `MIN_HEAP_ALIGN` boundary.
    heap_base: *mut u8,

    /// The space available in our heap. This must be a power of 2.
    heap_size: usize,

    /// The free lists for our heap. The list at `free_lists[0]` contains
    /// the smallest block size we can allocate, and the list at the end
    /// can only contain a single free block the size of our entire heap,
    /// and only when no memory is allocated.
    free_lists: &'a mut [*mut FreeBlock],

    /// Our minimum block size. This is calculated based on `heap_size`
    /// and the length of the provided `free_lists` array, and it must be
    /// big enough to contain a `FreeBlock` header object.
    min_block_size: usize,

    /// The log base 2 of our minimum block size. Cached here so we don't
    /// have to recompute it on every allocation (but we haven't
    /// benchmarked the performance gain).
    min_block_size_log2: u8,
}

// A Heap struct is the sole owner of the memory it manages
unsafe impl<'a> Send for Heap<'a> {}

impl<'a> Heap<'a> {
    /// Create a new heap. `heap_base` must be aligned on a
    /// `MIN_HEAP_ALIGN` boundary, `heap_size` must be a power of 2, and
    /// `heap_size / 2.pow(free_lists.len()-1)` must be greater than or
    /// equal to `size_of::<FreeBlock>()`. Passing in invalid parameters
    /// may do horrible things.
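    ///
    /// A minimal setup sketch, mirroring the test code at the bottom of
    /// this file (which obtains 4096-byte-aligned memory from the C
    /// `memalign`; any suitably aligned memory would do, and everything
    /// here runs inside an `unsafe` block):
    ///
    /// ```ignore
    /// let heap_size = 256;
    /// let mem = memalign(4096, heap_size);
    /// // Five free lists give block sizes of 16, 32, 64, 128 and 256 bytes.
    /// let mut free_lists: [*mut FreeBlock; 5] = [ptr::null_mut(); 5];
    /// let mut heap = Heap::new(mem, heap_size, &mut free_lists);
    /// ```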
    pub unsafe fn new(
        heap_base: *mut u8,
        heap_size: usize,
        free_lists: &mut [*mut FreeBlock])
        -> Heap
    {
        // The heap base must not be null.
        assert!(heap_base != ptr::null_mut());

        // We must have at least one free list.
        assert!(free_lists.len() > 0);

        // Calculate our minimum block size based on the number of free
        // lists we have available.
        let min_block_size = heap_size >> (free_lists.len()-1);

        // The heap must be aligned on a 4K boundary.
        assert_eq!(heap_base as usize & (MIN_HEAP_ALIGN-1), 0);

        // The heap must be big enough to contain at least one block.
        assert!(heap_size >= min_block_size);

        // The smallest possible heap block must be big enough to contain
        // the block header.
        assert!(min_block_size >= size_of::<FreeBlock>());

        // The heap size must be a power of 2. See:
        // http://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
        assert!(heap_size.is_power_of_2());

        // We must have one free list per possible heap block size.
        assert_eq!(min_block_size *
                   (2u32.pow(free_lists.len() as u32 - 1)) as usize,
                   heap_size);

        // Zero out our free list pointers.
        for ptr in free_lists.iter_mut() {
            *ptr = ptr::null_mut();
        }

        // Store all the info about our heap in our struct.
        let mut result = Heap {
            heap_base: heap_base,
            heap_size: heap_size,
            free_lists: free_lists,
            min_block_size: min_block_size,
            min_block_size_log2: min_block_size.log2(),
        };

        // Insert the entire heap onto the appropriate free list as a
        // single block.
        let order = result.allocation_order(heap_size, 1)
            .expect("Failed to calculate order for root heap block");
        result.free_list_insert(order, heap_base);

        // Return our newly-created heap.
        result
    }

    /// Figure out what size block we'll need to fulfill an allocation
    /// request. This is deterministic, and it does not depend on what
    /// we've already allocated. In particular, it's important to be able
    /// to calculate the same `allocation_size` when freeing memory as we
    /// did when allocating it, or everything will break horribly.
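    ///
    /// For example (values taken from the tests below, which use a 256-byte
    /// heap with a 16-byte minimum block size):
    ///
    /// ```ignore
    /// assert_eq!(Some(16), heap.allocation_size(1, 1));    // round up to the minimum block
    /// assert_eq!(Some(32), heap.allocation_size(17, 1));   // round up to a power of 2
    /// assert_eq!(Some(64), heap.allocation_size(16, 64));  // alignment dictates the block size
    /// assert_eq!(None, heap.allocation_size(256, 512));    // can't align beyond the heap size
    /// ```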
    pub fn allocation_size(&self, mut size: usize, align: usize) -> Option<usize> {
        // Sorry, we don't support weird alignments.
        if !align.is_power_of_2() { return None; }

        // We can't align any more precisely than our heap base alignment
        // without getting much too clever, so don't bother.
        if align > MIN_HEAP_ALIGN { return None; }

        // We're automatically aligned to `size` because of how our heap is
        // sub-divided, but if we need a larger alignment, we can only do
        // it by allocating more memory.
        if align > size { size = align; }

        // We can't allocate blocks smaller than `min_block_size`.
        size = max(size, self.min_block_size);

        // Round up to the next power of two.
        size = size.next_power_of_2();

        // We can't allocate a block bigger than our heap.
        if size > self.heap_size { return None; }

        Some(size)
    }

    /// The "order" of an allocation is how many times we need to double
    /// `min_block_size` in order to get a large enough block, as well as
    /// the index we use into `free_lists`.
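    ///
    /// On the 256-byte test heap with a 16-byte minimum block size, order 0
    /// corresponds to 16-byte blocks and order 4 to the full 256-byte heap,
    /// so `allocation_order(32, 32)` returns `Some(1)` and
    /// `allocation_order(512, 512)` returns `None`.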
    pub fn allocation_order(&self, size: usize, align: usize) -> Option<usize> {
        self.allocation_size(size, align).map(|s| {
            (s.log2() - self.min_block_size_log2) as usize
        })
    }

    /// The size of the blocks we allocate for a given order.
    fn order_size(&self, order: usize) -> usize {
        1 << (self.min_block_size_log2 as usize + order)
    }

    /// Pop a block off the appropriate free list.
    unsafe fn free_list_pop(&mut self, order: usize) -> Option<*mut u8> {
        let candidate = self.free_lists[order];
        if candidate != ptr::null_mut() {
            self.free_lists[order] = (*candidate).next;
            Some(candidate as *mut u8)
        } else {
            None
        }
    }

    /// Insert `block` of order `order` onto the appropriate free list.
    unsafe fn free_list_insert(&mut self, order: usize, block: *mut u8) {
        let free_block_ptr = block as *mut FreeBlock;
        *free_block_ptr = FreeBlock::new(self.free_lists[order]);
        self.free_lists[order] = free_block_ptr;
    }

    /// Attempt to remove a block from our free list, returning true on
    /// success, and false if the block wasn't on our free list. This is
    /// the slowest part of a primitive buddy allocator, because it runs in
    /// O(N) time, where N is the number of free blocks of a given size.
    ///
    /// We could perhaps improve this by keeping our free lists sorted,
    /// because then "nursery generation" allocations would probably tend
    /// to occur at lower addresses and thus be faster to find (or to rule
    /// out).
    unsafe fn free_list_remove(
        &mut self, order: usize, block: *mut u8)
        -> bool
    {
        let block_ptr = block as *mut FreeBlock;

        // Yuck, list traversals are gross without recursion. Here,
        // `*checking` is the pointer we want to check, and `checking` is
        // the memory location we found it at, which we'll need if we want
        // to replace the value `*checking` with a new value.
        let mut checking: *mut *mut FreeBlock = &mut self.free_lists[order];

        // Loop until we run out of free blocks.
        while *checking != ptr::null_mut() {
            // Is this the pointer we want to remove from the free list?
            if *checking == block_ptr {
                // Yup, this is the one, so overwrite the value we used to
                // get here with the next one in the sequence.
                *checking = (*(*checking)).next;
                return true;
            }

            // Haven't found it yet, so point `checking` at the address
            // containing our `next` field. (Once again, this is so we'll
            // be able to reach back and overwrite it later if necessary.)
            checking = &mut ((*(*checking)).next);
        }
        false
    }

    /// Split a `block` of order `order` down into a block of order
    /// `order_needed`, placing any unused chunks on the free list.
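    ///
    /// For example, with the 16-byte minimum block size used in the tests,
    /// splitting an order-4 (256-byte) block down to order 1 (32 bytes)
    /// pushes the upper 128-, 64- and 32-byte halves onto the order-3,
    /// order-2 and order-1 free lists, leaving a 32-byte block at the
    /// original address for the caller.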
    unsafe fn split_free_block(
        &mut self, block: *mut u8, mut order: usize, order_needed: usize)
    {
        // Get the size of our starting block.
        let mut size_to_split = self.order_size(order);

        // Progressively cut our block down to size.
        while order > order_needed {
            // Update our loop counters to describe a block half the size.
            size_to_split >>= 1;
            order -= 1;

            // Insert the "upper half" of the block into the free list.
            let split = block.offset(size_to_split as isize);
            self.free_list_insert(order, split);
        }
    }

    /// Allocate a block of memory large enough to contain `size` bytes,
    /// and aligned on `align`. This will return NULL if the `align` is
    /// greater than `MIN_HEAP_ALIGN`, if `align` is not a power of 2, or
    /// if we can't find enough memory.
    ///
    /// All allocated memory must be passed to `deallocate` with the same
    /// `size` and `align` parameters, or else horrible things will happen.
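    ///
    /// A short sketch of the expected behaviour, lifted from the tests at
    /// the bottom of this file (256-byte heap, 16-byte minimum block size):
    ///
    /// ```ignore
    /// let block_16_0 = heap.allocate(8, 8);          // rounds up to a 16-byte block
    /// assert_eq!(mem, block_16_0);                   // served from the base of the heap
    /// let bigger_than_heap = heap.allocate(4096, heap_size);
    /// assert_eq!(ptr::null_mut(), bigger_than_heap); // larger than the heap: NULL
    /// ```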
    pub unsafe fn allocate(&mut self, size: usize, align: usize) -> *mut u8
    {
        // Figure out which order block we need.
        if let Some(order_needed) = self.allocation_order(size, align) {

            // Start with the smallest acceptable block size, and search
            // upwards until we reach blocks the size of the entire heap.
            for order in order_needed..self.free_lists.len() {

                // Do we have a block of this size?
                if let Some(block) = self.free_list_pop(order) {

                    // If the block is too big, break it up. This leaves
                    // the address unchanged, because we always allocate at
                    // the head of a block.
                    if order > order_needed {
                        self.split_free_block(block, order, order_needed);
                    }

                    // We have an allocation, so quit now.
                    return block;
                }
            }

            // We couldn't find a large enough block for this allocation.
            ptr::null_mut()
        } else {
            // We can't allocate a block with the specified size and
            // alignment.
            ptr::null_mut()
        }
    }

    /// Given a `block` with the specified `order`, find the "buddy" block,
    /// that is, the other half of the block we originally split it from,
    /// and also the block we could potentially merge it with.
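    ///
    /// Concretely (values from the tests below, on a 256-byte heap with
    /// 16-byte minimum blocks): the order-1 block 32 bytes into the heap has
    /// `relative == 32` and `size == 32`, and `32 ^ 32 == 0`, so its buddy
    /// sits at the heap base; the order-1 block at offset 64 has its buddy
    /// at `64 ^ 32 == 96`.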
    pub unsafe fn buddy(&self, order: usize, block: *mut u8) -> Option<*mut u8> {
        let relative = (block as usize) - (self.heap_base as usize);
        let size = self.order_size(order);
        if size >= self.heap_size {
            // The main heap itself does not have a buddy.
            None
        } else {
            // Fun: We can find our buddy by xoring the right bit in our
            // offset from the base of the heap.
            Some(self.heap_base.offset((relative ^ size) as isize))
        }
    }

    /// Deallocate a block allocated using `allocate`. Note that the
    /// `old_size` and `align` values must match the values passed to
    /// `allocate`, or our heap will be corrupted.
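    ///
    /// For example, in the tests below, once every small block has been
    /// returned, each free finds its buddy already on the free list and the
    /// blocks coalesce back up order by order, so a final `allocate(256,
    /// 256)` can once again hand out the entire heap as a single block.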
    pub unsafe fn deallocate(
        &mut self, ptr: *mut u8, old_size: usize, align: usize)
    {
        let initial_order = self.allocation_order(old_size, align)
            .expect("Tried to dispose of invalid block");

        // The fun part: When deallocating a block, we also want to check
        // to see if its "buddy" is on the free list. If the buddy block
        // is also free, we merge them and continue walking up.
        //
        // `block` is the biggest merged block we have so far.
        let mut block = ptr;
        for order in initial_order..self.free_lists.len() {
            // Would this block have a buddy?
            if let Some(buddy) = self.buddy(order, block) {
                // Is this block's buddy free?
                if self.free_list_remove(order, buddy) {
                    // Merge them! The lower address of the two is the
                    // newly-merged block. Then we want to try again.
                    block = min(block, buddy);
                    continue;
                }
            }

            // If we reach here, we didn't find a buddy block of this size,
            // so take what we've got and mark it as free.
            self.free_list_insert(order, block);
            return;
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;

    use core::ptr;

    extern "C" {
        /// We need this to allocate aligned memory for our heap.
        fn memalign(alignment: usize, size: usize) -> *mut u8;

        // Release our memory.
        fn free(ptr: *mut u8);
    }

    #[test]
    fn test_allocation_size_and_order() {
        unsafe {
            let heap_size = 256;
            let mem = memalign(4096, heap_size);
            let mut free_lists: [*mut FreeBlock; 5] = [0 as *mut _; 5];
            let heap = Heap::new(mem, heap_size, &mut free_lists);

            // TEST NEEDED: Can't align beyond MIN_HEAP_ALIGN.

            // Can't align beyond heap_size.
            assert_eq!(None, heap.allocation_size(256, 256*2));

            // Simple allocations just round up to next block size.
            assert_eq!(Some(16), heap.allocation_size(0, 1));
            assert_eq!(Some(16), heap.allocation_size(1, 1));
            assert_eq!(Some(16), heap.allocation_size(16, 1));
            assert_eq!(Some(32), heap.allocation_size(17, 1));
            assert_eq!(Some(32), heap.allocation_size(32, 32));
            assert_eq!(Some(256), heap.allocation_size(256, 256));

            // Aligned allocations use alignment as block size.
            assert_eq!(Some(64), heap.allocation_size(16, 64));

            // Block orders.
            assert_eq!(Some(0), heap.allocation_order(0, 1));
            assert_eq!(Some(0), heap.allocation_order(1, 1));
            assert_eq!(Some(0), heap.allocation_order(16, 16));
            assert_eq!(Some(1), heap.allocation_order(32, 32));
            assert_eq!(Some(2), heap.allocation_order(64, 64));
            assert_eq!(Some(3), heap.allocation_order(128, 128));
            assert_eq!(Some(4), heap.allocation_order(256, 256));
            assert_eq!(None, heap.allocation_order(512, 512));

            free(mem);
        }
    }

    #[test]
    fn test_buddy() {
        unsafe {
            let heap_size = 256;
            let mem = memalign(4096, heap_size);
            let mut free_lists: [*mut FreeBlock; 5] = [0 as *mut _; 5];
            let heap = Heap::new(mem, heap_size, &mut free_lists);

            let block_16_0 = mem;
            let block_16_1 = mem.offset(16);
            assert_eq!(Some(block_16_1), heap.buddy(0, block_16_0));
            assert_eq!(Some(block_16_0), heap.buddy(0, block_16_1));

            let block_32_0 = mem;
            let block_32_1 = mem.offset(32);
            assert_eq!(Some(block_32_1), heap.buddy(1, block_32_0));
            assert_eq!(Some(block_32_0), heap.buddy(1, block_32_1));

            let block_32_2 = mem.offset(64);
            let block_32_3 = mem.offset(96);
            assert_eq!(Some(block_32_3), heap.buddy(1, block_32_2));
            assert_eq!(Some(block_32_2), heap.buddy(1, block_32_3));

            let block_256_0 = mem;
            assert_eq!(None, heap.buddy(4, block_256_0));

            free(mem);
        }
    }

    #[test]
    fn test_alloc_and_dealloc() {
        unsafe {
            let heap_size = 256;
            let mem = memalign(4096, heap_size);
            let mut free_lists: [*mut FreeBlock; 5] = [0 as *mut _; 5];
            let mut heap = Heap::new(mem, heap_size, &mut free_lists);

            let block_16_0 = heap.allocate(8, 8);
            assert_eq!(mem, block_16_0);

            let bigger_than_heap = heap.allocate(4096, heap_size);
            assert_eq!(ptr::null_mut(), bigger_than_heap);

            let bigger_than_free = heap.allocate(heap_size, heap_size);
            assert_eq!(ptr::null_mut(), bigger_than_free);

            let block_16_1 = heap.allocate(8, 8);
            assert_eq!(mem.offset(16), block_16_1);

            let block_16_2 = heap.allocate(8, 8);
            assert_eq!(mem.offset(32), block_16_2);

            let block_32_2 = heap.allocate(32, 32);
            assert_eq!(mem.offset(64), block_32_2);

            let block_16_3 = heap.allocate(8, 8);
            assert_eq!(mem.offset(48), block_16_3);

            let block_128_1 = heap.allocate(128, 128);
            assert_eq!(mem.offset(128), block_128_1);

            let too_fragmented = heap.allocate(64, 64);
            assert_eq!(ptr::null_mut(), too_fragmented);

            heap.deallocate(block_32_2, 32, 32);
            heap.deallocate(block_16_0, 8, 8);
            heap.deallocate(block_16_3, 8, 8);
            heap.deallocate(block_16_1, 8, 8);
            heap.deallocate(block_16_2, 8, 8);

            let block_128_0 = heap.allocate(128, 128);
            assert_eq!(mem.offset(0), block_128_0);

            heap.deallocate(block_128_1, 128, 128);
            heap.deallocate(block_128_0, 128, 128);

            // And allocate the whole heap, just to make sure everything
            // got cleaned up correctly.
            let block_256_0 = heap.allocate(256, 256);
            assert_eq!(mem.offset(0), block_256_0);

            free(mem);
        }
    }
}