#![no_std]
//! A buddy allocator framework whose per-layer storage strategy is
//! customizable through the [`OligarchyCollection`] and [`BuddyCollection`]
//! traits.
#![deny(warnings, unstable_features, missing_docs)]

mod bitmap;
mod linked_list;

pub use bitmap::UsizeBuddy;
pub use linked_list::LinkedListBuddy;

use core::{alloc::Layout, fmt, num::NonZeroUsize, ptr::NonNull};

/// Behavior shared by every storage line in the allocator, both the buddy
/// layers and the top-layer oligarchy.
pub trait BuddyLine {
    /// The empty collection, used as the initial value.
    const EMPTY: Self;

    /// Bytes of metadata the collection stores intrusively inside each free
    /// block; 0 if the collection keeps no intrusive metadata.
    const INTRUSIVE_META_SIZE: usize = 0;

    /// Tells the collection the `order` of its blocks and the `base` index of
    /// the managed region at that order.
    #[inline]
    fn init(&mut self, _order: usize, _base: usize) {}

    /// Takes the block at `idx`; returns `false` if it is not currently free.
    #[inline]
    fn take(&mut self, _idx: usize) -> bool {
        unimplemented!()
    }
}

/// The top layer of the allocator: an "oligarchy" whose blocks have no
/// buddies and are therefore never merged.
pub trait OligarchyCollection: BuddyLine {
    /// Takes `count` contiguous free blocks aligned to `align_order`.
    ///
    /// Returns the index of the first block, or `None` if no such run exists.
    fn take_any(&mut self, align_order: usize, count: usize) -> Option<usize>;

    /// Puts the block at `idx` back into the collection.
    fn put(&mut self, idx: usize);
}

/// A buddy layer of the allocator.
pub trait BuddyCollection: BuddyLine {
    /// Takes any free block aligned to `align_order`.
    ///
    /// Returns its index, or `None` if the layer has no suitable block.
    fn take_any(&mut self, align_order: usize) -> Option<usize>;

    /// Puts the block at `idx` into the collection.
    ///
    /// If its buddy is already free, both blocks are removed and the parent
    /// index is returned so the caller can put it one layer up.
    fn put(&mut self, idx: usize) -> Option<usize>;
}
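
// For a sense of scale: with `min_order = 12` and `N = 8`, `buddies[0]`
// manages 4 KiB blocks, `buddies[7]` manages 512 KiB blocks, and the
// oligarchy manages 1 MiB blocks. Index arithmetic follows the usual buddy
// scheme used by `allocate`/`deallocate` below: block `i` on one layer splits
// into blocks `2 * i` and `2 * i + 1` on the layer beneath it, so putting
// block 7 into a layer where block 6 is already free yields parent index 3.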

/// The error type: the allocator cannot satisfy the request.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[repr(transparent)]
pub struct BuddyError;

/// A buddy allocator with `N` buddy layers below one top oligarchy layer.
pub struct BuddyAllocator<const N: usize, O: OligarchyCollection, B: BuddyCollection> {
    /// The top layer: its blocks have no buddies and are never merged.
    oligarchy: O,

    /// The `N` buddy layers, ordered from the smallest block size upward.
    buddies: [B; N],

    /// log2 of the smallest block size this allocator manages.
    min_order: usize,

    /// Bytes currently free in the allocator.
    free: usize,

    /// Total bytes currently managed by the allocator.
    capacity: usize,
}

impl<const N: usize, O: OligarchyCollection, B: BuddyCollection> BuddyAllocator<N, O, B> {
    /// The layer index of the oligarchy, one above the last buddy layer.
    const MAX_LAYER: usize = N;
    /// The smallest order whose blocks fit the oligarchy's intrusive metadata.
    const O_MIN_ORDER: usize = O::INTRUSIVE_META_SIZE.next_power_of_two().trailing_zeros() as _;
    /// The smallest order whose blocks fit a buddy layer's intrusive metadata.
    const B_MIN_ORDER: usize = B::INTRUSIVE_META_SIZE.next_power_of_two().trailing_zeros() as _;

    /// Constructs an empty allocator.
    #[inline]
    pub const fn new() -> Self {
        Self {
            oligarchy: O::EMPTY,
            buddies: [B::EMPTY; N],
            min_order: 0,
            free: 0,
            capacity: 0,
        }
    }

    /// Total bytes currently managed by the allocator.
    #[inline]
    pub fn capacity(&self) -> usize {
        self.capacity
    }

    /// Bytes currently free in the allocator.
    #[inline]
    pub fn free(&self) -> usize {
        self.free
    }

    /// The order of the oligarchy's blocks.
    #[inline]
    const fn max_order(&self) -> usize {
        self.min_order + Self::MAX_LAYER
    }

    /// Sets the minimum block order and the base address of the managed
    /// region. Panics if any memory has already been transferred in.
    #[inline]
    pub fn init<T>(&mut self, min_order: usize, base: NonNull<T>) {
        assert_eq!(
            0, self.capacity,
            "init is not allowed after any transferring"
        );

        self.min_order = min_order;
        let max_order = self.max_order();

        // Every layer's blocks must be large enough to hold that layer's
        // intrusive metadata.
        assert!(Self::O_MIN_ORDER <= max_order);
        assert!(Self::B_MIN_ORDER <= min_order);

        let base = base.as_ptr() as usize;
        self.buddies.iter_mut().enumerate().for_each(|(i, c)| {
            let o = self.min_order + i;
            c.init(o, base >> o)
        });
        self.oligarchy.init(max_order, base >> max_order);
    }
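
    // A worked example for `init`: with `N = 8`, `init(12, base)` configures
    // buddy layers of order 12 through 19 plus an order-20 oligarchy, and a
    // layer of order `o` is told its base index `base >> o`; for
    // `base = 0x8000_0000`, layer 0 starts at index 0x8_0000.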

    /// Transfers a region of memory into the allocator, growing its capacity.
    ///
    /// # Safety
    ///
    /// The region must be valid for reads and writes, unused elsewhere, and
    /// handed over to the allocator completely.
    #[inline]
    pub unsafe fn transfer<T>(&mut self, ptr: NonNull<T>, size: usize) {
        self.capacity += size;
        self.deallocate(ptr, size)
    }

    /// Takes a region of memory out of the allocator for good, shrinking its
    /// capacity; the inverse of `transfer`.
    #[inline]
    pub fn snatch<T>(
        &mut self,
        align_order: usize,
        size: NonZeroUsize,
    ) -> Result<(NonNull<T>, usize), BuddyError> {
        let ans = self.allocate(align_order, size);
        if let Ok((_, size)) = ans {
            self.capacity -= size;
        }
        ans
    }

    /// Allocates memory for a value of type `T`.
    #[inline]
    pub fn allocate_type<T>(&mut self) -> Result<(NonNull<T>, usize), BuddyError> {
        self.allocate_layout(Layout::new::<T>())
    }

    /// Allocates memory according to `layout`.
    #[inline]
    pub fn allocate_layout<T>(
        &mut self,
        layout: Layout,
    ) -> Result<(NonNull<T>, usize), BuddyError> {
        #[inline]
        const fn allocated<T, U>(ptr: *mut T, size: usize) -> (NonNull<U>, usize) {
            (unsafe { NonNull::new_unchecked(ptr) }.cast(), size)
        }

        if let Some(size) = NonZeroUsize::new(layout.size()) {
            self.allocate(layout.align().trailing_zeros() as _, size)
        } else {
            // A zero-sized request consumes no memory; any non-null pointer
            // will do.
            Ok(allocated(self, 0))
        }
    }

    /// Allocates at least `size` bytes aligned to `1 << align_order`.
    ///
    /// On success, returns the pointer and the actual number of bytes
    /// reserved, i.e. `size` rounded up to the minimum order.
    pub fn allocate<T>(
        &mut self,
        align_order: usize,
        size: NonZeroUsize,
    ) -> Result<(NonNull<T>, usize), BuddyError> {
        let max_order = self.max_order();
        #[inline]
        const fn allocated<T, U>(ptr: *mut T, size: usize) -> (NonNull<U>, usize) {
            (unsafe { NonNull::new_unchecked(ptr) }.cast(), size)
        }

        // Round the request up to whole minimum-order blocks.
        let page_mask = (1usize << self.min_order) - 1;
        let ans_size = (size.get() + page_mask) & !page_mask;
        let size_order = nonzero(ans_size.next_power_of_two()).trailing_zeros() as usize;
        let (ptr, alloc_size) = if size_order >= max_order {
            // Too large for the buddy layers: take a run of contiguous
            // top-order blocks from the oligarchy.
            let count = ((ans_size >> (max_order - 1)) + 1) >> 1;
            match self.oligarchy.take_any(align_order >> max_order, count) {
                Some(idx) => (idx << max_order, count << max_order),
                None => Err(BuddyError)?,
            }
        } else {
            // Search upward from the smallest layer that fits the request.
            let layer0 = size_order - self.min_order;
            let mut layer = layer0;
            let mut idx = loop {
                if layer == Self::MAX_LAYER {
                    match self.oligarchy.take_any(align_order >> max_order, 1) {
                        Some(idx) => break idx,
                        None => Err(BuddyError)?,
                    }
                }
                match self.buddies[layer].take_any(align_order >> (self.min_order + layer)) {
                    Some(idx) => break idx,
                    None => layer += 1,
                }
            };
            // Split back down to the target layer, returning the upper half
            // at every step; none of these puts may trigger a merge.
            assert!(self.buddies[layer0..layer].iter_mut().rev().all(|b| {
                idx <<= 1;
                b.put(idx + 1).is_none()
            }));
            (idx << size_order, 1 << size_order)
        };
        self.free -= alloc_size;
        // If the block is larger than the rounded request, give the tail back.
        if alloc_size > ans_size {
            self.deallocate(
                unsafe { NonNull::new_unchecked((ptr + ans_size) as *mut u8) },
                alloc_size - ans_size,
            );
        }
        Ok(allocated(ptr as *mut (), ans_size))
    }
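
    // A worked example for `allocate`: with `min_order = 12`, a 12 KiB
    // request keeps `ans_size = 12 KiB` but is served from a 16 KiB block
    // (`size_order = 14`); the surplus 4 KiB tail is handed straight back
    // through `deallocate`.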

    /// Deallocates the memory at `ptr` that was allocated with `layout`.
    ///
    /// # Safety
    ///
    /// `ptr` must point to a live allocation made by this allocator with the
    /// same `layout`.
    pub unsafe fn deallocate_layout<T>(&mut self, ptr: NonNull<T>, layout: Layout) {
        debug_assert!((1 << (ptr.as_ptr() as usize).trailing_zeros()) >= layout.align());

        let mask = (1 << self.min_order) - 1;
        self.deallocate(ptr, (layout.size() + mask) & !mask)
    }

    /// Returns the `size` bytes at `ptr` to the allocator.
    ///
    /// `size` must be a multiple of the minimum block size.
    pub fn deallocate<T>(&mut self, ptr: NonNull<T>, size: usize) {
        debug_assert!(
            size.trailing_zeros() as usize >= self.min_order,
            "size must align to the minimum order"
        );

        let max_order = self.max_order();

        // Carve the region into power-of-two blocks; each block's order is
        // bounded both by the alignment of the current pointer and by the
        // remaining length.
        let mut ptr = ptr.as_ptr() as usize;
        let end = ptr + size;
        while ptr < end {
            let len = nonzero(end - ptr);
            let order_ptr = nonzero(ptr).trailing_zeros();
            let order_len = usize::BITS - len.leading_zeros() - 1;
            let order = order_ptr.min(order_len) as usize;
            if order >= max_order {
                // Whole top-order blocks go straight to the oligarchy.
                let idx = ptr >> max_order;
                let count = len.get() >> max_order;
                ptr += count << max_order;
                (idx..).take(count).for_each(|idx| self.oligarchy.put(idx));
            } else {
                // Put the block into its layer and keep merging upward until
                // a buddy is missing or the oligarchy is reached.
                let mut idx = ptr >> order;
                ptr += 1 << order;
                for layer in (order - self.min_order).. {
                    if layer == Self::MAX_LAYER {
                        self.oligarchy.put(idx);
                        break;
                    }
                    match self.buddies[layer].put(idx) {
                        Some(parent) => idx = parent,
                        None => break,
                    }
                }
            }
        }
        self.free += size;
        assert!(
            self.free <= self.capacity,
            "free bytes exceed capacity: {} > {}",
            self.free,
            self.capacity
        );
    }
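
    // A worked example for `deallocate`: freeing 24 KiB at address 0x8000
    // with `min_order = 12`, the first chunk is capped by the length
    // (order 14, 16 KiB) rather than by the pointer alignment (order 15),
    // so a 16 KiB block goes in at 0x8000 and the remaining 8 KiB follows
    // as a single order-13 block at 0xC000.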
}

impl<const N: usize, O: OligarchyCollection + fmt::Debug, B: BuddyCollection + fmt::Debug>
    fmt::Debug for BuddyAllocator<N, O, B>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "BuddyAllocator@{:#018x}", self as *const _ as usize)?;
        writeln!(f, "---------------------------------")?;
        for (i, line) in self.buddies.iter().enumerate() {
            writeln!(f, "{:>2}> {line:?}", self.min_order + i)?;
        }
        writeln!(f, "{:>2}> {:?}", self.max_order(), self.oligarchy)
    }
}

/// Converts `val` to [`NonZeroUsize`] without checking; every call site
/// guarantees `val != 0`.
#[inline]
const fn nonzero(val: usize) -> NonZeroUsize {
    unsafe { NonZeroUsize::new_unchecked(val) }
}

/// Converts between block indices and pointers at a fixed order.
struct Order(usize);

impl Order {
    #[inline]
    const fn new(order: usize) -> Self {
        Self(order)
    }

    /// Converts block index `idx` to a pointer.
    ///
    /// # Safety
    ///
    /// `idx << order` must be a valid, non-zero address.
    #[inline]
    unsafe fn idx_to_ptr<T>(&self, idx: usize) -> NonNull<T> {
        NonNull::new_unchecked((idx << self.0) as *mut _)
    }

    /// Converts pointer `ptr` to its block index.
    #[inline]
    fn ptr_to_idx<T>(&self, ptr: NonNull<T>) -> usize {
        (ptr.as_ptr() as usize) >> self.0
    }
}
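
// A minimal smoke-test sketch, under two assumptions that go beyond this
// file: `UsizeBuddy`/`LinkedListBuddy` satisfy the trait contracts above,
// and tests run on a hosted target where `std` can lend us a page-aligned
// region. The constants (8 layers, 4 KiB minimum order, 4 MiB region) are
// illustrative, not required by the allocator.
#[cfg(test)]
mod tests {
    extern crate std;

    use super::*;

    #[test]
    fn smoke() {
        const LEN: usize = 4 << 20; // a 4 MiB region
        // Borrow a page-aligned region from the host allocator.
        let layout = Layout::from_size_align(LEN, 1 << 12).unwrap();
        let base = NonNull::new(unsafe { std::alloc::alloc(layout) }).unwrap();

        // 8 buddy layers over 4 KiB pages: orders 12..=19, oligarchy at 20.
        let mut allocator = BuddyAllocator::<8, UsizeBuddy, LinkedListBuddy>::new();
        allocator.init(12, base);
        unsafe { allocator.transfer(base, LEN) };
        assert_eq!(allocator.free(), allocator.capacity());

        // One page out, one page back in.
        let (ptr, size) = allocator
            .allocate_layout::<u8>(Layout::from_size_align(1 << 12, 1 << 12).unwrap())
            .unwrap();
        assert_eq!(1 << 12, size);
        allocator.deallocate(ptr, size);
        assert_eq!(allocator.free(), allocator.capacity());

        unsafe { std::alloc::dealloc(base.as_ptr(), layout) };
    }
}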