#![allow(clippy::needless_range_loop)]

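//! A simple buddy-system memory allocator.
//!
//! The managed region is carved into power-of-two multiples of a fixed leaf
//! size. Per-level free lists plus `alloc`/`split` bitmaps drive allocation
//! (split larger blocks on demand) and deallocation (merge freed buddies).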
const OOM_MSG: &str = "requires more memory space to initialize BuddyAlloc";
const LEAF_ALIGN_ERROR_MSG: &str = "leaf size must be a non-zero multiple of 16 bytes";
pub const MIN_LEAF_SIZE_ALIGN: usize = 16;

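/// Size in bytes of a level-`k` block: `2^k * leaf_size`.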
pub const fn block_size(k: usize, leaf_size: usize) -> usize {
    (1 << k) * leaf_size
}

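/// Same as `block_size`, but with the leaf size given as a power of two
/// (`leaf2base = log2(leaf_size)`), so only shifts are needed.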
const fn block_size_2base(k: usize, leaf2base: usize) -> usize {
    (1 << k) << leaf2base
}

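/// Number of level-`k` blocks the bitmaps must cover: `2^(entries_size - k - 1)`.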
const fn nblock(k: usize, entries_size: usize) -> usize {
    1 << (entries_size - k - 1)
}

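/// Round `n` up to the next multiple of `2^sz2base`.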
const fn roundup(n: usize, sz2base: usize) -> usize {
    (((n - 1) >> sz2base) + 1) << sz2base
}

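/// Floor of the base-2 logarithm (`log2(0)` and `log2(1)` both return 0).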
fn log2(mut n: usize) -> usize {
    let mut k = 0;
    while n > 1 {
        k += 1;
        n >>= 1;
    }
    k
}

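// Bitmap helpers: bit `i` lives in byte `i / 8` under the mask `1 << (i % 8)`.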
fn bit_isset(bit_array: *const u8, i: usize) -> bool {
    unsafe {
        let b = bit_array.add(i >> 3);
        let m = 1 << (i % 8);
        *b & m == m
    }
}

fn bit_set(bit_array: *mut u8, i: usize) {
    unsafe {
        let b = bit_array.add(i >> 3);
        let m = 1 << (i % 8);
        *b |= m;
    }
}

fn bit_clear(bit_array: *mut u8, i: usize) {
    debug_assert!(bit_isset(bit_array, i));
    unsafe {
        let b = bit_array.add(i >> 3);
        let m = 1 << (i % 8);
        *b &= !m;
    }
}

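/// Smallest `k` such that `block_size(k, leaf_size) >= n`.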
pub fn first_up_k(n: usize, leaf_size: usize) -> usize {
    let mut k = 0;
    let mut size = leaf_size;
    while size < n {
        k += 1;
        size <<= 1;
    }
    k
}

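/// A node of the intrusive, circular doubly-linked free list. Each list
/// head and each free block stores one `Node` in place.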
struct Node {
    next: *mut Node,
    prev: *mut Node,
}

impl Node {
    fn init(list: *mut Node) {
        unsafe {
            list.write(Node {
                next: list,
                prev: list,
            });
        }
    }

    fn remove(list: *mut Node) {
        unsafe {
            core::ptr::write_volatile(&mut (*(*list).prev).next, (*list).next);
            core::ptr::write_volatile(&mut (*(*list).next).prev, (*list).prev);
        }
    }

    fn pop(list: *mut Node) -> *mut Node {
        debug_assert!(!Self::is_empty(list));
        let n_list: *mut Node = unsafe { (*list).next };
        Self::remove(n_list);
        n_list
    }

    fn push(list: *mut Node, p: *mut u8) {
        let p = p.cast::<Node>();
        unsafe {
            let n_list: Node = Node {
                prev: list,
                next: (*list).next,
            };
            p.write(n_list);
            core::ptr::write_volatile(&mut (*(*list).next).prev, p);
            core::ptr::write_volatile(&mut (*list).next, p);
        }
    }

    fn is_empty(list: *const Node) -> bool {
        unsafe { (*list).next as *const Node == list }
    }
}

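/// Per-level metadata: the free list plus two bitmaps. `alloc` tracks which
/// blocks of this level are handed out (or merged away); `split` tracks
/// which blocks have been split into two blocks one level below.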
struct Entry {
    free: *mut Node,
    alloc: *mut u8,
    split: *mut u8,
}

impl Default for Entry {
    fn default() -> Self {
        Entry {
            free: core::ptr::null_mut(),
            alloc: core::ptr::null_mut(),
            split: core::ptr::null_mut(),
        }
    }
}

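/// Construction parameters: the backing region (`base_addr`, `len`), the
/// leaf (minimum block) size, and whether the region is known to be
/// zero-filled already, which lets initialization skip clearing metadata.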
#[derive(Clone, Copy)]
pub struct BuddyAllocParam {
    base_addr: *const u8,
    len: usize,
    leaf_size: usize,
    zero_filled: bool,
}

impl BuddyAllocParam {
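    /// Describe a region that may contain arbitrary bytes; the metadata
    /// will be zeroed explicitly during initialization.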
    pub const fn new(base_addr: *const u8, len: usize, leaf_size: usize) -> Self {
        BuddyAllocParam {
            base_addr,
            len,
            leaf_size,
            zero_filled: false,
        }
    }

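    /// Like `new`, but promises the region is already zero-filled, so
    /// initialization can skip clearing the bitmaps and list heads.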
    pub const fn new_with_zero_filled(base_addr: *const u8, len: usize, leaf_size: usize) -> Self {
        BuddyAllocParam {
            base_addr,
            len,
            leaf_size,
            zero_filled: true,
        }
    }
}

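/// The buddy allocator. Metadata (the `Entry` table, free-list heads and
/// bitmaps) is carved out of the front of the managed region; `base_addr`
/// is the first allocatable address after that metadata.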
pub struct BuddyAlloc {
    base_addr: usize,
    end_addr: usize,
    unavailable: usize,
    entries: *mut Entry,
    entries_size: usize,
    leaf2base: usize,
}

impl BuddyAlloc {
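    /// # Safety
    ///
    /// The caller must guarantee that `[base_addr, base_addr + len)` is
    /// valid, exclusively owned memory that outlives the allocator, and
    /// that `zero_filled` is only set if the region really is zeroed.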
    pub unsafe fn new(param: BuddyAllocParam) -> Self {
        let BuddyAllocParam {
            base_addr,
            len,
            leaf_size,
            zero_filled,
        } = param;
        let mut base_addr = base_addr as usize;
        let end_addr = base_addr + len;
        assert!(
            leaf_size % MIN_LEAF_SIZE_ALIGN == 0 && leaf_size != 0,
            "{}",
            LEAF_ALIGN_ERROR_MSG
        );
        let leaf2base = log2(leaf_size);
        base_addr = roundup(base_addr, leaf2base);
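        // Number of levels. The `+ 2` gives the level-0 bitmap headroom
        // beyond the region's leaf count, so the one-past-the-end block
        // marked in `init_free_list` always has an in-range index, even
        // when the region is not a power-of-two size.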
        let entries_size = log2((end_addr - base_addr) >> leaf2base) + 2;

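        // Carve the per-level `Entry` table out of the front of the region.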
        let used_bytes = core::mem::size_of::<Entry>() * entries_size;
        debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
        let entries = base_addr as *mut Entry;
        base_addr += used_bytes;
        let buddy_list_size = core::mem::size_of::<Node>();
        for k in 0..entries_size {
            debug_assert!(end_addr >= base_addr + buddy_list_size, "{}", OOM_MSG);
            let entry = entries.add(k).as_mut().expect("entry");
            entry.free = base_addr as *mut Node;
            if !zero_filled {
                core::ptr::write_bytes(entry.free, 0, buddy_list_size);
            }
            Node::init(entry.free);
            base_addr += buddy_list_size;
        }

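        // Carve the per-level `alloc` bitmaps: one bit per block, rounded
        // up to whole bytes.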
        for k in 0..entries_size {
            let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
            debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
            let entry = entries.add(k).as_mut().expect("entry");
            entry.alloc = base_addr as *mut u8;
            if !zero_filled {
                core::ptr::write_bytes(entry.alloc, 0, used_bytes);
            }
            base_addr += used_bytes;
        }

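        // Carve the per-level `split` bitmaps. Leaves (level 0) can never
        // be split, so these start at k = 1.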
        for k in 1..entries_size {
            let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
            debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
            let entry = entries.add(k).as_mut().expect("entry");
            entry.split = base_addr as *mut u8;
            if !zero_filled {
                core::ptr::write_bytes(entry.split, 0, used_bytes);
            }
            base_addr += used_bytes;
        }

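        // Align the first allocatable address up to a leaf boundary.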
        base_addr = roundup(base_addr, leaf2base);
        assert!(end_addr >= base_addr, "{}", OOM_MSG);
        debug_assert_eq!(
            (base_addr >> leaf2base) << leaf2base,
            base_addr,
            "misalignment"
        );

        let mut allocator = BuddyAlloc {
            base_addr,
            end_addr,
            entries,
            entries_size,
            leaf2base,
            unavailable: 0,
        };
        allocator.init_free_list();
        allocator
    }

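    /// Populate the free lists: cover the managed region greedily with the
    /// largest blocks that fit, working from the highest level down.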
    fn init_free_list(&mut self) {
        let mut base_addr = self.base_addr;
        let end_addr = self.end_addr;
        let entries_size = self.entries_size;

        for k in (0..(entries_size - 1)).rev() {
            let block_size = block_size_2base(k, self.leaf2base);
            let entry = self.entry(k);
            let parent_entry = self.entry(k + 1);

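            // Push every whole k-sized block that still fits, advancing
            // `base_addr` past it. Each parent is marked allocated and
            // split exactly once (by its even, left-half child): the parent
            // is not free as a whole, merely split into free children.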
            while base_addr + block_size <= end_addr {
                debug_assert!(!bit_isset(
                    entry.alloc,
                    self.block_index(k, base_addr as *const u8)
                ));
                Node::push(entry.free, base_addr as *mut u8);
                let block_index = self.block_index(k, base_addr as *const u8);
                if block_index & 1 == 0 {
                    let parent_index = self.block_index(k + 1, base_addr as *const u8);
                    bit_set(parent_entry.alloc, parent_index);
                    bit_set(parent_entry.split, parent_index);
                }
                base_addr += block_size;
            }

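            // Mark the first block past the covered region as allocated so
            // that `free` never merges a real block with a buddy that lies
            // beyond `end_addr`.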
            let n = nblock(k, entries_size);
            let unavailable_block_index = self.block_index(k, base_addr as *const u8);
            debug_assert!(unavailable_block_index < n);
            bit_set(entry.alloc, unavailable_block_index);
        }

        self.unavailable = end_addr - base_addr;
    }

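    /// Allocate at least `nbytes` bytes, rounded up to the nearest block
    /// size. Returns a null pointer when no large-enough block is free.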
    pub fn malloc(&mut self, nbytes: usize) -> *mut u8 {
        let fk = first_up_k(nbytes, 1 << self.leaf2base);
        let mut k = match (fk..self.entries_size).find(|&k| !Node::is_empty(self.entry(k).free)) {
            Some(k) => k,
            None => return core::ptr::null_mut(),
        };
        let p: *mut u8 = Node::pop(self.entry(k).free) as *mut u8;
        bit_set(self.entry(k).alloc, self.block_index(k, p));
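        // Split back down to the requested level: the low half keeps
        // address `p`, the high half `q` goes onto the free list one level
        // below.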
        while k > fk {
            let q: *mut u8 = (p as usize + block_size_2base(k - 1, self.leaf2base)) as *mut u8;
            bit_set(self.entry(k).split, self.block_index(k, p));
            let child_entry = self.entry(k - 1);
            bit_set(child_entry.alloc, self.block_index(k - 1, p));
            debug_assert!(!bit_isset(child_entry.alloc, self.block_index(k - 1, q)));
            Node::push(child_entry.free, q);
            k -= 1;
        }
        debug_assert_eq!(
            ((p as usize) >> self.leaf2base) << self.leaf2base,
            p as usize,
            "misalignment"
        );
        p
    }

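    /// Return a block to the allocator, merging it with its buddy at each
    /// level for as long as the buddy is also free.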
    pub fn free(&mut self, mut p: *mut u8) {
        let mut k = self.find_k_for_p(p);
        while k < (self.entries_size - 1) {
            let block_index = self.block_index(k, p);
            let entry = self.entry(k);
            bit_clear(entry.alloc, block_index);
            let is_head = block_index & 1 == 0;
            let buddy = if is_head {
                block_index + 1
            } else {
                block_index - 1
            };
            if bit_isset(entry.alloc, buddy) {
                break;
            }
            let q = self.block_addr(k, buddy);
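            // The buddy is free too: unlink it from this level's free list
            // and continue with the merged block, whose address is the
            // lower of the two halves.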
            Node::remove(q as *mut Node);
            if !is_head {
                p = q as *mut u8;
            }
            bit_clear(self.entry(k + 1).split, self.block_index(k + 1, p));
            k += 1;
        }
        debug_assert!(!bit_isset(self.entry(k).alloc, self.block_index(k, p)));
        Node::push(self.entry(k).free, p);
    }

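    /// Total bytes that can ever be handed out: the managed region minus
    /// metadata and the tail remainder smaller than one leaf.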
    pub fn available_bytes(&self) -> usize {
        self.end_addr - self.unavailable - self.base_addr
    }

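    /// Borrow the `Entry` for level `i`.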
    fn entry(&self, i: usize) -> &Entry {
        debug_assert!(i < self.entries_size, "index out of range");
        unsafe { self.entries.add(i).as_ref().expect("entry") }
    }

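    /// Find the level at which `p` was allocated: the lowest `k` whose
    /// parent block is marked split. Falls back to 0 when no split bit is
    /// found.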
    fn find_k_for_p(&self, p: *const u8) -> usize {
        for k in 0..(self.entries_size - 1) {
            if bit_isset(self.entry(k + 1).split, self.block_index(k + 1, p)) {
                debug_assert!(bit_isset(self.entry(k).alloc, self.block_index(k, p)));
                return k;
            }
        }
        0
    }

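    /// Index of the level-`k` block containing `p`, counted from
    /// `base_addr` in strides of `block_size(k)`.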
    fn block_index(&self, k: usize, p: *const u8) -> usize {
        if (p as usize) < self.base_addr {
            panic!("pointer is out of the managed range");
        }
        let n = p as usize - self.base_addr;
        let index = (n >> k) >> self.leaf2base;
        debug_assert!(index < nblock(k, self.entries_size));
        index
    }

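    /// Inverse of `block_index`: the address of the `i`-th level-`k` block.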
    fn block_addr(&self, k: usize, i: usize) -> usize {
        let n = (i << k) << self.leaf2base;
        self.base_addr + n
    }
}
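
#[cfg(test)]
mod tests {
    // A minimal usage sketch: back the allocator with a local buffer,
    // allocate a block, and free it again. The 64 KiB buffer size, the
    // 16-byte leaf size, and the test name are arbitrary illustrative
    // choices, not fixtures this module depends on.
    use super::*;

    #[test]
    fn malloc_and_free_roundtrip() {
        const LEN: usize = 64 * 1024;
        let mut buf = [0u8; LEN];
        let base = buf.as_mut_ptr() as *const u8;
        let mut allocator = unsafe { BuddyAlloc::new(BuddyAllocParam::new(base, LEN, 16)) };

        // Metadata is carved from the buffer, so less than LEN is available.
        let available = allocator.available_bytes();
        assert!(available > 0 && available < LEN);

        let p = allocator.malloc(100);
        assert!(!p.is_null());
        // Blocks come back aligned to the leaf size.
        assert_eq!(p as usize % 16, 0);

        allocator.free(p);
        // After freeing, an identical request succeeds again.
        assert!(!allocator.malloc(100).is_null());
    }
}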