use parking_lot::{Mutex, RwLock};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

pub const MIN_BLOCK_SIZE: usize = 16;
pub const MAX_BLOCK_SIZE: usize = 1 << 30;
pub const DEFAULT_POOL_SIZE: usize = 64 * 1024 * 1024;

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BuddyError {
    SizeTooLarge(usize),
    ZeroSize,
    OutOfMemory,
    InvalidAddress(usize),
    DoubleFree(usize),
    BlockNotFound(usize),
    PoolExhausted,
}

#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct BlockHeader {
    magic: u32,
    order: u8,
    flags: u8,
    _padding: [u8; 2],
    size: u32,
}

#[allow(dead_code)]
const BLOCK_MAGIC: u32 = 0xB0DD_1E5A;
#[allow(dead_code)]
const FLAG_ALLOCATED: u8 = 0x01;
#[allow(dead_code)]
const FLAG_SPLIT: u8 = 0x02;

#[allow(dead_code)]
impl BlockHeader {
    fn new(order: u8, size: u32) -> Self {
        Self {
            magic: BLOCK_MAGIC,
            order,
            flags: 0,
            _padding: [0; 2],
            size,
        }
    }

    fn is_valid(&self) -> bool {
        self.magic == BLOCK_MAGIC
    }

    fn is_allocated(&self) -> bool {
        self.flags & FLAG_ALLOCATED != 0
    }

    fn set_allocated(&mut self, allocated: bool) {
        if allocated {
            self.flags |= FLAG_ALLOCATED;
        } else {
            self.flags &= !FLAG_ALLOCATED;
        }
    }

    fn is_split(&self) -> bool {
        self.flags & FLAG_SPLIT != 0
    }

    fn set_split(&mut self, split: bool) {
        if split {
            self.flags |= FLAG_SPLIT;
        } else {
            self.flags &= !FLAG_SPLIT;
        }
    }
}
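
// A minimal sketch exercising the header's flag round-trips; the header type
// is currently metadata-only (nothing in this module writes it into memory),
// so this only checks the bit manipulation above.
#[cfg(test)]
mod block_header_sketch {
    use super::*;

    #[test]
    fn flags_round_trip() {
        let mut header = BlockHeader::new(4, 16);
        assert!(header.is_valid());

        header.set_allocated(true);
        header.set_split(true);
        assert!(header.is_allocated() && header.is_split());

        header.set_allocated(false);
        assert!(!header.is_allocated() && header.is_split());
    }
}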

#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
struct FreeBlock {
    addr: usize,
    next: usize,
}

#[derive(Debug, Default)]
pub struct BuddyStats {
    pub allocations: AtomicU64,
    pub deallocations: AtomicU64,
    pub allocated_bytes: AtomicUsize,
    pub peak_allocated_bytes: AtomicUsize,
    pub splits: AtomicU64,
    pub merges: AtomicU64,
    pub failed_allocations: AtomicU64,
}

impl BuddyStats {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn record_allocation(&self, size: usize) {
        self.allocations.fetch_add(1, Ordering::Relaxed);
        let old = self.allocated_bytes.fetch_add(size, Ordering::Relaxed);
        let new = old + size;

        // Raise the peak with a CAS loop so concurrent updates can only ever
        // increase it, never lower it.
        let mut current_peak = self.peak_allocated_bytes.load(Ordering::Relaxed);
        while new > current_peak {
            match self.peak_allocated_bytes.compare_exchange_weak(
                current_peak,
                new,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => current_peak = p,
            }
        }
    }

    pub fn record_deallocation(&self, size: usize) {
        self.deallocations.fetch_add(1, Ordering::Relaxed);
        self.allocated_bytes.fetch_sub(size, Ordering::Relaxed);
    }

    pub fn record_split(&self) {
        self.splits.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_merge(&self) {
        self.merges.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_failed_allocation(&self) {
        self.failed_allocations.fetch_add(1, Ordering::Relaxed);
    }
}
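
// Single-threaded sketch of the high-water-mark behavior: the CAS loop in
// `record_allocation` only ever raises the peak, so it tracks the maximum of
// `allocated_bytes` over time.
#[cfg(test)]
mod stats_sketch {
    use super::*;

    #[test]
    fn peak_is_a_high_water_mark() {
        let stats = BuddyStats::new();
        stats.record_allocation(64); // allocated = 64, peak = 64
        stats.record_allocation(64); // allocated = 128, peak = 128
        stats.record_deallocation(64); // allocated = 64, peak stays 128
        stats.record_allocation(32); // allocated = 96, peak stays 128
        assert_eq!(stats.peak_allocated_bytes.load(Ordering::Relaxed), 128);
        assert_eq!(stats.allocated_bytes.load(Ordering::Relaxed), 96);
    }
}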

#[inline]
pub fn size_to_order(size: usize) -> u8 {
    if size <= MIN_BLOCK_SIZE {
        // Clamp to the minimum order: log2(MIN_BLOCK_SIZE) == 4.
        return MIN_BLOCK_SIZE.trailing_zeros() as u8;
    }

    // Ceiling of log2(size), so the block is large enough for the request.
    let bits = (size - 1).leading_zeros();
    (usize::BITS - bits) as u8
}

#[inline]
pub fn order_to_size(order: u8) -> usize {
    1 << order
}

#[inline]
pub fn buddy_addr(addr: usize, order: u8) -> usize {
    addr ^ (1 << order)
}
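
// Worked example of the XOR buddy rule, assuming offsets relative to an
// aligned base: an order-5 (32-byte) block at offset 64 has its buddy at
// 64 ^ 32 = 96, and buddy_addr(96, 5) == 64, so the pair can merge back into
// a single order-6 block at offset 64.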

pub struct MemoryPool {
    base: usize,
    size: usize,
    max_order: u8,
    min_order: u8,
    free_lists: Vec<Mutex<Vec<usize>>>,
    allocated: RwLock<HashMap<usize, u8>>,
    stats: BuddyStats,
}

impl MemoryPool {
    pub fn new(base: usize, size: usize) -> Result<Self, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        // The pool must be a power of two, at least MIN_BLOCK_SIZE, for the
        // buddy math to work; SizeTooLarge doubles as the generic
        // unsupported-geometry error here.
        if size < MIN_BLOCK_SIZE || !size.is_power_of_two() {
            return Err(BuddyError::SizeTooLarge(size));
        }

        let max_order = size_to_order(size);
        let min_order = size_to_order(MIN_BLOCK_SIZE);

        let num_orders = (max_order - min_order + 1) as usize;
        let mut free_lists = Vec::with_capacity(num_orders);

        for _ in 0..num_orders {
            free_lists.push(Mutex::new(Vec::new()));
        }

        // Seed the top-order list with the whole pool as one free block.
        free_lists[num_orders - 1].lock().push(base);

        Ok(Self {
            base,
            size,
            max_order,
            min_order,
            free_lists,
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        })
    }

    fn list_index(&self, order: u8) -> usize {
        (order - self.min_order) as usize
    }

    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        let required_order = size_to_order(size);

        if required_order > self.max_order {
            self.stats.record_failed_allocation();
            return Err(BuddyError::SizeTooLarge(size));
        }

        // Find the smallest order with a free block. The scan and the pop
        // below are separate critical sections, so a concurrent allocator can
        // empty the list in between; that race is reported as OutOfMemory.
        let mut found_order = None;
        for order in required_order..=self.max_order {
            let idx = self.list_index(order);
            let list = self.free_lists[idx].lock();
            if !list.is_empty() {
                found_order = Some(order);
                break;
            }
        }

        let available_order = match found_order {
            Some(o) => o,
            None => {
                self.stats.record_failed_allocation();
                return Err(BuddyError::OutOfMemory);
            }
        };

        let addr = {
            let idx = self.list_index(available_order);
            self.free_lists[idx]
                .lock()
                .pop()
                .ok_or(BuddyError::OutOfMemory)?
        };

        let final_addr = self.split_block(addr, available_order, required_order);

        self.allocated.write().insert(final_addr, required_order);
        self.stats.record_allocation(order_to_size(required_order));

        Ok(final_addr)
    }

    fn split_block(&self, addr: usize, current_order: u8, target_order: u8) -> usize {
        let mut current_addr = addr;
        let mut order = current_order;

        while order > target_order {
            order -= 1;
            self.stats.record_split();

            // Compute the buddy relative to the pool base so that pools whose
            // base is not aligned to their own size still split correctly.
            let buddy = self.base + buddy_addr(current_addr - self.base, order);

            // Keep the lower half; the upper half goes on the free list.
            let idx = self.list_index(order);
            self.free_lists[idx].lock().push(buddy);

            if buddy < current_addr {
                current_addr = buddy;
            }
        }

        current_addr
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let order = {
            let mut allocated = self.allocated.write();
            allocated
                .remove(&addr)
                .ok_or(BuddyError::InvalidAddress(addr))?
        };

        self.stats.record_deallocation(order_to_size(order));

        self.merge_block(addr, order);

        Ok(())
    }

    fn merge_block(&self, addr: usize, order: u8) {
        let mut current_addr = addr;
        let mut current_order = order;

        while current_order < self.max_order {
            // Buddy addresses are relative to the pool base, mirroring
            // split_block.
            let buddy = self.base + buddy_addr(current_addr - self.base, current_order);

            let idx = self.list_index(current_order);
            let mut list = self.free_lists[idx].lock();

            if let Some(pos) = list.iter().position(|&a| a == buddy) {
                // The buddy is free too: absorb it and retry one order up.
                list.swap_remove(pos);
                drop(list);

                self.stats.record_merge();

                current_addr = current_addr.min(buddy);
                current_order += 1;
            } else {
                // Buddy is still in use; park the block at this order.
                list.push(current_addr);
                return;
            }
        }

        // Fully coalesced back into the top-level block.
        let idx = self.list_index(current_order);
        self.free_lists[idx].lock().push(current_addr);
    }

    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.base && addr < self.base + self.size
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn free_block_counts(&self) -> Vec<(u8, usize)> {
        let mut counts = Vec::new();
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            counts.push((order, count));
        }
        counts
    }

    pub fn free_bytes(&self) -> usize {
        let mut total = 0;
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            total += count * order_to_size(order);
        }
        total
    }
}
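
// Sketch of the full split/merge life cycle on a small pool: one 16-byte
// allocation from a 256-byte pool splits orders 8 down to 4, and freeing it
// coalesces everything back into the single top-order block. The pool
// geometry here is an illustrative assumption, not a required configuration.
#[cfg(test)]
mod pool_lifecycle_sketch {
    use super::*;

    #[test]
    fn split_then_merge_restores_top_block() {
        let pool = MemoryPool::new(0, 256).unwrap();
        let addr = pool.allocate(16).unwrap();
        pool.deallocate(addr).unwrap();

        for (order, count) in pool.free_block_counts() {
            // Only the top order (8, i.e. 256 bytes) should hold a block.
            assert_eq!(count, if order == 8 { 1 } else { 0 });
        }
    }
}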

pub struct BuddyAllocator {
    pools: RwLock<Vec<MemoryPool>>,
    default_pool_size: usize,
    next_base: AtomicUsize,
    stats: BuddyStats,
}

impl BuddyAllocator {
    pub fn new() -> Self {
        Self::with_pool_size(DEFAULT_POOL_SIZE)
    }

    pub fn with_pool_size(pool_size: usize) -> Self {
        let pool_size = pool_size.next_power_of_two();

        Self {
            pools: RwLock::new(Vec::new()),
            default_pool_size: pool_size,
            // Addresses handed out are synthetic offsets in a private address
            // space, not real pointers; 0x1000 just keeps them away from zero.
            next_base: AtomicUsize::new(0x1000),
            stats: BuddyStats::new(),
        }
    }

    pub fn add_pool(&self, size: usize) -> Result<usize, BuddyError> {
        let size = size.next_power_of_two();
        let base = self.next_base.fetch_add(size, Ordering::SeqCst);

        let pool = MemoryPool::new(base, size)?;
        let pool_id = {
            let mut pools = self.pools.write();
            let id = pools.len();
            pools.push(pool);
            id
        };

        Ok(pool_id)
    }

    fn ensure_pool(&self) -> Result<(), BuddyError> {
        // Two threads may race through this check and both add a pool; that
        // only costs extra capacity, never correctness.
        let pools = self.pools.read();
        if pools.is_empty() {
            drop(pools);
            self.add_pool(self.default_pool_size)?;
        }
        Ok(())
    }

    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        self.ensure_pool()?;

        // First-fit over existing pools.
        let pools = self.pools.read();
        for pool in pools.iter() {
            if let Ok(addr) = pool.allocate(size) {
                self.stats
                    .record_allocation(order_to_size(size_to_order(size)));
                return Ok(addr);
            }
        }

        drop(pools);

        // Every pool was full: grow by adding a pool big enough for this
        // request, then retry on the new pool.
        let required_size = size.next_power_of_two().max(self.default_pool_size);
        self.add_pool(required_size)?;

        let pools = self.pools.read();
        if let Some(pool) = pools.last()
            && let Ok(addr) = pool.allocate(size)
        {
            self.stats
                .record_allocation(order_to_size(size_to_order(size)));
            return Ok(addr);
        }

        self.stats.record_failed_allocation();
        Err(BuddyError::OutOfMemory)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let pools = self.pools.read();

        // Find the pool that owns this address via its allocation map.
        for pool in pools.iter() {
            let order = pool.allocated.read().get(&addr).copied();
            if let Some(order) = order {
                self.stats.record_deallocation(order_to_size(order));
                return pool.deallocate(addr);
            }
        }

        Err(BuddyError::InvalidAddress(addr))
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn total_free_bytes(&self) -> usize {
        let pools = self.pools.read();
        pools.iter().map(|p| p.free_bytes()).sum()
    }

    pub fn pool_count(&self) -> usize {
        self.pools.read().len()
    }
}

impl Default for BuddyAllocator {
    fn default() -> Self {
        Self::new()
    }
}
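
// Usage sketch: requests are rounded up to a power-of-two block, so a
// 100-byte request consumes a 128-byte block. The 1024-byte pool size is an
// illustrative assumption.
#[cfg(test)]
mod allocator_rounding_sketch {
    use super::*;

    #[test]
    fn requests_round_up_to_block_size() {
        let alloc = BuddyAllocator::with_pool_size(1024);
        let addr = alloc.allocate(100).unwrap();
        assert_eq!(alloc.total_free_bytes(), 1024 - 128);

        alloc.deallocate(addr).unwrap();
        assert_eq!(alloc.total_free_bytes(), 1024);
    }
}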

pub struct TypedBuddyAllocator<T> {
    allocator: BuddyAllocator,
    _marker: std::marker::PhantomData<T>,
}

impl<T> TypedBuddyAllocator<T> {
    pub fn new() -> Self {
        Self {
            allocator: BuddyAllocator::new(),
            _marker: std::marker::PhantomData,
        }
    }

    pub fn allocate_one(&self) -> Result<usize, BuddyError> {
        self.allocator.allocate(std::mem::size_of::<T>())
    }

    pub fn allocate_array(&self, count: usize) -> Result<usize, BuddyError> {
        // Guard the multiplication so huge counts fail cleanly instead of
        // overflowing in debug builds or wrapping in release builds.
        let bytes = std::mem::size_of::<T>()
            .checked_mul(count)
            .ok_or(BuddyError::SizeTooLarge(usize::MAX))?;
        self.allocator.allocate(bytes)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        self.allocator.deallocate(addr)
    }
}

impl<T> Default for TypedBuddyAllocator<T> {
    fn default() -> Self {
        Self::new()
    }
}
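
// Sketch: a zero-length array request degenerates to a zero-byte allocation,
// which the underlying allocator rejects.
#[cfg(test)]
mod typed_allocator_sketch {
    use super::*;

    #[test]
    fn zero_length_array_is_rejected() {
        let alloc = TypedBuddyAllocator::<u64>::new();
        assert!(matches!(alloc.allocate_array(0), Err(BuddyError::ZeroSize)));
    }
}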

pub struct SlabAllocator {
    object_size: usize,
    objects_per_slab: usize,
    buddy: BuddyAllocator,
    slabs: RwLock<HashMap<usize, Vec<usize>>>,
    allocated: RwLock<HashMap<usize, usize>>,
    stats: BuddyStats,
}

impl SlabAllocator {
    pub fn new(object_size: usize) -> Self {
        // Round the object up to a power of two, with 8 bytes as the floor.
        let object_size = object_size.max(8).next_power_of_two();

        let slab_size = 4096usize;
        let objects_per_slab = slab_size / object_size;

        Self {
            object_size,
            objects_per_slab: objects_per_slab.max(1),
            buddy: BuddyAllocator::new(),
            slabs: RwLock::new(HashMap::new()),
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        }
    }

    pub fn allocate(&self) -> Result<usize, BuddyError> {
        // First-fit scan over existing slabs for a free slot.
        {
            let mut slabs = self.slabs.write();
            for (base, free_list) in slabs.iter_mut() {
                if let Some(offset) = free_list.pop() {
                    let addr = base + offset;
                    self.allocated.write().insert(addr, *base);
                    self.stats.record_allocation(self.object_size);
                    return Ok(addr);
                }
            }
        }

        // No free slot anywhere: carve a new slab out of the buddy allocator.
        let slab_size = self.object_size * self.objects_per_slab;
        let slab_base = self.buddy.allocate(slab_size)?;

        // Object 0 is handed to the caller; objects 1.. go on the free list.
        let mut free_list = Vec::with_capacity(self.objects_per_slab - 1);
        for i in 1..self.objects_per_slab {
            free_list.push(i * self.object_size);
        }

        self.slabs.write().insert(slab_base, free_list);
        self.allocated.write().insert(slab_base, slab_base);
        self.stats.record_allocation(self.object_size);

        Ok(slab_base)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let slab_base = self
            .allocated
            .write()
            .remove(&addr)
            .ok_or(BuddyError::InvalidAddress(addr))?;

        // Return the slot to its slab. Empty slabs are never handed back to
        // the buddy allocator; that is a simplification in this implementation.
        let offset = addr - slab_base;
        self.slabs
            .write()
            .get_mut(&slab_base)
            .ok_or(BuddyError::BlockNotFound(slab_base))?
            .push(offset);

        self.stats.record_deallocation(self.object_size);

        Ok(())
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn object_size(&self) -> usize {
        self.object_size
    }
}
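
// Sketch: objects carved from one slab stay inside that slab's window
// (4096 bytes, the size hardcoded in `new`), at offsets that are multiples
// of the object size.
#[cfg(test)]
mod slab_sketch {
    use super::*;

    #[test]
    fn objects_stay_inside_one_slab() {
        let slab = SlabAllocator::new(32);
        let first = slab.allocate().unwrap(); // a fresh slab hands out its base
        let second = slab.allocate().unwrap();

        assert!(second > first && second < first + 4096);
        assert_eq!((second - first) % slab.object_size(), 0);
    }
}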

pub struct BuddyArena {
    buddy: BuddyAllocator,
    current_block: Mutex<Option<ArenaBlock>>,
    block_size: usize,
    blocks: RwLock<Vec<usize>>,
}

struct ArenaBlock {
    base: usize,
    offset: usize,
    size: usize,
}

impl BuddyArena {
    pub fn new(block_size: usize) -> Self {
        let block_size = block_size.next_power_of_two();

        Self {
            buddy: BuddyAllocator::new(),
            current_block: Mutex::new(None),
            block_size,
            blocks: RwLock::new(Vec::new()),
        }
    }

    pub fn allocate(&self, size: usize, align: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }
        debug_assert!(align.is_power_of_two(), "align must be a power of two");

        let mut current = self.current_block.lock();

        // Try to bump-allocate out of the current block.
        if let Some(ref mut block) = *current {
            let aligned_offset = (block.offset + align - 1) & !(align - 1);
            if aligned_offset + size <= block.size {
                block.offset = aligned_offset + size;
                return Ok(block.base + aligned_offset);
            }
        }

        // The current block is full (or absent): grab a fresh one. Any space
        // left in the old block is abandoned until reset().
        let new_size = size.max(self.block_size).next_power_of_two();
        let base = self.buddy.allocate(new_size)?;

        self.blocks.write().push(base);

        // A fresh block starts at offset 0, which satisfies any power-of-two
        // alignment (assuming the block base itself is aligned).
        let aligned_offset = 0;
        *current = Some(ArenaBlock {
            base,
            offset: aligned_offset + size,
            size: new_size,
        });

        Ok(base + aligned_offset)
    }

    pub fn allocate_val<T>(&self, _val: T) -> Result<usize, BuddyError> {
        self.allocate(std::mem::size_of::<T>(), std::mem::align_of::<T>())
    }

    pub fn reset(&self) {
        let mut current = self.current_block.lock();
        *current = None;

        let blocks = std::mem::take(&mut *self.blocks.write());
        for block in blocks {
            let _ = self.buddy.deallocate(block);
        }
    }

    pub fn allocated_size(&self) -> usize {
        // A lower-bound estimate: oversized blocks (requests larger than
        // block_size) are still counted at block_size.
        self.blocks.read().len() * self.block_size
    }
}

impl Drop for BuddyArena {
    fn drop(&mut self) {
        self.reset();
    }
}
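
// Sketch of the bump behavior: consecutive allocations that need no padding
// land back-to-back inside the same block, so their addresses differ by
// exactly the previous request's size.
#[cfg(test)]
mod arena_bump_sketch {
    use super::*;

    #[test]
    fn consecutive_allocations_bump_forward() {
        let arena = BuddyArena::new(4096);
        let a = arena.allocate(24, 8).unwrap();
        let b = arena.allocate(24, 8).unwrap();
        assert_eq!(b, a + 24); // 24 is already 8-aligned, so no padding
    }
}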

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_size_to_order() {
        assert_eq!(size_to_order(1), 4);
        assert_eq!(size_to_order(16), 4);
        assert_eq!(size_to_order(17), 5);
        assert_eq!(size_to_order(32), 5);
        assert_eq!(size_to_order(64), 6);
        assert_eq!(size_to_order(1024), 10);
        assert_eq!(size_to_order(1025), 11);
    }

    #[test]
    fn test_order_to_size() {
        assert_eq!(order_to_size(4), 16);
        assert_eq!(order_to_size(5), 32);
        assert_eq!(order_to_size(10), 1024);
        assert_eq!(order_to_size(20), 1 << 20);
    }

    #[test]
    fn test_buddy_addr() {
        assert_eq!(buddy_addr(0, 4), 16);
        assert_eq!(buddy_addr(16, 4), 0);

        assert_eq!(buddy_addr(0, 5), 32);
        assert_eq!(buddy_addr(32, 5), 0);
        assert_eq!(buddy_addr(64, 5), 96);
        assert_eq!(buddy_addr(96, 5), 64);
    }

    #[test]
    fn test_memory_pool_basic() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr1 = pool.allocate(16).unwrap();
        assert!(pool.contains(addr1));

        let addr2 = pool.allocate(16).unwrap();
        assert_ne!(addr1, addr2);

        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_memory_pool_splitting() {
        let pool = MemoryPool::new(0, 256).unwrap();

        let addr = pool.allocate(16).unwrap();

        assert!(pool.stats().splits.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();
    }

    #[test]
    fn test_memory_pool_merging() {
        let pool = MemoryPool::new(0, 256).unwrap();

        let addr1 = pool.allocate(16).unwrap();
        let addr2 = pool.allocate(16).unwrap();

        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();

        assert!(pool.stats().merges.load(Ordering::Relaxed) > 0);

        // After merging, the whole pool is one block again.
        let addr3 = pool.allocate(256).unwrap();
        assert_eq!(addr3, 0);
    }

    #[test]
    fn test_memory_pool_out_of_memory() {
        let pool = MemoryPool::new(0, 256).unwrap();

        // 16 blocks of 16 bytes exhaust the 256-byte pool exactly.
        let mut addrs = Vec::new();
        for _ in 0..16 {
            addrs.push(pool.allocate(16).unwrap());
        }

        assert!(matches!(pool.allocate(16), Err(BuddyError::OutOfMemory)));

        pool.deallocate(addrs.pop().unwrap()).unwrap();
        assert!(pool.allocate(16).is_ok());
    }

    #[test]
    fn test_buddy_allocator_basic() {
        let alloc = BuddyAllocator::new();

        let addr1 = alloc.allocate(100).unwrap();
        let addr2 = alloc.allocate(200).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_buddy_allocator_auto_pool() {
        let alloc = BuddyAllocator::with_pool_size(1024);

        assert_eq!(alloc.pool_count(), 0);

        let _ = alloc.allocate(16).unwrap();

        assert_eq!(alloc.pool_count(), 1);
    }

    #[test]
    fn test_buddy_allocator_multiple_pools() {
        let alloc = BuddyAllocator::with_pool_size(256);

        // 32 blocks of 16 bytes need 512 bytes, forcing a second pool.
        let mut addrs = Vec::new();
        for _ in 0..32 {
            addrs.push(alloc.allocate(16).unwrap());
        }

        assert!(alloc.pool_count() > 1);

        for addr in addrs {
            alloc.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_typed_allocator() {
        #[repr(C)]
        struct MyStruct {
            a: u64,
            b: u32,
            c: u16,
        }

        let alloc = TypedBuddyAllocator::<MyStruct>::new();

        let addr1 = alloc.allocate_one().unwrap();
        let addr2 = alloc.allocate_array(10).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_slab_allocator() {
        let slab = SlabAllocator::new(32);

        let mut addrs = Vec::new();
        for _ in 0..10 {
            addrs.push(slab.allocate().unwrap());
        }

        // All handed-out addresses must be pairwise distinct.
        for i in 0..addrs.len() {
            for j in (i + 1)..addrs.len() {
                assert_ne!(addrs[i], addrs[j]);
            }
        }

        for addr in addrs {
            slab.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_slab_reuse() {
        let slab = SlabAllocator::new(64);

        let addr1 = slab.allocate().unwrap();
        slab.deallocate(addr1).unwrap();

        let addr2 = slab.allocate().unwrap();

        assert_eq!(addr1, addr2);
    }

    #[test]
    fn test_buddy_arena() {
        let arena = BuddyArena::new(4096);

        let addr1 = arena.allocate(100, 8).unwrap();
        let addr2 = arena.allocate(200, 16).unwrap();
        let addr3 = arena.allocate(50, 4).unwrap();

        assert_ne!(addr1, addr2);
        assert_ne!(addr2, addr3);
        assert_ne!(addr1, addr3);

        assert_eq!(addr2 % 16, 0);

        arena.reset();

        let _ = arena.allocate(100, 8).unwrap();
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = BuddyArena::new(256);

        // A request larger than the arena's block size gets its own block.
        let result = arena.allocate(1024, 8);
        assert!(result.is_ok(), "Allocation failed: {:?}", result);
        let addr = result.unwrap();
        println!("Arena allocated address: {:#x}", addr);
    }

    #[test]
    fn test_stats_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();

        assert!(pool.stats().allocations.load(Ordering::Relaxed) > 0);
        assert!(pool.stats().allocated_bytes.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();

        assert!(pool.stats().deallocations.load(Ordering::Relaxed) > 0);
    }

    #[test]
    fn test_free_bytes_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let initial_free = pool.free_bytes();
        assert_eq!(initial_free, 1024);

        let addr = pool.allocate(64).unwrap();

        let after_alloc = pool.free_bytes();
        assert!(after_alloc < initial_free);

        pool.deallocate(addr).unwrap();

        let after_free = pool.free_bytes();
        assert_eq!(after_free, initial_free);
    }

    #[test]
    fn test_double_free() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();
        pool.deallocate(addr).unwrap();

        // A second free of the same address reports an invalid address,
        // since the allocation map no longer knows it.
        assert!(matches!(
            pool.deallocate(addr),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_invalid_address() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        assert!(matches!(
            pool.deallocate(999),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_concurrent_allocations() {
        use std::sync::Arc;
        use std::thread;

        let alloc = Arc::new(BuddyAllocator::new());
        let mut handles = Vec::new();

        for _ in 0..2 {
            let alloc = Arc::clone(&alloc);
            handles.push(thread::spawn(move || {
                let mut addrs = Vec::new();
                for _ in 0..10 {
                    if let Ok(addr) = alloc.allocate(32) {
                        addrs.push(addr);
                    }
                }
                std::thread::sleep(std::time::Duration::from_millis(1));
                for addr in addrs {
                    let _ = alloc.deallocate(addr);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }
    }
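
    // Sketch: the allocator keeps its own BuddyStats, separate from each
    // pool's; only the allocator-level counters are checked here.
    #[test]
    fn test_allocator_stats() {
        let alloc = BuddyAllocator::new();
        let addr = alloc.allocate(64).unwrap();
        assert_eq!(alloc.stats().allocations.load(Ordering::Relaxed), 1);

        alloc.deallocate(addr).unwrap();
        assert_eq!(alloc.stats().deallocations.load(Ordering::Relaxed), 1);
    }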
}