use parking_lot::{Mutex, RwLock};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

pub const MIN_BLOCK_SIZE: usize = 16;
pub const MAX_BLOCK_SIZE: usize = 1 << 30;
pub const DEFAULT_POOL_SIZE: usize = 64 * 1024 * 1024;

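/// Errors returned by the allocators in this module.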
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BuddyError {
    SizeTooLarge(usize),
    ZeroSize,
    OutOfMemory,
    InvalidAddress(usize),
    DoubleFree(usize),
    BlockNotFound(usize),
    PoolExhausted,
}

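/// Per-block metadata as it would be laid out in front of each block in a
/// real backing store. This module tracks allocations in side tables instead,
/// so the header is currently illustrative (hence the `dead_code` allowances).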
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct BlockHeader {
    magic: u32,
    order: u8,
    flags: u8,
    _padding: [u8; 2],
    size: u32,
}

#[allow(dead_code)]
const BLOCK_MAGIC: u32 = 0xB0DD_1E5A;
#[allow(dead_code)]
const FLAG_ALLOCATED: u8 = 0x01;
#[allow(dead_code)]
const FLAG_SPLIT: u8 = 0x02;

#[allow(dead_code)]
impl BlockHeader {
    fn new(order: u8, size: u32) -> Self {
        Self {
            magic: BLOCK_MAGIC,
            order,
            flags: 0,
            _padding: [0; 2],
            size,
        }
    }

    fn is_valid(&self) -> bool {
        self.magic == BLOCK_MAGIC
    }

    fn is_allocated(&self) -> bool {
        self.flags & FLAG_ALLOCATED != 0
    }

    fn set_allocated(&mut self, allocated: bool) {
        if allocated {
            self.flags |= FLAG_ALLOCATED;
        } else {
            self.flags &= !FLAG_ALLOCATED;
        }
    }

    fn is_split(&self) -> bool {
        self.flags & FLAG_SPLIT != 0
    }

    fn set_split(&mut self, split: bool) {
        if split {
            self.flags |= FLAG_SPLIT;
        } else {
            self.flags &= !FLAG_SPLIT;
        }
    }
}

#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
struct FreeBlock {
    addr: usize,
    next: usize,
}

#[derive(Debug, Default)]
pub struct BuddyStats {
    pub allocations: AtomicU64,
    pub deallocations: AtomicU64,
    pub allocated_bytes: AtomicUsize,
    pub peak_allocated_bytes: AtomicUsize,
    pub splits: AtomicU64,
    pub merges: AtomicU64,
    pub failed_allocations: AtomicU64,
}

impl BuddyStats {
    pub fn new() -> Self {
        Self::default()
    }

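    /// Record one allocation and raise the peak-bytes high-water mark with a
    /// compare-exchange loop, so concurrent updates never lose a larger peak.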
    pub fn record_allocation(&self, size: usize) {
        self.allocations.fetch_add(1, Ordering::Relaxed);
        let old = self.allocated_bytes.fetch_add(size, Ordering::Relaxed);
        let new = old + size;

        let mut current_peak = self.peak_allocated_bytes.load(Ordering::Relaxed);
        while new > current_peak {
            match self.peak_allocated_bytes.compare_exchange_weak(
                current_peak,
                new,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => current_peak = p,
            }
        }
    }

    pub fn record_deallocation(&self, size: usize) {
        self.deallocations.fetch_add(1, Ordering::Relaxed);
        self.allocated_bytes.fetch_sub(size, Ordering::Relaxed);
    }

    pub fn record_split(&self) {
        self.splits.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_merge(&self) {
        self.merges.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_failed_allocation(&self) {
        self.failed_allocations.fetch_add(1, Ordering::Relaxed);
    }
}

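/// Map a request size to the smallest order whose block can hold it, clamped
/// to the order of `MIN_BLOCK_SIZE`. For example, 17..=32 bytes map to order 5
/// (32-byte blocks) and 1025 bytes map to order 11 (2048-byte blocks).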
#[inline]
pub fn size_to_order(size: usize) -> u8 {
    if size <= MIN_BLOCK_SIZE {
        return MIN_BLOCK_SIZE.trailing_zeros() as u8;
    }

    let bits = (size - 1).leading_zeros();
    (usize::BITS - bits) as u8
}

#[inline]
pub fn order_to_size(order: u8) -> usize {
    1 << order
}

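/// Buddy of a *pool-relative* offset: a block of order `k` pairs with the
/// block whose offset differs only in bit `k`. Callers with a pool at a
/// nonzero base must translate addresses to base-relative offsets first
/// (see `MemoryPool::buddy_of`).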
#[inline]
pub fn buddy_addr(addr: usize, order: u8) -> usize {
    addr ^ (1 << order)
}

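/// A single power-of-two memory pool with one free list per block order.
/// Allocation metadata lives in side tables rather than in-band headers, so
/// the pool manages abstract address ranges, not real backing memory.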
pub struct MemoryPool {
    base: usize,
    size: usize,
    max_order: u8,
    min_order: u8,
    free_lists: Vec<Mutex<Vec<usize>>>,
    allocated: RwLock<HashMap<usize, u8>>,
    stats: BuddyStats,
}

impl MemoryPool {
    pub fn new(base: usize, size: usize) -> Result<Self, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        // A pool must be a power of two and at least one minimum-size block.
        if !size.is_power_of_two() || size < MIN_BLOCK_SIZE {
            return Err(BuddyError::SizeTooLarge(size));
        }

        let max_order = size_to_order(size);
        let min_order = size_to_order(MIN_BLOCK_SIZE);

        let num_orders = (max_order - min_order + 1) as usize;
        let mut free_lists = Vec::with_capacity(num_orders);

        for _ in 0..num_orders {
            free_lists.push(Mutex::new(Vec::new()));
        }

        // The whole pool starts as a single free block of the largest order.
        free_lists[num_orders - 1].lock().push(base);

        Ok(Self {
            base,
            size,
            max_order,
            min_order,
            free_lists,
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        })
    }

    fn list_index(&self, order: u8) -> usize {
        (order - self.min_order) as usize
    }

    /// Compute a block's buddy relative to the pool base, so pools whose base
    /// address is not aligned to the pool size still pair blocks correctly.
    fn buddy_of(&self, addr: usize, order: u8) -> usize {
        self.base + buddy_addr(addr - self.base, order)
    }

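    /// Allocate `size` bytes: round up to a block order, pop the smallest
    /// free block that fits, and split it down until it matches the request.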
    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        let required_order = size_to_order(size);

        if required_order > self.max_order {
            self.stats.record_failed_allocation();
            return Err(BuddyError::SizeTooLarge(size));
        }

        // Scan upward and pop in a single locked step per order, so a block
        // observed as free cannot be taken by another thread before we claim it.
        let mut found = None;
        for order in required_order..=self.max_order {
            let idx = self.list_index(order);
            if let Some(block) = self.free_lists[idx].lock().pop() {
                found = Some((block, order));
                break;
            }
        }

        let (addr, available_order) = match found {
            Some(f) => f,
            None => {
                self.stats.record_failed_allocation();
                return Err(BuddyError::OutOfMemory);
            }
        };

        let final_addr = self.split_block(addr, available_order, required_order);

        self.allocated.write().insert(final_addr, required_order);
        self.stats.record_allocation(order_to_size(required_order));

        Ok(final_addr)
    }

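    /// Halve the block at `addr` repeatedly until it reaches `target_order`,
    /// returning the surviving half and pushing each freed buddy onto its
    /// free list.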
    fn split_block(&self, addr: usize, current_order: u8, target_order: u8) -> usize {
        let mut current_addr = addr;
        let mut order = current_order;

        while order > target_order {
            order -= 1;
            self.stats.record_split();

            let buddy = self.buddy_of(current_addr, order);

            let idx = self.list_index(order);
            self.free_lists[idx].lock().push(buddy);

            if buddy < current_addr {
                current_addr = buddy;
            }
        }

        current_addr
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let order = {
            let mut allocated = self.allocated.write();
            allocated
                .remove(&addr)
                .ok_or(BuddyError::InvalidAddress(addr))?
        };

        self.stats.record_deallocation(order_to_size(order));

        self.merge_block(addr, order);

        Ok(())
    }

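    /// Coalesce a freed block with its buddy as long as the buddy is also
    /// free, then park the merged block on the free list of the final order.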
    fn merge_block(&self, addr: usize, order: u8) {
        let mut current_addr = addr;
        let mut current_order = order;

        while current_order < self.max_order {
            let buddy = self.buddy_of(current_addr, current_order);

            let idx = self.list_index(current_order);
            let mut list = self.free_lists[idx].lock();

            if let Some(pos) = list.iter().position(|&a| a == buddy) {
                list.swap_remove(pos);
                drop(list);

                self.stats.record_merge();

                current_addr = current_addr.min(buddy);
                current_order += 1;
            } else {
                list.push(current_addr);
                return;
            }
        }

        let idx = self.list_index(current_order);
        self.free_lists[idx].lock().push(current_addr);
    }

    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.base && addr < self.base + self.size
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn free_block_counts(&self) -> Vec<(u8, usize)> {
        let mut counts = Vec::new();
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            counts.push((order, count));
        }
        counts
    }

    pub fn free_bytes(&self) -> usize {
        let mut total = 0;
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            total += count * order_to_size(order);
        }
        total
    }
}

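/// A growable allocator that owns a set of `MemoryPool`s and adds a new pool
/// whenever none of the existing ones can satisfy a request. Pool bases come
/// from a simple bump counter, so addresses are abstract handles rather than
/// real pointers.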
pub struct BuddyAllocator {
    pools: RwLock<Vec<MemoryPool>>,
    default_pool_size: usize,
    next_base: AtomicUsize,
    stats: BuddyStats,
}

impl BuddyAllocator {
    pub fn new() -> Self {
        Self::with_pool_size(DEFAULT_POOL_SIZE)
    }

    pub fn with_pool_size(pool_size: usize) -> Self {
        let pool_size = pool_size.next_power_of_two();

        Self {
            pools: RwLock::new(Vec::new()),
            default_pool_size: pool_size,
            next_base: AtomicUsize::new(0x1000),
            stats: BuddyStats::new(),
        }
    }

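    /// Create a new pool of at least `size` bytes and return its index.
    /// Bases come from a monotonic bump counter, so pools never overlap.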
    pub fn add_pool(&self, size: usize) -> Result<usize, BuddyError> {
        let size = size.next_power_of_two();
        let base = self.next_base.fetch_add(size, Ordering::SeqCst);

        let pool = MemoryPool::new(base, size)?;
        let pool_id = {
            let mut pools = self.pools.write();
            let id = pools.len();
            pools.push(pool);
            id
        };

        Ok(pool_id)
    }

    fn ensure_pool(&self) -> Result<(), BuddyError> {
        let pools = self.pools.read();
        if pools.is_empty() {
            drop(pools);
            // Benign race: two threads may both create a pool here; the
            // extra pool simply serves later allocations.
            self.add_pool(self.default_pool_size)?;
        }
        Ok(())
    }

    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        self.ensure_pool()?;

        let pools = self.pools.read();
        for pool in pools.iter() {
            if let Ok(addr) = pool.allocate(size) {
                self.stats
                    .record_allocation(order_to_size(size_to_order(size)));
                return Ok(addr);
            }
        }

        drop(pools);

        // No existing pool could satisfy the request; grow by one pool that
        // is at least the default size and large enough for this allocation.
        let required_size = size.next_power_of_two().max(self.default_pool_size);
        self.add_pool(required_size)?;

        let pools = self.pools.read();
        if let Some(pool) = pools.last() {
            if let Ok(addr) = pool.allocate(size) {
                self.stats
                    .record_allocation(order_to_size(size_to_order(size)));
                return Ok(addr);
            }
        }

        self.stats.record_failed_allocation();
        Err(BuddyError::OutOfMemory)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let pools = self.pools.read();

        for pool in pools.iter() {
            // Read the order once; the pool-level deallocate removes the
            // entry, and stats are recorded only after it succeeds.
            let order = pool.allocated.read().get(&addr).copied();

            if let Some(order) = order {
                pool.deallocate(addr)?;
                self.stats.record_deallocation(order_to_size(order));
                return Ok(());
            }
        }

        Err(BuddyError::InvalidAddress(addr))
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn total_free_bytes(&self) -> usize {
        let pools = self.pools.read();
        pools.iter().map(|p| p.free_bytes()).sum()
    }

    pub fn pool_count(&self) -> usize {
        self.pools.read().len()
    }
}

impl Default for BuddyAllocator {
    fn default() -> Self {
        Self::new()
    }
}

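/// A thin typed wrapper that sizes requests from `size_of::<T>()`.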
pub struct TypedBuddyAllocator<T> {
    allocator: BuddyAllocator,
    _marker: std::marker::PhantomData<T>,
}

impl<T> TypedBuddyAllocator<T> {
    pub fn new() -> Self {
        Self {
            allocator: BuddyAllocator::new(),
            _marker: std::marker::PhantomData,
        }
    }

    pub fn allocate_one(&self) -> Result<usize, BuddyError> {
        self.allocator.allocate(std::mem::size_of::<T>())
    }

    pub fn allocate_array(&self, count: usize) -> Result<usize, BuddyError> {
        // Guard against overflow before sizing the request.
        let bytes = std::mem::size_of::<T>()
            .checked_mul(count)
            .ok_or(BuddyError::SizeTooLarge(count))?;
        self.allocator.allocate(bytes)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        self.allocator.deallocate(addr)
    }
}

impl<T> Default for TypedBuddyAllocator<T> {
    fn default() -> Self {
        Self::new()
    }
}

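/// A fixed-size object allocator layered on the buddy allocator: each slab is
/// one buddy allocation carved into equal objects, with a per-slab free list
/// of offsets. Freed objects are recycled before any new slab is requested.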
pub struct SlabAllocator {
    object_size: usize,
    objects_per_slab: usize,
    buddy: BuddyAllocator,
    slabs: RwLock<HashMap<usize, Vec<usize>>>,
    allocated: RwLock<HashMap<usize, usize>>,
    stats: BuddyStats,
}

impl SlabAllocator {
    pub fn new(object_size: usize) -> Self {
        let object_size = object_size.max(8).next_power_of_two();

        let slab_size = 4096usize;
        let objects_per_slab = slab_size / object_size;

        Self {
            object_size,
            objects_per_slab: objects_per_slab.max(1),
            buddy: BuddyAllocator::new(),
            slabs: RwLock::new(HashMap::new()),
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        }
    }

    pub fn allocate(&self) -> Result<usize, BuddyError> {
        // First try to reuse a free slot in an existing slab.
        {
            let mut slabs = self.slabs.write();
            for (&base, free_list) in slabs.iter_mut() {
                if let Some(offset) = free_list.pop() {
                    let addr = base + offset;
                    self.allocated.write().insert(addr, base);
                    self.stats.record_allocation(self.object_size);
                    return Ok(addr);
                }
            }
        }

        // Otherwise carve a new slab out of the buddy allocator. Slot 0 is
        // handed to the caller; the remaining slots go on the free list.
        let slab_size = self.object_size * self.objects_per_slab;
        let slab_base = self.buddy.allocate(slab_size)?;

        let mut free_list = Vec::with_capacity(self.objects_per_slab - 1);
        for i in 1..self.objects_per_slab {
            free_list.push(i * self.object_size);
        }

        self.slabs.write().insert(slab_base, free_list);
        self.allocated.write().insert(slab_base, slab_base);
        self.stats.record_allocation(self.object_size);

        Ok(slab_base)
    }

    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let slab_base = self
            .allocated
            .write()
            .remove(&addr)
            .ok_or(BuddyError::InvalidAddress(addr))?;

        let offset = addr - slab_base;
        self.slabs
            .write()
            .get_mut(&slab_base)
            .ok_or(BuddyError::BlockNotFound(slab_base))?
            .push(offset);

        self.stats.record_deallocation(self.object_size);

        Ok(())
    }

    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    pub fn object_size(&self) -> usize {
        self.object_size
    }
}

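/// A bump arena backed by the buddy allocator: allocations advance an offset
/// within the current block, a new block is fetched when the current one is
/// exhausted, and `reset` returns every block at once.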
pub struct BuddyArena {
    buddy: BuddyAllocator,
    current_block: Mutex<Option<ArenaBlock>>,
    block_size: usize,
    blocks: RwLock<Vec<usize>>,
}

struct ArenaBlock {
    base: usize,
    offset: usize,
    size: usize,
}

impl BuddyArena {
    pub fn new(block_size: usize) -> Self {
        let block_size = block_size.next_power_of_two();

        Self {
            buddy: BuddyAllocator::new(),
            current_block: Mutex::new(None),
            block_size,
            blocks: RwLock::new(Vec::new()),
        }
    }

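    /// Bump-allocate `size` bytes aligned to `align` (a power of two), taking
    /// a new block from the buddy allocator when the current one is full.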
    pub fn allocate(&self, size: usize, align: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }
        debug_assert!(align.is_power_of_two());

        let mut current = self.current_block.lock();

        if let Some(ref mut block) = *current {
            // Round the bump offset up to the requested alignment.
            let aligned_offset = (block.offset + align - 1) & !(align - 1);
            if aligned_offset + size <= block.size {
                block.offset = aligned_offset + size;
                return Ok(block.base + aligned_offset);
            }
        }

        let new_size = size.max(self.block_size).next_power_of_two();
        let base = self.buddy.allocate(new_size)?;

        self.blocks.write().push(base);

        // A fresh block starts at offset 0, which is already aligned provided
        // the block base itself is at least `align`-aligned.
        let aligned_offset = 0;
        *current = Some(ArenaBlock {
            base,
            offset: aligned_offset + size,
            size: new_size,
        });

        Ok(base + aligned_offset)
    }

    pub fn allocate_val<T>(&self, _val: T) -> Result<usize, BuddyError> {
        self.allocate(std::mem::size_of::<T>(), std::mem::align_of::<T>())
    }

    pub fn reset(&self) {
        let mut current = self.current_block.lock();
        *current = None;

        let blocks = std::mem::take(&mut *self.blocks.write());
        for block in blocks {
            let _ = self.buddy.deallocate(block);
        }
    }

    /// Estimate of the arena footprint; exact only when every block was
    /// created at the default block size.
    pub fn allocated_size(&self) -> usize {
        self.blocks.read().len() * self.block_size
    }
}

impl Drop for BuddyArena {
    fn drop(&mut self) {
        self.reset();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_size_to_order() {
        assert_eq!(size_to_order(1), 4);
        assert_eq!(size_to_order(16), 4);
        assert_eq!(size_to_order(17), 5);
        assert_eq!(size_to_order(32), 5);
        assert_eq!(size_to_order(64), 6);
        assert_eq!(size_to_order(1024), 10);
        assert_eq!(size_to_order(1025), 11);
    }

    #[test]
    fn test_order_to_size() {
        assert_eq!(order_to_size(4), 16);
        assert_eq!(order_to_size(5), 32);
        assert_eq!(order_to_size(10), 1024);
        assert_eq!(order_to_size(20), 1 << 20);
    }

    #[test]
    fn test_buddy_addr() {
        assert_eq!(buddy_addr(0, 4), 16);
        assert_eq!(buddy_addr(16, 4), 0);

        assert_eq!(buddy_addr(0, 5), 32);
        assert_eq!(buddy_addr(32, 5), 0);
        assert_eq!(buddy_addr(64, 5), 96);
        assert_eq!(buddy_addr(96, 5), 64);
    }

    #[test]
    fn test_memory_pool_basic() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr1 = pool.allocate(16).unwrap();
        assert!(pool.contains(addr1));

        let addr2 = pool.allocate(16).unwrap();
        assert_ne!(addr1, addr2);

        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_memory_pool_splitting() {
        let pool = MemoryPool::new(0, 256).unwrap();

        let addr = pool.allocate(16).unwrap();

        assert!(pool.stats().splits.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();
    }

    #[test]
    fn test_memory_pool_merging() {
        let pool = MemoryPool::new(0, 256).unwrap();

        let addr1 = pool.allocate(16).unwrap();
        let addr2 = pool.allocate(16).unwrap();

        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();

        assert!(pool.stats().merges.load(Ordering::Relaxed) > 0);

        let addr3 = pool.allocate(256).unwrap();
        assert_eq!(addr3, 0);
    }

    #[test]
    fn test_memory_pool_out_of_memory() {
        let pool = MemoryPool::new(0, 256).unwrap();

        let mut addrs = Vec::new();
        for _ in 0..16 {
            addrs.push(pool.allocate(16).unwrap());
        }

        assert!(matches!(pool.allocate(16), Err(BuddyError::OutOfMemory)));

        pool.deallocate(addrs.pop().unwrap()).unwrap();
        assert!(pool.allocate(16).is_ok());
    }

    #[test]
    fn test_buddy_allocator_basic() {
        let alloc = BuddyAllocator::new();

        let addr1 = alloc.allocate(100).unwrap();
        let addr2 = alloc.allocate(200).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_buddy_allocator_auto_pool() {
        let alloc = BuddyAllocator::with_pool_size(1024);

        assert_eq!(alloc.pool_count(), 0);

        let _ = alloc.allocate(16).unwrap();

        assert_eq!(alloc.pool_count(), 1);
    }

    #[test]
    fn test_buddy_allocator_multiple_pools() {
        let alloc = BuddyAllocator::with_pool_size(256);

        let mut addrs = Vec::new();
        for _ in 0..32 {
            addrs.push(alloc.allocate(16).unwrap());
        }

        assert!(alloc.pool_count() > 1);

        for addr in addrs {
            alloc.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_typed_allocator() {
        #[repr(C)]
        struct MyStruct {
            a: u64,
            b: u32,
            c: u16,
        }

        let alloc = TypedBuddyAllocator::<MyStruct>::new();

        let addr1 = alloc.allocate_one().unwrap();
        let addr2 = alloc.allocate_array(10).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_slab_allocator() {
        let slab = SlabAllocator::new(32);

        let mut addrs = Vec::new();
        for _ in 0..10 {
            addrs.push(slab.allocate().unwrap());
        }

        for i in 0..addrs.len() {
            for j in (i + 1)..addrs.len() {
                assert_ne!(addrs[i], addrs[j]);
            }
        }

        for addr in addrs {
            slab.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_slab_reuse() {
        let slab = SlabAllocator::new(64);

        let addr1 = slab.allocate().unwrap();
        slab.deallocate(addr1).unwrap();

        let addr2 = slab.allocate().unwrap();

        assert_eq!(addr1, addr2);
    }

    #[test]
    fn test_buddy_arena() {
        let arena = BuddyArena::new(4096);

        let addr1 = arena.allocate(100, 8).unwrap();
        let addr2 = arena.allocate(200, 16).unwrap();
        let addr3 = arena.allocate(50, 4).unwrap();

        assert_ne!(addr1, addr2);
        assert_ne!(addr2, addr3);
        assert_ne!(addr1, addr3);

        assert_eq!(addr2 % 16, 0);

        arena.reset();

        let _ = arena.allocate(100, 8).unwrap();
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = BuddyArena::new(256);

        let result = arena.allocate(1024, 8);
        assert!(result.is_ok(), "Allocation failed: {:?}", result);
        let addr = result.unwrap();
        println!("Arena allocated address: {:#x}", addr);
    }

    #[test]
    fn test_stats_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();

        assert!(pool.stats().allocations.load(Ordering::Relaxed) > 0);
        assert!(pool.stats().allocated_bytes.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();

        assert!(pool.stats().deallocations.load(Ordering::Relaxed) > 0);
    }

    #[test]
    fn test_free_bytes_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let initial_free = pool.free_bytes();
        assert_eq!(initial_free, 1024);

        let addr = pool.allocate(64).unwrap();

        let after_alloc = pool.free_bytes();
        assert!(after_alloc < initial_free);

        pool.deallocate(addr).unwrap();

        let after_free = pool.free_bytes();
        assert_eq!(after_free, initial_free);
    }

    #[test]
    fn test_double_free() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();
        pool.deallocate(addr).unwrap();

        assert!(matches!(
            pool.deallocate(addr),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_invalid_address() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        assert!(matches!(
            pool.deallocate(999),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_concurrent_allocations() {
        use std::sync::Arc;
        use std::thread;

        let alloc = Arc::new(BuddyAllocator::new());
        let mut handles = Vec::new();

        for _ in 0..2 {
            let alloc = Arc::clone(&alloc);
            handles.push(thread::spawn(move || {
                let mut addrs = Vec::new();
                for _ in 0..10 {
                    if let Ok(addr) = alloc.allocate(32) {
                        addrs.push(addr);
                    }
                }
                std::thread::sleep(std::time::Duration::from_millis(1));
                for addr in addrs {
                    let _ = alloc.deallocate(addr);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }
    }
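
    // Illustrative end-to-end sketch: the assertions below follow from the
    // pool's documented semantics (power-of-two rounding and buddy
    // coalescing) rather than exercising any single code path.
    #[test]
    fn test_round_trip_restores_free_bytes() {
        let pool = MemoryPool::new(0, 512).unwrap();

        // 100 bytes rounds up to a 128-byte (order-7) block.
        let addr = pool.allocate(100).unwrap();
        assert_eq!(pool.free_bytes(), 512 - 128);

        // Freeing it re-merges everything back into one 512-byte block.
        pool.deallocate(addr).unwrap();
        assert_eq!(pool.free_bytes(), 512);
        assert_eq!(pool.free_block_counts().last(), Some(&(9, 1)));
    }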
}