sochdb_core/
buddy_allocator.rs

1// Copyright 2025 Sushanth (https://github.com/sushanthpy)
2//
3// Licensed under the Apache License, Version 2.0 (the "License");
4// you may not use this file except in compliance with the License.
5// You may obtain a copy of the License at
6//
7//     http://www.apache.org/licenses/LICENSE-2.0
8//
9// Unless required by applicable law or agreed to in writing, software
10// distributed under the License is distributed on an "AS IS" BASIS,
11// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12// See the License for the specific language governing permissions and
13// limitations under the License.
14
15//! Buddy Allocator for SochDB Memory Management
16//!
17//! # DEPRECATION NOTICE
18//!
19//! **This module is deprecated and will be removed in a future release.**
20//!
21//! ## Recommendation: Use jemalloc Instead
22//!
23//! Enable the `jemalloc` feature in your `Cargo.toml`:
24//!
25//! ```toml
26//! sochdb-core = { version = "...", features = ["jemalloc"] }
27//! ```
28//!
29//! ### Why jemalloc is preferred:
30//!
31//! 1. **Production-proven**: Used by Firefox, Facebook, Redis, RocksDB
32//! 2. **Thread-local caching**: Eliminates lock contention on hot paths
33//! 3. **Better fragmentation handling**: Size-class based allocation
34//! 4. **Automatic memory return**: Returns memory to OS via `madvise`
35//! 5. **No maintenance burden**: Well-tested, battle-hardened code
36//!
37//! ### Issues with this custom allocator:
38//!
39//! 1. **Virtual address tracking only**: Does not manage real memory
40//! 2. **Lock contention**: RwLock on every allocation
41//! 3. **No memory reclamation**: Never returns memory to OS
42//! 4. **Internal fragmentation**: Buddy allocation wastes ~50% for small allocs
43//!
44//! ## Migration Path
45//!
46//! If you are using `BuddyAllocator` or `SlabAllocator` directly:
47//!
48//! ```rust,ignore
49//! // Before (deprecated):
50//! let allocator = BuddyAllocator::new(64 * 1024 * 1024)?;
51//! let addr = allocator.allocate(1024)?;
52//!
53//! // After (recommended):
54//! // Simply enable the jemalloc feature and use standard allocation:
55//! let buffer: Vec<u8> = Vec::with_capacity(1024);
56//! ```
57//!
58//! ## Original Documentation
59//!
60//! Implements a buddy system allocator for efficient power-of-2 memory block allocation.
61//! Key features:
62//! - O(1) allocation and deallocation for available blocks
63//! - Automatic block splitting and merging
64//! - Memory coalescing to reduce fragmentation
65//! - Thread-safe with fine-grained locking
66//! - Support for multiple memory pools
67
68use parking_lot::{Mutex, RwLock};
69use std::collections::HashMap;
70use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
71
72/// Minimum block size (16 bytes)
73pub const MIN_BLOCK_SIZE: usize = 16;
74/// Maximum block size (1 GB)
75pub const MAX_BLOCK_SIZE: usize = 1 << 30;
76/// Default pool size (64 MB)
77pub const DEFAULT_POOL_SIZE: usize = 64 * 1024 * 1024;
78
79/// Error types for buddy allocator operations
80#[derive(Debug, Clone, PartialEq, Eq)]
81pub enum BuddyError {
82    /// Requested size is too large
83    SizeTooLarge(usize),
84    /// Requested size is zero
85    ZeroSize,
86    /// No memory available
87    OutOfMemory,
88    /// Invalid address
89    InvalidAddress(usize),
90    /// Double free detected
91    DoubleFree(usize),
92    /// Block not found
93    BlockNotFound(usize),
94    /// Pool exhausted
95    PoolExhausted,
96}
97
98/// Block header stored at the beginning of each block
99#[derive(Debug, Clone, Copy)]
100#[repr(C)]
101pub struct BlockHeader {
102    /// Magic number for validation
103    magic: u32,
104    /// Order (log2 of size)
105    order: u8,
106    /// Flags (allocated, etc.)
107    flags: u8,
108    /// Padding
109    _padding: [u8; 2],
110    /// Size of the block
111    size: u32,
112}
113
114#[allow(dead_code)]
115const BLOCK_MAGIC: u32 = 0xB0DD_1E5A;
116#[allow(dead_code)]
117const FLAG_ALLOCATED: u8 = 0x01;
118#[allow(dead_code)]
119const FLAG_SPLIT: u8 = 0x02;
120
121#[allow(dead_code)]
122impl BlockHeader {
123    fn new(order: u8, size: u32) -> Self {
124        Self {
125            magic: BLOCK_MAGIC,
126            order,
127            flags: 0,
128            _padding: [0; 2],
129            size,
130        }
131    }
132
133    fn is_valid(&self) -> bool {
134        self.magic == BLOCK_MAGIC
135    }
136
137    fn is_allocated(&self) -> bool {
138        self.flags & FLAG_ALLOCATED != 0
139    }
140
141    fn set_allocated(&mut self, allocated: bool) {
142        if allocated {
143            self.flags |= FLAG_ALLOCATED;
144        } else {
145            self.flags &= !FLAG_ALLOCATED;
146        }
147    }
148
149    fn is_split(&self) -> bool {
150        self.flags & FLAG_SPLIT != 0
151    }
152
153    fn set_split(&mut self, split: bool) {
154        if split {
155            self.flags |= FLAG_SPLIT;
156        } else {
157            self.flags &= !FLAG_SPLIT;
158        }
159    }
160}
161
162/// A free block in the buddy allocator
163#[allow(dead_code)]
164#[derive(Debug, Clone, Copy)]
165struct FreeBlock {
166    /// Address of the block
167    addr: usize,
168    /// Next free block in the list (0 = end)
169    next: usize,
170}
171
172/// Statistics for the buddy allocator
173#[derive(Debug, Default)]
174pub struct BuddyStats {
175    /// Total allocations
176    pub allocations: AtomicU64,
177    /// Total deallocations
178    pub deallocations: AtomicU64,
179    /// Current allocated bytes
180    pub allocated_bytes: AtomicUsize,
181    /// Peak allocated bytes
182    pub peak_allocated_bytes: AtomicUsize,
183    /// Number of splits
184    pub splits: AtomicU64,
185    /// Number of merges
186    pub merges: AtomicU64,
187    /// Failed allocations
188    pub failed_allocations: AtomicU64,
189}
190
191impl BuddyStats {
192    pub fn new() -> Self {
193        Self::default()
194    }
195
196    pub fn record_allocation(&self, size: usize) {
197        self.allocations.fetch_add(1, Ordering::Relaxed);
198        let old = self.allocated_bytes.fetch_add(size, Ordering::Relaxed);
199        let new = old + size;
200
201        // Update peak if necessary
202        let mut current_peak = self.peak_allocated_bytes.load(Ordering::Relaxed);
203        while new > current_peak {
204            match self.peak_allocated_bytes.compare_exchange_weak(
205                current_peak,
206                new,
207                Ordering::Relaxed,
208                Ordering::Relaxed,
209            ) {
210                Ok(_) => break,
211                Err(p) => current_peak = p,
212            }
213        }
214    }
215
216    pub fn record_deallocation(&self, size: usize) {
217        self.deallocations.fetch_add(1, Ordering::Relaxed);
218        self.allocated_bytes.fetch_sub(size, Ordering::Relaxed);
219    }
220
221    pub fn record_split(&self) {
222        self.splits.fetch_add(1, Ordering::Relaxed);
223    }
224
225    pub fn record_merge(&self) {
226        self.merges.fetch_add(1, Ordering::Relaxed);
227    }
228
229    pub fn record_failed_allocation(&self) {
230        self.failed_allocations.fetch_add(1, Ordering::Relaxed);
231    }
232}
233
234/// Calculate the order (log2) for a given size
235#[inline]
236pub fn size_to_order(size: usize) -> u8 {
237    if size <= MIN_BLOCK_SIZE {
238        return 4; // 2^4 = 16
239    }
240
241    // Round up to next power of 2
242    let bits = (size - 1).leading_zeros();
243    (usize::BITS - bits) as u8
244}
245
246/// Calculate the size for a given order
247#[inline]
248pub fn order_to_size(order: u8) -> usize {
249    1 << order
250}
251
252/// Calculate buddy address for a given address and order
253#[inline]
254pub fn buddy_addr(addr: usize, order: u8) -> usize {
255    addr ^ (1 << order)
256}
257
258/// A single memory pool managed by the buddy allocator
259pub struct MemoryPool {
260    /// Base address of the pool
261    base: usize,
262    /// Total size of the pool
263    size: usize,
264    /// Maximum order (log2 of pool size)
265    max_order: u8,
266    /// Minimum order (log2 of minimum block size)
267    min_order: u8,
268    /// Free lists for each order (index = order - min_order)
269    free_lists: Vec<Mutex<Vec<usize>>>,
270    /// Map of allocated blocks: address -> order
271    allocated: RwLock<HashMap<usize, u8>>,
272    /// Statistics
273    stats: BuddyStats,
274}
275
276impl MemoryPool {
277    /// Create a new memory pool with the given base address and size
278    pub fn new(base: usize, size: usize) -> Result<Self, BuddyError> {
279        if size == 0 {
280            return Err(BuddyError::ZeroSize);
281        }
282
283        // Ensure size is power of 2
284        if !size.is_power_of_two() {
285            return Err(BuddyError::SizeTooLarge(size));
286        }
287
288        let max_order = size_to_order(size);
289        let min_order = size_to_order(MIN_BLOCK_SIZE);
290
291        let num_orders = (max_order - min_order + 1) as usize;
292        let mut free_lists = Vec::with_capacity(num_orders);
293
294        for _ in 0..num_orders {
295            free_lists.push(Mutex::new(Vec::new()));
296        }
297
298        // Initialize with one block of maximum size
299        free_lists[num_orders - 1].lock().push(base);
300
301        Ok(Self {
302            base,
303            size,
304            max_order,
305            min_order,
306            free_lists,
307            allocated: RwLock::new(HashMap::new()),
308            stats: BuddyStats::new(),
309        })
310    }
311
312    /// Get the index into free_lists for a given order
313    fn list_index(&self, order: u8) -> usize {
314        (order - self.min_order) as usize
315    }
316
317    /// Allocate a block of the given size
318    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
319        if size == 0 {
320            return Err(BuddyError::ZeroSize);
321        }
322
323        let required_order = size_to_order(size);
324
325        if required_order > self.max_order {
326            self.stats.record_failed_allocation();
327            return Err(BuddyError::SizeTooLarge(size));
328        }
329
330        // Find the smallest available block that fits
331        let mut found_order = None;
332        for order in required_order..=self.max_order {
333            let idx = self.list_index(order);
334            let list = self.free_lists[idx].lock();
335            if !list.is_empty() {
336                found_order = Some(order);
337                break;
338            }
339        }
340
341        let available_order = match found_order {
342            Some(o) => o,
343            None => {
344                self.stats.record_failed_allocation();
345                return Err(BuddyError::OutOfMemory);
346            }
347        };
348
349        // Pop a block from the found level
350        let addr = {
351            let idx = self.list_index(available_order);
352            self.free_lists[idx]
353                .lock()
354                .pop()
355                .ok_or(BuddyError::OutOfMemory)?
356        };
357
358        // Split down to the required size
359        let final_addr = self.split_block(addr, available_order, required_order);
360
361        // Mark as allocated
362        self.allocated.write().insert(final_addr, required_order);
363        self.stats.record_allocation(order_to_size(required_order));
364
365        Ok(final_addr)
366    }
367
368    /// Split a block from current_order down to target_order
369    fn split_block(&self, addr: usize, current_order: u8, target_order: u8) -> usize {
370        let mut current_addr = addr;
371        let mut order = current_order;
372
373        while order > target_order {
374            order -= 1;
375            self.stats.record_split();
376
377            // Calculate buddy address
378            let buddy = buddy_addr(current_addr, order);
379
380            // Add buddy to free list
381            let idx = self.list_index(order);
382            self.free_lists[idx].lock().push(buddy);
383
384            // Keep the lower address
385            if buddy < current_addr {
386                current_addr = buddy;
387            }
388        }
389
390        current_addr
391    }
392
393    /// Free a previously allocated block
394    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
395        // Get the order of this block
396        let order = {
397            let mut allocated = self.allocated.write();
398            allocated
399                .remove(&addr)
400                .ok_or(BuddyError::InvalidAddress(addr))?
401        };
402
403        self.stats.record_deallocation(order_to_size(order));
404
405        // Try to merge with buddy
406        self.merge_block(addr, order);
407
408        Ok(())
409    }
410
411    /// Merge a freed block with its buddy if possible
412    fn merge_block(&self, addr: usize, order: u8) {
413        let mut current_addr = addr;
414        let mut current_order = order;
415
416        while current_order < self.max_order {
417            let buddy = buddy_addr(current_addr, current_order);
418
419            // Check if buddy is in the free list
420            let idx = self.list_index(current_order);
421            let mut list = self.free_lists[idx].lock();
422
423            if let Some(pos) = list.iter().position(|&a| a == buddy) {
424                // Remove buddy from free list
425                list.swap_remove(pos);
426                drop(list);
427
428                self.stats.record_merge();
429
430                // Use lower address as the new block
431                current_addr = current_addr.min(buddy);
432                current_order += 1;
433            } else {
434                // Buddy not free, add current block to free list
435                list.push(current_addr);
436                return;
437            }
438        }
439
440        // Reached max order, add to top-level free list
441        let idx = self.list_index(current_order);
442        self.free_lists[idx].lock().push(current_addr);
443    }
444
445    /// Check if an address is within this pool
446    pub fn contains(&self, addr: usize) -> bool {
447        addr >= self.base && addr < self.base + self.size
448    }
449
450    /// Get pool statistics
451    pub fn stats(&self) -> &BuddyStats {
452        &self.stats
453    }
454
455    /// Get the number of free blocks at each order
456    pub fn free_block_counts(&self) -> Vec<(u8, usize)> {
457        let mut counts = Vec::new();
458        for order in self.min_order..=self.max_order {
459            let idx = self.list_index(order);
460            let count = self.free_lists[idx].lock().len();
461            counts.push((order, count));
462        }
463        counts
464    }
465
466    /// Get total free bytes
467    pub fn free_bytes(&self) -> usize {
468        let mut total = 0;
469        for order in self.min_order..=self.max_order {
470            let idx = self.list_index(order);
471            let count = self.free_lists[idx].lock().len();
472            total += count * order_to_size(order);
473        }
474        total
475    }
476}
477
478/// Multi-pool buddy allocator
479pub struct BuddyAllocator {
480    /// Memory pools
481    pools: RwLock<Vec<MemoryPool>>,
482    /// Default pool size for new pools
483    default_pool_size: usize,
484    /// Next pool base address (for virtual addressing)
485    next_base: AtomicUsize,
486    /// Global statistics
487    stats: BuddyStats,
488}
489
490impl BuddyAllocator {
491    /// Create a new buddy allocator with default settings
492    pub fn new() -> Self {
493        Self::with_pool_size(DEFAULT_POOL_SIZE)
494    }
495
496    /// Create a new buddy allocator with the specified default pool size
497    pub fn with_pool_size(pool_size: usize) -> Self {
498        // Round up to power of 2
499        let pool_size = pool_size.next_power_of_two();
500
501        Self {
502            pools: RwLock::new(Vec::new()),
503            default_pool_size: pool_size,
504            next_base: AtomicUsize::new(0x1000), // Start after null page
505            stats: BuddyStats::new(),
506        }
507    }
508
509    /// Add a new memory pool
510    pub fn add_pool(&self, size: usize) -> Result<usize, BuddyError> {
511        let size = size.next_power_of_two();
512        let base = self.next_base.fetch_add(size, Ordering::SeqCst);
513
514        let pool = MemoryPool::new(base, size)?;
515        let pool_id = {
516            let mut pools = self.pools.write();
517            let id = pools.len();
518            pools.push(pool);
519            id
520        };
521
522        Ok(pool_id)
523    }
524
525    /// Ensure at least one pool exists
526    fn ensure_pool(&self) -> Result<(), BuddyError> {
527        let pools = self.pools.read();
528        if pools.is_empty() {
529            drop(pools);
530            self.add_pool(self.default_pool_size)?;
531        }
532        Ok(())
533    }
534
535    /// Allocate memory
536    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
537        if size == 0 {
538            return Err(BuddyError::ZeroSize);
539        }
540
541        self.ensure_pool()?;
542
543        // Try each pool
544        let pools = self.pools.read();
545        for pool in pools.iter() {
546            if let Ok(addr) = pool.allocate(size) {
547                self.stats
548                    .record_allocation(order_to_size(size_to_order(size)));
549                return Ok(addr);
550            }
551        }
552
553        // No pool had space, try to add a new one
554        drop(pools);
555
556        let required_size = size.next_power_of_two().max(self.default_pool_size);
557        self.add_pool(required_size)?;
558
559        let pools = self.pools.read();
560        if let Some(pool) = pools.last()
561            && let Ok(addr) = pool.allocate(size)
562        {
563            self.stats
564                .record_allocation(order_to_size(size_to_order(size)));
565            return Ok(addr);
566        }
567
568        self.stats.record_failed_allocation();
569        Err(BuddyError::OutOfMemory)
570    }
571
572    /// Deallocate memory
573    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
574        let pools = self.pools.read();
575
576        for pool in pools.iter() {
577            // Check if this pool has this address allocated
578            let has_addr = pool.allocated.read().contains_key(&addr);
579            if has_addr {
580                let order = {
581                    let allocated = pool.allocated.read();
582                    allocated.get(&addr).copied()
583                };
584
585                if let Some(order) = order {
586                    self.stats.record_deallocation(order_to_size(order));
587                }
588
589                return pool.deallocate(addr);
590            }
591        }
592
593        Err(BuddyError::InvalidAddress(addr))
594    }
595
596    /// Get global statistics
597    pub fn stats(&self) -> &BuddyStats {
598        &self.stats
599    }
600
601    /// Get total free bytes across all pools
602    pub fn total_free_bytes(&self) -> usize {
603        let pools = self.pools.read();
604        pools.iter().map(|p| p.free_bytes()).sum()
605    }
606
607    /// Get number of pools
608    pub fn pool_count(&self) -> usize {
609        self.pools.read().len()
610    }
611}
612
613impl Default for BuddyAllocator {
614    fn default() -> Self {
615        Self::new()
616    }
617}
618
619/// A typed buddy allocator for allocating objects of a specific type
620pub struct TypedBuddyAllocator<T> {
621    allocator: BuddyAllocator,
622    _marker: std::marker::PhantomData<T>,
623}
624
625impl<T> TypedBuddyAllocator<T> {
626    /// Create a new typed allocator
627    pub fn new() -> Self {
628        Self {
629            allocator: BuddyAllocator::new(),
630            _marker: std::marker::PhantomData,
631        }
632    }
633
634    /// Allocate space for one T
635    pub fn allocate_one(&self) -> Result<usize, BuddyError> {
636        self.allocator.allocate(std::mem::size_of::<T>())
637    }
638
639    /// Allocate space for N elements
640    pub fn allocate_array(&self, count: usize) -> Result<usize, BuddyError> {
641        self.allocator.allocate(std::mem::size_of::<T>() * count)
642    }
643
644    /// Deallocate
645    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
646        self.allocator.deallocate(addr)
647    }
648}
649
650impl<T> Default for TypedBuddyAllocator<T> {
651    fn default() -> Self {
652        Self::new()
653    }
654}
655
656/// Slab allocator built on top of buddy allocator for fixed-size objects
657pub struct SlabAllocator {
658    /// Object size
659    object_size: usize,
660    /// Objects per slab
661    objects_per_slab: usize,
662    /// Underlying buddy allocator
663    buddy: BuddyAllocator,
664    /// Free object lists per slab: slab_base -> list of free offsets
665    slabs: RwLock<HashMap<usize, Vec<usize>>>,
666    /// Allocated objects: addr -> slab_base
667    allocated: RwLock<HashMap<usize, usize>>,
668    /// Statistics
669    stats: BuddyStats,
670}
671
672impl SlabAllocator {
673    /// Create a new slab allocator for objects of the given size
674    pub fn new(object_size: usize) -> Self {
675        // Round up object size to minimum alignment
676        let object_size = object_size.max(8).next_power_of_two();
677
678        // Calculate objects per slab (aim for ~4KB slabs)
679        let slab_size = 4096usize;
680        let objects_per_slab = slab_size / object_size;
681
682        Self {
683            object_size,
684            objects_per_slab: objects_per_slab.max(1),
685            buddy: BuddyAllocator::new(),
686            slabs: RwLock::new(HashMap::new()),
687            allocated: RwLock::new(HashMap::new()),
688            stats: BuddyStats::new(),
689        }
690    }
691
692    /// Allocate one object
693    pub fn allocate(&self) -> Result<usize, BuddyError> {
694        // Try to find a slab with free objects
695        {
696            let mut slabs = self.slabs.write();
697            for (base, free_list) in slabs.iter_mut() {
698                if let Some(offset) = free_list.pop() {
699                    let addr = base + offset;
700                    self.allocated.write().insert(addr, *base);
701                    self.stats.record_allocation(self.object_size);
702                    return Ok(addr);
703                }
704            }
705        }
706
707        // No free objects, allocate a new slab
708        let slab_size = self.object_size * self.objects_per_slab;
709        let slab_base = self.buddy.allocate(slab_size)?;
710
711        // Initialize free list for new slab (skip first object, return it)
712        let mut free_list = Vec::with_capacity(self.objects_per_slab - 1);
713        for i in 1..self.objects_per_slab {
714            free_list.push(i * self.object_size);
715        }
716
717        self.slabs.write().insert(slab_base, free_list);
718        self.allocated.write().insert(slab_base, slab_base);
719        self.stats.record_allocation(self.object_size);
720
721        Ok(slab_base)
722    }
723
724    /// Deallocate an object
725    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
726        let slab_base = self
727            .allocated
728            .write()
729            .remove(&addr)
730            .ok_or(BuddyError::InvalidAddress(addr))?;
731
732        let offset = addr - slab_base;
733        self.slabs
734            .write()
735            .get_mut(&slab_base)
736            .ok_or(BuddyError::BlockNotFound(slab_base))?
737            .push(offset);
738
739        self.stats.record_deallocation(self.object_size);
740
741        Ok(())
742    }
743
744    /// Get statistics
745    pub fn stats(&self) -> &BuddyStats {
746        &self.stats
747    }
748
749    /// Get object size
750    pub fn object_size(&self) -> usize {
751        self.object_size
752    }
753}
754
755/// Arena allocator using buddy allocator for backing storage
756pub struct BuddyArena {
757    /// Underlying buddy allocator
758    buddy: BuddyAllocator,
759    /// Current arena block
760    current_block: Mutex<Option<ArenaBlock>>,
761    /// Block size
762    block_size: usize,
763    /// All allocated block addresses for cleanup
764    blocks: RwLock<Vec<usize>>,
765}
766
767struct ArenaBlock {
768    base: usize,
769    offset: usize,
770    size: usize,
771}
772
773impl BuddyArena {
774    /// Create a new arena with the specified block size
775    pub fn new(block_size: usize) -> Self {
776        let block_size = block_size.next_power_of_two();
777
778        Self {
779            buddy: BuddyAllocator::new(),
780            current_block: Mutex::new(None),
781            block_size,
782            blocks: RwLock::new(Vec::new()),
783        }
784    }
785
786    /// Allocate from the arena (bump allocation)
787    pub fn allocate(&self, size: usize, align: usize) -> Result<usize, BuddyError> {
788        if size == 0 {
789            return Err(BuddyError::ZeroSize);
790        }
791
792        let mut current = self.current_block.lock();
793
794        // Try to allocate from current block
795        if let Some(ref mut block) = *current {
796            let aligned_offset = (block.offset + align - 1) & !(align - 1);
797            if aligned_offset + size <= block.size {
798                block.offset = aligned_offset + size;
799                return Ok(block.base + aligned_offset);
800            }
801        }
802
803        // Need a new block
804        let new_size = size.max(self.block_size).next_power_of_two();
805        let base = self.buddy.allocate(new_size)?;
806
807        self.blocks.write().push(base);
808
809        let aligned_offset = (align - 1) & !(align - 1);
810        *current = Some(ArenaBlock {
811            base,
812            offset: aligned_offset + size,
813            size: new_size,
814        });
815
816        Ok(base + aligned_offset)
817    }
818
819    /// Allocate a value and return its address
820    pub fn allocate_val<T>(&self, _val: T) -> Result<usize, BuddyError> {
821        self.allocate(std::mem::size_of::<T>(), std::mem::align_of::<T>())
822    }
823
824    /// Reset the arena (free all blocks)
825    pub fn reset(&self) {
826        let mut current = self.current_block.lock();
827        *current = None;
828
829        let blocks = std::mem::take(&mut *self.blocks.write());
830        for block in blocks {
831            let _ = self.buddy.deallocate(block);
832        }
833    }
834
835    /// Get total allocated size
836    pub fn allocated_size(&self) -> usize {
837        self.blocks.read().len() * self.block_size
838    }
839}
840
841impl Drop for BuddyArena {
842    fn drop(&mut self) {
843        self.reset();
844    }
845}
846
847#[cfg(test)]
848mod tests {
849    use super::*;
850
851    #[test]
852    fn test_size_to_order() {
853        assert_eq!(size_to_order(1), 4); // min = 16
854        assert_eq!(size_to_order(16), 4); // 2^4 = 16
855        assert_eq!(size_to_order(17), 5); // needs 2^5 = 32
856        assert_eq!(size_to_order(32), 5);
857        assert_eq!(size_to_order(64), 6);
858        assert_eq!(size_to_order(1024), 10);
859        assert_eq!(size_to_order(1025), 11); // needs 2048
860    }
861
862    #[test]
863    fn test_order_to_size() {
864        assert_eq!(order_to_size(4), 16);
865        assert_eq!(order_to_size(5), 32);
866        assert_eq!(order_to_size(10), 1024);
867        assert_eq!(order_to_size(20), 1 << 20);
868    }
869
870    #[test]
871    fn test_buddy_addr() {
872        // For order 4 (size 16):
873        // addr 0 -> buddy 16, addr 16 -> buddy 0
874        assert_eq!(buddy_addr(0, 4), 16);
875        assert_eq!(buddy_addr(16, 4), 0);
876
877        // For order 5 (size 32):
878        assert_eq!(buddy_addr(0, 5), 32);
879        assert_eq!(buddy_addr(32, 5), 0);
880        assert_eq!(buddy_addr(64, 5), 96);
881        assert_eq!(buddy_addr(96, 5), 64);
882    }
883
884    #[test]
885    fn test_memory_pool_basic() {
886        let pool = MemoryPool::new(0, 1024).unwrap();
887
888        // Allocate 16 bytes (min size)
889        let addr1 = pool.allocate(16).unwrap();
890        assert!(pool.contains(addr1));
891
892        // Allocate another 16 bytes
893        let addr2 = pool.allocate(16).unwrap();
894        assert_ne!(addr1, addr2);
895
896        // Free both
897        pool.deallocate(addr1).unwrap();
898        pool.deallocate(addr2).unwrap();
899    }
900
901    #[test]
902    fn test_memory_pool_splitting() {
903        let pool = MemoryPool::new(0, 256).unwrap();
904
905        // Pool starts with one 256-byte block
906        // Allocating 16 bytes should split multiple times
907        let addr = pool.allocate(16).unwrap();
908
909        // Check that splits occurred
910        assert!(pool.stats().splits.load(Ordering::Relaxed) > 0);
911
912        pool.deallocate(addr).unwrap();
913    }
914
915    #[test]
916    fn test_memory_pool_merging() {
917        let pool = MemoryPool::new(0, 256).unwrap();
918
919        // Allocate two adjacent 16-byte blocks
920        let addr1 = pool.allocate(16).unwrap();
921        let addr2 = pool.allocate(16).unwrap();
922
923        // Free both - should trigger merging
924        pool.deallocate(addr1).unwrap();
925        pool.deallocate(addr2).unwrap();
926
927        // Check that merges occurred
928        assert!(pool.stats().merges.load(Ordering::Relaxed) > 0);
929
930        // Should be able to allocate the full pool again
931        let addr3 = pool.allocate(256).unwrap();
932        assert_eq!(addr3, 0);
933    }
934
935    #[test]
936    fn test_memory_pool_out_of_memory() {
937        let pool = MemoryPool::new(0, 256).unwrap();
938
939        // Fill up the pool
940        let mut addrs = Vec::new();
941        for _ in 0..16 {
942            addrs.push(pool.allocate(16).unwrap());
943        }
944
945        // Should fail now
946        assert!(matches!(pool.allocate(16), Err(BuddyError::OutOfMemory)));
947
948        // Free one and try again
949        pool.deallocate(addrs.pop().unwrap()).unwrap();
950        assert!(pool.allocate(16).is_ok());
951    }
952
953    #[test]
954    fn test_buddy_allocator_basic() {
955        let alloc = BuddyAllocator::new();
956
957        let addr1 = alloc.allocate(100).unwrap();
958        let addr2 = alloc.allocate(200).unwrap();
959
960        assert_ne!(addr1, addr2);
961
962        alloc.deallocate(addr1).unwrap();
963        alloc.deallocate(addr2).unwrap();
964    }
965
966    #[test]
967    fn test_buddy_allocator_auto_pool() {
968        let alloc = BuddyAllocator::with_pool_size(1024);
969
970        assert_eq!(alloc.pool_count(), 0);
971
972        let _ = alloc.allocate(16).unwrap();
973
974        assert_eq!(alloc.pool_count(), 1);
975    }
976
977    #[test]
978    fn test_buddy_allocator_multiple_pools() {
979        let alloc = BuddyAllocator::with_pool_size(256);
980
981        // Allocate more than one pool can hold
982        let mut addrs = Vec::new();
983        for _ in 0..32 {
984            addrs.push(alloc.allocate(16).unwrap());
985        }
986
987        // Should have multiple pools now
988        assert!(alloc.pool_count() > 1);
989
990        // Free all
991        for addr in addrs {
992            alloc.deallocate(addr).unwrap();
993        }
994    }
995
996    #[test]
997    fn test_typed_allocator() {
998        #[repr(C)]
999        struct MyStruct {
1000            a: u64,
1001            b: u32,
1002            c: u16,
1003        }
1004
1005        let alloc = TypedBuddyAllocator::<MyStruct>::new();
1006
1007        let addr1 = alloc.allocate_one().unwrap();
1008        let addr2 = alloc.allocate_array(10).unwrap();
1009
1010        assert_ne!(addr1, addr2);
1011
1012        alloc.deallocate(addr1).unwrap();
1013        alloc.deallocate(addr2).unwrap();
1014    }
1015
1016    #[test]
1017    fn test_slab_allocator() {
1018        let slab = SlabAllocator::new(32);
1019
1020        // Allocate several objects
1021        let mut addrs = Vec::new();
1022        for _ in 0..10 {
1023            addrs.push(slab.allocate().unwrap());
1024        }
1025
1026        // All addresses should be unique
1027        for i in 0..addrs.len() {
1028            for j in (i + 1)..addrs.len() {
1029                assert_ne!(addrs[i], addrs[j]);
1030            }
1031        }
1032
1033        // Free all
1034        for addr in addrs {
1035            slab.deallocate(addr).unwrap();
1036        }
1037    }
1038
1039    #[test]
1040    fn test_slab_reuse() {
1041        let slab = SlabAllocator::new(64);
1042
1043        let addr1 = slab.allocate().unwrap();
1044        slab.deallocate(addr1).unwrap();
1045
1046        let addr2 = slab.allocate().unwrap();
1047
1048        // Should reuse the same address
1049        assert_eq!(addr1, addr2);
1050    }
1051
1052    #[test]
1053    fn test_buddy_arena() {
1054        let arena = BuddyArena::new(4096);
1055
1056        // Allocate several items
1057        let addr1 = arena.allocate(100, 8).unwrap();
1058        let addr2 = arena.allocate(200, 16).unwrap();
1059        let addr3 = arena.allocate(50, 4).unwrap();
1060
1061        // All unique addresses
1062        assert_ne!(addr1, addr2);
1063        assert_ne!(addr2, addr3);
1064        assert_ne!(addr1, addr3);
1065
1066        // Check alignment
1067        assert_eq!(addr2 % 16, 0);
1068
1069        // Reset
1070        arena.reset();
1071
1072        // Can allocate again
1073        let _ = arena.allocate(100, 8).unwrap();
1074    }
1075
1076    #[test]
1077    fn test_arena_large_allocation() {
1078        let arena = BuddyArena::new(256);
1079
1080        // Allocate more than block size
1081        let result = arena.allocate(1024, 8);
1082        // Should succeed
1083        assert!(result.is_ok(), "Allocation failed: {:?}", result);
1084        let addr = result.unwrap();
1085        // Arena uses bump allocation from buddy allocator pool
1086        // The buddy allocator starts at base 0x1000
1087        println!("Arena allocated address: {:#x}", addr);
1088    }
1089
1090    #[test]
1091    fn test_stats_tracking() {
1092        let pool = MemoryPool::new(0, 1024).unwrap();
1093
1094        let addr = pool.allocate(64).unwrap();
1095
1096        assert!(pool.stats().allocations.load(Ordering::Relaxed) > 0);
1097        assert!(pool.stats().allocated_bytes.load(Ordering::Relaxed) > 0);
1098
1099        pool.deallocate(addr).unwrap();
1100
1101        assert!(pool.stats().deallocations.load(Ordering::Relaxed) > 0);
1102    }
1103
1104    #[test]
1105    fn test_free_bytes_tracking() {
1106        let pool = MemoryPool::new(0, 1024).unwrap();
1107
1108        let initial_free = pool.free_bytes();
1109        assert_eq!(initial_free, 1024);
1110
1111        let addr = pool.allocate(64).unwrap();
1112
1113        let after_alloc = pool.free_bytes();
1114        assert!(after_alloc < initial_free);
1115
1116        pool.deallocate(addr).unwrap();
1117
1118        let after_free = pool.free_bytes();
1119        assert_eq!(after_free, initial_free);
1120    }
1121
1122    #[test]
1123    fn test_double_free() {
1124        let pool = MemoryPool::new(0, 1024).unwrap();
1125
1126        let addr = pool.allocate(64).unwrap();
1127        pool.deallocate(addr).unwrap();
1128
1129        // Double free should fail
1130        assert!(matches!(
1131            pool.deallocate(addr),
1132            Err(BuddyError::InvalidAddress(_))
1133        ));
1134    }
1135
1136    #[test]
1137    fn test_invalid_address() {
1138        let pool = MemoryPool::new(0, 1024).unwrap();
1139
1140        // Try to free an address that was never allocated
1141        assert!(matches!(
1142            pool.deallocate(999),
1143            Err(BuddyError::InvalidAddress(_))
1144        ));
1145    }
1146
1147    #[test]
1148    fn test_concurrent_allocations() {
1149        use std::sync::Arc;
1150        use std::thread;
1151
1152        let alloc = Arc::new(BuddyAllocator::new());
1153        let mut handles = Vec::new();
1154
1155        // Use fewer threads and allocations to reduce complexity
1156        for _ in 0..2 {
1157            let alloc = Arc::clone(&alloc);
1158            handles.push(thread::spawn(move || {
1159                let mut addrs = Vec::new();
1160                for _ in 0..10 {
1161                    if let Ok(addr) = alloc.allocate(32) {
1162                        addrs.push(addr);
1163                    }
1164                }
1165                // Add a small delay between alloc and dealloc phases
1166                std::thread::sleep(std::time::Duration::from_millis(1));
1167                for addr in addrs {
1168                    let _ = alloc.deallocate(addr);
1169                }
1170            }));
1171        }
1172
1173        for handle in handles {
1174            handle.join().unwrap();
1175        }
1176    }
1177}