// sochdb_core/buddy_allocator.rs
// SPDX-License-Identifier: AGPL-3.0-or-later
// SochDB - LLM-Optimized Embedded Database
// Copyright (C) 2026 Sushanth Reddy Vanagala (https://github.com/sushanthpy)
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Buddy Allocator for SochDB Memory Management
//!
//! # DEPRECATION NOTICE
//!
//! **This module is deprecated and will be removed in a future release.**
//!
//! ## Recommendation: Use jemalloc Instead
//!
//! Enable the `jemalloc` feature in your `Cargo.toml`:
//!
//! ```toml
//! sochdb-core = { version = "...", features = ["jemalloc"] }
//! ```
//!
//! ### Why jemalloc is preferred:
//!
//! 1. **Production-proven**: Used by Firefox, Facebook, Redis, RocksDB
//! 2. **Thread-local caching**: Eliminates lock contention on hot paths
//! 3. **Better fragmentation handling**: Size-class based allocation
//! 4. **Automatic memory return**: Returns memory to OS via `madvise`
//! 5. **No maintenance burden**: Well-tested, battle-hardened code
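//!
//! If you wire jemalloc up yourself instead of using the feature, a minimal
//! sketch (assuming the `tikv-jemallocator` crate; the exact crate behind
//! the `jemalloc` feature may differ):
//!
//! ```rust,ignore
//! use tikv_jemallocator::Jemalloc;
//!
//! // Route every heap allocation in the process through jemalloc.
//! #[global_allocator]
//! static GLOBAL: Jemalloc = Jemalloc;
//! ```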
//!
//! ### Issues with this custom allocator:
//!
//! 1. **Virtual address tracking only**: Does not manage real memory
//! 2. **Lock contention**: RwLock on every allocation
//! 3. **No memory reclamation**: Never returns memory to OS
//! 4. **Internal fragmentation**: rounding requests up to a power of two wastes up to ~50% of a block in the worst case
//!
//! ## Migration Path
//!
//! If you are using `BuddyAllocator` or `SlabAllocator` directly:
//!
//! ```rust,ignore
//! // Before (deprecated):
//! let allocator = BuddyAllocator::with_pool_size(64 * 1024 * 1024);
//! let addr = allocator.allocate(1024)?;
//!
//! // After (recommended):
//! // Simply enable the jemalloc feature and use standard allocation:
//! let buffer: Vec<u8> = Vec::with_capacity(1024);
//! ```
//!
//! ## Original Documentation
//!
//! Implements a buddy system allocator for efficient power-of-2 memory block allocation.
//! Key features:
//! - O(1) allocation and deallocation for available blocks
//! - Automatic block splitting and merging
//! - Memory coalescing to reduce fragmentation
//! - Thread-safe with fine-grained locking
//! - Support for multiple memory pools
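//!
//! A minimal usage sketch of the allocator in this module (the returned
//! addresses are virtual bookkeeping offsets, not dereferenceable pointers):
//!
//! ```rust,ignore
//! let alloc = BuddyAllocator::new();
//! let addr = alloc.allocate(1024)?; // rounded up to a power of two
//! alloc.deallocate(addr)?;          // merges buddies back together
//! ```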

use parking_lot::{Mutex, RwLock};
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};

/// Minimum block size (16 bytes)
pub const MIN_BLOCK_SIZE: usize = 16;
/// Maximum block size (1 GB)
pub const MAX_BLOCK_SIZE: usize = 1 << 30;
/// Default pool size (64 MB)
pub const DEFAULT_POOL_SIZE: usize = 64 * 1024 * 1024;

/// Error types for buddy allocator operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum BuddyError {
    /// Requested size is too large
    SizeTooLarge(usize),
    /// Requested size is zero
    ZeroSize,
    /// No memory available
    OutOfMemory,
    /// Invalid address
    InvalidAddress(usize),
    /// Double free detected
    DoubleFree(usize),
    /// Block not found
    BlockNotFound(usize),
    /// Pool exhausted
    PoolExhausted,
}

/// Block header stored at the beginning of each block
#[derive(Debug, Clone, Copy)]
#[repr(C)]
pub struct BlockHeader {
    /// Magic number for validation
    magic: u32,
    /// Order (log2 of size)
    order: u8,
    /// Flags (allocated, etc.)
    flags: u8,
    /// Padding
    _padding: [u8; 2],
    /// Size of the block
    size: u32,
}

#[allow(dead_code)]
const BLOCK_MAGIC: u32 = 0xB0DD_1E5A;
#[allow(dead_code)]
const FLAG_ALLOCATED: u8 = 0x01;
#[allow(dead_code)]
const FLAG_SPLIT: u8 = 0x02;

#[allow(dead_code)]
impl BlockHeader {
    fn new(order: u8, size: u32) -> Self {
        Self {
            magic: BLOCK_MAGIC,
            order,
            flags: 0,
            _padding: [0; 2],
            size,
        }
    }

    fn is_valid(&self) -> bool {
        self.magic == BLOCK_MAGIC
    }

    fn is_allocated(&self) -> bool {
        self.flags & FLAG_ALLOCATED != 0
    }

    fn set_allocated(&mut self, allocated: bool) {
        if allocated {
            self.flags |= FLAG_ALLOCATED;
        } else {
            self.flags &= !FLAG_ALLOCATED;
        }
    }

    fn is_split(&self) -> bool {
        self.flags & FLAG_SPLIT != 0
    }

    fn set_split(&mut self, split: bool) {
        if split {
            self.flags |= FLAG_SPLIT;
        } else {
            self.flags &= !FLAG_SPLIT;
        }
    }
}

/// A free block in the buddy allocator
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
struct FreeBlock {
    /// Address of the block
    addr: usize,
    /// Next free block in the list (0 = end)
    next: usize,
}

/// Statistics for the buddy allocator
#[derive(Debug, Default)]
pub struct BuddyStats {
    /// Total allocations
    pub allocations: AtomicU64,
    /// Total deallocations
    pub deallocations: AtomicU64,
    /// Current allocated bytes
    pub allocated_bytes: AtomicUsize,
    /// Peak allocated bytes
    pub peak_allocated_bytes: AtomicUsize,
    /// Number of splits
    pub splits: AtomicU64,
    /// Number of merges
    pub merges: AtomicU64,
    /// Failed allocations
    pub failed_allocations: AtomicU64,
}

impl BuddyStats {
    pub fn new() -> Self {
        Self::default()
    }

    pub fn record_allocation(&self, size: usize) {
        self.allocations.fetch_add(1, Ordering::Relaxed);
        let old = self.allocated_bytes.fetch_add(size, Ordering::Relaxed);
        let new = old + size;

        // Update peak if necessary
        let mut current_peak = self.peak_allocated_bytes.load(Ordering::Relaxed);
        while new > current_peak {
            match self.peak_allocated_bytes.compare_exchange_weak(
                current_peak,
                new,
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(p) => current_peak = p,
            }
        }
    }

    pub fn record_deallocation(&self, size: usize) {
        self.deallocations.fetch_add(1, Ordering::Relaxed);
        self.allocated_bytes.fetch_sub(size, Ordering::Relaxed);
    }

    pub fn record_split(&self) {
        self.splits.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_merge(&self) {
        self.merges.fetch_add(1, Ordering::Relaxed);
    }

    pub fn record_failed_allocation(&self) {
        self.failed_allocations.fetch_add(1, Ordering::Relaxed);
    }
}

/// Calculate the order (log2) for a given size
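///
/// Sizes are rounded up to the next power of two, e.g.:
///
/// ```rust,ignore
/// assert_eq!(size_to_order(16), 4);   // 2^4 = 16
/// assert_eq!(size_to_order(17), 5);   // rounds up to 32
/// assert_eq!(size_to_order(1024), 10);
/// ```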
#[inline]
pub fn size_to_order(size: usize) -> u8 {
    if size <= MIN_BLOCK_SIZE {
        return MIN_BLOCK_SIZE.trailing_zeros() as u8; // 2^4 = 16
    }

    // Round up to the next power of two
    let bits = (size - 1).leading_zeros();
    (usize::BITS - bits) as u8
}

/// Calculate the size for a given order
#[inline]
pub fn order_to_size(order: u8) -> usize {
    1 << order
}

/// Calculate buddy address for a given address and order
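///
/// The buddy of a block is the other half of its parent block, found by
/// flipping the bit at the block's order:
///
/// ```rust,ignore
/// assert_eq!(buddy_addr(0, 4), 16);  // 16-byte blocks: 0 <-> 16
/// assert_eq!(buddy_addr(96, 5), 64); // 32-byte blocks: 64 <-> 96
/// ```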
#[inline]
pub fn buddy_addr(addr: usize, order: u8) -> usize {
    addr ^ (1 << order)
}

/// A single memory pool managed by the buddy allocator
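///
/// A minimal sketch of direct pool use (base and size as in the tests below):
///
/// ```rust,ignore
/// let pool = MemoryPool::new(0, 1024)?;
/// let addr = pool.allocate(64)?;
/// pool.deallocate(addr)?;
/// assert_eq!(pool.free_bytes(), 1024);
/// ```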
pub struct MemoryPool {
    /// Base address of the pool
    base: usize,
    /// Total size of the pool
    size: usize,
    /// Maximum order (log2 of pool size)
    max_order: u8,
    /// Minimum order (log2 of minimum block size)
    min_order: u8,
    /// Free lists for each order (index = order - min_order)
    free_lists: Vec<Mutex<Vec<usize>>>,
    /// Map of allocated blocks: address -> order
    allocated: RwLock<HashMap<usize, u8>>,
    /// Statistics
    stats: BuddyStats,
}

impl MemoryPool {
    /// Create a new memory pool with the given base address and size
    pub fn new(base: usize, size: usize) -> Result<Self, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        // Ensure size is power of 2
        if !size.is_power_of_two() {
            return Err(BuddyError::SizeTooLarge(size));
        }

        let max_order = size_to_order(size);
        let min_order = size_to_order(MIN_BLOCK_SIZE);

        let num_orders = (max_order - min_order + 1) as usize;
        let mut free_lists = Vec::with_capacity(num_orders);

        for _ in 0..num_orders {
            free_lists.push(Mutex::new(Vec::new()));
        }

        // Initialize with one block of maximum size
        free_lists[num_orders - 1].lock().push(base);

        Ok(Self {
            base,
            size,
            max_order,
            min_order,
            free_lists,
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        })
    }

    /// Get the index into free_lists for a given order
    fn list_index(&self, order: u8) -> usize {
        (order - self.min_order) as usize
    }

    /// Allocate a block of the given size
    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        let required_order = size_to_order(size);

        if required_order > self.max_order {
            self.stats.record_failed_allocation();
            return Err(BuddyError::SizeTooLarge(size));
        }

        // Find the smallest available block that fits
        let mut found_order = None;
        for order in required_order..=self.max_order {
            let idx = self.list_index(order);
            let list = self.free_lists[idx].lock();
            if !list.is_empty() {
                found_order = Some(order);
                break;
            }
        }

        let available_order = match found_order {
            Some(o) => o,
            None => {
                self.stats.record_failed_allocation();
                return Err(BuddyError::OutOfMemory);
            }
        };

        // Pop a block from the found level
        let addr = {
            let idx = self.list_index(available_order);
            self.free_lists[idx]
                .lock()
                .pop()
                .ok_or(BuddyError::OutOfMemory)?
        };

        // Split down to the required size
        let final_addr = self.split_block(addr, available_order, required_order);

        // Mark as allocated
        self.allocated.write().insert(final_addr, required_order);
        self.stats.record_allocation(order_to_size(required_order));

        Ok(final_addr)
    }

    /// Split a block from current_order down to target_order
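    ///
    /// For example, carving a 16-byte block out of a free 256-byte block
    /// splits four times, pushing free buddies of 128, 64, 32, and 16 bytes.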
    fn split_block(&self, addr: usize, current_order: u8, target_order: u8) -> usize {
        let mut order = current_order;

        while order > target_order {
            order -= 1;
            self.stats.record_split();

            // Calculate the buddy relative to the pool base so the XOR
            // arithmetic holds even when the base is not aligned to the
            // block size. The retained block keeps its base-relative
            // alignment, so the buddy is always the upper half.
            let buddy = self.base + buddy_addr(addr - self.base, order);

            // Add the upper half to the free list; keep the lower half
            let idx = self.list_index(order);
            self.free_lists[idx].lock().push(buddy);
        }

        addr
    }

    /// Free a previously allocated block
    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        // Get the order of this block
        let order = {
            let mut allocated = self.allocated.write();
            allocated
                .remove(&addr)
                .ok_or(BuddyError::InvalidAddress(addr))?
        };

        self.stats.record_deallocation(order_to_size(order));

        // Try to merge with buddy
        self.merge_block(addr, order);

        Ok(())
    }

    /// Merge a freed block with its buddy if possible
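    ///
    /// Freeing one half of a split pair while the other half is already free
    /// removes the buddy from its free list and re-inserts the combined block
    /// one order higher, cascading upward as long as both halves stay free.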
    fn merge_block(&self, addr: usize, order: u8) {
        let mut current_addr = addr;
        let mut current_order = order;

        while current_order < self.max_order {
            // Compute the buddy relative to the pool base so the XOR works
            // even when the pool base is not aligned to the block size
            let buddy = self.base + buddy_addr(current_addr - self.base, current_order);

            // Check if buddy is in the free list
            let idx = self.list_index(current_order);
            let mut list = self.free_lists[idx].lock();

            if let Some(pos) = list.iter().position(|&a| a == buddy) {
                // Remove buddy from free list
                list.swap_remove(pos);
                drop(list);

                self.stats.record_merge();

                // Use lower address as the new block
                current_addr = current_addr.min(buddy);
                current_order += 1;
            } else {
                // Buddy not free, add current block to free list
                list.push(current_addr);
                return;
            }
        }

        // Reached max order, add to top-level free list
        let idx = self.list_index(current_order);
        self.free_lists[idx].lock().push(current_addr);
    }

    /// Check if an address is within this pool
    pub fn contains(&self, addr: usize) -> bool {
        addr >= self.base && addr < self.base + self.size
    }

    /// Get pool statistics
    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    /// Get the number of free blocks at each order
    pub fn free_block_counts(&self) -> Vec<(u8, usize)> {
        let mut counts = Vec::new();
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            counts.push((order, count));
        }
        counts
    }

    /// Get total free bytes
    pub fn free_bytes(&self) -> usize {
        let mut total = 0;
        for order in self.min_order..=self.max_order {
            let idx = self.list_index(order);
            let count = self.free_lists[idx].lock().len();
            total += count * order_to_size(order);
        }
        total
    }
}

/// Multi-pool buddy allocator
pub struct BuddyAllocator {
    /// Memory pools
    pools: RwLock<Vec<MemoryPool>>,
    /// Default pool size for new pools
    default_pool_size: usize,
    /// Next pool base address (for virtual addressing)
    next_base: AtomicUsize,
    /// Global statistics
    stats: BuddyStats,
}

impl BuddyAllocator {
    /// Create a new buddy allocator with default settings
    pub fn new() -> Self {
        Self::with_pool_size(DEFAULT_POOL_SIZE)
    }

    /// Create a new buddy allocator with the specified default pool size
    pub fn with_pool_size(pool_size: usize) -> Self {
        // Round up to power of 2
        let pool_size = pool_size.next_power_of_two();

        Self {
            pools: RwLock::new(Vec::new()),
            default_pool_size: pool_size,
            next_base: AtomicUsize::new(0x1000), // Start after null page
            stats: BuddyStats::new(),
        }
    }

    /// Add a new memory pool
    pub fn add_pool(&self, size: usize) -> Result<usize, BuddyError> {
        let size = size.next_power_of_two();
        let base = self.next_base.fetch_add(size, Ordering::SeqCst);

        let pool = MemoryPool::new(base, size)?;
        let pool_id = {
            let mut pools = self.pools.write();
            let id = pools.len();
            pools.push(pool);
            id
        };

        Ok(pool_id)
    }

    /// Ensure at least one pool exists
    fn ensure_pool(&self) -> Result<(), BuddyError> {
        let pools = self.pools.read();
        if pools.is_empty() {
            drop(pools);
            self.add_pool(self.default_pool_size)?;
        }
        Ok(())
    }

    /// Allocate memory
    pub fn allocate(&self, size: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        self.ensure_pool()?;

        // Try each pool
        let pools = self.pools.read();
        for pool in pools.iter() {
            if let Ok(addr) = pool.allocate(size) {
                self.stats
                    .record_allocation(order_to_size(size_to_order(size)));
                return Ok(addr);
            }
        }

        // No pool had space, try to add a new one
        drop(pools);

        let required_size = size.next_power_of_two().max(self.default_pool_size);
        self.add_pool(required_size)?;

        let pools = self.pools.read();
        if let Some(pool) = pools.last()
            && let Ok(addr) = pool.allocate(size)
        {
            self.stats
                .record_allocation(order_to_size(size_to_order(size)));
            return Ok(addr);
        }

        self.stats.record_failed_allocation();
        Err(BuddyError::OutOfMemory)
    }

    /// Deallocate memory
    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let pools = self.pools.read();

        for pool in pools.iter() {
            // Look the address up once; a single read lock avoids the
            // check-then-read race of separate contains_key/get calls
            let order = pool.allocated.read().get(&addr).copied();
            if let Some(order) = order {
                self.stats.record_deallocation(order_to_size(order));
                return pool.deallocate(addr);
            }
        }

        Err(BuddyError::InvalidAddress(addr))
    }

    /// Get global statistics
    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    /// Get total free bytes across all pools
    pub fn total_free_bytes(&self) -> usize {
        let pools = self.pools.read();
        pools.iter().map(|p| p.free_bytes()).sum()
    }

    /// Get number of pools
    pub fn pool_count(&self) -> usize {
        self.pools.read().len()
    }
}

impl Default for BuddyAllocator {
    fn default() -> Self {
        Self::new()
    }
}

/// A typed buddy allocator for allocating objects of a specific type
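///
/// A minimal sketch:
///
/// ```rust,ignore
/// let alloc = TypedBuddyAllocator::<u64>::new();
/// let addr = alloc.allocate_array(16)?; // space for 16 u64 values
/// alloc.deallocate(addr)?;
/// ```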
pub struct TypedBuddyAllocator<T> {
    allocator: BuddyAllocator,
    _marker: std::marker::PhantomData<T>,
}

impl<T> TypedBuddyAllocator<T> {
    /// Create a new typed allocator
    pub fn new() -> Self {
        Self {
            allocator: BuddyAllocator::new(),
            _marker: std::marker::PhantomData,
        }
    }

    /// Allocate space for one T
    pub fn allocate_one(&self) -> Result<usize, BuddyError> {
        self.allocator.allocate(std::mem::size_of::<T>())
    }

    /// Allocate space for N elements
    pub fn allocate_array(&self, count: usize) -> Result<usize, BuddyError> {
        self.allocator.allocate(std::mem::size_of::<T>() * count)
    }

    /// Deallocate
    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        self.allocator.deallocate(addr)
    }
}

impl<T> Default for TypedBuddyAllocator<T> {
    fn default() -> Self {
        Self::new()
    }
}

/// Slab allocator built on top of buddy allocator for fixed-size objects
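///
/// Each ~4 KB slab is carved into fixed-size slots; freed slots are reused
/// before a new slab is requested from the buddy allocator. A minimal sketch:
///
/// ```rust,ignore
/// let slab = SlabAllocator::new(32);
/// let a = slab.allocate()?;
/// slab.deallocate(a)?;             // the slot goes back on the free list
/// assert_eq!(slab.allocate()?, a); // and is handed out again
/// ```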
pub struct SlabAllocator {
    /// Object size
    object_size: usize,
    /// Objects per slab
    objects_per_slab: usize,
    /// Underlying buddy allocator
    buddy: BuddyAllocator,
    /// Free object lists per slab: slab_base -> list of free offsets
    slabs: RwLock<HashMap<usize, Vec<usize>>>,
    /// Allocated objects: addr -> slab_base
    allocated: RwLock<HashMap<usize, usize>>,
    /// Statistics
    stats: BuddyStats,
}

impl SlabAllocator {
    /// Create a new slab allocator for objects of the given size
    pub fn new(object_size: usize) -> Self {
        // Round up object size to minimum alignment
        let object_size = object_size.max(8).next_power_of_two();

        // Calculate objects per slab (aim for ~4KB slabs)
        let slab_size = 4096usize;
        let objects_per_slab = slab_size / object_size;

        Self {
            object_size,
            objects_per_slab: objects_per_slab.max(1),
            buddy: BuddyAllocator::new(),
            slabs: RwLock::new(HashMap::new()),
            allocated: RwLock::new(HashMap::new()),
            stats: BuddyStats::new(),
        }
    }

    /// Allocate one object
    pub fn allocate(&self) -> Result<usize, BuddyError> {
        // Try to find a slab with free objects
        {
            let mut slabs = self.slabs.write();
            for (base, free_list) in slabs.iter_mut() {
                if let Some(offset) = free_list.pop() {
                    let addr = base + offset;
                    self.allocated.write().insert(addr, *base);
                    self.stats.record_allocation(self.object_size);
                    return Ok(addr);
                }
            }
        }

        // No free objects, allocate a new slab
        let slab_size = self.object_size * self.objects_per_slab;
        let slab_base = self.buddy.allocate(slab_size)?;

        // Initialize free list for new slab (skip first object, return it)
        let mut free_list = Vec::with_capacity(self.objects_per_slab - 1);
        for i in 1..self.objects_per_slab {
            free_list.push(i * self.object_size);
        }

        self.slabs.write().insert(slab_base, free_list);
        self.allocated.write().insert(slab_base, slab_base);
        self.stats.record_allocation(self.object_size);

        Ok(slab_base)
    }

    /// Deallocate an object
    pub fn deallocate(&self, addr: usize) -> Result<(), BuddyError> {
        let slab_base = self
            .allocated
            .write()
            .remove(&addr)
            .ok_or(BuddyError::InvalidAddress(addr))?;

        let offset = addr - slab_base;
        self.slabs
            .write()
            .get_mut(&slab_base)
            .ok_or(BuddyError::BlockNotFound(slab_base))?
            .push(offset);

        self.stats.record_deallocation(self.object_size);

        Ok(())
    }

    /// Get statistics
    pub fn stats(&self) -> &BuddyStats {
        &self.stats
    }

    /// Get object size
    pub fn object_size(&self) -> usize {
        self.object_size
    }
}

/// Arena allocator using buddy allocator for backing storage
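///
/// Bump allocation: each request advances an offset inside the current
/// block, and `reset` releases every block at once. A minimal sketch:
///
/// ```rust,ignore
/// let arena = BuddyArena::new(4096);
/// let addr = arena.allocate(100, 8)?;
/// arena.reset(); // frees all blocks in one go
/// ```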
pub struct BuddyArena {
    /// Underlying buddy allocator
    buddy: BuddyAllocator,
    /// Current arena block
    current_block: Mutex<Option<ArenaBlock>>,
    /// Block size
    block_size: usize,
    /// All allocated block addresses for cleanup
    blocks: RwLock<Vec<usize>>,
}

struct ArenaBlock {
    base: usize,
    offset: usize,
    size: usize,
}

impl BuddyArena {
    /// Create a new arena with the specified block size
    pub fn new(block_size: usize) -> Self {
        let block_size = block_size.next_power_of_two();

        Self {
            buddy: BuddyAllocator::new(),
            current_block: Mutex::new(None),
            block_size,
            blocks: RwLock::new(Vec::new()),
        }
    }

    /// Allocate from the arena (bump allocation)
    pub fn allocate(&self, size: usize, align: usize) -> Result<usize, BuddyError> {
        if size == 0 {
            return Err(BuddyError::ZeroSize);
        }

        let mut current = self.current_block.lock();

        // Try to allocate from current block
        if let Some(ref mut block) = *current {
            let aligned_offset = (block.offset + align - 1) & !(align - 1);
            if aligned_offset + size <= block.size {
                block.offset = aligned_offset + size;
                return Ok(block.base + aligned_offset);
            }
        }

        // Need a new block
        let new_size = size.max(self.block_size).next_power_of_two();
        let base = self.buddy.allocate(new_size)?;

        self.blocks.write().push(base);

        // A fresh block starts at offset 0; block bases from the buddy
        // allocator are MIN_BLOCK_SIZE-aligned, so offset 0 is treated as
        // aligned (the original `(align - 1) & !(align - 1)` is always 0)
        let aligned_offset = 0;
        *current = Some(ArenaBlock {
            base,
            offset: aligned_offset + size,
            size: new_size,
        });

        Ok(base + aligned_offset)
    }

    /// Allocate a value and return its address
    pub fn allocate_val<T>(&self, _val: T) -> Result<usize, BuddyError> {
        self.allocate(std::mem::size_of::<T>(), std::mem::align_of::<T>())
    }

    /// Reset the arena (free all blocks)
    pub fn reset(&self) {
        let mut current = self.current_block.lock();
        *current = None;

        let blocks = std::mem::take(&mut *self.blocks.write());
        for block in blocks {
            let _ = self.buddy.deallocate(block);
        }
    }

    /// Get the approximate total backing size (`block count * block_size`;
    /// oversized blocks created for large allocations are undercounted)
    pub fn allocated_size(&self) -> usize {
        self.blocks.read().len() * self.block_size
    }
}

impl Drop for BuddyArena {
    fn drop(&mut self) {
        self.reset();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_size_to_order() {
        assert_eq!(size_to_order(1), 4); // min = 16
        assert_eq!(size_to_order(16), 4); // 2^4 = 16
        assert_eq!(size_to_order(17), 5); // needs 2^5 = 32
        assert_eq!(size_to_order(32), 5);
        assert_eq!(size_to_order(64), 6);
        assert_eq!(size_to_order(1024), 10);
        assert_eq!(size_to_order(1025), 11); // needs 2048
    }

    #[test]
    fn test_order_to_size() {
        assert_eq!(order_to_size(4), 16);
        assert_eq!(order_to_size(5), 32);
        assert_eq!(order_to_size(10), 1024);
        assert_eq!(order_to_size(20), 1 << 20);
    }

    #[test]
    fn test_buddy_addr() {
        // For order 4 (size 16):
        // addr 0 -> buddy 16, addr 16 -> buddy 0
        assert_eq!(buddy_addr(0, 4), 16);
        assert_eq!(buddy_addr(16, 4), 0);

        // For order 5 (size 32):
        assert_eq!(buddy_addr(0, 5), 32);
        assert_eq!(buddy_addr(32, 5), 0);
        assert_eq!(buddy_addr(64, 5), 96);
        assert_eq!(buddy_addr(96, 5), 64);
    }

    #[test]
    fn test_memory_pool_basic() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        // Allocate 16 bytes (min size)
        let addr1 = pool.allocate(16).unwrap();
        assert!(pool.contains(addr1));

        // Allocate another 16 bytes
        let addr2 = pool.allocate(16).unwrap();
        assert_ne!(addr1, addr2);

        // Free both
        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_memory_pool_splitting() {
        let pool = MemoryPool::new(0, 256).unwrap();

        // Pool starts with one 256-byte block
        // Allocating 16 bytes should split multiple times
        let addr = pool.allocate(16).unwrap();

        // Check that splits occurred
        assert!(pool.stats().splits.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();
    }

    #[test]
    fn test_memory_pool_merging() {
        let pool = MemoryPool::new(0, 256).unwrap();

        // Allocate two adjacent 16-byte blocks
        let addr1 = pool.allocate(16).unwrap();
        let addr2 = pool.allocate(16).unwrap();

        // Free both - should trigger merging
        pool.deallocate(addr1).unwrap();
        pool.deallocate(addr2).unwrap();

        // Check that merges occurred
        assert!(pool.stats().merges.load(Ordering::Relaxed) > 0);

        // Should be able to allocate the full pool again
        let addr3 = pool.allocate(256).unwrap();
        assert_eq!(addr3, 0);
    }

    #[test]
    fn test_memory_pool_out_of_memory() {
        let pool = MemoryPool::new(0, 256).unwrap();

        // Fill up the pool
        let mut addrs = Vec::new();
        for _ in 0..16 {
            addrs.push(pool.allocate(16).unwrap());
        }

        // Should fail now
        assert!(matches!(pool.allocate(16), Err(BuddyError::OutOfMemory)));

        // Free one and try again
        pool.deallocate(addrs.pop().unwrap()).unwrap();
        assert!(pool.allocate(16).is_ok());
    }

    #[test]
    fn test_buddy_allocator_basic() {
        let alloc = BuddyAllocator::new();

        let addr1 = alloc.allocate(100).unwrap();
        let addr2 = alloc.allocate(200).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_buddy_allocator_auto_pool() {
        let alloc = BuddyAllocator::with_pool_size(1024);

        assert_eq!(alloc.pool_count(), 0);

        let _ = alloc.allocate(16).unwrap();

        assert_eq!(alloc.pool_count(), 1);
    }

    #[test]
    fn test_buddy_allocator_multiple_pools() {
        let alloc = BuddyAllocator::with_pool_size(256);

        // Allocate more than one pool can hold
        let mut addrs = Vec::new();
        for _ in 0..32 {
            addrs.push(alloc.allocate(16).unwrap());
        }

        // Should have multiple pools now
        assert!(alloc.pool_count() > 1);

        // Free all
        for addr in addrs {
            alloc.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_typed_allocator() {
        #[repr(C)]
        struct MyStruct {
            a: u64,
            b: u32,
            c: u16,
        }

        let alloc = TypedBuddyAllocator::<MyStruct>::new();

        let addr1 = alloc.allocate_one().unwrap();
        let addr2 = alloc.allocate_array(10).unwrap();

        assert_ne!(addr1, addr2);

        alloc.deallocate(addr1).unwrap();
        alloc.deallocate(addr2).unwrap();
    }

    #[test]
    fn test_slab_allocator() {
        let slab = SlabAllocator::new(32);

        // Allocate several objects
        let mut addrs = Vec::new();
        for _ in 0..10 {
            addrs.push(slab.allocate().unwrap());
        }

        // All addresses should be unique
        for i in 0..addrs.len() {
            for j in (i + 1)..addrs.len() {
                assert_ne!(addrs[i], addrs[j]);
            }
        }

        // Free all
        for addr in addrs {
            slab.deallocate(addr).unwrap();
        }
    }

    #[test]
    fn test_slab_reuse() {
        let slab = SlabAllocator::new(64);

        let addr1 = slab.allocate().unwrap();
        slab.deallocate(addr1).unwrap();

        let addr2 = slab.allocate().unwrap();

        // Should reuse the same address
        assert_eq!(addr1, addr2);
    }

    #[test]
    fn test_buddy_arena() {
        let arena = BuddyArena::new(4096);

        // Allocate several items
        let addr1 = arena.allocate(100, 8).unwrap();
        let addr2 = arena.allocate(200, 16).unwrap();
        let addr3 = arena.allocate(50, 4).unwrap();

        // All unique addresses
        assert_ne!(addr1, addr2);
        assert_ne!(addr2, addr3);
        assert_ne!(addr1, addr3);

        // Check alignment
        assert_eq!(addr2 % 16, 0);

        // Reset
        arena.reset();

        // Can allocate again
        let _ = arena.allocate(100, 8).unwrap();
    }

    #[test]
    fn test_arena_large_allocation() {
        let arena = BuddyArena::new(256);

        // Allocate more than block size
        let result = arena.allocate(1024, 8);
        // Should succeed
        assert!(result.is_ok(), "Allocation failed: {:?}", result);
        let addr = result.unwrap();
        // Arena uses bump allocation from buddy allocator pool
        // The buddy allocator starts at base 0x1000
        println!("Arena allocated address: {:#x}", addr);
    }

    #[test]
    fn test_stats_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();

        assert!(pool.stats().allocations.load(Ordering::Relaxed) > 0);
        assert!(pool.stats().allocated_bytes.load(Ordering::Relaxed) > 0);

        pool.deallocate(addr).unwrap();

        assert!(pool.stats().deallocations.load(Ordering::Relaxed) > 0);
    }

    #[test]
    fn test_free_bytes_tracking() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let initial_free = pool.free_bytes();
        assert_eq!(initial_free, 1024);

        let addr = pool.allocate(64).unwrap();

        let after_alloc = pool.free_bytes();
        assert!(after_alloc < initial_free);

        pool.deallocate(addr).unwrap();

        let after_free = pool.free_bytes();
        assert_eq!(after_free, initial_free);
    }

    #[test]
    fn test_double_free() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        let addr = pool.allocate(64).unwrap();
        pool.deallocate(addr).unwrap();

        // Double free should fail
        assert!(matches!(
            pool.deallocate(addr),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_invalid_address() {
        let pool = MemoryPool::new(0, 1024).unwrap();

        // Try to free an address that was never allocated
        assert!(matches!(
            pool.deallocate(999),
            Err(BuddyError::InvalidAddress(_))
        ));
    }

    #[test]
    fn test_concurrent_allocations() {
        use std::sync::Arc;
        use std::thread;

        let alloc = Arc::new(BuddyAllocator::new());
        let mut handles = Vec::new();

        // Use fewer threads and allocations to reduce complexity
        for _ in 0..2 {
            let alloc = Arc::clone(&alloc);
            handles.push(thread::spawn(move || {
                let mut addrs = Vec::new();
                for _ in 0..10 {
                    if let Ok(addr) = alloc.allocate(32) {
                        addrs.push(addr);
                    }
                }
                // Add a small delay between alloc and dealloc phases
                std::thread::sleep(std::time::Duration::from_millis(1));
                for addr in addrs {
                    let _ = alloc.deallocate(addr);
                }
            }));
        }

        for handle in handles {
            handle.join().unwrap();
        }
    }
}