
buddy_slab_allocator/global_allocator.rs

//! Global allocator implementation.
//!
//! This module implements a global allocator that coordinates a buddy page
//! allocator with a slab byte allocator: small byte-granularity requests are
//! served from slabs, while larger requests go directly to pages.

extern crate alloc;

use crate::{AllocError, AllocResult, BaseAllocator, ByteAllocator, PageAllocator};
use core::alloc::Layout;
use core::ptr::NonNull;
#[cfg(feature = "tracking")]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::{AtomicBool, Ordering};

#[cfg(feature = "tracking")]
use super::buddy::BuddyStats;
use super::page_allocator::CompositePageAllocator;
use super::slab::{PageAllocatorForSlab, SlabByteAllocator};

#[cfg(feature = "log")]
use log::error;

const MIN_HEAP_SIZE: usize = 0x8000; // 32KB minimum heap

/// Memory usage statistics
#[cfg(feature = "tracking")]
#[derive(Debug, Clone, Copy, Default)]
pub struct UsageStats {
    pub total_pages: usize,
    pub used_pages: usize,
    pub free_pages: usize,
    pub slab_bytes: usize,
    pub heap_bytes: usize,
}

/// Internal atomic representation of usage statistics
#[cfg(feature = "tracking")]
struct UsageStatsAtomic {
    total_pages: AtomicUsize,
    used_pages: AtomicUsize,
    free_pages: AtomicUsize,
    slab_bytes: AtomicUsize,
    heap_bytes: AtomicUsize,
}

#[cfg(feature = "tracking")]
impl UsageStatsAtomic {
    const fn new() -> Self {
        Self {
            total_pages: AtomicUsize::new(0),
            used_pages: AtomicUsize::new(0),
            free_pages: AtomicUsize::new(0),
            slab_bytes: AtomicUsize::new(0),
            heap_bytes: AtomicUsize::new(0),
        }
    }

    fn snapshot(&self) -> UsageStats {
        UsageStats {
            total_pages: self.total_pages.load(Ordering::Relaxed),
            used_pages: self.used_pages.load(Ordering::Relaxed),
            free_pages: self.free_pages.load(Ordering::Relaxed),
            slab_bytes: self.slab_bytes.load(Ordering::Relaxed),
            heap_bytes: self.heap_bytes.load(Ordering::Relaxed),
        }
    }
}

/// Atomically subtract `value` from `counter`, saturating at zero.
///
/// Uses a compare-exchange retry loop so concurrent updates never underflow.
#[cfg(feature = "tracking")]
#[inline]
fn saturating_sub_atomic(counter: &AtomicUsize, value: usize) {
    let mut prev = counter.load(Ordering::Relaxed);
    loop {
        let new = prev.saturating_sub(value);
        match counter.compare_exchange(prev, new, Ordering::AcqRel, Ordering::Relaxed) {
            Ok(_) => break,
            Err(actual) => prev = actual,
        }
    }
}

/// Global allocator that coordinates composite and slab allocators
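///
/// # Examples
///
/// A minimal sketch; the start address and region size are illustrative:
///
/// ```no_run
/// use buddy_slab_allocator::GlobalAllocator;
/// use core::alloc::Layout;
///
/// const PAGE_SIZE: usize = 0x1000;
/// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
/// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
///
/// // Small requests go to the slab allocator, larger ones to the buddy pages.
/// let layout = Layout::from_size_align(128, 8).unwrap();
/// let ptr = allocator.alloc(layout).unwrap();
/// allocator.dealloc(ptr, layout);
/// ```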
pub struct GlobalAllocator<const PAGE_SIZE: usize = { crate::DEFAULT_PAGE_SIZE }> {
    page_allocator: CompositePageAllocator<PAGE_SIZE>,
    slab_allocator: SlabByteAllocator<PAGE_SIZE>,
    #[cfg(feature = "tracking")]
    stats: UsageStatsAtomic,
    initialized: AtomicBool,
}

impl<const PAGE_SIZE: usize> GlobalAllocator<PAGE_SIZE> {
    /// Create a new, uninitialized allocator.
    ///
    /// The allocator must be set up with [`init`](Self::init) before any
    /// allocation is attempted.
    pub const fn new() -> Self {
        Self {
            page_allocator: CompositePageAllocator::<PAGE_SIZE>::new(),
            slab_allocator: SlabByteAllocator::<PAGE_SIZE>::new(),
            #[cfg(feature = "tracking")]
            stats: UsageStatsAtomic::new(),
            initialized: AtomicBool::new(false),
        }
    }

    /// Set the address translator so that the underlying page allocator can
    /// reason about physical address ranges (e.g. low-memory regions below 4GiB).
    pub fn set_addr_translator(&mut self, translator: &'static dyn crate::AddrTranslator) {
        self.page_allocator.set_addr_translator(translator);
    }

    /// Allocate low-memory pages (physical address < 4GiB).
    /// This is a thin wrapper over the composite allocator's lowmem API.
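    ///
    /// # Examples
    ///
    /// An illustrative sketch; a real setup typically installs an address
    /// translator first so the allocator can identify ranges below 4 GiB:
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// let addr = allocator.alloc_dma32_pages(4, PAGE_SIZE).unwrap();
    /// allocator.dealloc_pages(addr, 4);
    /// ```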
    pub fn alloc_dma32_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        if !self.initialized.load(Ordering::SeqCst) {
            error!("global allocator: Allocator not initialized");
            return Err(AllocError::NoMemory);
        }

        let addr = self
            .page_allocator
            .alloc_pages_lowmem(num_pages, alignment)?;

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .used_pages
                .fetch_add(num_pages, Ordering::Relaxed);
            self.stats
                .free_pages
                .fetch_sub(num_pages, Ordering::Relaxed);
        }

        Ok(addr)
    }

    /// Initialize allocator with given memory region
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    /// ```
    pub fn init(&mut self, start_vaddr: usize, size: usize) -> AllocResult<()> {
        if size <= MIN_HEAP_SIZE {
            return Err(AllocError::InvalidParam);
        }

        self.page_allocator.init(start_vaddr, size);

        self.slab_allocator.init();

        {
            let page_alloc_ptr = &mut self.page_allocator as *mut CompositePageAllocator<PAGE_SIZE>;
            self.slab_allocator
                .set_page_allocator(page_alloc_ptr as *mut dyn PageAllocatorForSlab);
        }

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .total_pages
                .store(self.page_allocator.total_pages(), Ordering::Relaxed);
            self.stats
                .used_pages
                .store(self.page_allocator.used_pages(), Ordering::Relaxed);
            self.stats
                .free_pages
                .store(self.page_allocator.available_pages(), Ordering::Relaxed);
        }

        self.initialized.store(true, Ordering::SeqCst);
        Ok(())
    }

    /// Dynamically add a memory region to the allocator
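    ///
    /// # Examples
    ///
    /// A minimal sketch; both region addresses and sizes are illustrative:
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// // Hand a second memory region to the allocator at runtime.
    /// allocator.add_memory(0x9000_0000, 8 * 1024 * 1024).unwrap();
    /// ```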
    pub fn add_memory(&mut self, start_vaddr: usize, size: usize) -> AllocResult<()> {
        self.page_allocator.add_memory(start_vaddr, size)?;

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .total_pages
                .store(self.page_allocator.total_pages(), Ordering::Relaxed);
            self.stats
                .free_pages
                .store(self.page_allocator.available_pages(), Ordering::Relaxed);
        }

        Ok(())
    }

    /// Smart allocation based on size
    ///
    /// Small allocations (size and alignment ≤ 2048 bytes) are served by the
    /// slab allocator; larger allocations go directly to the page allocator.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    /// use core::alloc::Layout;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// let layout = Layout::from_size_align(64, 8).unwrap();
    /// let ptr = allocator.alloc(layout).unwrap();
    /// allocator.dealloc(ptr, layout);
    /// ```
    pub fn alloc(&mut self, layout: Layout) -> AllocResult<NonNull<u8>> {
        if !self.initialized.load(Ordering::SeqCst) {
            error!("global allocator: Allocator not initialized");
            return Err(AllocError::NoMemory);
        }

        if layout.size() <= 2048 && layout.align() <= 2048 {
            // Try slab allocator first
            match self.slab_allocator.alloc(layout) {
                Ok(ptr) => {
                    #[cfg(feature = "tracking")]
                    {
                        self.stats
                            .slab_bytes
                            .fetch_add(layout.size(), Ordering::Relaxed);
                    }
                    return Ok(ptr);
                }
                Err(e) => {
                    // The slab allocator should handle every request within its
                    // size/alignment limits, so a failure here is a real error
                    // (e.g. out of memory). Log it and propagate the error.
                    error!(
                        "global allocator: Slab allocator failed for layout {layout:?}, error: {e:?}"
                    );
                    return Err(e);
                }
            }
        }

        let pages_needed = layout.size().div_ceil(PAGE_SIZE);

        let addr =
            PageAllocator::alloc_pages(&mut self.page_allocator, pages_needed, layout.align())?;
        let ptr = unsafe { NonNull::new_unchecked(addr as *mut u8) };

        #[cfg(feature = "tracking")]
        {
            self.stats
                .used_pages
                .fetch_add(pages_needed, Ordering::Relaxed);
            self.stats
                .free_pages
                .fetch_sub(pages_needed, Ordering::Relaxed);
            self.stats
                .heap_bytes
                .fetch_add(layout.size(), Ordering::Relaxed);
        }

        Ok(ptr)
    }

    /// Allocate pages
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use buddy_slab_allocator::{GlobalAllocator, PageAllocator};
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// let addr = allocator.alloc_pages(4, PAGE_SIZE).unwrap();
    /// allocator.dealloc_pages(addr, 4);
    /// ```
    pub fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        if !self.initialized.load(Ordering::SeqCst) {
            return Err(AllocError::NoMemory);
        }

        let addr = PageAllocator::alloc_pages(&mut self.page_allocator, num_pages, alignment)?;

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .used_pages
                .fetch_add(num_pages, Ordering::Relaxed);
            self.stats
                .free_pages
                .fetch_sub(num_pages, Ordering::Relaxed);
        }

        Ok(addr)
    }

    /// Deallocate memory
    pub fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
        if !self.initialized.load(Ordering::SeqCst) {
            error!("global allocator: Deallocating memory before initializing");
            return;
        }

        if layout.size() <= 2048 && layout.align() <= 2048 {
            // This memory must have been allocated by slab allocator
            // If dealloc fails (not found in slab), it's a critical error
            self.slab_allocator.dealloc(ptr, layout);
            #[cfg(feature = "tracking")]
            {
                saturating_sub_atomic(&self.stats.slab_bytes, layout.size());
            }
            return;
        }

        // This memory was allocated by page allocator
        let pages_needed = layout.size().div_ceil(PAGE_SIZE);
        PageAllocator::dealloc_pages(
            &mut self.page_allocator,
            ptr.as_ptr() as usize,
            pages_needed,
        );
        #[cfg(feature = "tracking")]
        {
            saturating_sub_atomic(&self.stats.used_pages, pages_needed);
            self.stats
                .free_pages
                .fetch_add(pages_needed, Ordering::Relaxed);
            saturating_sub_atomic(&self.stats.heap_bytes, layout.size());
        }
    }

    /// Deallocate pages
    pub fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        if !self.initialized.load(Ordering::SeqCst) {
            return;
        }

        PageAllocator::dealloc_pages(&mut self.page_allocator, pos, num_pages);

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            saturating_sub_atomic(&self.stats.used_pages, num_pages);
            self.stats
                .free_pages
                .fetch_add(num_pages, Ordering::Relaxed);
        }
    }

    /// Reallocate memory
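    ///
    /// # Examples
    ///
    /// A minimal sketch showing a growing reallocation; addresses and sizes
    /// are illustrative:
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    /// use core::alloc::Layout;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// let layout = Layout::from_size_align(64, 8).unwrap();
    /// let ptr = allocator.alloc(layout).unwrap();
    /// // Grow the allocation; the old contents are copied into the new block.
    /// let new_ptr = allocator.realloc(ptr.as_ptr(), layout, 256);
    /// assert!(!new_ptr.is_null());
    /// ```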
    #[allow(clippy::not_unsafe_ptr_arg_deref)]
    pub fn realloc(&mut self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        if new_size == 0 {
            if let Some(ptr) = NonNull::new(ptr) {
                self.dealloc(ptr, layout);
            }
            return core::ptr::null_mut();
        }

        if ptr.is_null() {
            let new_layout = Layout::from_size_align(new_size, layout.align())
                .unwrap_or_else(|_| Layout::new::<u8>());
            return match self.alloc(new_layout) {
                Ok(ptr) => ptr.as_ptr(),
                Err(_) => core::ptr::null_mut(),
            };
        }

        let new_layout = Layout::from_size_align(new_size, layout.align())
            .unwrap_or_else(|_| Layout::new::<u8>());

        // If new size fits in old allocation, return old pointer
        if new_size <= layout.size() {
            return ptr;
        }

        // Allocate new memory and copy
        match self.alloc(new_layout) {
            Ok(new_ptr) => {
                let new_ptr = new_ptr.as_ptr();
                unsafe {
                    core::ptr::copy_nonoverlapping(
                        ptr,
                        new_ptr,
                        core::cmp::min(layout.size(), new_size),
                    );
                }
                if let Some(ptr) = NonNull::new(ptr) {
                    self.dealloc(ptr, layout);
                }
                new_ptr
            }
            Err(_) => core::ptr::null_mut(),
        }
    }
}

impl<const PAGE_SIZE: usize> GlobalAllocator<PAGE_SIZE> {
    /// Get memory statistics
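    ///
    /// # Examples
    ///
    /// Available only with the `tracking` feature; values depend on the
    /// (illustrative) region handed to `init`:
    ///
    /// ```no_run
    /// use buddy_slab_allocator::GlobalAllocator;
    ///
    /// const PAGE_SIZE: usize = 0x1000;
    /// let mut allocator = GlobalAllocator::<PAGE_SIZE>::new();
    /// allocator.init(0x8000_0000, 16 * 1024 * 1024).unwrap();
    ///
    /// let stats = allocator.get_stats();
    /// assert!(stats.used_pages <= stats.total_pages);
    /// ```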
    #[cfg(feature = "tracking")]
    pub fn get_stats(&self) -> UsageStats {
        self.stats.snapshot()
    }

    /// Get buddy allocator statistics
    #[cfg(feature = "tracking")]
    pub fn get_buddy_stats(&self) -> BuddyStats {
        self.page_allocator.get_buddy_stats()
    }
}

impl<const PAGE_SIZE: usize> Default for GlobalAllocator<PAGE_SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

impl<const PAGE_SIZE: usize> BaseAllocator for GlobalAllocator<PAGE_SIZE> {
    fn init(&mut self, start: usize, size: usize) {
        self.page_allocator.init(start, size);
    }

    fn add_memory(&mut self, start: usize, size: usize) -> AllocResult {
        self.page_allocator.add_memory(start, size)
    }
}

impl<const PAGE_SIZE: usize> PageAllocator for GlobalAllocator<PAGE_SIZE> {
    const PAGE_SIZE: usize = PAGE_SIZE;

    fn alloc_pages(&mut self, num_pages: usize, alignment: usize) -> AllocResult<usize> {
        if !self.initialized.load(Ordering::SeqCst) {
            return Err(AllocError::NoMemory);
        }

        let addr = <CompositePageAllocator<PAGE_SIZE> as PageAllocator>::alloc_pages(
            &mut self.page_allocator,
            num_pages,
            alignment,
        )?;

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .used_pages
                .fetch_add(num_pages, Ordering::Relaxed);
            self.stats
                .free_pages
                .fetch_sub(num_pages, Ordering::Relaxed);
        }

        Ok(addr)
    }

    fn dealloc_pages(&mut self, pos: usize, num_pages: usize) {
        if !self.initialized.load(Ordering::SeqCst) {
            return;
        }

        <CompositePageAllocator<PAGE_SIZE> as PageAllocator>::dealloc_pages(
            &mut self.page_allocator,
            pos,
            num_pages,
        );

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            saturating_sub_atomic(&self.stats.used_pages, num_pages);
            self.stats
                .free_pages
                .fetch_add(num_pages, Ordering::Relaxed);
        }
    }

    fn alloc_pages_at(
        &mut self,
        base: usize,
        num_pages: usize,
        alignment: usize,
    ) -> AllocResult<usize> {
        if !self.initialized.load(Ordering::SeqCst) {
            return Err(AllocError::NoMemory);
        }

        let addr = <CompositePageAllocator<PAGE_SIZE> as PageAllocator>::alloc_pages_at(
            &mut self.page_allocator,
            base,
            num_pages,
            alignment,
        )?;

        // Update statistics
        #[cfg(feature = "tracking")]
        {
            self.stats
                .used_pages
                .fetch_add(num_pages, Ordering::Relaxed);
            self.stats
                .free_pages
                .fetch_sub(num_pages, Ordering::Relaxed);
        }

        Ok(addr)
    }

    fn total_pages(&self) -> usize {
        self.page_allocator.total_pages()
    }

    fn used_pages(&self) -> usize {
        self.page_allocator.used_pages()
    }

    fn available_pages(&self) -> usize {
        self.page_allocator.available_pages()
    }
}