kizzasi_core/embedded_alloc.rs

//! Embedded-friendly allocator for no_std environments
//!
//! This module provides a simple, efficient allocator suitable for embedded systems
//! with limited memory. Features:
//! - Fixed-size pool allocation for predictable memory usage
//! - Bump allocator for sequential allocations
//! - Stack-based allocation for temporary data
//! - Zero-allocation APIs where possible
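//!
//! A minimal usage sketch of the bump allocator (the crate path follows the
//! examples further down; all three allocators share the same `&self`-based API):
//!
//! ```
//! use kizzasi_core::embedded_alloc::BumpAllocator;
//! use core::alloc::Layout;
//!
//! // A 256-byte arena; all allocations are released together via `reset`.
//! static ARENA: BumpAllocator<256> = BumpAllocator::new();
//!
//! let layout = Layout::from_size_align(32, 8).unwrap();
//! let _ptr = ARENA.alloc(layout).expect("arena has space");
//! assert_eq!(ARENA.used(), 32);
//! ```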

use core::{
    alloc::{GlobalAlloc, Layout},
    cell::UnsafeCell,
    ptr::{self, NonNull},
    sync::atomic::{AtomicUsize, Ordering},
};

/// Fixed-size memory pool for embedded systems
///
/// Provides O(1) allocation from a pre-allocated buffer and is suitable for
/// real-time systems with deterministic memory requirements. Deallocation is
/// simplified in this implementation (see [`FixedPool::dealloc`]); prefer
/// `reset` once all blocks are known to be unused.
pub struct FixedPool<const SIZE: usize, const ALIGN: usize> {
    buffer: UnsafeCell<[u8; SIZE]>,
    // Bump index of the next never-allocated block (not a true free list).
    free_list: AtomicUsize,
    block_size: usize,
    num_blocks: usize,
}

impl<const SIZE: usize, const ALIGN: usize> FixedPool<SIZE, ALIGN> {
    /// Create a new fixed pool with given block size
    ///
    /// # Arguments
    ///
    /// * `block_size` - Size of each allocatable block (will be aligned to ALIGN)
    ///
    /// # Examples
    ///
    /// ```
    /// use kizzasi_core::embedded_alloc::FixedPool;
    ///
    /// // Pool with 1KB, 8-byte aligned blocks
    /// let pool: FixedPool<1024, 8> = FixedPool::new(64);
    /// ```
    #[allow(clippy::manual_div_ceil)] // div_ceil is not const fn
    pub const fn new(block_size: usize) -> Self {
        let aligned_block_size = ((block_size + ALIGN - 1) / ALIGN) * ALIGN;
        let num_blocks = SIZE / aligned_block_size;

        Self {
            buffer: UnsafeCell::new([0u8; SIZE]),
            free_list: AtomicUsize::new(0),
            block_size: aligned_block_size,
            num_blocks,
        }
    }

    /// Allocate a block from the pool
    ///
    /// Returns `None` if the pool is exhausted.
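    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use kizzasi_core::embedded_alloc::FixedPool;
    ///
    /// let pool: FixedPool<256, 8> = FixedPool::new(32);
    /// let _block = pool.alloc().expect("pool has free blocks");
    /// assert_eq!(pool.available(), pool.capacity() - 1);
    /// ```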
    pub fn alloc(&self) -> Option<NonNull<u8>> {
        loop {
            let free_idx = self.free_list.load(Ordering::Acquire);
            if free_idx >= self.num_blocks {
                return None; // Pool exhausted
            }

            // Try to claim this block
            if self
                .free_list
                .compare_exchange(free_idx, free_idx + 1, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                let offset = free_idx * self.block_size;
                let ptr = unsafe {
                    let buf = &*self.buffer.get();
                    buf.as_ptr().add(offset) as *mut u8
                };
                return NonNull::new(ptr);
            }
            // CAS failed, retry
        }
    }

    /// Deallocate a block back to the pool
    ///
    /// # Safety
    ///
    /// The pointer must have been allocated from this pool and not already freed.
    pub unsafe fn dealloc(&self, _ptr: NonNull<u8>) {
        // Simplified: this pool does not track which block was freed. Decrementing
        // the bump index effectively recycles the most recently allocated block,
        // not necessarily `_ptr`'s block. A real embedded system would maintain an
        // actual free list; prefer `reset` when all allocations are known dead.
        let current = self.free_list.load(Ordering::Acquire);
        if current > 0 {
            self.free_list.fetch_sub(1, Ordering::Release);
        }
    }

    /// Reset the pool, freeing all allocations
    ///
    /// # Safety
    ///
    /// Only call this when you're certain no outstanding allocations exist.
    pub unsafe fn reset(&self) {
        self.free_list.store(0, Ordering::Release);
    }

    /// Get the number of available blocks
    pub fn available(&self) -> usize {
        let used = self.free_list.load(Ordering::Acquire);
        self.num_blocks.saturating_sub(used)
    }

    /// Get total number of blocks in the pool
    pub const fn capacity(&self) -> usize {
        self.num_blocks
    }

    /// Get block size
    pub const fn block_size(&self) -> usize {
        self.block_size
    }
}

// Safety: FixedPool uses atomic operations for thread safety
unsafe impl<const SIZE: usize, const ALIGN: usize> Send for FixedPool<SIZE, ALIGN> {}
unsafe impl<const SIZE: usize, const ALIGN: usize> Sync for FixedPool<SIZE, ALIGN> {}

/// Bump allocator for sequential allocations
///
/// Very fast O(1) allocation, no deallocation. Suitable for temporary
/// computations where all memory is freed at once.
pub struct BumpAllocator<const SIZE: usize> {
    buffer: UnsafeCell<[u8; SIZE]>,
    offset: AtomicUsize,
}

impl<const SIZE: usize> BumpAllocator<SIZE> {
    /// Create a new bump allocator
    pub const fn new() -> Self {
        Self {
            buffer: UnsafeCell::new([0u8; SIZE]),
            offset: AtomicUsize::new(0),
        }
    }

    /// Allocate memory with given layout
    ///
    /// Returns `None` if insufficient space remains.
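    ///
    /// A short sketch of sequential allocation:
    ///
    /// ```
    /// use kizzasi_core::embedded_alloc::BumpAllocator;
    /// use core::alloc::Layout;
    ///
    /// let bump: BumpAllocator<128> = BumpAllocator::new();
    /// let layout = Layout::from_size_align(16, 8).unwrap();
    /// let a = bump.alloc(layout).expect("first allocation fits");
    /// let b = bump.alloc(layout).expect("second allocation fits");
    /// assert_ne!(a, b);
    /// assert_eq!(bump.used(), 32);
    /// ```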
    pub fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        let size = layout.size();
        let align = layout.align();

        loop {
            let current_offset = self.offset.load(Ordering::Acquire);

            // Align the offset
            let aligned_offset = (current_offset + align - 1) & !(align - 1);
            let new_offset = aligned_offset + size;

            if new_offset > SIZE {
                return None; // Out of memory
            }

            // Try to claim this allocation
            if self
                .offset
                .compare_exchange(
                    current_offset,
                    new_offset,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                )
                .is_ok()
            {
                let ptr = unsafe {
                    let buf = &*self.buffer.get();
                    buf.as_ptr().add(aligned_offset) as *mut u8
                };
                return NonNull::new(ptr);
            }
            // CAS failed, retry
        }
    }

    /// Reset the allocator, freeing all allocations
    ///
    /// # Safety
    ///
    /// Only call this when you're certain no outstanding allocations exist.
    pub unsafe fn reset(&self) {
        self.offset.store(0, Ordering::Release);
    }

    /// Get the number of bytes allocated
    pub fn used(&self) -> usize {
        self.offset.load(Ordering::Acquire)
    }

    /// Get the number of bytes available
    pub fn available(&self) -> usize {
        SIZE.saturating_sub(self.used())
    }

    /// Get total capacity
    pub const fn capacity(&self) -> usize {
        SIZE
    }
}

// Safety: BumpAllocator uses atomic operations for thread safety
unsafe impl<const SIZE: usize> Send for BumpAllocator<SIZE> {}
unsafe impl<const SIZE: usize> Sync for BumpAllocator<SIZE> {}

impl<const SIZE: usize> Default for BumpAllocator<SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

/// Stack-based allocator for temporary allocations
///
/// Allocations must be freed in LIFO order. Very fast and suitable
/// for nested scopes.
pub struct StackAllocator<const SIZE: usize> {
    buffer: UnsafeCell<[u8; SIZE]>,
    offset: AtomicUsize,
}

impl<const SIZE: usize> StackAllocator<SIZE> {
    /// Create a new stack allocator
    pub const fn new() -> Self {
        Self {
            buffer: UnsafeCell::new([0u8; SIZE]),
            offset: AtomicUsize::new(0),
        }
    }

    /// Push an allocation onto the stack
    ///
    /// Returns the pointer and the offset to restore when popping.
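    ///
    /// A minimal LIFO sketch (offsets must be restored in reverse order):
    ///
    /// ```
    /// use kizzasi_core::embedded_alloc::StackAllocator;
    /// use core::alloc::Layout;
    ///
    /// let stack: StackAllocator<128> = StackAllocator::new();
    /// let layout = Layout::from_size_align(16, 8).unwrap();
    /// let (_a, save_a) = stack.push(layout).expect("fits");
    /// let (_b, save_b) = stack.push(layout).expect("fits");
    /// unsafe {
    ///     stack.pop(save_b); // most recent allocation first
    ///     stack.pop(save_a);
    /// }
    /// assert_eq!(stack.depth(), 0);
    /// ```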
    pub fn push(&self, layout: Layout) -> Option<(NonNull<u8>, usize)> {
        let size = layout.size();
        let align = layout.align();

        loop {
            let current_offset = self.offset.load(Ordering::Acquire);

            // Align the offset
            let aligned_offset = (current_offset + align - 1) & !(align - 1);
            let new_offset = aligned_offset + size;

            if new_offset > SIZE {
                return None; // Out of memory
            }

            // Try to claim this allocation
            if self
                .offset
                .compare_exchange(
                    current_offset,
                    new_offset,
                    Ordering::AcqRel,
                    Ordering::Acquire,
                )
                .is_ok()
            {
                let ptr = unsafe {
                    let buf = &*self.buffer.get();
                    buf.as_ptr().add(aligned_offset) as *mut u8
                };
                // Return pointer and the offset BEFORE this allocation (for pop)
                return NonNull::new(ptr).map(|p| (p, current_offset));
            }
            // CAS failed, retry
        }
    }

    /// Pop an allocation from the stack
    ///
    /// # Arguments
    ///
    /// * `saved_offset` - The offset returned from `push`
    ///
    /// # Safety
    ///
    /// Must be called in LIFO order matching the `push` calls.
    pub unsafe fn pop(&self, saved_offset: usize) {
        self.offset.store(saved_offset, Ordering::Release);
    }

    /// Get current stack depth (bytes used)
    pub fn depth(&self) -> usize {
        self.offset.load(Ordering::Acquire)
    }

    /// Get available space
    pub fn available(&self) -> usize {
        SIZE.saturating_sub(self.depth())
    }

    /// Get total capacity
    pub const fn capacity(&self) -> usize {
        SIZE
    }
}

// Safety: StackAllocator uses atomic operations for thread safety
unsafe impl<const SIZE: usize> Send for StackAllocator<SIZE> {}
unsafe impl<const SIZE: usize> Sync for StackAllocator<SIZE> {}

impl<const SIZE: usize> Default for StackAllocator<SIZE> {
    fn default() -> Self {
        Self::new()
    }
}

/// Global embedded allocator combining bump and pool allocation
///
/// For no_std environments, this can be used as the global allocator.
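///
/// A hedged sketch of installing it as the global allocator in a `no_std`
/// binary (marked `ignore` because the doctest harness already provides one;
/// the sizes below are illustrative):
///
/// ```ignore
/// use kizzasi_core::embedded_alloc::EmbeddedAllocator;
///
/// // 4 KiB bump region plus a 1 KiB pool of 64-byte blocks.
/// #[global_allocator]
/// static ALLOC: EmbeddedAllocator<4096, 1024> = EmbeddedAllocator::new(64);
/// ```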
pub struct EmbeddedAllocator<const SIZE: usize, const POOL_SIZE: usize> {
    bump: BumpAllocator<SIZE>,
    pool: FixedPool<POOL_SIZE, 8>,
}

impl<const SIZE: usize, const POOL_SIZE: usize> EmbeddedAllocator<SIZE, POOL_SIZE> {
    /// Create a new embedded allocator
    ///
    /// # Arguments
    ///
    /// * `block_size` - Size of blocks in the pool allocator
    pub const fn new(block_size: usize) -> Self {
        Self {
            bump: BumpAllocator::new(),
            pool: FixedPool::new(block_size),
        }
    }

    /// Reset both allocators
    ///
    /// # Safety
    ///
    /// Only call when no outstanding allocations exist.
    pub unsafe fn reset(&self) {
        self.bump.reset();
        self.pool.reset();
    }

    /// Get total memory used
    pub fn used(&self) -> usize {
        self.bump.used() + (self.pool.capacity() - self.pool.available()) * self.pool.block_size()
    }

    /// Get total capacity
    pub const fn capacity(&self) -> usize {
        SIZE + POOL_SIZE
    }
}

unsafe impl<const SIZE: usize, const POOL_SIZE: usize> GlobalAlloc
    for EmbeddedAllocator<SIZE, POOL_SIZE>
{
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Try the pool first for small allocations whose alignment the
        // 8-byte-aligned pool blocks can satisfy
        if layout.size() <= self.pool.block_size() && layout.align() <= 8 {
            if let Some(ptr) = self.pool.alloc() {
                return ptr.as_ptr();
            }
        }

        // Fall back to bump allocator
        self.bump
            .alloc(layout)
            .map(|p| p.as_ptr())
            .unwrap_or(ptr::null_mut())
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        // Only return the block to the pool if the pointer actually lies inside
        // the pool's buffer; bump allocations are reclaimed only by `reset`
        if let Some(nonnull) = NonNull::new(ptr) {
            let pool_start = self.pool.buffer.get() as *const u8 as usize;
            let pool_end = pool_start + POOL_SIZE;
            let addr = ptr as usize;
            if addr >= pool_start && addr < pool_end {
                self.pool.dealloc(nonnull);
            }
        }
    }
}

// Safety: EmbeddedAllocator uses atomic operations and is thread-safe
unsafe impl<const SIZE: usize, const POOL_SIZE: usize> Send for EmbeddedAllocator<SIZE, POOL_SIZE> {}
unsafe impl<const SIZE: usize, const POOL_SIZE: usize> Sync for EmbeddedAllocator<SIZE, POOL_SIZE> {}

/// RAII guard for stack allocations
pub struct StackGuard<'a, const SIZE: usize> {
    allocator: &'a StackAllocator<SIZE>,
    saved_offset: usize,
}

impl<'a, const SIZE: usize> Drop for StackGuard<'a, SIZE> {
    fn drop(&mut self) {
        unsafe {
            self.allocator.pop(self.saved_offset);
        }
    }
}

impl<const SIZE: usize> StackAllocator<SIZE> {
    /// Allocate with RAII guard that automatically frees on drop
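    ///
    /// A small sketch: the guard restores the stack offset when it goes out of scope.
    ///
    /// ```
    /// use kizzasi_core::embedded_alloc::StackAllocator;
    /// use core::alloc::Layout;
    ///
    /// let stack: StackAllocator<128> = StackAllocator::new();
    /// let layout = Layout::from_size_align(16, 8).unwrap();
    /// {
    ///     let (_ptr, _guard) = stack.scoped_alloc(layout).expect("fits");
    ///     assert_eq!(stack.depth(), 16);
    /// }
    /// assert_eq!(stack.depth(), 0); // guard dropped, offset restored
    /// ```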
    pub fn scoped_alloc(&self, layout: Layout) -> Option<(NonNull<u8>, StackGuard<'_, SIZE>)> {
        self.push(layout).map(|(ptr, saved_offset)| {
            let guard = StackGuard {
                allocator: self,
                saved_offset,
            };
            (ptr, guard)
        })
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_fixed_pool_basic() {
        let pool: FixedPool<1024, 8> = FixedPool::new(64);
        assert_eq!(pool.capacity(), 16); // 1024 / 64 = 16 blocks
        assert_eq!(pool.available(), 16);

        let ptr1 = pool.alloc().expect("Failed to allocate");
        assert_eq!(pool.available(), 15);

        let ptr2 = pool.alloc().expect("Failed to allocate");
        assert_eq!(pool.available(), 14);

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());

        unsafe {
            pool.dealloc(ptr1);
            pool.dealloc(ptr2);
        }
    }

    #[test]
    fn test_fixed_pool_exhaustion() {
        let pool: FixedPool<128, 8> = FixedPool::new(32);
        assert_eq!(pool.capacity(), 4);

        let mut ptrs = vec![];
        for _ in 0..4 {
            ptrs.push(pool.alloc().expect("Failed to allocate"));
        }

        // Pool should be exhausted
        assert!(pool.alloc().is_none());
        assert_eq!(pool.available(), 0);
    }

    #[test]
    fn test_bump_allocator_basic() {
        let bump: BumpAllocator<1024> = BumpAllocator::new();
        assert_eq!(bump.capacity(), 1024);
        assert_eq!(bump.used(), 0);

        let layout = Layout::from_size_align(64, 8).unwrap();
        let ptr1 = bump.alloc(layout).expect("Failed to allocate");
        assert_eq!(bump.used(), 64);

        let ptr2 = bump.alloc(layout).expect("Failed to allocate");
        assert_eq!(bump.used(), 128);

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());
    }

    #[test]
    fn test_bump_allocator_alignment() {
        let bump: BumpAllocator<1024> = BumpAllocator::new();

        // Allocate with different alignments
        let layout1 = Layout::from_size_align(1, 1).unwrap();
        let _ptr1 = bump.alloc(layout1).expect("Failed to allocate");

        let layout2 = Layout::from_size_align(8, 8).unwrap();
        let ptr2 = bump.alloc(layout2).expect("Failed to allocate");

        // ptr2 should be 8-byte aligned
        assert_eq!(ptr2.as_ptr() as usize % 8, 0);
    }

    #[test]
    fn test_bump_allocator_reset() {
        let bump: BumpAllocator<1024> = BumpAllocator::new();

        let layout = Layout::from_size_align(64, 8).unwrap();
        let _ptr = bump.alloc(layout).expect("Failed to allocate");
        assert_eq!(bump.used(), 64);

        unsafe {
            bump.reset();
        }
        assert_eq!(bump.used(), 0);
    }

    #[test]
    fn test_stack_allocator_basic() {
        let stack: StackAllocator<1024> = StackAllocator::new();
        assert_eq!(stack.capacity(), 1024);
        assert_eq!(stack.depth(), 0);

        let layout = Layout::from_size_align(64, 8).unwrap();
        let (ptr1, offset1) = stack.push(layout).expect("Failed to allocate");
        assert!(stack.depth() >= 64);

        let (ptr2, offset2) = stack.push(layout).expect("Failed to allocate");
        assert!(stack.depth() >= 128);

        assert_ne!(ptr1.as_ptr(), ptr2.as_ptr());

        // Pop in LIFO order
        unsafe {
            stack.pop(offset2);
            assert!(stack.depth() < 128);
            stack.pop(offset1);
            assert_eq!(stack.depth(), 0);
        }
    }

    #[test]
    fn test_stack_allocator_scoped() {
        let stack: StackAllocator<1024> = StackAllocator::new();

        let layout = Layout::from_size_align(64, 8).unwrap();
        {
            let (_ptr1, _guard1) = stack.scoped_alloc(layout).expect("Failed to allocate");
            assert!(stack.depth() >= 64);

            {
                let (_ptr2, _guard2) = stack.scoped_alloc(layout).expect("Failed to allocate");
                assert!(stack.depth() >= 128);
            }
            // guard2 dropped, space freed
            assert!(stack.depth() < 128);
        }
        // guard1 dropped, all space freed
        // Note: depth might not be exactly 0 due to alignment
    }

    #[test]
    fn test_embedded_allocator_basic() {
        let allocator: EmbeddedAllocator<1024, 512> = EmbeddedAllocator::new(32);
        assert_eq!(allocator.capacity(), 1536);

        unsafe {
            let layout = Layout::from_size_align(16, 8).unwrap();
            let ptr1 = allocator.alloc(layout);
            assert!(!ptr1.is_null());

            let ptr2 = allocator.alloc(layout);
            assert!(!ptr2.is_null());
            assert_ne!(ptr1, ptr2);

            allocator.dealloc(ptr1, layout);
            allocator.dealloc(ptr2, layout);
        }
    }

    #[test]
    fn test_embedded_allocator_small_large() {
        let allocator: EmbeddedAllocator<2048, 1024> = EmbeddedAllocator::new(64);

        unsafe {
            // Small allocation (should use pool)
            let small_layout = Layout::from_size_align(32, 8).unwrap();
            let small_ptr = allocator.alloc(small_layout);
            assert!(!small_ptr.is_null());

            // Large allocation (should use bump)
            let large_layout = Layout::from_size_align(128, 8).unwrap();
            let large_ptr = allocator.alloc(large_layout);
            assert!(!large_ptr.is_null());

            assert_ne!(small_ptr, large_ptr);

            allocator.dealloc(small_ptr, small_layout);
            allocator.dealloc(large_ptr, large_layout);
        }
    }

    #[test]
    fn test_fixed_pool_reset() {
        let pool: FixedPool<1024, 8> = FixedPool::new(64);

        let _ptr1 = pool.alloc().expect("Failed to allocate");
        let _ptr2 = pool.alloc().expect("Failed to allocate");
        assert_eq!(pool.available(), 14);

        unsafe {
            pool.reset();
        }
        assert_eq!(pool.available(), 16);
    }
}