// oxigdal_embedded/alloc_utils.rs

//! Custom allocator implementations for `no_std` environments
//!
//! Provides allocator wrappers compatible with Rust's allocator API
#[cfg(feature = "alloc")]
use alloc::alloc::{GlobalAlloc, Layout};
use core::ptr::NonNull;

use crate::error::{EmbeddedError, Result};
use crate::memory_pool::MemoryPool;
11
/// Bump allocator for sequential allocations
///
/// Simple and fast allocator that only supports allocation, not deallocation
/// of individual items. Perfect for temporary buffers and stack-like usage.
/// Memory is reclaimed all at once via [`BumpAllocator::reset`].
pub struct BumpAllocator<P: MemoryPool> {
    // Backing pool; all allocation bookkeeping lives inside the pool itself.
    pool: P,
}
19
impl<P: MemoryPool> BumpAllocator<P> {
    /// Create a new bump allocator with the given pool.
    ///
    /// `const` so it can be used to initialize `static` allocators.
    pub const fn new(pool: P) -> Self {
        Self { pool }
    }

    /// Allocate `size` bytes aligned to `align` from the underlying pool.
    ///
    /// # Errors
    ///
    /// Returns error if the pool is exhausted or alignment requirements cannot be met
    pub fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
        // Pure delegation: the pool tracks all allocation state.
        self.pool.allocate(size, align)
    }

    /// Get the total capacity of the underlying pool in bytes.
    pub fn capacity(&self) -> usize {
        self.pool.capacity()
    }

    /// Get currently used bytes, as reported by the pool.
    pub fn used(&self) -> usize {
        self.pool.used()
    }

    /// Get bytes still available for allocation, as reported by the pool.
    pub fn available(&self) -> usize {
        self.pool.available()
    }

    /// Reset the allocator (reclaim all memory at once).
    ///
    /// # Safety
    ///
    /// All pointers allocated from this allocator must not be used after reset
    pub unsafe fn reset(&self) -> Result<()> {
        // SAFETY: Caller guarantees all allocated pointers will not be used after reset
        unsafe { self.pool.reset() }
    }
}
60
#[cfg(feature = "alloc")]
// SAFETY: assumes the pool upholds the `MemoryPool` contract (well-aligned,
// in-bounds pointers); `Sync` is required because a global allocator may be
// invoked from any thread.
unsafe impl<P: MemoryPool + Sync> GlobalAlloc for BumpAllocator<P> {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // Translate the pool's `Result` into the null-on-failure convention
        // that the `GlobalAlloc` contract requires.
        self.pool
            .allocate(layout.size(), layout.align())
            .map_or(core::ptr::null_mut(), |p| p.as_ptr())
    }

    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // Intentionally a no-op: a bump allocator reclaims memory only via
        // `reset`, never per allocation.
    }
}
76
77/// Stack-based allocator for fixed-size allocations
78///
79/// Maintains a stack of allocations and only allows deallocation in LIFO order
80pub struct StackAllocator<const N: usize> {
81    buffer: [u8; N],
82    offset: core::cell::Cell<usize>,
83}
84
85impl<const N: usize> StackAllocator<N> {
86    /// Create a new stack allocator
87    pub const fn new() -> Self {
88        Self {
89            buffer: [0u8; N],
90            offset: core::cell::Cell::new(0),
91        }
92    }
93
94    /// Allocate from the stack
95    ///
96    /// # Errors
97    ///
98    /// Returns error if insufficient space or invalid alignment
99    pub fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
100        if size == 0 {
101            return Err(EmbeddedError::InvalidParameter);
102        }
103
104        if !align.is_power_of_two() {
105            return Err(EmbeddedError::InvalidAlignment {
106                required: align,
107                actual: 0,
108            });
109        }
110
111        let current_offset = self.offset.get();
112        let base_addr = self.buffer.as_ptr() as usize;
113        let aligned_offset = (current_offset + align - 1) & !(align - 1);
114
115        let new_offset = match aligned_offset.checked_add(size) {
116            Some(offset) if offset <= N => offset,
117            _ => {
118                return Err(EmbeddedError::BufferTooSmall {
119                    required: size,
120                    available: N.saturating_sub(current_offset),
121                });
122            }
123        };
124
125        self.offset.set(new_offset);
126
127        let ptr_addr = base_addr.wrapping_add(aligned_offset);
128        // SAFETY: We've verified the pointer is within bounds
129        let ptr = unsafe { NonNull::new_unchecked(ptr_addr as *mut u8) };
130        Ok(ptr)
131    }
132
133    /// Pop the last allocation
134    ///
135    /// # Safety
136    ///
137    /// Must be called in LIFO order matching allocations
138    pub unsafe fn pop(&self, size: usize) -> Result<()> {
139        let current_offset = self.offset.get();
140        if size > current_offset {
141            return Err(EmbeddedError::InvalidParameter);
142        }
143
144        self.offset.set(current_offset - size);
145        Ok(())
146    }
147
148    /// Get current offset
149    pub fn used(&self) -> usize {
150        self.offset.get()
151    }
152
153    /// Get remaining capacity
154    pub fn available(&self) -> usize {
155        N.saturating_sub(self.offset.get())
156    }
157
158    /// Reset the allocator
159    pub fn reset(&self) {
160        self.offset.set(0);
161    }
162}
163
164impl<const N: usize> Default for StackAllocator<N> {
165    fn default() -> Self {
166        Self::new()
167    }
168}
169
170/// Arena allocator for temporary allocations
171///
172/// Fast allocator for temporary objects that will all be freed together
173pub struct Arena<const N: usize> {
174    buffer: core::cell::UnsafeCell<[u8; N]>,
175    offset: core::cell::Cell<usize>,
176}
177
178impl<const N: usize> Arena<N> {
179    /// Create a new arena
180    pub const fn new() -> Self {
181        Self {
182            buffer: core::cell::UnsafeCell::new([0u8; N]),
183            offset: core::cell::Cell::new(0),
184        }
185    }
186
187    /// Allocate from the arena
188    ///
189    /// # Errors
190    ///
191    /// Returns error if insufficient space
192    pub fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
193        if size == 0 {
194            return Err(EmbeddedError::InvalidParameter);
195        }
196
197        if !align.is_power_of_two() {
198            return Err(EmbeddedError::InvalidAlignment {
199                required: align,
200                actual: 0,
201            });
202        }
203
204        let current_offset = self.offset.get();
205        let aligned_offset = (current_offset + align - 1) & !(align - 1);
206
207        let new_offset = match aligned_offset.checked_add(size) {
208            Some(offset) if offset <= N => offset,
209            _ => {
210                return Err(EmbeddedError::BufferTooSmall {
211                    required: size,
212                    available: N.saturating_sub(current_offset),
213                });
214            }
215        };
216
217        self.offset.set(new_offset);
218
219        // SAFETY: We own the buffer and offset is within bounds
220        let base_ptr = self.buffer.get() as *mut u8;
221        let ptr = unsafe { base_ptr.add(aligned_offset) };
222        let nonnull = NonNull::new(ptr).ok_or(EmbeddedError::AllocationFailed)?;
223        Ok(nonnull)
224    }
225
226    /// Allocate a typed value
227    ///
228    /// Returns a `NonNull<T>` pointer to properly aligned, uninitialized memory.
229    /// The caller is responsible for initializing the memory before use.
230    ///
231    /// # Errors
232    ///
233    /// Returns error if insufficient space
234    pub fn allocate_typed<T>(&self) -> Result<NonNull<T>> {
235        let ptr = self.allocate(core::mem::size_of::<T>(), core::mem::align_of::<T>())?;
236        Ok(ptr.cast::<T>())
237    }
238
239    /// Clear the arena (reclaim all memory)
240    pub fn clear(&self) {
241        self.offset.set(0);
242    }
243
244    /// Get used bytes
245    pub fn used(&self) -> usize {
246        self.offset.get()
247    }
248
249    /// Get available bytes
250    pub fn available(&self) -> usize {
251        N.saturating_sub(self.offset.get())
252    }
253}
254
255impl<const N: usize> Default for Arena<N> {
256    fn default() -> Self {
257        Self::new()
258    }
259}
260
#[cfg(test)]
mod tests {
    use super::*;
    use crate::memory_pool::StaticPool;

    #[test]
    fn test_bump_allocator() {
        let pool = StaticPool::<1024>::new();
        let allocator = BumpAllocator::new(pool);

        // Two successive allocations must yield distinct pointers and
        // advance the used-bytes counter.
        let ptr1 = allocator.allocate(64, 8).expect("allocation failed");
        let ptr2 = allocator.allocate(128, 16).expect("allocation failed");

        assert_ne!(ptr1, ptr2);
        assert!(allocator.used() > 0);
    }

    #[test]
    fn test_stack_allocator() {
        let allocator = StackAllocator::<1024>::new();

        // First allocation starts at offset 0, so used() equals the size exactly.
        let _ptr1 = allocator.allocate(64, 8).expect("allocation failed");
        assert_eq!(allocator.used(), 64);

        // Second allocation may insert alignment padding, hence `>=`.
        let _ptr2 = allocator.allocate(128, 16).expect("allocation failed");
        assert!(allocator.used() >= 64 + 128);

        // SAFETY: We're popping in LIFO order
        unsafe {
            allocator.pop(128).expect("pop failed");
        }
        // Offset 64 is already 16-aligned, so no padding was added and pop
        // restores used() to exactly 64.
        assert_eq!(allocator.used(), 64);
    }

    #[test]
    fn test_arena_allocator() {
        let arena = Arena::<1024>::new();

        let _ptr1 = arena.allocate(64, 8).expect("allocation failed");
        let _ptr2 = arena.allocate(128, 16).expect("allocation failed");

        assert!(arena.used() > 0);

        // clear() reclaims everything in one step.
        arena.clear();
        assert_eq!(arena.used(), 0);
    }

    #[test]
    fn test_arena_typed_allocation() {
        let arena = Arena::<1024>::new();

        // allocate_typed returns uninitialized memory; write before reading.
        let mut ptr: NonNull<u64> = arena.allocate_typed().expect("allocation failed");
        // SAFETY: We just allocated this memory and have exclusive access
        let value = unsafe { ptr.as_mut() };
        *value = 42;
        assert_eq!(*value, 42);
    }
}