// oxigdal_embedded — memory_pool.rs
1//! Static memory pool implementations for no_std environments
2//!
3//! Provides memory pool abstractions that allow predictable allocation behavior
4//! in embedded systems without heap allocation.
5
6use core::cell::UnsafeCell;
7use core::ptr::NonNull;
8use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
9
10use crate::error::{EmbeddedError, Result};
11
12/// Memory pool trait for unified pool interface
13pub trait MemoryPool {
14    /// Allocate memory from the pool
15    ///
16    /// # Errors
17    ///
18    /// Returns `PoolExhausted` if no memory is available
19    /// Returns `InvalidAlignment` if alignment requirements cannot be met
20    fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>>;
21
22    /// Deallocate memory back to the pool
23    ///
24    /// # Safety
25    ///
26    /// The pointer must have been allocated from this pool
27    unsafe fn deallocate(&self, ptr: NonNull<u8>, size: usize, align: usize) -> Result<()>;
28
29    /// Get the total capacity of the pool
30    fn capacity(&self) -> usize;
31
32    /// Get the currently used bytes
33    fn used(&self) -> usize;
34
35    /// Get the available bytes
36    fn available(&self) -> usize {
37        self.capacity().saturating_sub(self.used())
38    }
39
40    /// Reset the pool (deallocate all)
41    ///
42    /// # Safety
43    ///
44    /// All pointers allocated from this pool must not be used after reset
45    unsafe fn reset(&self) -> Result<()>;
46}
47
/// Static memory pool with compile-time size
///
/// Uses a simple bump allocator strategy suitable for embedded systems
pub struct StaticPool<const N: usize> {
    // Backing storage; UnsafeCell lets allocate() hand out *mut u8 from &self.
    // NOTE(review): [u8; N] only guarantees byte alignment of the buffer —
    // aligned allocations may therefore carry padding (see allocate()).
    buffer: UnsafeCell<[u8; N]>,
    // Bump offset: number of bytes of `buffer` already consumed.
    offset: AtomicUsize,
    // Simple mutual-exclusion flag: true while allocate()/reset() is running.
    locked: AtomicBool,
}
56
57impl<const N: usize> StaticPool<N> {
58    /// Create a new static pool
59    pub const fn new() -> Self {
60        Self {
61            buffer: UnsafeCell::new([0u8; N]),
62            offset: AtomicUsize::new(0),
63            locked: AtomicBool::new(false),
64        }
65    }
66
67    /// Get the base address of the pool
68    fn base_addr(&self) -> usize {
69        self.buffer.get() as usize
70    }
71
72    /// Try to acquire lock for allocation
73    fn try_lock(&self) -> Result<()> {
74        match self
75            .locked
76            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
77        {
78            Ok(_) => Ok(()),
79            Err(_) => Err(EmbeddedError::ResourceBusy),
80        }
81    }
82
83    /// Release the lock
84    fn unlock(&self) {
85        self.locked.store(false, Ordering::Release);
86    }
87}
88
impl<const N: usize> Default for StaticPool<N> {
    /// Equivalent to [`StaticPool::new`]: an empty, unlocked pool.
    fn default() -> Self {
        Self::new()
    }
}
94
95impl<const N: usize> MemoryPool for StaticPool<N> {
96    fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
97        if size == 0 {
98            return Err(EmbeddedError::InvalidParameter);
99        }
100
101        if !align.is_power_of_two() {
102            return Err(EmbeddedError::InvalidAlignment {
103                required: align,
104                actual: 0,
105            });
106        }
107
108        self.try_lock()?;
109
110        let current_offset = self.offset.load(Ordering::Relaxed);
111        let base = self.base_addr();
112
113        // Calculate aligned offset considering the base address
114        // We need (base + aligned_offset) % align == 0
115        // So aligned_offset = align_up(base + current_offset, align) - base
116        let current_addr = base.wrapping_add(current_offset);
117        let aligned_addr =
118            (current_addr.wrapping_add(align.wrapping_sub(1))) & !align.wrapping_sub(1);
119        let aligned_offset = aligned_addr.wrapping_sub(base);
120
121        let new_offset = match aligned_offset.checked_add(size) {
122            Some(offset) if offset <= N => offset,
123            _ => {
124                self.unlock();
125                return Err(EmbeddedError::PoolExhausted);
126            }
127        };
128
129        self.offset.store(new_offset, Ordering::Release);
130        self.unlock();
131
132        // SAFETY: We've verified the pointer is within our buffer and properly aligned
133        let ptr = unsafe { NonNull::new_unchecked(aligned_addr as *mut u8) };
134        Ok(ptr)
135    }
136
137    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _size: usize, _align: usize) -> Result<()> {
138        // Bump allocator doesn't support individual deallocation
139        // Use reset() to reclaim all memory
140        Ok(())
141    }
142
143    fn capacity(&self) -> usize {
144        N
145    }
146
147    fn used(&self) -> usize {
148        self.offset.load(Ordering::Relaxed)
149    }
150
151    unsafe fn reset(&self) -> Result<()> {
152        self.try_lock()?;
153        self.offset.store(0, Ordering::Release);
154        self.unlock();
155        Ok(())
156    }
157}
158
// SAFETY: `offset` and `locked` are only touched through atomics, and the
// buffer behind the UnsafeCell is only carved up while the `locked` flag is
// held, so sharing a &StaticPool across threads cannot race on the pool's own
// bookkeeping. Callers remain responsible for the memory they are handed, as
// with any allocator.
unsafe impl<const N: usize> Sync for StaticPool<N> {}
unsafe impl<const N: usize> Send for StaticPool<N> {}
162
/// Block-based memory pool with fixed-size blocks
///
/// More efficient for frequent allocations/deallocations of similar sizes
pub struct BlockPool<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> {
    // The storage itself: NUM_BLOCKS contiguous BLOCK_SIZE-byte slots.
    blocks: UnsafeCell<[[u8; BLOCK_SIZE]; NUM_BLOCKS]>,
    // One bit per block; a SET bit means the block is free.
    free_bitmap: UnsafeCell<[u8; BITMAP_SIZE]>,
    // Cached count of free blocks, kept in sync with the bitmap under lock.
    free_count: AtomicUsize,
    // Mutual-exclusion flag guarding `blocks` and `free_bitmap`.
    locked: AtomicBool,
}
172
173impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize>
174    BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
175{
176    /// Create a new block pool
177    ///
178    /// # Panics
179    ///
180    /// Panics if BITMAP_SIZE is not sufficient for NUM_BLOCKS
181    pub const fn new() -> Self {
182        // We need at least (NUM_BLOCKS + 7) / 8 bytes for the bitmap
183        // This check will be evaluated at compile time
184        assert!(
185            BITMAP_SIZE * 8 >= NUM_BLOCKS,
186            "BITMAP_SIZE is too small for NUM_BLOCKS"
187        );
188
189        Self {
190            blocks: UnsafeCell::new([[0u8; BLOCK_SIZE]; NUM_BLOCKS]),
191            free_bitmap: UnsafeCell::new([0xFF; BITMAP_SIZE]),
192            free_count: AtomicUsize::new(NUM_BLOCKS),
193            locked: AtomicBool::new(false),
194        }
195    }
196
197    /// Try to acquire lock
198    fn try_lock(&self) -> Result<()> {
199        match self
200            .locked
201            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
202        {
203            Ok(_) => Ok(()),
204            Err(_) => Err(EmbeddedError::ResourceBusy),
205        }
206    }
207
208    /// Release the lock
209    fn unlock(&self) {
210        self.locked.store(false, Ordering::Release);
211    }
212
213    /// Find and allocate a free block
214    fn find_free_block(&self) -> Option<usize> {
215        // SAFETY: We hold the lock
216        let bitmap = unsafe { &mut *self.free_bitmap.get() };
217
218        for (byte_idx, byte) in bitmap.iter_mut().enumerate() {
219            if *byte == 0 {
220                continue;
221            }
222
223            // Find first set bit
224            for bit_idx in 0..8 {
225                let block_idx = byte_idx * 8 + bit_idx;
226                if block_idx >= NUM_BLOCKS {
227                    return None;
228                }
229
230                if (*byte >> bit_idx) & 1 != 0 {
231                    // Mark as allocated
232                    *byte &= !(1 << bit_idx);
233                    return Some(block_idx);
234                }
235            }
236        }
237
238        None
239    }
240
241    /// Mark a block as free
242    ///
243    /// # Safety
244    ///
245    /// block_idx must be valid and currently allocated
246    unsafe fn free_block(&self, block_idx: usize) {
247        // SAFETY: Caller guarantees block_idx is valid and we hold the lock
248        let bitmap = unsafe { &mut *self.free_bitmap.get() };
249        let byte_idx = block_idx / 8;
250        let bit_idx = block_idx % 8;
251
252        bitmap[byte_idx] |= 1 << bit_idx;
253    }
254}
255
impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Default
    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
    /// Equivalent to [`BlockPool::new`]: all blocks free.
    fn default() -> Self {
        Self::new()
    }
}
263
264impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> MemoryPool
265    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
266{
267    fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
268        if size > BLOCK_SIZE {
269            return Err(EmbeddedError::BufferTooSmall {
270                required: size,
271                available: BLOCK_SIZE,
272            });
273        }
274
275        if !align.is_power_of_two() {
276            return Err(EmbeddedError::InvalidAlignment {
277                required: align,
278                actual: 0,
279            });
280        }
281
282        self.try_lock()?;
283
284        let block_idx = match self.find_free_block() {
285            Some(idx) => idx,
286            None => {
287                self.unlock();
288                return Err(EmbeddedError::PoolExhausted);
289            }
290        };
291
292        self.free_count.fetch_sub(1, Ordering::Relaxed);
293
294        // SAFETY: We hold the lock and block_idx is valid
295        let blocks = unsafe { &mut *self.blocks.get() };
296        let ptr = blocks[block_idx].as_mut_ptr();
297
298        // Verify alignment
299        let ptr_addr = ptr as usize;
300        if ptr_addr % align != 0 {
301            // SAFETY: We just allocated this block
302            unsafe { self.free_block(block_idx) };
303            self.free_count.fetch_add(1, Ordering::Relaxed);
304            self.unlock();
305            return Err(EmbeddedError::InvalidAlignment {
306                required: align,
307                actual: ptr_addr % align.max(1),
308            });
309        }
310
311        self.unlock();
312
313        // SAFETY: ptr is non-null and valid
314        let nonnull = unsafe { NonNull::new_unchecked(ptr) };
315        Ok(nonnull)
316    }
317
318    unsafe fn deallocate(&self, ptr: NonNull<u8>, _size: usize, _align: usize) -> Result<()> {
319        self.try_lock()?;
320
321        // Calculate block index from pointer
322        // SAFETY: We hold the lock and the blocks array is valid
323        let blocks = unsafe { &*self.blocks.get() };
324        let base_addr = blocks.as_ptr() as usize;
325        let ptr_addr = ptr.as_ptr() as usize;
326
327        if ptr_addr < base_addr {
328            self.unlock();
329            return Err(EmbeddedError::InvalidParameter);
330        }
331
332        let offset = ptr_addr.wrapping_sub(base_addr);
333        let block_idx = offset / BLOCK_SIZE;
334
335        if block_idx >= NUM_BLOCKS {
336            self.unlock();
337            return Err(EmbeddedError::InvalidParameter);
338        }
339
340        // SAFETY: We've verified block_idx is valid and ptr was allocated from this pool
341        unsafe { self.free_block(block_idx) };
342        self.free_count.fetch_add(1, Ordering::Relaxed);
343
344        self.unlock();
345        Ok(())
346    }
347
348    fn capacity(&self) -> usize {
349        BLOCK_SIZE * NUM_BLOCKS
350    }
351
352    fn used(&self) -> usize {
353        let free = self.free_count.load(Ordering::Relaxed);
354        (NUM_BLOCKS.saturating_sub(free)) * BLOCK_SIZE
355    }
356
357    unsafe fn reset(&self) -> Result<()> {
358        self.try_lock()?;
359
360        // SAFETY: We hold the lock and the bitmap is valid
361        let bitmap = unsafe { &mut *self.free_bitmap.get() };
362        bitmap.fill(0xFF);
363
364        self.free_count.store(NUM_BLOCKS, Ordering::Release);
365        self.unlock();
366        Ok(())
367    }
368}
369
// SAFETY: All shared-state mutation (bitmap, blocks, free_count) happens only
// while the `locked` flag is held, and `locked`/`free_count` themselves are
// atomics, so concurrent use of a &BlockPool cannot race on the pool's
// internal bookkeeping.
unsafe impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Sync
    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
}
unsafe impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Send
    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
}
379
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): the pools' backing arrays are `[u8; _]`, which only
    // guarantees byte alignment, and repr(Rust) field placement is
    // unspecified. The previous tests used align = 4/8 with exact-equality
    // asserts, which could spuriously fail depending on where the pool
    // landed in memory (especially on 32-bit targets). Where exact
    // accounting is asserted, align = 1 is now used so no padding can be
    // inserted; aligned allocations only get lower-bound asserts.

    #[test]
    fn test_static_pool_allocation() {
        let pool = StaticPool::<1024>::new();

        // align = 1 keeps the bump offset exact: no padding possible.
        let ptr1 = pool.allocate(64, 1).expect("allocation failed");
        assert_eq!(pool.used(), 64);

        // Aligned allocations may add padding, so only assert a lower bound.
        let ptr2 = pool.allocate(128, 16).expect("allocation failed");
        assert!(pool.used() >= 64 + 128);

        assert_ne!(ptr1, ptr2);
    }

    #[test]
    fn test_static_pool_exhaustion() {
        let pool = StaticPool::<128>::new();

        // align = 1 guarantees both allocations fit exactly with no padding.
        let _ptr1 = pool.allocate(64, 1).expect("allocation failed");
        let _ptr2 = pool.allocate(64, 1).expect("allocation failed");

        // Pool should be exhausted now
        let result = pool.allocate(64, 1);
        assert!(matches!(result, Err(EmbeddedError::PoolExhausted)));
    }

    #[test]
    fn test_static_pool_reset() {
        let pool = StaticPool::<1024>::new();

        let _ptr = pool.allocate(512, 1).expect("allocation failed");
        assert!(pool.used() > 0);

        // SAFETY: We won't use the pointer after reset
        unsafe { pool.reset().expect("reset failed") };
        assert_eq!(pool.used(), 0);
    }

    #[test]
    fn test_block_pool_allocation() {
        // BITMAP_SIZE = (NUM_BLOCKS + 7) / 8 = (16 + 7) / 8 = 2
        let pool = BlockPool::<64, 16, 2>::new();

        // align = 1: block starts are only guaranteed byte-aligned, so any
        // larger alignment could fail depending on the pool's address.
        let ptr1 = pool.allocate(32, 1).expect("allocation failed");
        let ptr2 = pool.allocate(32, 1).expect("allocation failed");

        assert_ne!(ptr1, ptr2);
        assert_eq!(pool.used(), 128); // 2 blocks * 64 bytes
    }

    #[test]
    fn test_block_pool_deallocation() {
        // BITMAP_SIZE = (NUM_BLOCKS + 7) / 8 = (16 + 7) / 8 = 2
        let pool = BlockPool::<64, 16, 2>::new();

        let ptr = pool.allocate(32, 1).expect("allocation failed");
        assert_eq!(pool.used(), 64);

        // SAFETY: ptr was allocated from this pool
        unsafe { pool.deallocate(ptr, 32, 1).expect("deallocation failed") };
        assert_eq!(pool.used(), 0);
    }

    #[test]
    fn test_block_pool_exhaustion() {
        // BITMAP_SIZE = (NUM_BLOCKS + 7) / 8 = (4 + 7) / 8 = 1
        let pool = BlockPool::<64, 4, 1>::new();

        for _ in 0..4 {
            pool.allocate(32, 1).expect("allocation failed");
        }

        let result = pool.allocate(32, 1);
        assert!(matches!(result, Err(EmbeddedError::PoolExhausted)));
    }
}