memory_pool_allocator/
lib.rs

//! # memory-pool-allocator
//!
//! A `no_std`, thread-safe, fixed-size memory pool allocator with array-based chunk tracking.
//!
//! ## Features
//! - User-provided memory pool (via `*mut u8`)
//! - Chunked allocation with array-based chunk tracking
//! - Optional statistics and zero-on-free features
//! - Thread-safe via `parking_lot::Mutex`
//!
//! ### Default Features
//! - **zero-on-free**: Zeroes the memory of each allocation when it is deallocated.
//! - **zero-on-drop**: Zeroes the entire memory pool when the allocator is dropped.
//! - **statistics**: Tracks allocation and deallocation statistics (number of allocated chunks, allocation/deallocation errors).
//!
//! ## Type Parameters
//! - `N`: Total pool size in bytes
//! - `M`: Number of chunks to divide the pool into
//!
//! ## Safety
//! - The user must provide a pointer to a memory region of at least `N` bytes, aligned to the maximum alignment required by allocations.
//! - The allocator does not manage the lifetime of the memory pool; the user is responsible for ensuring it remains valid for the allocator's lifetime.
//! - If the pool is not sufficiently aligned, allocations with higher alignment requirements may fail or result in undefined behavior.
//!
//! ## Example
//! ```rust
//! # use memory_pool_allocator::MemoryPoolAllocator;
//! # use core::alloc::Layout;
//! #[repr(align(64))]
//! struct Aligned {
//!     mem: [u8; 1024],
//! }
//! let mut aligned = Aligned { mem: [0; 1024] };
//! let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
//! let layout = Layout::from_size_align(128, 64).unwrap();
//! let ptr = allocator.try_allocate(layout).unwrap();
//! assert_eq!(ptr as usize % 64, 0);
//! allocator.try_deallocate(ptr).unwrap();
//! ```
//!
//! ## License
//! Licensed under either of Apache License, Version 2.0 or MIT license at your option.

// Copyright 2025 Chirping666
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.

#![no_std]

use anyhow::{anyhow, Result};
use core::alloc::Layout;
use parking_lot::Mutex;

/// Fixed-size memory pool allocator with array-based chunk tracking.
///
/// # Type Parameters
/// * `N` - Total pool size in bytes
/// * `M` - Number of chunks to divide the pool into
pub struct MemoryPoolAllocator<const N: usize, const M: usize> {
    inner: Mutex<PoolInner<N, M>>,
    #[cfg(feature = "statistics")]
    stats: Mutex<PoolStats>,
}

struct PoolInner<const N: usize, const M: usize> {
    /// Pointer to the actual memory pool (user-provided)
    pool: *mut u8,
    /// Per-chunk allocation tracking: `Some(len)` at an allocation's head
    /// chunk records its length in chunks, `Some(0)` marks a continuation
    /// chunk, and `None` marks a free chunk.
    meta: [Option<usize>; M],
}

/// Allocation errors
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// Not enough contiguous space
    OutOfMemory,
    /// Invalid layout parameters
    InvalidLayout,
    /// Pointer not from this allocator
    InvalidPointer,
    /// Pointer not currently allocated
    NotAllocated,
}

impl core::fmt::Display for AllocError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "Out of memory"),
            Self::InvalidLayout => write!(f, "Invalid layout parameters"),
            Self::InvalidPointer => write!(f, "Pointer not from this allocator"),
            Self::NotAllocated => write!(f, "Pointer not currently allocated"),
        }
    }
}

impl From<anyhow::Error> for AllocError {
    fn from(err: anyhow::Error) -> Self {
        if err.is::<AllocError>() {
            *err.downcast_ref::<AllocError>().unwrap()
        } else {
            AllocError::InvalidLayout
        }
    }
}
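
// A hedged usage sketch: recovering the concrete `AllocError` from the
// `anyhow::Error` returned by the fallible methods. The allocator and layout
// names are illustrative only:
//
//     let err = allocator.try_allocate(too_big_layout).unwrap_err();
//     let kind: AllocError = err.into();
//     assert_eq!(kind, AllocError::OutOfMemory);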

/// Pool statistics
#[cfg(feature = "statistics")]
#[derive(Debug, Clone)]
pub struct PoolStats {
    pub allocated_chunks: usize,
    pub allocation_errors: usize,
    pub deallocation_errors: usize,
}

#[cfg(feature = "statistics")]
impl PoolStats {
    const fn new() -> Self {
        Self {
            allocated_chunks: 0,
            allocation_errors: 0,
            deallocation_errors: 0,
        }
    }
}

// Safety: Pool data is protected by a mutex
unsafe impl<const N: usize, const M: usize> Sync for MemoryPoolAllocator<N, M> {}
unsafe impl<const N: usize, const M: usize> Send for MemoryPoolAllocator<N, M> {}

impl<const N: usize, const M: usize> MemoryPoolAllocator<N, M> {
    /// Size of each chunk in bytes (integer division: `N / M`).
    ///
    /// `N` should be an integer multiple of `M`; any remainder bytes at the
    /// end of the pool are never handed out. For example,
    /// `MemoryPoolAllocator::<1024, 64>` uses 16-byte chunks.
    pub const CHUNK_SIZE: usize = N / M;
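
    // A hedged compile-time sanity check one could add (a sketch, not
    // enabled here): reject pool sizes that do not divide evenly into
    // chunks, so no trailing bytes are silently unusable.
    //
    //     const _POOL_DIVIDES_EVENLY: () = assert!(N % M == 0, "N must be a multiple of M");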

    /// Creates a new allocator over the user-provided memory pool.
    ///
    /// # Safety
    /// The caller must ensure the pointer is valid for reads and writes of
    /// `N` bytes and properly aligned for all pool operations.
    pub const unsafe fn new(pool: *mut u8) -> Self {
        Self {
            inner: Mutex::new(PoolInner {
                pool,
                meta: [None; M],
            }),
            #[cfg(feature = "statistics")]
            stats: Mutex::new(PoolStats::new()),
        }
    }

    /// Attempts to allocate memory with the specified layout.
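    ///
    /// # Example
    ///
    /// A minimal sketch (a stack-backed pool; real uses must also keep the
    /// pool alive for the allocator's whole lifetime and uphold the
    /// alignment contract from the module docs):
    ///
    /// ```rust
    /// # use memory_pool_allocator::MemoryPoolAllocator;
    /// # use core::alloc::Layout;
    /// #[repr(align(64))]
    /// struct Pool([u8; 256]);
    /// let mut pool = Pool([0; 256]);
    /// let allocator = unsafe { MemoryPoolAllocator::<256, 16>::new(pool.0.as_mut_ptr()) };
    /// let layout = Layout::from_size_align(32, 8).unwrap();
    /// let ptr = allocator.try_allocate(layout).unwrap();
    /// assert_eq!(ptr as usize % 8, 0);
    /// # allocator.try_deallocate(ptr).unwrap();
    /// ```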
    pub fn try_allocate(&self, layout: Layout) -> Result<*mut u8> {
        // Zero-size allocations: return a dangling pointer that satisfies
        // the requested alignment. Such pointers must not be deallocated.
        if layout.size() == 0 {
            return Ok(layout.align() as *mut u8);
        }

        // Validate layout
        if !layout.align().is_power_of_two() || layout.align() > N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidLayout).context("Invalid alignment or size"));
        }

        // Round the requested size up to a whole number of chunks
        let chunks_needed = (layout.size() + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
        if chunks_needed > M {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::OutOfMemory).context("Request exceeds total pool size"));
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;

        // Find a suitable free region
        if let Some((start_chunk, chunk_count)) = self.find_free_region(&inner, chunks_needed, layout.align()) {
            self.mark_allocated(&mut inner.meta, start_chunk, chunk_count)?;

            // Calculate the pointer address
            let ptr_addr = pool_base + start_chunk * Self::CHUNK_SIZE;

            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocated_chunks += chunk_count;
            }
            return Ok(ptr_addr as *mut u8);
        }

        #[cfg(feature = "statistics")]
        {
            self.stats.lock().allocation_errors += 1;
        }
        Err(anyhow!(AllocError::OutOfMemory).context("Failed to find free region"))
    }

    /// Attempts to deallocate previously allocated memory.
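    ///
    /// # Example
    ///
    /// A minimal sketch (same assumptions as [`Self::try_allocate`]); note
    /// that double frees are reported as errors rather than ignored:
    ///
    /// ```rust
    /// # use memory_pool_allocator::MemoryPoolAllocator;
    /// # use core::alloc::Layout;
    /// #[repr(align(64))]
    /// struct Pool([u8; 256]);
    /// let mut pool = Pool([0; 256]);
    /// let allocator = unsafe { MemoryPoolAllocator::<256, 16>::new(pool.0.as_mut_ptr()) };
    /// let ptr = allocator.try_allocate(Layout::from_size_align(32, 8).unwrap()).unwrap();
    /// allocator.try_deallocate(ptr).unwrap();
    /// assert!(allocator.try_deallocate(ptr).is_err());
    /// ```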
    pub fn try_deallocate(&self, ptr: *mut u8) -> Result<()> {
        // Handle null
        if ptr.is_null() {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidPointer).context("Cannot deallocate null pointer"));
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;
        let ptr_addr = ptr as usize;

        // Validate that the pointer lies within the pool
        if ptr_addr < pool_base || ptr_addr >= pool_base + N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidPointer).context("Pointer not from this allocator"));
        }

        // The pointer must sit on a chunk boundary
        if (ptr_addr - pool_base) % Self::CHUNK_SIZE != 0 {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidPointer).context("Pointer not aligned to chunk size"));
        }

        let start_chunk = (ptr_addr - pool_base) / Self::CHUNK_SIZE;
        if start_chunk >= M || !self.is_chunk_allocated(&inner.meta, start_chunk) {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(anyhow!(AllocError::NotAllocated).context("Pointer not currently allocated"));
        }

        // Look up the allocation length recorded at the head chunk.
        // `Some(0)` marks a continuation chunk, i.e. a pointer into the
        // middle of an allocation rather than its start.
        let chunk_count = match inner.meta[start_chunk] {
            Some(size) if size > 0 => size,
            _ => {
                #[cfg(feature = "statistics")]
                {
                    self.stats.lock().deallocation_errors += 1;
                }
                return Err(anyhow!(AllocError::NotAllocated)
                    .context("Pointer does not refer to the start of an allocation"));
            }
        };

        // Clear the memory if the feature is enabled
        #[cfg(feature = "zero-on-free")]
        {
            unsafe {
                let start_ptr = (pool_base + start_chunk * Self::CHUNK_SIZE) as *mut u8;
                core::ptr::write_bytes(start_ptr, 0, chunk_count * Self::CHUNK_SIZE);
            }
        }

        // Mark the chunks as free
        self.mark_chunks_free(&mut inner.meta, start_chunk)?;

        // Update stats
        #[cfg(feature = "statistics")]
        {
            let mut stats = self.stats.lock();
            stats.allocated_chunks = stats.allocated_chunks.saturating_sub(chunk_count);
        }

        Ok(())
    }

    // === Private metadata helpers ===

    /// Checks whether a specific chunk is currently allocated
    #[inline]
    fn is_chunk_allocated(&self, meta: &[Option<usize>; M], chunk_idx: usize) -> bool {
        if chunk_idx < M {
            meta[chunk_idx].is_some()
        } else {
            false
        }
    }

    /// Marks a range of chunks as allocated. The head chunk records the
    /// allocation's length in chunks; the remaining chunks are tagged
    /// `Some(0)` so that free-region scans see them as occupied.
    fn mark_allocated(&self, meta: &mut [Option<usize>; M], start_chunk: usize, chunk_count: usize) -> Result<()> {
        if start_chunk + chunk_count > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Not enough space to allocate chunks"));
        }
        meta[start_chunk] = Some(chunk_count);
        for i in start_chunk + 1..start_chunk + chunk_count {
            meta[i] = Some(0);
        }
        Ok(())
    }

    /// Marks an allocation's chunks as free, given its head chunk
    fn mark_chunks_free(&self, meta: &mut [Option<usize>; M], start_chunk: usize) -> Result<()> {
        match meta[start_chunk] {
            Some(size) if size > 0 => {
                if start_chunk + size > M {
                    return Err(anyhow!(AllocError::OutOfMemory).context("Invalid chunk range to free"));
                }

                // Clear the head chunk and all of its continuation chunks
                for i in start_chunk..start_chunk + size {
                    meta[i] = None;
                }

                Ok(())
            }
            _ => Err(anyhow!(AllocError::NotAllocated).context("Chunk already free or not an allocation head")),
        }
    }

    /// Finds a contiguous free region that can accommodate the request.
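    ///
    /// Returns `(aligned_start_chunk, chunks_needed)`; padding chunks that
    /// are skipped for alignment are left free. A worked example (assuming,
    /// for illustration, a 64-byte-aligned pool base): with
    /// `CHUNK_SIZE == 16` and `align == 64`, a candidate block at chunk 1
    /// lies 16 bytes past the base, so 48 bytes of padding (3 chunks) are
    /// skipped and chunk 4 is returned as the aligned start.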
    fn find_free_region(&self, inner: &PoolInner<N, M>, chunks_needed: usize, align: usize) -> Option<(usize, usize)> {
        let pool_base = inner.pool as usize;

        let mut start = 0;
        while start + chunks_needed <= M {
            // Address of the candidate block's first chunk
            let block_addr = pool_base + start * Self::CHUNK_SIZE;
            // Round the address up to the requested alignment
            let aligned_addr = (block_addr + align - 1) & !(align - 1);
            // Bytes lost to alignment padding at the front of the block
            let alignment_waste = aligned_addr - block_addr;
            // Padding rounded up to whole chunks
            let alignment_chunks = (alignment_waste + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
            // Chunks the candidate must span, including the padding
            let total_chunks_needed = alignment_chunks + chunks_needed;
            // Candidate runs past the end of the pool: nothing can fit
            if start + total_chunks_needed > M {
                break;
            }
            // Check that every chunk in the candidate range is free;
            // on a collision, resume the scan just past the occupied chunk
            let mut found = true;
            for i in 0..total_chunks_needed {
                if self.is_chunk_allocated(&inner.meta, start + i) {
                    found = false;
                    start = start + i + 1;
                    break;
                }
            }
            // Free region found: return the aligned start chunk and length
            if found {
                return Some((start + alignment_chunks, chunks_needed));
            }
        }
        None
    }
}

#[cfg(feature = "zero-on-drop")]
impl<const N: usize, const M: usize> Drop for MemoryPoolAllocator<N, M> {
    fn drop(&mut self) {
        // Zero the entire pool, including any still-allocated chunks
        let inner = self.inner.lock();
        unsafe {
            core::ptr::write_bytes(inner.pool, 0, N);
        }
    }
}

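// A hedged sketch of the zero-on-drop behavior (illustrative only; `pool`
// is a hypothetical byte array that outlives the allocator):
//
//     {
//         let allocator = unsafe { MemoryPoolAllocator::<64, 4>::new(pool.as_mut_ptr()) };
//         // ... allocate and use memory ...
//     } // dropping the allocator zeroes all 64 bytes of `pool`
//     assert!(pool.iter().all(|&b| b == 0));
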
#[cfg(test)]
mod tests {
    use super::*;

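    // The tests must honor the documented alignment contract, so the backing
    // arrays are wrapped in an over-aligned newtype (64 bytes covers every
    // alignment requested below).
    #[repr(align(64))]
    struct AlignedPool<const SIZE: usize>([u8; SIZE]);
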
    #[test]
    fn test_basic_allocation() {
        type Alloc = MemoryPoolAllocator<1024, 64>;
        let mut mem = AlignedPool([0u8; 1024]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        let layout = Layout::from_size_align(16, 8).unwrap();
        let ptr = allocator.try_allocate(layout).unwrap();

        // Check pointer alignment
        assert_eq!(ptr as usize % 8, 0);

        // Deallocate
        assert!(allocator.try_deallocate(ptr).is_ok());

        #[cfg(feature = "statistics")]
        {
            let stats = allocator.stats.lock();
            assert_eq!(stats.allocated_chunks, 0);
        }
    }

    #[test]
    fn test_multiple_allocations() {
        type Alloc = MemoryPoolAllocator<1024, 64>;
        let mut mem = AlignedPool([0u8; 1024]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        let layout = Layout::from_size_align(16, 8).unwrap();
        let mut ptrs = [core::ptr::null_mut(); 10];
        let mut count = 0;

        // Allocate multiple blocks
        for i in 0..10 {
            match allocator.try_allocate(layout) {
                Ok(ptr) => {
                    ptrs[i] = ptr;
                    count += 1;
                }
                Err(_) => break,
            }
        }

        assert!(count > 0);

        // Verify all pointers are distinct and aligned
        for i in 0..count {
            assert_eq!(ptrs[i] as usize % 8, 0);
            for j in (i + 1)..count {
                assert_ne!(ptrs[i], ptrs[j]);
            }
        }

        // Deallocate all
        for i in 0..count {
            assert!(allocator.try_deallocate(ptrs[i]).is_ok());
        }

        #[cfg(feature = "statistics")]
        {
            let stats = allocator.stats.lock();
            assert_eq!(stats.allocated_chunks, 0);
        }
    }

    #[test]
    fn test_alignment_handling() {
        type Alloc = MemoryPoolAllocator<1024, 32>;
        let mut mem = AlignedPool([0u8; 1024]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        // Allocate with different alignments
        let layout1 = Layout::from_size_align(32, 16).unwrap();
        let ptr1 = allocator.try_allocate(layout1).unwrap();
        assert_eq!(ptr1 as usize % 16, 0);

        let layout2 = Layout::from_size_align(64, 32).unwrap();
        let ptr2 = allocator.try_allocate(layout2).unwrap();
        assert_eq!(ptr2 as usize % 32, 0);

        // Deallocate
        allocator.try_deallocate(ptr1).unwrap();
        allocator.try_deallocate(ptr2).unwrap();
    }

    #[test]
    #[cfg(feature = "statistics")]
    fn test_error_handling() {
        type Alloc = MemoryPoolAllocator<512, 32>;
        let mut mem = AlignedPool([0u8; 512]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        let layout = Layout::from_size_align(16, 8).unwrap();

        // Exhaust the pool: 32 chunks of 16 bytes each
        for _ in 0..32 {
            assert!(allocator.try_allocate(layout).is_ok());
        }

        // Next allocation should fail
        assert!(allocator.try_allocate(layout).is_err());

        // Check stats
        let stats = allocator.stats.lock();
        assert!(stats.allocation_errors > 0);
    }

    #[test]
    fn test_full_pool() {
        type Alloc = MemoryPoolAllocator<64, 4>;
        let mut mem = AlignedPool([0u8; 64]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        let layout = Layout::from_size_align(16, 8).unwrap();

        // Fill the pool
        let ptr1 = allocator.try_allocate(layout).unwrap();
        let ptr2 = allocator.try_allocate(layout).unwrap();
        let ptr3 = allocator.try_allocate(layout).unwrap();
        let ptr4 = allocator.try_allocate(layout).unwrap();

        // Next allocation should fail
        assert!(allocator.try_allocate(layout).is_err());

        // Free one and try again
        allocator.try_deallocate(ptr2).unwrap();
        let ptr5 = allocator.try_allocate(layout).unwrap();

        // Clean up
        allocator.try_deallocate(ptr1).unwrap();
        allocator.try_deallocate(ptr3).unwrap();
        allocator.try_deallocate(ptr4).unwrap();
        allocator.try_deallocate(ptr5).unwrap();
    }

    #[test]
    #[cfg(feature = "zero-on-free")]
    fn test_memory_zeroing() {
        type Alloc = MemoryPoolAllocator<64, 4>;
        let mut mem = AlignedPool([0u8; 64]);
        let allocator = unsafe { Alloc::new(mem.0.as_mut_ptr()) };

        let layout = Layout::from_size_align(16, 8).unwrap();
        let ptr = allocator.try_allocate(layout).unwrap();

        // Fill the allocation with a recognizable pattern
        unsafe {
            core::ptr::write_bytes(ptr, 0xAB, 16);
        }

        // Deallocate; zero-on-free should wipe the chunk
        allocator.try_deallocate(ptr).unwrap();

        // Reallocate; the first-fit scan hands back the same chunk
        let ptr2 = allocator.try_allocate(layout).unwrap();

        // The returned memory must read as all zeroes
        let mut buffer = [0u8; 16];
        unsafe {
            core::ptr::copy_nonoverlapping(ptr2, buffer.as_mut_ptr(), 16);
        }
        assert!(buffer.iter().all(|&b| b == 0));

        allocator.try_deallocate(ptr2).unwrap();
    }
}