memory_pool_allocator/
lib.rs

//! # memory-pool-allocator
//!
//! A `no_std`, thread-safe, fixed-size memory pool allocator that tracks chunks with a metadata array.
//!
//! ## Features
//! - User-provided memory pool (via `*mut u8`)
//! - Chunked allocation with array-based chunk tracking
//! - Optional statistics and zero-on-free features
//! - Thread-safe via `parking_lot::Mutex`
//!
//! ## Default Features
//! - **zero-on-free**: Zeroes the memory of each allocation when it is deallocated.
//! - **zero-on-drop**: Zeroes the entire memory pool when the allocator is dropped.
//! - **statistics**: Tracks allocation and deallocation statistics (number of allocated chunks, allocation/deallocation errors).
//!
//! ## Optional Feature
//! - **debug**: Adds pool-consistency assertions to debug builds.
//!
//! ## Type Parameters
//! - `N`: Total pool size in bytes
//! - `M`: Number of chunks to divide the pool into
//!
//! ## Safety
//! - The user must provide a pointer to a memory region of at least `N` bytes, aligned to the maximum alignment required by allocations.
//! - The allocator does not manage the lifetime of the memory pool; the user is responsible for ensuring it remains valid for the allocator's lifetime.
//! - If the pool is not sufficiently aligned, allocations with higher alignment requirements may fail or result in undefined behavior.
//!
//! ## Example
//! ```rust
//! # use memory_pool_allocator::MemoryPoolAllocator;
//! # use core::alloc::Layout;
//! #[repr(align(64))]
//! struct Aligned {
//!     mem: [u8; 1024],
//! }
//! let mut aligned = Aligned { mem: [0; 1024] };
//! let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
//! let layout = Layout::from_size_align(128, 64).unwrap();
//! let ptr = allocator.try_allocate(layout).unwrap();
//! assert_eq!(ptr as usize % 64, 0);
//! allocator.try_deallocate(ptr).unwrap();
//! ```
//!
//! ## License
//! Licensed under either of Apache License, Version 2.0 or MIT license at your option.

// Copyright 2025 Chirping666
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.

#![no_std]

#[cfg(test)]
mod tests;

use anyhow::{Result, anyhow};
use core::{alloc::Layout, ptr::NonNull};
use parking_lot::Mutex;

/// Array chunk tracking-based fixed-size memory pool allocator
///
/// # Type Parameters
/// * `N` - Total pool size in bytes
/// * `M` - Number of chunks to divide the pool into
///
pub struct MemoryPoolAllocator<const N: usize, const M: usize> {
    inner: Mutex<PoolInner<N, M>>,
    #[cfg(feature = "statistics")]
    stats: Mutex<PoolStats>,
}

struct PoolInner<const N: usize, const M: usize> {
    /// Pointer to the actual memory pool (user-provided)
    pool: *mut u8,
    /// Allocation tracking
    meta: [MetaInfo; M],
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MetaInfo {
    /// First chunk of a free region, storing the region's length in chunks
    Free(usize),
    /// Subsequent chunk of a free region
    FreeContinuation,
    /// First chunk of an allocation
    AllocStart {
        size: usize,       // Total chunks reserved, including alignment padding
        ptr_offset: usize, // Offset of the user pointer from the chunk base
    },
    /// Subsequent chunk of an allocation
    AllocContinuation,
}
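
// For intuition, a sketch of the metadata array for an M = 8 pool after a single
// three-chunk allocation at index 0 (offsets illustrative):
//
//   [AllocStart { size: 3, ptr_offset: 0 }, AllocContinuation, AllocContinuation,
//    Free(5), FreeContinuation, FreeContinuation, FreeContinuation, FreeContinuation]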

/// Allocation errors
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// Not enough contiguous space
    OutOfMemory,
    /// Invalid layout parameters
    InvalidLayout,
    /// Pointer not from this allocator
    InvalidPointer,
    /// Pointer not currently allocated
    NotAllocated,
}

impl core::fmt::Display for AllocError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "Out of memory"),
            Self::InvalidLayout => write!(f, "Invalid layout parameters"),
            Self::InvalidPointer => write!(f, "Pointer not from this allocator"),
            Self::NotAllocated => write!(f, "Pointer not currently allocated"),
        }
    }
}

impl From<anyhow::Error> for AllocError {
    fn from(err: anyhow::Error) -> Self {
        match err.downcast_ref::<AllocError>() {
            Some(e) => *e,
            // Fall back to InvalidLayout for foreign error types
            None => AllocError::InvalidLayout,
        }
    }
}
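
// Conversion behavior sketch (given the fallback above):
//
//     let e: AllocError = anyhow!(AllocError::OutOfMemory).into(); // -> OutOfMemory
//     let e: AllocError = anyhow!("some other error").into();      // -> InvalidLayout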

#[cfg(feature = "statistics")]
/// Pool statistics
#[derive(Debug, Clone)]
pub struct PoolStats {
    pub allocated_chunks: usize,
    pub allocation_errors: usize,
    pub deallocation_errors: usize,
}

#[cfg(feature = "statistics")]
impl PoolStats {
    const fn new() -> Self {
        Self {
            allocated_chunks: 0,
            allocation_errors: 0,
            deallocation_errors: 0,
        }
    }
}

/// Information about an allocation for deallocation
struct AllocationInfo {
    start_chunk: usize,
    total_chunks: usize,
}

// Safety: The raw `pool` pointer makes the type `!Send`/`!Sync` by default, but all
// access to the pool and its metadata goes through the `Mutex`, so sharing is sound.
unsafe impl<const N: usize, const M: usize> Sync for MemoryPoolAllocator<N, M> {}
unsafe impl<const N: usize, const M: usize> Send for MemoryPoolAllocator<N, M> {}

impl<const N: usize, const M: usize> MemoryPoolAllocator<N, M> {
    // Compile-time assertions; referenced in `new` so they are actually
    // evaluated when the allocator is instantiated.
    const _DIVISIBILITY: () = assert!(
        N % M == 0,
        "Pool size N must be exactly divisible by chunk count M"
    );
    const _NON_ZERO_CHUNK_NUM: () = assert!(M > 0, "Must have at least one chunk");
    const _NON_ZERO_POOL_SIZE: () = assert!(N > 0, "Pool size must be greater than zero");
    const _N_GR_THAN_OR_EQ_TO_M: () = assert!(
        N >= M,
        "Pool size N must be greater than or equal to chunk count M"
    );
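    // With the constants referenced in `new`, invalid parameters fail at compile
    // time once instantiated, e.g. (illustrative, not a compiling doctest):
    //
    //     let _ = unsafe { MemoryPoolAllocator::<1000, 64>::new(ptr) };
    //     // error: Pool size N must be exactly divisible by chunk count M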

    /// Size of each chunk in bytes (e.g. `MemoryPoolAllocator::<1024, 64>::CHUNK_SIZE == 16`)
    pub const CHUNK_SIZE: usize = N / M;

    /// # Safety
    /// The caller must ensure the pointer is valid for reads/writes of N bytes and properly aligned for all pool operations.
    pub const unsafe fn new(pool: *mut u8) -> Self {
        // Force evaluation of the compile-time assertions above.
        let _ = Self::_DIVISIBILITY;
        let _ = Self::_NON_ZERO_CHUNK_NUM;
        let _ = Self::_NON_ZERO_POOL_SIZE;
        let _ = Self::_N_GR_THAN_OR_EQ_TO_M;

        let mut meta = [MetaInfo::FreeContinuation; M];
        meta[0] = MetaInfo::Free(M); // Initialize first chunk as Free with size M
        Self {
            inner: Mutex::new(PoolInner { pool, meta }),
            #[cfg(feature = "statistics")]
            stats: Mutex::new(PoolStats::new()),
        }
    }

    /// Attempts to allocate memory with the specified layout.
    ///
    /// Returns a pointer aligned to `layout.align()`, or an error if no suitable
    /// contiguous free region exists. Zero-size allocations return a dangling,
    /// non-null pointer that must not be passed to [`Self::try_deallocate`].
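    ///
    /// # Example
    /// A minimal sketch mirroring the crate-level example:
    /// ```
    /// # use memory_pool_allocator::MemoryPoolAllocator;
    /// # use core::alloc::Layout;
    /// # #[repr(align(64))]
    /// # struct Aligned { mem: [u8; 1024] }
    /// # let mut aligned = Aligned { mem: [0; 1024] };
    /// let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
    /// let ptr = allocator.try_allocate(Layout::from_size_align(32, 8).unwrap()).unwrap();
    /// assert!(!ptr.is_null());
    /// ```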
    pub fn try_allocate(&self, layout: Layout) -> Result<*mut u8> {
        // Handle zero-size allocations
        if layout.size() == 0 {
            return Ok(NonNull::dangling().as_ptr());
        }

        // Validate layout
        if !layout.align().is_power_of_two() || layout.align() > N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidLayout).context("Invalid alignment or size"));
        }

        let chunks_needed = (layout.size() + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
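        // e.g. a 100-byte request with CHUNK_SIZE == 16 rounds up to
        // (100 + 15) / 16 == 7 chunks.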
        if chunks_needed > M || chunks_needed == 0 {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation too large"));
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;

        // Find a suitable free region
        if let Some((start_chunk, total_chunks, aligned_ptr)) =
            self.find_free_region(&inner, chunks_needed, layout.align(), pool_base)
        {
            self.mark_allocated(
                &mut inner.meta,
                start_chunk,
                total_chunks,
                aligned_ptr,
                pool_base,
            )?;

            #[cfg(feature = "debug")]
            #[cfg(debug_assertions)]
            {
                debug_assert!(
                    self.validate_pool_consistency(&inner.meta),
                    "Pool consistency check failed after allocation"
                );
            }

            #[cfg(feature = "statistics")]
            {
                let mut stats = self.stats.lock();
                stats.allocated_chunks += total_chunks;
            }

            return Ok(aligned_ptr as *mut u8);
        }

        #[cfg(feature = "statistics")]
        {
            self.stats.lock().allocation_errors += 1;
        }
        Err(anyhow!(AllocError::OutOfMemory).context("No suitable free region found"))
    }

    /// Attempts to deallocate previously allocated memory.
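    ///
    /// # Example
    /// A minimal sketch; deallocating the same pointer twice is expected to fail:
    /// ```
    /// # use memory_pool_allocator::MemoryPoolAllocator;
    /// # use core::alloc::Layout;
    /// # #[repr(align(64))]
    /// # struct Aligned { mem: [u8; 1024] }
    /// # let mut aligned = Aligned { mem: [0; 1024] };
    /// # let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
    /// let ptr = allocator.try_allocate(Layout::from_size_align(32, 8).unwrap()).unwrap();
    /// allocator.try_deallocate(ptr).unwrap();
    /// assert!(allocator.try_deallocate(ptr).is_err()); // double free is rejected
    /// ```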
    pub fn try_deallocate(&self, ptr: *mut u8) -> Result<()> {
        // Handle null pointer
        if ptr.is_null() {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Cannot deallocate null pointer")
            );
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;
        let ptr_addr = ptr as usize;

        // Validate pointer is within pool bounds
        if ptr_addr < pool_base || ptr_addr >= pool_base + N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Pointer not from this allocator")
            );
        }

        // Find the allocation that contains this pointer
        let allocation_info =
            match self.find_allocation_containing_ptr(&inner.meta, ptr_addr, pool_base) {
                Ok(info) => info,
                Err(_) => {
                    #[cfg(feature = "statistics")]
                    {
                        self.stats.lock().deallocation_errors += 1;
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not currently allocated"));
                }
            };

        // Clear memory if feature enabled
        #[cfg(feature = "zero-on-free")]
        {
            unsafe {
                let start_ptr =
                    (pool_base + allocation_info.start_chunk * Self::CHUNK_SIZE) as *mut u8;
                core::ptr::write_bytes(
                    start_ptr,
                    0,
                    allocation_info.total_chunks * Self::CHUNK_SIZE,
                );
            }
        }

        // Mark chunks as free and coalesce
        self.mark_chunks_free(
            &mut inner.meta,
            allocation_info.start_chunk,
            allocation_info.total_chunks,
        )?;

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(&inner.meta),
                "Pool consistency check failed after deallocation"
            );
        }

        // Update stats
        #[cfg(feature = "statistics")]
        {
            let mut stats = self.stats.lock();
            stats.allocated_chunks = stats
                .allocated_chunks
                .saturating_sub(allocation_info.total_chunks);
        }

        Ok(())
    }

    // === Private helper methods ===

    /// Find the allocation that contains the given pointer
    fn find_allocation_containing_ptr(
        &self,
        meta: &[MetaInfo; M],
        ptr_addr: usize,
        pool_base: usize,
    ) -> Result<AllocationInfo> {
        // Check which chunk contains this pointer
        let containing_chunk = (ptr_addr - pool_base) / Self::CHUNK_SIZE;
        if containing_chunk >= M {
            return Err(anyhow!(AllocError::InvalidPointer).context("Pointer beyond pool bounds"));
        }
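        // e.g. with CHUNK_SIZE == 16, a pointer 40 bytes past the pool base lands
        // in chunk 40 / 16 == 2; the backwards scan below then locates the
        // AllocStart entry that covers that chunk.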

        // Find the start of the allocation by scanning backwards
        let mut scan_chunk = containing_chunk;
        loop {
            match meta[scan_chunk] {
                MetaInfo::AllocStart { size, ptr_offset } => {
                    // Found the allocation start
                    let end_chunk = scan_chunk + size;
                    if containing_chunk < end_chunk {
                        // Check if this is the right allocation by validating the pointer
                        let expected_ptr = pool_base + scan_chunk * Self::CHUNK_SIZE + ptr_offset;
                        if ptr_addr == expected_ptr {
                            return Ok(AllocationInfo {
                                start_chunk: scan_chunk,
                                total_chunks: size,
                            });
                        }
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not matching expected allocation"));
                }
                MetaInfo::AllocContinuation => {
                    // Continue scanning backwards
                    if scan_chunk == 0 {
                        return Err(
                            anyhow!(AllocError::NotAllocated).context("No allocation start found")
                        );
                    }
                    scan_chunk -= 1;
                }
                _ => {
                    return Err(anyhow!(AllocError::NotAllocated).context("Pointer in free region"));
                }
            }
        }
    }

    /// Finds a contiguous free region that can accommodate the request, considering alignment.
    /// Returns `(start_chunk, total_chunks_to_reserve, aligned_user_pointer)`.
    fn find_free_region(
        &self,
        inner: &PoolInner<N, M>,
        chunks_needed: usize,
        align: usize,
        pool_base: usize,
    ) -> Option<(usize, usize, usize)> {
        let mut i = 0;

        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(free_size) => {
                    // Found a free region, try to allocate within it
                    let free_start = i;
                    let free_end = i + free_size;

                    // Try starting positions within this free region (first fit wins)
                    for try_start in free_start..free_end {
                        // Calculate address of this chunk
                        let chunk_addr = pool_base + try_start * Self::CHUNK_SIZE;

                        // Round up to the next multiple of `align` (align is a power of two)
                        let aligned_addr = (chunk_addr + align - 1) & !(align - 1);

                        // How many bytes of alignment padding do we need?
                        let alignment_offset = aligned_addr - chunk_addr;

                        // Convert alignment offset to chunks (round up)
                        let alignment_chunks =
                            (alignment_offset + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
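                        // e.g. chunk_addr = 0x1010, align = 64: aligned_addr is
                        // 0x1040, alignment_offset is 0x30 (48 bytes), and with
                        // CHUNK_SIZE == 16 that costs 3 padding chunks.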

                        // Total chunks needed: alignment padding + actual allocation
                        let total_chunks = alignment_chunks + chunks_needed;

                        // Check if we have enough space in this free region
                        if try_start + total_chunks <= free_end {
                            // Verify all chunks in the range are actually free
                            let all_free =
                                (try_start..(try_start + total_chunks)).all(|check_idx| {
                                    matches!(
                                        inner.meta[check_idx],
                                        MetaInfo::Free(_) | MetaInfo::FreeContinuation
                                    )
                                });

                            if all_free {
                                // Recalculate the actual aligned pointer within our reserved space
                                let reserved_start_addr = pool_base + try_start * Self::CHUNK_SIZE;
                                let final_aligned_addr =
                                    (reserved_start_addr + align - 1) & !(align - 1);

                                // Ensure the aligned address is within our reserved region
                                let reserved_end_addr =
                                    pool_base + (try_start + total_chunks) * Self::CHUNK_SIZE;
                                if final_aligned_addr + (chunks_needed * Self::CHUNK_SIZE)
                                    <= reserved_end_addr
                                {
                                    return Some((try_start, total_chunks, final_aligned_addr));
                                }
                            }
                        }
                    }

                    // Move to the next region
                    i = free_end;
                }
                _ => {
                    // Not a free region, move to next chunk
                    i += 1;
                }
            }
        }

        None
    }

    /// Marks a range of chunks as allocated
    fn mark_allocated(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        total_chunks: usize,
        user_ptr: usize,
        pool_base: usize,
    ) -> Result<()> {
        if start_chunk + total_chunks > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation exceeds pool bounds"));
        }

        // Calculate the offset of the user pointer from the chunk base
        let chunk_base_addr = pool_base + start_chunk * Self::CHUNK_SIZE;
        let ptr_offset = user_ptr - chunk_base_addr;
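        // Continuing the example above: chunk_base_addr = 0x1010 and an aligned
        // user_ptr of 0x1040 give ptr_offset = 0x30, which deallocation uses to
        // verify that a freed pointer matches this allocation exactly.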

        // Mark the first chunk with the allocation info
        meta[start_chunk] = MetaInfo::AllocStart {
            size: total_chunks,
            ptr_offset,
        };

        // Mark subsequent chunks as allocated continuations
        for i in 1..total_chunks {
            meta[start_chunk + i] = MetaInfo::AllocContinuation;
        }

        // Handle any remaining free space after our allocation
        let after_allocation = start_chunk + total_chunks;
        if after_allocation < M {
            // Check if there are contiguous free chunks after our allocation
            let mut remaining_free = 0;
            let mut scan_idx = after_allocation;

            while scan_idx < M && matches!(meta[scan_idx], MetaInfo::FreeContinuation) {
                remaining_free += 1;
                scan_idx += 1;
            }

            // If there are free chunks, mark them properly
            if remaining_free > 0 {
                meta[after_allocation] = MetaInfo::Free(remaining_free);
                // The rest remain as FreeContinuation (they're already FreeContinuation)
            }
        }

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking allocation"
            );
        }

        Ok(())
    }

    /// Marks chunks as free and coalesces with adjacent free regions
    fn mark_chunks_free(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        chunk_count: usize,
    ) -> Result<()> {
        if start_chunk + chunk_count > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Invalid chunk range"));
        }

        // First, mark all chunks in the allocation as free continuations
        for i in start_chunk..(start_chunk + chunk_count) {
            meta[i] = MetaInfo::FreeContinuation;
        }

        // Now rebuild all free region markers by scanning the entire metadata array.
        // This is simpler and safer than trying to do complex coalescing.
        self.rebuild_free_markers(meta);

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking chunks free"
            );
        }

        Ok(())
    }

    /// Rebuild free region markers after freeing chunks.
    /// This scans the entire metadata array and properly marks free regions.
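    ///
    /// e.g. `[FreeContinuation, FreeContinuation, AllocStart { .. }, ...]`
    /// becomes `[Free(2), FreeContinuation, AllocStart { .. }, ...]`.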
    fn rebuild_free_markers(&self, meta: &mut [MetaInfo; M]) {
        let mut i = 0;
        while i < M {
            match meta[i] {
                MetaInfo::FreeContinuation | MetaInfo::Free(_) => {
                    // Found the start of a free region (or need to rebuild it), count its size
                    let start = i;
                    let mut size = 0;

                    // Count consecutive free chunks (both FreeContinuation and Free(_))
                    while i < M && matches!(meta[i], MetaInfo::FreeContinuation | MetaInfo::Free(_))
                    {
                        size += 1;
                        i += 1;
                    }

                    // Mark this free region properly
                    if size > 0 {
                        meta[start] = MetaInfo::Free(size);
                        // Mark the rest as FreeContinuation
                        for j in (start + 1)..(start + size) {
                            if j < M {
                                meta[j] = MetaInfo::FreeContinuation;
                            }
                        }
                    }
                }
                MetaInfo::AllocStart { size, .. } => {
                    // Skip this entire allocation
                    i += size;
                }
                MetaInfo::AllocContinuation => {
                    // This shouldn't happen at a scan position, but handle it gracefully
                    i += 1;
                }
            }
        }
    }

    /// Get a snapshot of the current pool statistics
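    ///
    /// # Example
    /// A sketch (requires the default `statistics` feature):
    /// ```
    /// # use memory_pool_allocator::MemoryPoolAllocator;
    /// # use core::alloc::Layout;
    /// # #[repr(align(64))]
    /// # struct Aligned { mem: [u8; 1024] }
    /// # let mut aligned = Aligned { mem: [0; 1024] };
    /// # let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
    /// let ptr = allocator.try_allocate(Layout::from_size_align(32, 8).unwrap()).unwrap();
    /// let stats = allocator.get_stats();
    /// assert!(stats.allocated_chunks >= 2); // 32 bytes needs at least two 16-byte chunks
    /// # allocator.try_deallocate(ptr).unwrap();
    /// ```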
    #[cfg(feature = "statistics")]
    pub fn get_stats(&self) -> PoolStats {
        self.stats.lock().clone()
    }

    /// Validate pool consistency (debug builds only)
    #[cfg(feature = "debug")]
    #[cfg(debug_assertions)]
    fn validate_pool_consistency(&self, meta: &[MetaInfo; M]) -> bool {
        let mut i = 0;
        while i < M {
            match meta[i] {
                MetaInfo::Free(size) => {
                    if i + size > M {
                        return false; // Size exceeds bounds
                    }
                    // Check that following chunks are FreeContinuation
                    for j in 1..size {
                        if !matches!(meta[i + j], MetaInfo::FreeContinuation) {
                            return false;
                        }
                    }
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    if i + size > M {
                        return false; // Size exceeds bounds
                    }
                    // Check that following chunks are AllocContinuation
                    for j in 1..size {
                        if !matches!(meta[i + j], MetaInfo::AllocContinuation) {
                            return false;
                        }
                    }
                    i += size;
                }
                MetaInfo::FreeContinuation | MetaInfo::AllocContinuation => {
                    // These should only appear as continuations, not at scan positions
                    return false;
                }
            }
        }
        true
    }
    /// Test helper to get total free space in chunks
    #[cfg(test)]
    fn count_free_chunks(&self) -> usize {
        let inner = self.inner.lock();
        let mut count = 0;
        let mut i = 0;
        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(size) => {
                    count += size;
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    i += size;
                }
                _ => i += 1,
            }
        }
        count
    }

    /// Test helper to get total allocated space in chunks
    #[cfg(test)]
    fn count_allocated_chunks(&self) -> usize {
        let inner = self.inner.lock();
        let mut count = 0;
        let mut i = 0;
        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(size) => {
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    count += size;
                    i += size;
                }
                _ => i += 1,
            }
        }
        count
    }
}

#[cfg(feature = "zero-on-drop")]
impl<const N: usize, const M: usize> Drop for MemoryPoolAllocator<N, M> {
    fn drop(&mut self) {
        // Zero the entire pool so no allocation contents survive the allocator.
        let inner = self.inner.lock();
        unsafe {
            core::ptr::write_bytes(inner.pool, 0, N);
        }
    }
}