memory_pool_allocator/
lib.rs

//! # memory-pool-allocator
//!
//! A `no_std`, thread-safe, fixed-size memory pool allocator with array-based chunk tracking.
//!
//! ## Features
//! - User-provided memory pool (via `*mut u8`)
//! - Chunked allocation with array-based chunk tracking
//! - Optional statistics and zero-on-free features
//! - Thread-safe via `parking_lot::Mutex`
//!
//! ## Default Features
//! - **zero-on-free**: Zeroes the memory of each allocation when it is deallocated.
//! - **zero-on-drop**: Zeroes the entire memory pool when the allocator is dropped.
//! - **statistics**: Tracks allocation and deallocation statistics (number of allocated chunks, allocation/deallocation errors).
//!
//! ## Optional Feature
//! - **debug**: Adds pool-consistency assertions to debug builds.
//!
//! ## Type Parameters
//! - `N`: Total pool size in bytes
//! - `M`: Number of chunks to divide the pool into
//!
//! ## Safety
//! - The user must provide a pointer to a memory region of at least `N` bytes, aligned to the maximum alignment required by allocations.
//! - The allocator does not manage the lifetime of the memory pool; the user is responsible for ensuring it remains valid for the allocator's lifetime.
//! - If the pool is not sufficiently aligned, allocations with higher alignment requirements may fail or result in undefined behavior.
//!
//! ## Example
//! ```rust
//! # use memory_pool_allocator::MemoryPoolAllocator;
//! # use core::alloc::Layout;
//! #[repr(align(64))]
//! struct Aligned {
//!     mem: [u8; 1024],
//! }
//! let mut aligned = Aligned { mem: [0; 1024] };
//! let allocator = unsafe { MemoryPoolAllocator::<1024, 64>::new(aligned.mem.as_mut_ptr()) };
//! let layout = Layout::from_size_align(128, 64).unwrap();
//! let ptr = allocator.try_allocate(layout).unwrap();
//! assert_eq!(ptr as usize % 64, 0);
//! allocator.try_deallocate(ptr).unwrap();
//! ```
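//!
//! Allocations are made in whole chunks of `N / M` bytes, so even a 1-byte
//! request occupies one full chunk. A minimal sketch of this rounding
//! (assuming the default `statistics` feature is enabled):
//! ```rust
//! # use memory_pool_allocator::MemoryPoolAllocator;
//! # use core::alloc::Layout;
//! #[repr(align(16))]
//! struct Aligned([u8; 256]);
//! let mut mem = Aligned([0; 256]);
//! // 256 bytes divided into 16 chunks of 16 bytes each.
//! let allocator = unsafe { MemoryPoolAllocator::<256, 16>::new(mem.0.as_mut_ptr()) };
//! let layout = Layout::from_size_align(1, 1).unwrap();
//! let ptr = allocator.try_allocate(layout).unwrap();
//! // The 1-byte allocation is rounded up to one 16-byte chunk.
//! assert_eq!(allocator.get_stats().allocated_chunks, 1);
//! allocator.try_deallocate(ptr).unwrap();
//! ```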
//!
//! ## License
//! Licensed under either of Apache License, Version 2.0 or MIT license at your option.

// Copyright 2025 Chirping666
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.

#![no_std]

#[cfg(test)]
mod tests;

use anyhow::{Result, anyhow};
use core::{alloc::Layout, ptr::NonNull};
use parking_lot::Mutex;

/// Array chunk tracking-based fixed-size memory pool allocator.
///
/// # Type Parameters
/// * `N` - Total pool size in bytes
/// * `M` - Number of chunks to divide the pool into
pub struct MemoryPoolAllocator<const N: usize, const M: usize> {
    inner: Mutex<PoolInner<N, M>>,
    #[cfg(feature = "statistics")]
    stats: Mutex<PoolStats>,
}

struct PoolInner<const N: usize, const M: usize> {
    /// Pointer to the actual memory pool (user-provided)
    pool: *mut u8,
    /// Allocation tracking
    meta: [MetaInfo; M],
}

/// Per-chunk allocation metadata.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MetaInfo {
    /// First chunk of a free region spanning the given number of chunks
    Free(usize),
    /// Chunk belongs to a free region that starts earlier
    FreeContinuation,
    /// First chunk of an allocation
    AllocStart {
        size: usize,       // Total chunks reserved for the allocation
        ptr_offset: usize, // Offset of the user pointer from the chunk base
    },
    /// Chunk belongs to an allocation that starts earlier
    AllocContinuation,
}
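
// Illustrative metadata layout (not code) for M = 8 after a 3-chunk
// allocation at index 2, with "FC"/"AC" short for Free-/AllocContinuation:
//
//   index: 0        1   2                 3   4   5        6   7
//   meta:  Free(2)  FC  AllocStart{3, _}  AC  AC  Free(3)  FC  FC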

/// Allocation errors
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// Not enough contiguous space
    OutOfMemory,
    /// Invalid layout parameters
    InvalidLayout,
    /// Pointer not from this allocator
    InvalidPointer,
    /// Pointer not currently allocated
    NotAllocated,
}

impl core::fmt::Display for AllocError {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Self::OutOfMemory => write!(f, "Out of memory"),
            Self::InvalidLayout => write!(f, "Invalid layout parameters"),
            Self::InvalidPointer => write!(f, "Pointer not from this allocator"),
            Self::NotAllocated => write!(f, "Pointer not currently allocated"),
        }
    }
}

impl From<anyhow::Error> for AllocError {
    fn from(err: anyhow::Error) -> Self {
        if err.is::<AllocError>() {
            *err.downcast_ref::<AllocError>().unwrap()
        } else {
            AllocError::InvalidLayout
        }
    }
}
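
// Recovery sketch (hypothetical caller code): a typed `AllocError` can be
// pulled back out of the `anyhow::Error` chain; foreign errors fall back to
// `InvalidLayout` as implemented above.
//
//   let err: anyhow::Error = anyhow::anyhow!(AllocError::OutOfMemory);
//   assert_eq!(AllocError::from(err), AllocError::OutOfMemory);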

/// Pool statistics
#[cfg(feature = "statistics")]
#[derive(Debug, Clone)]
pub struct PoolStats {
    pub allocated_chunks: usize,
    pub allocation_errors: usize,
    pub deallocation_errors: usize,
}

#[cfg(feature = "statistics")]
impl PoolStats {
    const fn new() -> Self {
        Self {
            allocated_chunks: 0,
            allocation_errors: 0,
            deallocation_errors: 0,
        }
    }
}

/// Information about an allocation for deallocation
struct AllocationInfo {
    start_chunk: usize,
    total_chunks: usize,
}

// Safety: the raw pool pointer and metadata are only accessed while holding
// the mutex; the caller of `new` guarantees the pool memory itself is valid
// to access from any thread that uses the allocator.
unsafe impl<const N: usize, const M: usize> Sync for MemoryPoolAllocator<N, M> {}
unsafe impl<const N: usize, const M: usize> Send for MemoryPoolAllocator<N, M> {}

impl<const N: usize, const M: usize> MemoryPoolAllocator<N, M> {
    // Compile-time assertions. Associated constants are only evaluated when
    // referenced, so `new` touches each of these to force the checks.
    const _DIVISIBILITY: () = assert!(
        N % M == 0,
        "Pool size N must be exactly divisible by chunk count M"
    );
    const _NON_ZERO_CHUNK_NUM: () = assert!(M > 0, "Must have at least one chunk");
    const _NON_ZERO_POOL_SIZE: () = assert!(N > 0, "Pool size must be greater than zero");
    const _N_GR_THAN_OR_EQ_TO_M: () = assert!(
        N >= M,
        "Pool size N must be greater than or equal to chunk count M"
    );

    /// Size of each chunk in bytes
    pub const CHUNK_SIZE: usize = N / M;

    /// # Safety
    /// The caller must ensure the pointer is valid for reads/writes of `N` bytes and properly aligned for all pool operations.
    pub const unsafe fn new(pool: *mut u8) -> Self {
        // Reference the compile-time assertions so they are actually checked.
        let () = Self::_DIVISIBILITY;
        let () = Self::_NON_ZERO_CHUNK_NUM;
        let () = Self::_NON_ZERO_POOL_SIZE;
        let () = Self::_N_GR_THAN_OR_EQ_TO_M;

        let mut meta = [MetaInfo::FreeContinuation; M];
        meta[0] = MetaInfo::Free(M); // Initialize first chunk as Free with size M
        Self {
            inner: Mutex::new(PoolInner { pool, meta }),
            #[cfg(feature = "statistics")]
            stats: Mutex::new(PoolStats::new()),
        }
    }
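
    // Construction sketch with statically backed memory (hypothetical caller
    // code; assumes the backing buffer outlives the allocator):
    //
    //   #[repr(align(64))]
    //   struct Backing([u8; 4096]);
    //   static mut BACKING: Backing = Backing([0; 4096]);
    //   // Safety: BACKING is 'static, 4096 bytes long, and 64-byte aligned.
    //   let allocator = unsafe {
    //       MemoryPoolAllocator::<4096, 64>::new(core::ptr::addr_of_mut!(BACKING.0).cast())
    //   };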

    /// Attempts to allocate memory with the specified layout
    pub fn try_allocate(&self, layout: Layout) -> Result<*mut u8> {
        // Handle zero-size allocations
        if layout.size() == 0 {
            return Ok(NonNull::dangling().as_ptr());
        }

        // Validate layout
        if !layout.align().is_power_of_two() || layout.align() > N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidLayout).context("Invalid alignment or size"));
        }

        let chunks_needed = (layout.size() + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
        if chunks_needed > M || chunks_needed == 0 {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation too large"));
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;

        // Find a suitable free region
        if let Some((start_chunk, total_chunks, aligned_ptr)) =
            self.find_free_region(&inner, chunks_needed, layout.align(), pool_base)
        {
            self.mark_allocated(
                &mut inner.meta,
                start_chunk,
                total_chunks,
                aligned_ptr,
                pool_base,
            )?;

            #[cfg(all(feature = "debug", debug_assertions))]
            {
                debug_assert!(
                    self.validate_pool_consistency(&inner.meta),
                    "Pool consistency check failed after allocation"
                );
            }

            #[cfg(feature = "statistics")]
            {
                let mut stats = self.stats.lock();
                stats.allocated_chunks += total_chunks;
            }

            return Ok(aligned_ptr as *mut u8);
        }

        #[cfg(feature = "statistics")]
        {
            self.stats.lock().allocation_errors += 1;
        }
        Err(anyhow!(AllocError::OutOfMemory).context("No suitable free region found"))
    }
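
    // Error-handling sketch (hypothetical caller code): callers that want the
    // typed error instead of the anyhow chain can convert it back.
    //
    //   match allocator.try_allocate(layout) {
    //       Ok(ptr) => { /* use ptr */ }
    //       Err(e) => match AllocError::from(e) {
    //           AllocError::OutOfMemory => { /* free something and retry */ }
    //           other => panic!("allocation failed: {}", other),
    //       },
    //   }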

    /// Attempts to deallocate previously allocated memory
    pub fn try_deallocate(&self, ptr: *mut u8) -> Result<()> {
        // Handle null pointer
        if ptr.is_null() {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Cannot deallocate null pointer")
            );
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;
        let ptr_addr = ptr as usize;

        // Validate pointer is within pool bounds
        if ptr_addr < pool_base || ptr_addr >= pool_base + N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Pointer not from this allocator")
            );
        }

        // Find the allocation that contains this pointer
        let allocation_info =
            match self.find_allocation_containing_ptr(&inner.meta, ptr_addr, pool_base) {
                Ok(info) => info,
                Err(_) => {
                    #[cfg(feature = "statistics")]
                    {
                        self.stats.lock().deallocation_errors += 1;
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not currently allocated"));
                }
            };

        // Clear memory if feature enabled
        #[cfg(feature = "zero-on-free")]
        {
            unsafe {
                let start_ptr =
                    (pool_base + allocation_info.start_chunk * Self::CHUNK_SIZE) as *mut u8;
                core::ptr::write_bytes(
                    start_ptr,
                    0,
                    allocation_info.total_chunks * Self::CHUNK_SIZE,
                );
            }
        }

        // Mark chunks as free and coalesce
        self.mark_chunks_free(
            &mut inner.meta,
            allocation_info.start_chunk,
            allocation_info.total_chunks,
        )?;

        #[cfg(all(feature = "debug", debug_assertions))]
        {
            debug_assert!(
                self.validate_pool_consistency(&inner.meta),
                "Pool consistency check failed after deallocation"
            );
        }

        // Update stats
        #[cfg(feature = "statistics")]
        {
            let mut stats = self.stats.lock();
            stats.allocated_chunks = stats
                .allocated_chunks
                .saturating_sub(allocation_info.total_chunks);
        }

        Ok(())
    }
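
    // Round-trip sketch (hypothetical caller code, default features assumed):
    //
    //   let p = allocator.try_allocate(layout)?;
    //   allocator.try_deallocate(p)?;                  // zeroed under zero-on-free
    //   assert!(allocator.try_deallocate(p).is_err()); // double free is rejected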

    // === Private helper methods ===

    /// Find the allocation that contains the given pointer
    fn find_allocation_containing_ptr(
        &self,
        meta: &[MetaInfo; M],
        ptr_addr: usize,
        pool_base: usize,
    ) -> Result<AllocationInfo> {
        // Check which chunk contains this pointer
        let containing_chunk = (ptr_addr - pool_base) / Self::CHUNK_SIZE;
        if containing_chunk >= M {
            return Err(anyhow!(AllocError::InvalidPointer).context("Pointer beyond pool bounds"));
        }

        // Find the start of the allocation by scanning backwards
        let mut scan_chunk = containing_chunk;
        loop {
            match meta[scan_chunk] {
                MetaInfo::AllocStart { size, ptr_offset } => {
                    // Found the allocation start
                    let end_chunk = scan_chunk + size;
                    if containing_chunk < end_chunk {
                        // Check if this is the right allocation by validating the pointer
                        let expected_ptr = pool_base + scan_chunk * Self::CHUNK_SIZE + ptr_offset;
                        if ptr_addr == expected_ptr {
                            return Ok(AllocationInfo {
                                start_chunk: scan_chunk,
                                total_chunks: size,
                            });
                        }
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not matching expected allocation"));
                }
                MetaInfo::AllocContinuation => {
                    // Continue scanning backwards
                    if scan_chunk == 0 {
                        return Err(
                            anyhow!(AllocError::NotAllocated).context("No allocation start found")
                        );
                    }
                    scan_chunk -= 1;
                }
                _ => {
                    return Err(anyhow!(AllocError::NotAllocated).context("Pointer in free region"));
                }
            }
        }
    }
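
    // Lookup sketch: given the metadata
    //   [AllocStart{3, 0}, AC, AC, Free(1), AllocStart{2, 0}, AC]
    // a pointer landing in chunk 5 scans backwards to chunk 4, finds
    // AllocStart{2, 0}, and then requires the pointer to exactly equal the
    // recorded chunk base plus ptr_offset, so interior pointers are rejected.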

    /// Finds a contiguous free region that can accommodate the request, taking alignment into account.
    /// Returns `(start_chunk, total_chunks_to_reserve, aligned_user_pointer)`.
    fn find_free_region(
        &self,
        inner: &PoolInner<N, M>,
        chunks_needed: usize,
        align: usize,
        pool_base: usize,
    ) -> Option<(usize, usize, usize)> {
        let mut i = 0;

        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(free_size) => {
                    // Found a free region, try to allocate within it
                    let free_start = i;
                    let free_end = i + free_size;

                    // Try different starting positions within this free region
                    for try_start in free_start..free_end {
                        // Calculate the address of this chunk
                        let chunk_addr = pool_base + try_start * Self::CHUNK_SIZE;

                        // Calculate the aligned address
                        let aligned_addr = (chunk_addr + align - 1) & !(align - 1);

                        // How many bytes of alignment padding do we need?
                        let alignment_offset = aligned_addr - chunk_addr;

                        // Convert the alignment offset to chunks (round up)
                        let alignment_chunks =
                            (alignment_offset + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;

                        // Total chunks needed: alignment padding + actual allocation
                        let total_chunks = alignment_chunks + chunks_needed;

                        // Check if we have enough space in this free region
                        if try_start + total_chunks <= free_end {
                            // Recalculate the actual aligned pointer within our reserved space
                            let reserved_start_addr = pool_base + try_start * Self::CHUNK_SIZE;
                            let final_aligned_addr =
                                (reserved_start_addr + align - 1) & !(align - 1);

                            // Ensure the aligned allocation fits within the reserved region
                            let reserved_end_addr =
                                pool_base + (try_start + total_chunks) * Self::CHUNK_SIZE;
                            if final_aligned_addr + (chunks_needed * Self::CHUNK_SIZE)
                                <= reserved_end_addr
                            {
                                return Some((try_start, total_chunks, final_aligned_addr));
                            }
                        }
                    }

                    // Move to the next region
                    i = free_end;
                }
                _ => {
                    // Not a free region, move to the next chunk
                    i += 1;
                }
            }
        }

        None
    }
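
    // Worked example of the alignment arithmetic above (illustrative numbers):
    // with CHUNK_SIZE = 16, pool_base = 0x1000, try_start = 1, align = 64:
    //   chunk_addr       = 0x1000 + 1 * 16     = 0x1010
    //   aligned_addr     = (0x1010 + 63) & !63 = 0x1040
    //   alignment_offset = 0x1040 - 0x1010     = 48 bytes
    //   alignment_chunks = ceil(48 / 16)       = 3
    // so a one-chunk request reserves 3 + 1 = 4 chunks starting at chunk 1,
    // and the user pointer 0x1040 lies in the last reserved chunk.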

    /// Marks a range of chunks as allocated
    fn mark_allocated(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        total_chunks: usize,
        user_ptr: usize,
        pool_base: usize,
    ) -> Result<()> {
        if start_chunk + total_chunks > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation exceeds pool bounds"));
        }

        // Calculate the offset of the user pointer from the chunk base
        let chunk_base_addr = pool_base + start_chunk * Self::CHUNK_SIZE;
        let ptr_offset = user_ptr - chunk_base_addr;

        // Determine the full free region that contains this allocation
        let mut region_start = start_chunk;
        while region_start > 0 && matches!(meta[region_start - 1], MetaInfo::FreeContinuation) {
            region_start -= 1;
        }

        let free_region_size = match meta.get(region_start) {
            Some(MetaInfo::Free(size)) => *size,
            Some(
                MetaInfo::FreeContinuation
                | MetaInfo::AllocStart { .. }
                | MetaInfo::AllocContinuation,
            ) => {
                return Err(anyhow!(AllocError::OutOfMemory)
                    .context("Attempted to allocate from a non-free region"));
            }
            None => {
                return Err(anyhow!(AllocError::OutOfMemory)
                    .context("Allocation region start out of bounds"));
            }
        };

        let region_end = region_start + free_region_size;
        if start_chunk + total_chunks > region_end {
            return Err(anyhow!(AllocError::OutOfMemory)
                .context("Allocation exceeds available free region"));
        }

        // Temporarily mark the entire region as continuations so we can rebuild
        for idx in region_start..region_end {
            meta[idx] = MetaInfo::FreeContinuation;
        }

        // Restore any leading free region before the allocation
        let leading_free = start_chunk.saturating_sub(region_start);
        if leading_free > 0 {
            Self::set_free_region(meta, region_start, leading_free);
        }

        // Mark the first chunk with the allocation info
        meta[start_chunk] = MetaInfo::AllocStart {
            size: total_chunks,
            ptr_offset,
        };

        // Mark subsequent chunks as allocated continuations
        for i in 1..total_chunks {
            meta[start_chunk + i] = MetaInfo::AllocContinuation;
        }

        // Restore any trailing free region after the allocation
        let allocation_end = start_chunk + total_chunks;
        if allocation_end < region_end {
            Self::set_free_region(meta, allocation_end, region_end - allocation_end);
        }

        #[cfg(all(feature = "debug", debug_assertions))]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking allocation"
            );
        }

        Ok(())
    }
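
    // Rebuild sketch: carving a 2-chunk allocation at index 3 out of a
    // Free(6) region starting at index 1 yields
    //   before: [.., Free(6), FC, FC, FC, FC, FC, ..]
    //   after:  [.., Free(2), FC, AllocStart{2, _}, AC, Free(2), FC, ..]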

    /// Marks chunks as free and coalesces with adjacent free regions
    fn mark_chunks_free(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        chunk_count: usize,
    ) -> Result<()> {
        if start_chunk + chunk_count > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Invalid chunk range"));
        }

        let left_region = if start_chunk > 0 {
            Self::free_region_info(meta, start_chunk - 1)
        } else {
            None
        };

        let right_index = start_chunk + chunk_count;
        let right_region = if right_index < M {
            Self::free_region_info(meta, right_index)
        } else {
            None
        };

        let mut region_start = start_chunk;
        if let Some((left_start, _)) = left_region {
            region_start = left_start;
        }

        let mut region_end = start_chunk + chunk_count;
        if let Some((right_start, right_size)) = right_region {
            region_end = core::cmp::max(region_end, right_start + right_size);
        }

        for idx in region_start..region_end {
            meta[idx] = MetaInfo::FreeContinuation;
        }

        Self::set_free_region(meta, region_start, region_end - region_start);

        #[cfg(all(feature = "debug", debug_assertions))]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking chunks free"
            );
        }

        Ok(())
    }
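
    // Coalescing sketch: freeing that same 2-chunk allocation at index 3
    // merges it with the free neighbors on both sides:
    //   before: [.., Free(2), FC, AllocStart{2, _}, AC, Free(2), FC, ..]
    //   after:  [.., Free(6), FC, FC, FC, FC, FC, ..]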

    /// Returns `(start, size)` of the free region containing chunk `idx`, if any.
    fn free_region_info(meta: &[MetaInfo; M], idx: usize) -> Option<(usize, usize)> {
        if idx >= M {
            return None;
        }

        match meta[idx] {
            MetaInfo::Free(size) => Some((idx, size)),
            MetaInfo::FreeContinuation => {
                let mut start = idx;
                while start > 0 && matches!(meta[start - 1], MetaInfo::FreeContinuation) {
                    start -= 1;
                }

                if let MetaInfo::Free(size) = meta[start] {
                    Some((start, size))
                } else {
                    None
                }
            }
            _ => None,
        }
    }

    /// Writes a `Free(len)` header at `start` followed by `len - 1` continuations.
    fn set_free_region(meta: &mut [MetaInfo; M], start: usize, len: usize) {
        if len == 0 {
            return;
        }

        meta[start] = MetaInfo::Free(len);
        for offset in 1..len {
            meta[start + offset] = MetaInfo::FreeContinuation;
        }
    }

    /// Get current pool statistics
    #[cfg(feature = "statistics")]
    pub fn get_stats(&self) -> PoolStats {
        self.stats.lock().clone()
    }

    // /// Get a snapshot of the current pool state for debugging
    // #[cfg(debug_assertions)]
    // pub fn debug_pool_state(&self) -> Vec<(usize, MetaInfo)> {
    //     let inner = self.inner.lock();
    //     inner.meta.iter().enumerate().map(|(i, &meta)| (i, meta)).collect()
    // }

    /// Validate pool consistency (debug builds only)
    #[cfg(all(feature = "debug", debug_assertions))]
    fn validate_pool_consistency(&self, meta: &[MetaInfo; M]) -> bool {
        let mut i = 0;
        while i < M {
            match meta[i] {
                MetaInfo::Free(size) => {
                    if i + size > M {
                        return false; // Size exceeds bounds
                    }
                    // Check that following chunks are FreeContinuation
                    for j in 1..size {
                        if !matches!(meta[i + j], MetaInfo::FreeContinuation) {
                            return false;
                        }
                    }
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    if i + size > M {
                        return false; // Size exceeds bounds
                    }
                    // Check that following chunks are AllocContinuation
                    for j in 1..size {
                        if !matches!(meta[i + j], MetaInfo::AllocContinuation) {
                            return false;
                        }
                    }
                    i += size;
                }
                MetaInfo::FreeContinuation | MetaInfo::AllocContinuation => {
                    // A well-formed scan never lands on a continuation chunk
                    return false;
                }
            }
        }
        true
    }
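
    // Invariant illustration: scanning from chunk 0, every visited position
    // must begin a run. [Free(2), FC, AllocStart{1, _}] is consistent, while
    // [Free(2), AllocStart{1, _}, FC] is not: Free(2) claims chunk 1, which
    // is not a FreeContinuation.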

    // This is probably not needed at all.
    // /// Checks if a specific chunk is allocated (used by tests/debug)
    // #[allow(dead_code)]
    // #[cfg(test)]
    // fn is_allocated(&self, meta: &[MetaInfo; M], chunk_idx: usize) -> bool {
    //     if chunk_idx >= M {
    //         return false;
    //     }
    //     matches!(
    //         meta[chunk_idx],
    //         MetaInfo::AllocStart { .. } | MetaInfo::AllocContinuation
    //     )
    // }

    // /// Test helper function to check if a pointer is properly aligned
    // #[allow(dead_code)]
    // #[cfg(test)]
    // fn is_properly_aligned(ptr: *mut u8, align: usize) -> bool {
    //     (ptr as usize) % align == 0
    // }

    /// Test helper to get total free space in chunks
    #[cfg(test)]
    fn count_free_chunks(&self) -> usize {
        let inner = self.inner.lock();
        let mut count = 0;
        let mut i = 0;
        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(size) => {
                    count += size;
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    i += size;
                }
                _ => i += 1,
            }
        }
        count
    }

    /// Test helper to get total allocated space in chunks
    #[cfg(test)]
    fn count_allocated_chunks(&self) -> usize {
        let inner = self.inner.lock();
        let mut count = 0;
        let mut i = 0;
        while i < M {
            match inner.meta[i] {
                MetaInfo::Free(size) => {
                    i += size;
                }
                MetaInfo::AllocStart { size, .. } => {
                    count += size;
                    i += size;
                }
                _ => i += 1,
            }
        }
        count
    }
}

#[cfg(feature = "zero-on-drop")]
impl<const N: usize, const M: usize> Drop for MemoryPoolAllocator<N, M> {
    fn drop(&mut self) {
        // Zero the entire pool on drop, regardless of whether any chunks are
        // still allocated.
        let inner = self.inner.lock();
        unsafe {
            core::ptr::write_bytes(inner.pool, 0, N);
        }
    }
}