1#![no_std]
52
53#[cfg(test)]
54mod tests;
55
56use anyhow::{Result, anyhow};
57use core::{alloc::Layout, ptr::NonNull};
58use parking_lot::Mutex;
59
/// Fixed-size, chunk-based memory pool allocator for `no_std` environments.
///
/// Manages `N` bytes of caller-provided memory split into `M` equally sized
/// chunks. All mutable state lives behind `parking_lot` mutexes so a single
/// allocator instance can be shared across threads.
pub struct MemoryPoolAllocator<const N: usize, const M: usize> {
    // Pool base pointer plus per-chunk allocation metadata.
    inner: Mutex<PoolInner<N, M>>,
    // Activity counters, compiled in only with the `statistics` feature.
    // Kept in a separate lock from `inner` so stat updates never extend the
    // critical section that guards the pool itself.
    #[cfg(feature = "statistics")]
    stats: Mutex<PoolStats>,
}
71
/// Mutable allocator state: the raw backing buffer and one metadata entry
/// per chunk (entry `i` describes chunk `i`).
struct PoolInner<const N: usize, const M: usize> {
    // Base address of the N-byte backing buffer; memory is owned by the
    // caller of `MemoryPoolAllocator::new`, not by this struct.
    pool: *mut u8,
    // Per-chunk bookkeeping for free/allocated regions.
    meta: [MetaInfo; M],
}
78
/// Per-chunk metadata. Chunks are grouped into runs: a leading entry
/// (`Free`/`AllocStart`) records the run length and every following chunk in
/// the run is a continuation marker.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum MetaInfo {
    /// First chunk of a free region; payload is the region length in chunks
    /// (including this one).
    Free(usize),

    /// Non-leading chunk of a free region.
    FreeContinuation,

    /// First chunk of an allocation.
    AllocStart {
        /// Total chunks reserved, including any alignment-padding chunks.
        size: usize,
        /// Byte offset from this chunk's base address to the pointer handed
        /// to the user (non-zero when alignment padding was required).
        ptr_offset: usize,
    },

    /// Non-leading chunk of an allocation.
    AllocContinuation,
}
93
/// Errors produced by the pool allocator.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AllocError {
    /// No free region large enough for the request.
    OutOfMemory,
    /// The requested layout cannot be served (e.g. alignment larger than
    /// the pool).
    InvalidLayout,
    /// The pointer is null or does not lie inside this allocator's pool.
    InvalidPointer,
    /// The pointer is inside the pool but matches no live allocation.
    NotAllocated,
}
106
107impl core::fmt::Display for AllocError {
108 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
109 match self {
110 Self::OutOfMemory => write!(f, "Out of memory"),
111 Self::InvalidLayout => write!(f, "Invalid layout parameters"),
112 Self::InvalidPointer => write!(f, "Pointer not from this allocator"),
113 Self::NotAllocated => write!(f, "Pointer not currently allocated"),
114 }
115 }
116}
117
118impl From<anyhow::Error> for AllocError {
119 fn from(err: anyhow::Error) -> Self {
120 if err.is::<AllocError>() {
121 *err.downcast_ref::<AllocError>().unwrap()
122 } else {
123 AllocError::InvalidLayout
124 }
125 }
126}
127
/// Counters describing allocator activity (compiled in only with the
/// `statistics` feature).
#[cfg(feature = "statistics")]
#[derive(Debug, Clone)]
pub struct PoolStats {
    /// Chunks currently reserved by live allocations, alignment-padding
    /// chunks included.
    pub allocated_chunks: usize,
    /// Number of failed allocation attempts.
    pub allocation_errors: usize,
    /// Number of failed deallocation attempts.
    pub deallocation_errors: usize,
}
136
#[cfg(feature = "statistics")]
impl PoolStats {
    /// Creates a zeroed statistics record; `const` so it can be used from
    /// the allocator's `const fn new`.
    const fn new() -> Self {
        Self {
            allocated_chunks: 0,
            allocation_errors: 0,
            deallocation_errors: 0,
        }
    }
}
147
/// Resolved location of a live allocation inside the pool, as returned by
/// `find_allocation_containing_ptr`.
struct AllocationInfo {
    // Index of the chunk holding the `AllocStart` entry.
    start_chunk: usize,
    // Total chunks reserved for the allocation (alignment padding included).
    total_chunks: usize,
}
153
// SAFETY: the raw `pool` pointer is the only reason the type is not
// automatically Send/Sync; every access to it and to the chunk metadata goes
// through the `Mutex` in `inner` (and `stats` has its own lock), so sharing
// or moving the allocator across threads cannot cause unsynchronized access.
// NOTE(review): this also assumes the caller-provided pool memory itself may
// be touched from any thread — confirm that `new`'s contract requires this.
unsafe impl<const N: usize, const M: usize> Sync for MemoryPoolAllocator<N, M> {}
unsafe impl<const N: usize, const M: usize> Send for MemoryPoolAllocator<N, M> {}
157
158impl<const N: usize, const M: usize> MemoryPoolAllocator<N, M> {
159 const _DIVISIBILITY: () = assert!(
161 N % M == 0,
162 "Pool size N must be exactly divisible by chunk count M"
163 );
164 const _NON_ZERO_CHUNK_NUM: () = assert!(M > 0, "Must have at least one chunk");
165 const _NON_ZERO_POOL_SIZE: () = assert!(N > 0, "Pool size must be greater than zero");
166 const _N_GR_THAN_OR_EQ_TO_M: () = assert!(
167 N >= M,
168 "Pool size N must be greater than or equal to chunk count M"
169 );
170
171 pub const CHUNK_SIZE: usize = N / M;
173
174 pub const unsafe fn new(pool: *mut u8) -> Self {
177 let mut meta = [MetaInfo::FreeContinuation; M];
178 meta[0] = MetaInfo::Free(M); Self {
180 inner: Mutex::new(PoolInner { pool, meta }),
181 #[cfg(feature = "statistics")]
182 stats: Mutex::new(PoolStats::new()),
183 }
184 }
185
    /// Attempts to allocate a block satisfying `layout` from the pool.
    ///
    /// Zero-sized requests succeed immediately with a dangling, non-null
    /// pointer and reserve no chunks. Otherwise the size is rounded up to
    /// whole chunks and the first free region that can satisfy both size
    /// and alignment is reserved.
    ///
    /// # Errors
    ///
    /// `AllocError::InvalidLayout` if the alignment is unusable (larger
    /// than the pool); `AllocError::OutOfMemory` if the request exceeds the
    /// pool or no suitable free region exists.
    pub fn try_allocate(&self, layout: Layout) -> Result<*mut u8> {
        if layout.size() == 0 {
            // ZST allocation: hand out a dangling pointer, reserve nothing.
            // NOTE(review): `try_deallocate` rejects this pointer as
            // out-of-pool — confirm callers never free zero-sized
            // allocations.
            return Ok(NonNull::dangling().as_ptr());
        }

        // `Layout` already guarantees a power-of-two alignment; the extra
        // condition rejects alignments larger than the whole pool.
        if !layout.align().is_power_of_two() || layout.align() > N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::InvalidLayout).context("Invalid alignment or size"));
        }

        // Round the byte size up to a whole number of chunks.
        let chunks_needed = (layout.size() + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
        if chunks_needed > M || chunks_needed == 0 {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().allocation_errors += 1;
            }
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation too large"));
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;

        if let Some((start_chunk, total_chunks, aligned_ptr)) =
            self.find_free_region(&inner, chunks_needed, layout.align(), pool_base)
        {
            // Commit the reservation; `total_chunks` includes any
            // alignment-padding chunks in front of `aligned_ptr`.
            self.mark_allocated(
                &mut inner.meta,
                start_chunk,
                total_chunks,
                aligned_ptr,
                pool_base,
            )?;

            #[cfg(feature = "debug")]
            #[cfg(debug_assertions)]
            {
                debug_assert!(
                    self.validate_pool_consistency(&inner.meta),
                    "Pool consistency check failed after allocation"
                );
            }

            #[cfg(feature = "statistics")]
            {
                // Separate lock from `inner`; taken only after the pool
                // mutation is complete, so lock order is always
                // inner -> stats.
                let mut stats = self.stats.lock();
                stats.allocated_chunks += total_chunks;
            }

            return Ok(aligned_ptr as *mut u8);
        }

        #[cfg(feature = "statistics")]
        {
            self.stats.lock().allocation_errors += 1;
        }
        Err(anyhow!(AllocError::OutOfMemory).context("No suitable free region found"))
    }
250
    /// Returns a previously allocated block to the pool.
    ///
    /// `ptr` must be exactly the pointer returned by `try_allocate`. Freed
    /// chunks are coalesced with any adjacent free regions; with the
    /// `zero-on-free` feature the reserved span (padding included) is
    /// zeroed first.
    ///
    /// # Errors
    ///
    /// `AllocError::InvalidPointer` for null or out-of-pool pointers;
    /// `AllocError::NotAllocated` when no live allocation matches `ptr`.
    pub fn try_deallocate(&self, ptr: *mut u8) -> Result<()> {
        if ptr.is_null() {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Cannot deallocate null pointer")
            );
        }

        let mut inner = self.inner.lock();
        let pool_base = inner.pool as usize;
        let ptr_addr = ptr as usize;

        // Range check before any metadata lookup: the pointer must lie
        // inside the N-byte pool.
        if ptr_addr < pool_base || ptr_addr >= pool_base + N {
            #[cfg(feature = "statistics")]
            {
                self.stats.lock().deallocation_errors += 1;
            }
            return Err(
                anyhow!(AllocError::InvalidPointer).context("Pointer not from this allocator")
            );
        }

        // Map the pointer back to its owning allocation; this also verifies
        // the exact user address (including any alignment offset).
        let allocation_info =
            match self.find_allocation_containing_ptr(&inner.meta, ptr_addr, pool_base) {
                Ok(info) => info,
                Err(_) => {
                    #[cfg(feature = "statistics")]
                    {
                        self.stats.lock().deallocation_errors += 1;
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not currently allocated"));
                }
            };

        #[cfg(feature = "zero-on-free")]
        {
            // SAFETY: the span [start_chunk, start_chunk + total_chunks)
            // was verified above to be a live allocation inside the pool,
            // and we hold the `inner` lock, so the write cannot race.
            unsafe {
                let start_ptr =
                    (pool_base + allocation_info.start_chunk * Self::CHUNK_SIZE) as *mut u8;
                core::ptr::write_bytes(
                    start_ptr,
                    0,
                    allocation_info.total_chunks * Self::CHUNK_SIZE,
                );
            }
        }

        // Release the chunks and coalesce with neighboring free regions.
        self.mark_chunks_free(
            &mut inner.meta,
            allocation_info.start_chunk,
            allocation_info.total_chunks,
        )?;

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(&inner.meta),
                "Pool consistency check failed after deallocation"
            );
        }

        #[cfg(feature = "statistics")]
        {
            // saturating_sub guards against counter underflow if stats ever
            // drift from the pool state.
            let mut stats = self.stats.lock();
            stats.allocated_chunks = stats
                .allocated_chunks
                .saturating_sub(allocation_info.total_chunks);
        }

        Ok(())
    }
334
    /// Maps a user pointer back to the allocation that owns it.
    ///
    /// Locates the chunk containing `ptr_addr`, then walks backwards over
    /// `AllocContinuation` entries to the owning `AllocStart`. The pointer
    /// is accepted only if it equals the exact address originally handed
    /// out (start-chunk base plus the recorded `ptr_offset`).
    fn find_allocation_containing_ptr(
        &self,
        meta: &[MetaInfo; M],
        ptr_addr: usize,
        pool_base: usize,
    ) -> Result<AllocationInfo> {
        let containing_chunk = (ptr_addr - pool_base) / Self::CHUNK_SIZE;
        if containing_chunk >= M {
            return Err(anyhow!(AllocError::InvalidPointer).context("Pointer beyond pool bounds"));
        }

        // Scan backwards from the containing chunk until we hit the start
        // of an allocation (or prove there isn't one).
        let mut scan_chunk = containing_chunk;
        loop {
            match meta[scan_chunk] {
                MetaInfo::AllocStart { size, ptr_offset } => {
                    let end_chunk = scan_chunk + size;
                    // The original pointer must both fall inside this
                    // allocation's span and match the recorded user address
                    // exactly — interior pointers are rejected.
                    if containing_chunk < end_chunk {
                        let expected_ptr = pool_base + scan_chunk * Self::CHUNK_SIZE + ptr_offset;
                        if ptr_addr == expected_ptr {
                            return Ok(AllocationInfo {
                                start_chunk: scan_chunk,
                                total_chunks: size,
                            });
                        }
                    }
                    return Err(anyhow!(AllocError::NotAllocated)
                        .context("Pointer not matching expected allocation"));
                }
                MetaInfo::AllocContinuation => {
                    if scan_chunk == 0 {
                        return Err(
                            anyhow!(AllocError::NotAllocated).context("No allocation start found")
                        );
                    }
                    scan_chunk -= 1;
                }
                _ => {
                    // Free chunk: nothing is allocated here.
                    return Err(anyhow!(AllocError::NotAllocated).context("Pointer in free region"));
                }
            }
        }
    }
385
386 fn find_free_region(
389 &self,
390 inner: &PoolInner<N, M>,
391 chunks_needed: usize,
392 align: usize,
393 pool_base: usize,
394 ) -> Option<(usize, usize, usize)> {
395 let mut i = 0;
396
397 while i < M {
398 match inner.meta[i] {
399 MetaInfo::Free(free_size) => {
400 let free_start = i;
402 let free_end = i + free_size;
403
404 for try_start in free_start..free_end {
406 let chunk_addr = pool_base + try_start * Self::CHUNK_SIZE;
408
409 let aligned_addr = (chunk_addr + align - 1) & !(align - 1);
411
412 let alignment_offset = aligned_addr - chunk_addr;
414
415 let alignment_chunks =
417 (alignment_offset + Self::CHUNK_SIZE - 1) / Self::CHUNK_SIZE;
418
419 let total_chunks = alignment_chunks + chunks_needed;
421
422 if try_start + total_chunks <= free_end {
424 let reserved_start_addr = pool_base + try_start * Self::CHUNK_SIZE;
426 let final_aligned_addr =
427 (reserved_start_addr + align - 1) & !(align - 1);
428
429 let reserved_end_addr =
431 pool_base + (try_start + total_chunks) * Self::CHUNK_SIZE;
432 if final_aligned_addr + (chunks_needed * Self::CHUNK_SIZE)
433 <= reserved_end_addr
434 {
435 return Some((try_start, total_chunks, final_aligned_addr));
436 }
437 }
438 }
439
440 i = free_end;
442 }
443 _ => {
444 i += 1;
446 }
447 }
448 }
449
450 None
451 }
452
    /// Commits a reservation: carves `total_chunks` starting at
    /// `start_chunk` out of the enclosing free region, re-creating smaller
    /// free regions for any chunks left over before or after the
    /// allocation.
    ///
    /// `user_ptr` is the (already aligned) address handed to the caller;
    /// its offset from the start chunk's base is recorded so deallocation
    /// can verify the exact pointer.
    fn mark_allocated(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        total_chunks: usize,
        user_ptr: usize,
        pool_base: usize,
    ) -> Result<()> {
        if start_chunk + total_chunks > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Allocation exceeds pool bounds"));
        }

        // Offset from the first reserved chunk to the user pointer; non-zero
        // when alignment padding precedes the user bytes.
        let chunk_base_addr = pool_base + start_chunk * Self::CHUNK_SIZE;
        let ptr_offset = user_ptr - chunk_base_addr;

        // Walk back to the head of the free region containing `start_chunk`
        // (it may start earlier than the chosen allocation start).
        let mut region_start = start_chunk;
        while region_start > 0 && matches!(meta[region_start - 1], MetaInfo::FreeContinuation) {
            region_start -= 1;
        }

        // The head entry must be `Free(size)`; anything else means the
        // caller picked a start that is not inside a free region.
        let free_region_size = match meta.get(region_start) {
            Some(MetaInfo::Free(size)) => *size,
            Some(
                MetaInfo::FreeContinuation
                | MetaInfo::AllocStart { .. }
                | MetaInfo::AllocContinuation,
            ) => {
                return Err(anyhow!(AllocError::OutOfMemory)
                    .context("Attempted to allocate from a non-free region"));
            }
            None => {
                return Err(anyhow!(AllocError::OutOfMemory)
                    .context("Allocation region start out of bounds"));
            }
        };

        let region_end = region_start + free_region_size;
        if start_chunk + total_chunks > region_end {
            return Err(anyhow!(AllocError::OutOfMemory)
                .context("Allocation exceeds available free region"));
        }

        // Blank the whole region, then rebuild it as up to three runs:
        // [leading free][allocation][trailing free].
        for idx in region_start..region_end {
            meta[idx] = MetaInfo::FreeContinuation;
        }

        // Leading free run (chunks skipped before the allocation start).
        let leading_free = start_chunk.saturating_sub(region_start);
        if leading_free > 0 {
            Self::set_free_region(meta, region_start, leading_free);
        }

        // The allocation itself: one AllocStart followed by continuations.
        meta[start_chunk] = MetaInfo::AllocStart {
            size: total_chunks,
            ptr_offset,
        };

        for i in 1..total_chunks {
            meta[start_chunk + i] = MetaInfo::AllocContinuation;
        }

        // Trailing free run (region chunks left after the allocation).
        let allocation_end = start_chunk + total_chunks;
        if allocation_end < region_end {
            Self::set_free_region(meta, allocation_end, region_end - allocation_end);
        }

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking allocation"
            );
        }

        Ok(())
    }
537
    /// Releases `chunk_count` chunks starting at `start_chunk`, coalescing
    /// the freed span with any directly adjacent free regions on either
    /// side into a single `Free` run.
    fn mark_chunks_free(
        &self,
        meta: &mut [MetaInfo; M],
        start_chunk: usize,
        chunk_count: usize,
    ) -> Result<()> {
        if start_chunk + chunk_count > M {
            return Err(anyhow!(AllocError::OutOfMemory).context("Invalid chunk range"));
        }

        // Free region (if any) immediately to the left of the freed span.
        let left_region = if start_chunk > 0 {
            Self::free_region_info(meta, start_chunk - 1)
        } else {
            None
        };

        // Free region (if any) immediately to the right of the freed span.
        let right_index = start_chunk + chunk_count;
        let right_region = if right_index < M {
            Self::free_region_info(meta, right_index)
        } else {
            None
        };

        // Grow the span to cover the left neighbor's start...
        let mut region_start = start_chunk;
        if let Some((left_start, _)) = left_region {
            region_start = left_start;
        }

        // ...and the right neighbor's end.
        let mut region_end = start_chunk + chunk_count;
        if let Some((right_start, right_size)) = right_region {
            region_end = core::cmp::max(region_end, right_start + right_size);
        }

        // Blank the merged span, then write it back as one free run.
        for idx in region_start..region_end {
            meta[idx] = MetaInfo::FreeContinuation;
        }

        Self::set_free_region(meta, region_start, region_end - region_start);

        #[cfg(feature = "debug")]
        #[cfg(debug_assertions)]
        {
            debug_assert!(
                self.validate_pool_consistency(meta),
                "Pool consistency check failed after marking chunks free"
            );
        }

        Ok(())
    }
589
590 fn free_region_info(meta: &[MetaInfo; M], idx: usize) -> Option<(usize, usize)> {
591 if idx >= M {
592 return None;
593 }
594
595 match meta[idx] {
596 MetaInfo::Free(size) => Some((idx, size)),
597 MetaInfo::FreeContinuation => {
598 let mut start = idx;
599 while start > 0 && matches!(meta[start - 1], MetaInfo::FreeContinuation) {
600 start -= 1;
601 }
602
603 if let MetaInfo::Free(size) = meta[start] {
604 Some((start, size))
605 } else {
606 None
607 }
608 }
609 _ => None,
610 }
611 }
612
613 fn set_free_region(meta: &mut [MetaInfo; M], start: usize, len: usize) {
614 if len == 0 {
615 return;
616 }
617
618 meta[start] = MetaInfo::Free(len);
619 for offset in 1..len {
620 meta[start + offset] = MetaInfo::FreeContinuation;
621 }
622 }
623
    /// Returns a snapshot of the pool's activity counters.
    #[cfg(feature = "statistics")]
    pub fn get_stats(&self) -> PoolStats {
        self.stats.lock().clone()
    }
629
630 #[cfg(feature = "debug")]
639 #[cfg(debug_assertions)]
640 fn validate_pool_consistency(&self, meta: &[MetaInfo; M]) -> bool {
641 let mut i = 0;
642 while i < M {
643 match meta[i] {
644 MetaInfo::Free(size) => {
645 if i + size > M {
646 return false; }
648 for j in 1..size {
650 if !matches!(meta[i + j], MetaInfo::FreeContinuation) {
651 return false;
652 }
653 }
654 i += size;
655 }
656 MetaInfo::AllocStart { size, .. } => {
657 if i + size > M {
658 return false; }
660 for j in 1..size {
662 if !matches!(meta[i + j], MetaInfo::AllocContinuation) {
663 return false;
664 }
665 }
666 i += size;
667 }
668 MetaInfo::FreeContinuation | MetaInfo::AllocContinuation => {
669 return false;
671 }
672 }
673 }
674 true
675 }
676
677 #[cfg(test)]
700 fn count_free_chunks(&self) -> usize {
701 let inner = self.inner.lock();
702 let mut count = 0;
703 let mut i = 0;
704 while i < M {
705 match inner.meta[i] {
706 MetaInfo::Free(size) => {
707 count += size;
708 i += size;
709 }
710 MetaInfo::AllocStart { size, .. } => {
711 i += size;
712 }
713 _ => i += 1,
714 }
715 }
716 count
717 }
718
719 #[cfg(test)]
721 fn count_allocated_chunks(&self) -> usize {
722 let inner = self.inner.lock();
723 let mut count = 0;
724 let mut i = 0;
725 while i < M {
726 match inner.meta[i] {
727 MetaInfo::Free(size) => {
728 i += size;
729 }
730 MetaInfo::AllocStart { size, .. } => {
731 count += size;
732 i += size;
733 }
734 _ => i += 1,
735 }
736 }
737 count
738 }
739}
740
#[cfg(feature = "zero-on-drop")]
impl<const N: usize, const M: usize> Drop for MemoryPoolAllocator<N, M> {
    /// Zeroes the entire backing pool when the allocator is dropped.
    fn drop(&mut self) {
        // Locking in drop is uncontended (we hold `&mut self`), but kept
        // for uniform access to the inner state.
        let inner = self.inner.lock();
        // SAFETY: `new`'s contract requires `pool` to be valid for writes
        // of N bytes for the allocator's whole lifetime, which includes
        // this drop. NOTE(review): assumes the caller has not reclaimed the
        // buffer before dropping the allocator — confirm in `new`'s docs.
        unsafe {
            core::ptr::write_bytes(inner.pool, 0, N);
        }
    }
}