1use core::cell::UnsafeCell;
7use core::ptr::NonNull;
8use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
9
10use crate::error::{EmbeddedError, Result};
11
/// Minimal allocator interface for fixed-capacity, statically backed pools.
///
/// All methods take `&self` so implementations can live in `static` storage;
/// implementations in this module serialize access internally.
pub trait MemoryPool {
    /// Allocates `size` bytes aligned to `align` (power of two).
    ///
    /// # Errors
    /// Implementation-defined; e.g. invalid parameters, a busy pool, or
    /// pool exhaustion.
    fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>>;

    /// Returns an allocation to the pool.
    ///
    /// # Safety
    /// `ptr` must have been returned by `allocate` on this same pool with the
    /// same `size` and `align`, and must not be used after this call.
    unsafe fn deallocate(&self, ptr: NonNull<u8>, size: usize, align: usize) -> Result<()>;

    /// Total number of bytes managed by the pool.
    fn capacity(&self) -> usize;

    /// Number of bytes currently accounted as allocated.
    fn used(&self) -> usize;

    /// Bytes still available: `capacity - used`, saturating at zero.
    fn available(&self) -> usize {
        self.capacity().saturating_sub(self.used())
    }

    /// Releases every outstanding allocation at once.
    ///
    /// # Safety
    /// The caller must guarantee that no pointer previously returned by
    /// `allocate` is dereferenced after the reset.
    unsafe fn reset(&self) -> Result<()>;
}
47
/// Bump (arena) allocator over an inline `N`-byte buffer.
///
/// Individual deallocation is a no-op; memory is reclaimed only by `reset`.
pub struct StaticPool<const N: usize> {
    /// Backing storage. Interior mutability is needed because `allocate`
    /// hands out raw pointers into the buffer through `&self`.
    buffer: UnsafeCell<[u8; N]>,
    /// Current bump offset from the start of `buffer` (bytes handed out,
    /// including alignment padding).
    offset: AtomicUsize,
    /// Test-and-set flag serializing updates to `offset`/`buffer`.
    locked: AtomicBool,
}
56
57impl<const N: usize> StaticPool<N> {
58 pub const fn new() -> Self {
60 Self {
61 buffer: UnsafeCell::new([0u8; N]),
62 offset: AtomicUsize::new(0),
63 locked: AtomicBool::new(false),
64 }
65 }
66
67 fn base_addr(&self) -> usize {
69 self.buffer.get() as usize
70 }
71
72 fn try_lock(&self) -> Result<()> {
74 match self
75 .locked
76 .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
77 {
78 Ok(_) => Ok(()),
79 Err(_) => Err(EmbeddedError::ResourceBusy),
80 }
81 }
82
83 fn unlock(&self) {
85 self.locked.store(false, Ordering::Release);
86 }
87}
88
89impl<const N: usize> Default for StaticPool<N> {
90 fn default() -> Self {
91 Self::new()
92 }
93}
94
impl<const N: usize> MemoryPool for StaticPool<N> {
    /// Bump-allocates `size` bytes aligned to `align`.
    fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
        if size == 0 {
            return Err(EmbeddedError::InvalidParameter);
        }

        // The round-up mask below requires a power-of-two alignment
        // (this also rejects align == 0).
        if !align.is_power_of_two() {
            return Err(EmbeddedError::InvalidAlignment {
                required: align,
                actual: 0,
            });
        }

        self.try_lock()?;

        // Relaxed is sufficient: `offset` is only written while the lock is
        // held, so this read cannot race with a writer.
        let current_offset = self.offset.load(Ordering::Relaxed);
        let base = self.base_addr();

        // Round the *absolute* address (not the offset) up to `align`, so the
        // returned pointer is aligned regardless of the buffer's own alignment.
        // Wrapping ops avoid overflow panics; any wrap makes the capacity
        // check below fail and the request is rejected.
        let current_addr = base.wrapping_add(current_offset);
        let aligned_addr =
            (current_addr.wrapping_add(align.wrapping_sub(1))) & !align.wrapping_sub(1);
        let aligned_offset = aligned_addr.wrapping_sub(base);

        // `checked_add` guards arithmetic overflow; `offset <= N` guards
        // the buffer capacity.
        let new_offset = match aligned_offset.checked_add(size) {
            Some(offset) if offset <= N => offset,
            _ => {
                self.unlock();
                return Err(EmbeddedError::PoolExhausted);
            }
        };

        self.offset.store(new_offset, Ordering::Release);
        self.unlock();

        // SAFETY: `aligned_addr` points into the pool's own buffer, whose
        // address is never null.
        let ptr = unsafe { NonNull::new_unchecked(aligned_addr as *mut u8) };
        Ok(ptr)
    }

    /// No-op: a bump allocator cannot reclaim individual allocations;
    /// memory is returned only via [`MemoryPool::reset`].
    unsafe fn deallocate(&self, _ptr: NonNull<u8>, _size: usize, _align: usize) -> Result<()> {
        Ok(())
    }

    fn capacity(&self) -> usize {
        N
    }

    /// Bytes consumed so far, including any alignment padding.
    fn used(&self) -> usize {
        self.offset.load(Ordering::Relaxed)
    }

    /// Rewinds the bump offset to zero (see trait-level safety contract).
    unsafe fn reset(&self) -> Result<()> {
        self.try_lock()?;
        self.offset.store(0, Ordering::Release);
        self.unlock();
        Ok(())
    }
}
158
// SAFETY: all writes to `offset` and all pointer derivations from `buffer`
// happen while `locked` is held (Acquire on acquisition, Release on release),
// so shared references can be used from multiple threads without data races.
unsafe impl<const N: usize> Sync for StaticPool<N> {}
// SAFETY: the pool owns its buffer inline and holds no thread-affine state,
// so moving it to another thread is sound.
unsafe impl<const N: usize> Send for StaticPool<N> {}
162
/// Fixed-size block allocator: `NUM_BLOCKS` blocks of `BLOCK_SIZE` bytes
/// tracked by a free bitmap of `BITMAP_SIZE` bytes
/// (`BITMAP_SIZE * 8 >= NUM_BLOCKS`, checked in `new`).
pub struct BlockPool<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> {
    /// Inline block storage; raw pointers into it are handed out via `&self`.
    blocks: UnsafeCell<[[u8; BLOCK_SIZE]; NUM_BLOCKS]>,
    /// One bit per block: 1 = free, 0 = in use. Mutated only under the lock.
    free_bitmap: UnsafeCell<[u8; BITMAP_SIZE]>,
    /// Count of currently free blocks (used for `used()` accounting).
    free_count: AtomicUsize,
    /// Test-and-set flag serializing bitmap and block-table access.
    locked: AtomicBool,
}
172
impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize>
    BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
    /// Creates a pool with every block free (all bitmap bits set).
    ///
    /// # Panics
    /// At const-evaluation time if `BITMAP_SIZE * 8 < NUM_BLOCKS`.
    pub const fn new() -> Self {
        assert!(
            BITMAP_SIZE * 8 >= NUM_BLOCKS,
            "BITMAP_SIZE is too small for NUM_BLOCKS"
        );

        Self {
            blocks: UnsafeCell::new([[0u8; BLOCK_SIZE]; NUM_BLOCKS]),
            // 0xFF marks all eight blocks of each bitmap byte as free.
            free_bitmap: UnsafeCell::new([0xFF; BITMAP_SIZE]),
            free_count: AtomicUsize::new(NUM_BLOCKS),
            locked: AtomicBool::new(false),
        }
    }

    /// Attempts to take the internal lock; fails fast with `ResourceBusy`
    /// instead of blocking.
    fn try_lock(&self) -> Result<()> {
        match self
            .locked
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
        {
            Ok(_) => Ok(()),
            Err(_) => Err(EmbeddedError::ResourceBusy),
        }
    }

    /// Releases the lock taken by [`Self::try_lock`].
    fn unlock(&self) {
        self.locked.store(false, Ordering::Release);
    }

    /// Finds the first free block, marks it in-use, and returns its index.
    ///
    /// Mutates the bitmap through the `UnsafeCell`, so it must only be
    /// called while the pool lock is held.
    fn find_free_block(&self) -> Option<usize> {
        let bitmap = unsafe { &mut *self.free_bitmap.get() };

        for (byte_idx, byte) in bitmap.iter_mut().enumerate() {
            if *byte == 0 {
                // No free block among these eight; skip the whole byte.
                continue;
            }

            for bit_idx in 0..8 {
                let block_idx = byte_idx * 8 + bit_idx;
                if block_idx >= NUM_BLOCKS {
                    // Remaining bitmap bits are padding past the last block.
                    return None;
                }

                if (*byte >> bit_idx) & 1 != 0 {
                    // Claim the block by clearing its "free" bit.
                    *byte &= !(1 << bit_idx);
                    return Some(block_idx);
                }
            }
        }

        None
    }

    /// Marks `block_idx` as free again.
    ///
    /// # Safety
    /// The caller must hold the pool lock and pass `block_idx < NUM_BLOCKS`
    /// (an out-of-range index panics on the bitmap access). The caller is
    /// responsible for the matching `free_count` update.
    unsafe fn free_block(&self, block_idx: usize) {
        let bitmap = unsafe { &mut *self.free_bitmap.get() };
        let byte_idx = block_idx / 8;
        let bit_idx = block_idx % 8;

        bitmap[byte_idx] |= 1 << bit_idx;
    }
}
255
256impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Default
257 for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
258{
259 fn default() -> Self {
260 Self::new()
261 }
262}
263
264impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> MemoryPool
265 for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
266{
267 fn allocate(&self, size: usize, align: usize) -> Result<NonNull<u8>> {
268 if size > BLOCK_SIZE {
269 return Err(EmbeddedError::BufferTooSmall {
270 required: size,
271 available: BLOCK_SIZE,
272 });
273 }
274
275 if !align.is_power_of_two() {
276 return Err(EmbeddedError::InvalidAlignment {
277 required: align,
278 actual: 0,
279 });
280 }
281
282 self.try_lock()?;
283
284 let block_idx = match self.find_free_block() {
285 Some(idx) => idx,
286 None => {
287 self.unlock();
288 return Err(EmbeddedError::PoolExhausted);
289 }
290 };
291
292 self.free_count.fetch_sub(1, Ordering::Relaxed);
293
294 let blocks = unsafe { &mut *self.blocks.get() };
296 let ptr = blocks[block_idx].as_mut_ptr();
297
298 let ptr_addr = ptr as usize;
300 if ptr_addr % align != 0 {
301 unsafe { self.free_block(block_idx) };
303 self.free_count.fetch_add(1, Ordering::Relaxed);
304 self.unlock();
305 return Err(EmbeddedError::InvalidAlignment {
306 required: align,
307 actual: ptr_addr % align.max(1),
308 });
309 }
310
311 self.unlock();
312
313 let nonnull = unsafe { NonNull::new_unchecked(ptr) };
315 Ok(nonnull)
316 }
317
318 unsafe fn deallocate(&self, ptr: NonNull<u8>, _size: usize, _align: usize) -> Result<()> {
319 self.try_lock()?;
320
321 let blocks = unsafe { &*self.blocks.get() };
324 let base_addr = blocks.as_ptr() as usize;
325 let ptr_addr = ptr.as_ptr() as usize;
326
327 if ptr_addr < base_addr {
328 self.unlock();
329 return Err(EmbeddedError::InvalidParameter);
330 }
331
332 let offset = ptr_addr.wrapping_sub(base_addr);
333 let block_idx = offset / BLOCK_SIZE;
334
335 if block_idx >= NUM_BLOCKS {
336 self.unlock();
337 return Err(EmbeddedError::InvalidParameter);
338 }
339
340 unsafe { self.free_block(block_idx) };
342 self.free_count.fetch_add(1, Ordering::Relaxed);
343
344 self.unlock();
345 Ok(())
346 }
347
348 fn capacity(&self) -> usize {
349 BLOCK_SIZE * NUM_BLOCKS
350 }
351
352 fn used(&self) -> usize {
353 let free = self.free_count.load(Ordering::Relaxed);
354 (NUM_BLOCKS.saturating_sub(free)) * BLOCK_SIZE
355 }
356
357 unsafe fn reset(&self) -> Result<()> {
358 self.try_lock()?;
359
360 let bitmap = unsafe { &mut *self.free_bitmap.get() };
362 bitmap.fill(0xFF);
363
364 self.free_count.store(NUM_BLOCKS, Ordering::Release);
365 self.unlock();
366 Ok(())
367 }
368}
369
// SAFETY: all mutation of `blocks` and `free_bitmap` happens while `locked`
// is held (Acquire on acquisition, Release on release), so shared references
// can be used from multiple threads without data races.
unsafe impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Sync
    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
}
// SAFETY: the pool owns all of its storage inline and holds no thread-affine
// state, so moving it to another thread is sound.
unsafe impl<const BLOCK_SIZE: usize, const NUM_BLOCKS: usize, const BITMAP_SIZE: usize> Send
    for BlockPool<BLOCK_SIZE, NUM_BLOCKS, BITMAP_SIZE>
{
}
379
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_static_pool_allocation() {
        let pool = StaticPool::<1024>::new();

        let first = pool.allocate(64, 8).expect("allocation failed");
        assert_eq!(pool.used(), 64);

        let second = pool.allocate(128, 16).expect("allocation failed");
        // Alignment padding may be inserted, so only a lower bound holds.
        assert!(pool.used() >= 64 + 128);

        assert_ne!(first, second);
    }

    #[test]
    fn test_static_pool_exhaustion() {
        let pool = StaticPool::<128>::new();

        // Fill the pool exactly.
        let _a = pool.allocate(64, 8).expect("allocation failed");
        let _b = pool.allocate(64, 8).expect("allocation failed");

        // A third request must be refused.
        assert!(matches!(
            pool.allocate(64, 8),
            Err(EmbeddedError::PoolExhausted)
        ));
    }

    #[test]
    fn test_static_pool_reset() {
        let pool = StaticPool::<1024>::new();

        let _p = pool.allocate(512, 8).expect("allocation failed");
        assert!(pool.used() > 0);

        unsafe { pool.reset().expect("reset failed") };
        assert_eq!(pool.used(), 0);
    }

    #[test]
    fn test_block_pool_allocation() {
        let pool = BlockPool::<64, 16, 2>::new();

        let first = pool.allocate(32, 4).expect("allocation failed");
        let second = pool.allocate(32, 4).expect("allocation failed");

        assert_ne!(first, second);
        // Each allocation consumes one full 64-byte block.
        assert_eq!(pool.used(), 128);
    }

    #[test]
    fn test_block_pool_deallocation() {
        let pool = BlockPool::<64, 16, 2>::new();

        let block = pool.allocate(32, 4).expect("allocation failed");
        assert_eq!(pool.used(), 64);

        unsafe { pool.deallocate(block, 32, 4).expect("deallocation failed") };
        assert_eq!(pool.used(), 0);
    }

    #[test]
    fn test_block_pool_exhaustion() {
        let pool = BlockPool::<64, 4, 1>::new();

        // Consume all four blocks.
        for _ in 0..4 {
            pool.allocate(32, 4).expect("allocation failed");
        }

        assert!(matches!(
            pool.allocate(32, 4),
            Err(EmbeddedError::PoolExhausted)
        ));
    }
}