zigzag_alloc/alloc/pool.rs
//! Fixed-size object pool allocator.
//!
//! A pool allocator pre-allocates a contiguous *slab* of equal-sized *blocks*
//! and maintains a lock-free free-list over them. Allocation pops one block
//! from the free-list; deallocation pushes it back.
//!
//! ## Characteristics
//!
//! | Property | Value |
//! |----------|-------|
//! | Allocation / deallocation cost | O(1), typically a single CAS |
//! | Fragmentation | Zero (fixed-size blocks) |
//! | Thread safety | Lock-free (`AtomicPtr` CAS loop) |
//! | Max allocation size | `block_layout.size()` |
//! | Individual deallocation | ✅ Supported |
//!
//! ## Example
//!
//! ```rust,ignore
//! let pool = PoolAllocator::typed::<[u8; 64]>(&sys, 128).unwrap();
//! let ptr = unsafe { pool.alloc(Layout::new::<[u8; 64]>()) }.unwrap();
//! // ... use memory ...
//! unsafe { pool.dealloc(ptr, Layout::new::<[u8; 64]>()) };
//! ```

use core::{
    alloc::Layout,
    mem,
    ptr::{self, NonNull},
    sync::atomic::{AtomicPtr, Ordering},
};

use super::allocator::Allocator;
use crate::simd;

// ── FreeNode ─────────────────────────────────────────────────────────────────

/// An intrusive free-list node overlaid on top of unused pool blocks.
///
/// When a block is free, its first bytes are reinterpreted as a `FreeNode`
/// holding a pointer to the next free block. When a block is in use, the
/// `FreeNode` overlay is no longer valid and must not be accessed.
struct FreeNode {
    /// Raw pointer to the next free block, or null if this is the last node.
    next: *mut FreeNode,
}

/// A lock-free, fixed-size block allocator backed by a single pre-allocated
/// slab.
///
/// At construction time the slab is divided into equal-sized *blocks* whose
/// size is at least `max(item_layout.size(), size_of::<FreeNode>())` and
/// whose alignment is at least `max(item_layout.align(), align_of::<FreeNode>())`.
/// All blocks are linked into a free-list; allocation atomically pops the
/// head; deallocation atomically pushes to the head.
///
/// # Constraints
///
/// * Allocations larger than `block_layout.size()` or with stricter alignment
///   than `block_layout.align()` are rejected (return `None`).
/// * The pool has a fixed `capacity`; once all blocks are in use, allocation
///   returns `None`.
///
/// # Thread Safety
///
/// `PoolAllocator` is `Sync` + `Send` when the backing allocator is `Sync` +
/// `Send`. Both `alloc` and `dealloc` use lock-free CAS loops. As with any
/// Treiber-style stack, the pop path in `alloc` is in principle exposed to
/// the classic ABA problem if a block can be freed and handed out again
/// between a competing thread's load of the head and its CAS; a tagged
/// (versioned) head pointer is the usual mitigation.
pub struct PoolAllocator<A: Allocator> {
    /// Backing allocator that owns the slab.
    backing: A,
    /// Padded layout of a single block (>= item_layout, >= FreeNode layout).
    block_layout: Layout,
    /// Layout of the entire slab (`block_layout.size() * capacity`).
    slab_layout: Layout,
    /// Pointer to the start of the slab.
    slab: NonNull<u8>,
    /// Lock-free head of the free-list; null when the pool is exhausted.
    free_head: AtomicPtr<FreeNode>,
    /// Total number of blocks in the pool.
    capacity: usize,
}

// SAFETY: The free-list head is only ever mutated through atomic CAS
// operations, so concurrent `alloc` and `dealloc` calls cannot data-race on
// it. The slab pointer itself is not mutated after construction.
unsafe impl<A: Allocator + Sync> Sync for PoolAllocator<A> {}
unsafe impl<A: Allocator + Send> Send for PoolAllocator<A> {}

impl<A: Allocator> PoolAllocator<A> {
    /// Creates a new pool with `capacity` blocks sized for `item_layout`.
    ///
    /// The effective block size is `max(item_layout.size(), size_of::<FreeNode>())`
    /// padded to the effective alignment. Returns `None` if the backing
    /// allocator fails to allocate the slab.
    ///
    /// # Arguments
    ///
    /// * `backing` — Allocator used to allocate and later free the slab.
    /// * `item_layout` — Layout of the items that will be stored in the pool.
    /// * `capacity` — Maximum number of live allocations the pool can hold.
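    ///
    /// # Example
    ///
    /// A minimal sketch (assuming a `sys` backing allocator is in scope, as
    /// in the module-level example):
    ///
    /// ```rust,ignore
    /// // 256 blocks, each able to hold a 48-byte, 8-aligned item.
    /// let layout = Layout::from_size_align(48, 8).unwrap();
    /// let pool = PoolAllocator::new(&sys, layout, 256).unwrap();
    /// assert_eq!(pool.capacity(), 256);
    /// assert!(pool.block_layout().size() >= 48);
    /// ```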
    pub fn new(backing: A, item_layout: Layout, capacity: usize) -> Option<Self> {
        // The block must be large enough to hold either a user item or the
        // FreeNode overlay (whichever is bigger), and aligned sufficiently for
        // both.
        let block_size = item_layout.size().max(mem::size_of::<FreeNode>());
        let block_align = item_layout.align().max(mem::align_of::<FreeNode>());

        let block_layout = Layout::from_size_align(block_size, block_align)
            .ok()?
            .pad_to_align(); // ensure stride is a multiple of alignment

        let total_size = block_layout.size().checked_mul(capacity)?;
        let slab_layout = Layout::from_size_align(total_size, block_layout.align()).ok()?;

        // SAFETY: `slab_layout` has non-zero size (capacity > 0 is assumed by
        // the caller; zero capacity results in a zero-sized slab which the
        // backing allocator may or may not accept).
        let slab = unsafe { backing.alloc(slab_layout)? };

        // Zero-fill so that all FreeNode overlays start in a clean state.
        // SAFETY: `slab` is valid for `total_size` bytes as guaranteed by the
        // backing allocator.
        unsafe { simd::fill_bytes(slab.as_ptr(), 0, total_size) };

        // Build the free-list in reverse order so that the first block ends up
        // at the head (i.e. allocations proceed forward through the slab).
        let mut head: *mut FreeNode = ptr::null_mut();
        for i in (0..capacity).rev() {
            // SAFETY: `i * block_layout.size()` is within the slab bounds
            // because `i < capacity` and `total_size = block_layout.size() * capacity`.
            let block = unsafe {
                slab.as_ptr().add(i * block_layout.size()) as *mut FreeNode
            };
            // SAFETY: `block` points to a zero-initialised block that is at
            // least `size_of::<FreeNode>()` bytes — writing a FreeNode is safe.
            unsafe { (*block).next = head };
            head = block;
        }

        Some(Self {
            backing,
            block_layout,
            slab_layout,
            slab,
            free_head: AtomicPtr::new(head),
            capacity,
        })
    }

    /// Convenience constructor that derives `item_layout` from the type `T`.
    ///
    /// Equivalent to `PoolAllocator::new(backing, Layout::new::<T>(), capacity)`.
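    ///
    /// # Example
    ///
    /// A sketch, assuming `sys` as in the module-level example:
    ///
    /// ```rust,ignore
    /// let pool = PoolAllocator::typed::<[u8; 64]>(&sys, 1024).unwrap();
    /// assert!(pool.block_layout().size() >= 64);
    /// ```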
    pub fn typed<T>(backing: A, capacity: usize) -> Option<Self> {
        Self::new(backing, Layout::new::<T>(), capacity)
    }

    /// Returns the maximum number of blocks this pool can provide simultaneously.
    #[inline]
    pub fn capacity(&self) -> usize { self.capacity }

    /// Returns the padded layout of a single pool block.
    #[inline]
    pub fn block_layout(&self) -> Layout { self.block_layout }

    /// Returns the number of blocks currently on the free-list.
    ///
    /// This traverses the entire free-list and is O(free_count), so use it
    /// only for diagnostics / debugging. The result is a point-in-time
    /// snapshot: under concurrent use it may be stale by the time it is
    /// returned, and the traversal itself is only sound while the pool is
    /// quiescent.
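    ///
    /// # Example
    ///
    /// A diagnostics sketch (assuming a single-threaded `pool` as in the
    /// module-level example):
    ///
    /// ```rust,ignore
    /// assert_eq!(pool.free_count(), pool.capacity());
    /// let ptr = unsafe { pool.alloc(pool.block_layout()) }.unwrap();
    /// assert_eq!(pool.free_count(), pool.capacity() - 1);
    /// unsafe { pool.dealloc(ptr, pool.block_layout()) };
    /// ```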
    pub fn free_count(&self) -> usize {
        let mut n = 0usize;
        let mut node = self.free_head.load(Ordering::Relaxed);
        while !node.is_null() {
            n += 1;
            // SAFETY: Every non-null node on the free-list was written by
            // `dealloc` or during construction and points to a valid `FreeNode`
            // (see the quiescence caveat in the doc comment above).
            node = unsafe { (*node).next };
        }
        n
    }

    /// Zeroes the block at `ptr` and returns it to the pool.
    ///
    /// Useful when blocks may contain sensitive data.
    ///
    /// # Safety
    ///
    /// * `ptr` must have been obtained from this pool via [`alloc`](Allocator::alloc).
    /// * `ptr` must not be used after this call.
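    ///
    /// # Example
    ///
    /// A sketch: scrub a block that held key material before recycling it
    /// (assuming `pool` as in the module-level example):
    ///
    /// ```rust,ignore
    /// let key = unsafe { pool.alloc(Layout::new::<[u8; 32]>()) }.unwrap();
    /// // ... use the block ...
    /// unsafe { pool.dealloc_zeroed(key) };
    /// ```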
    pub unsafe fn dealloc_zeroed(&self, ptr: NonNull<u8>) {
        // SAFETY: `ptr` is valid for `block_layout.size()` bytes — guaranteed
        // by the caller.
        unsafe { simd::fill_bytes(ptr.as_ptr(), 0, self.block_layout.size()) };
        // SAFETY: Forwarding to `dealloc` which handles the free-list push.
        unsafe { self.dealloc(ptr, self.block_layout) };
    }

    /// Zeroes the entire slab without touching the `free_head` pointer.
    ///
    /// Intended for secure teardown immediately before the pool is dropped.
    /// **Do not call while any block is in use**, and do not allocate from the
    /// pool afterwards: zeroing destroys the intrusive `next` links, so the
    /// free-list is no longer walkable.
    ///
    /// # Safety
    ///
    /// All blocks — including live ones — will have their bytes set to zero.
    /// Any live references into the pool will read garbage (all zeros).
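    ///
    /// # Example
    ///
    /// A secure-teardown sketch: wipe only once every block is back.
    ///
    /// ```rust,ignore
    /// assert_eq!(pool.free_count(), pool.capacity());
    /// unsafe { pool.wipe_slab() };
    /// drop(pool); // slab is returned to the backing allocator
    /// ```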
    pub unsafe fn wipe_slab(&self) {
        // SAFETY: `slab` is valid for `slab_layout.size()` bytes as established
        // in the constructor.
        unsafe { simd::fill_bytes(self.slab.as_ptr(), 0, self.slab_layout.size()) };
    }
}

impl<A: Allocator> Allocator for PoolAllocator<A> {
    /// Pops one block from the free-list and returns a pointer to it.
    ///
    /// Returns `None` if the pool is exhausted or if `layout` is too large /
    /// too strictly aligned for the pool's block size.
    ///
    /// # Safety
    ///
    /// Oversized or over-aligned layouts are checked at runtime and rejected
    /// with `None`, so the caller's remaining obligation is to eventually pass
    /// the returned pointer back to [`dealloc`](Allocator::dealloc) on this
    /// same pool (with any layout whose size and align fit the pool).
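    ///
    /// # Example
    ///
    /// An exhaustion sketch with a single-block pool (assuming `sys` as in
    /// the module-level example):
    ///
    /// ```rust,ignore
    /// let pool = PoolAllocator::typed::<u32>(&sys, 1).unwrap();
    /// let a = unsafe { pool.alloc(Layout::new::<u32>()) }.unwrap();
    /// assert!(unsafe { pool.alloc(Layout::new::<u32>()) }.is_none());
    /// unsafe { pool.dealloc(a, Layout::new::<u32>()) };
    /// ```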
    unsafe fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        if layout.size() > self.block_layout.size()
            || layout.align() > self.block_layout.align()
        {
            return None;
        }

        // Atomically pop the head of the free-list.
        let mut head = self.free_head.load(Ordering::Acquire);
        loop {
            // If head is null the pool is exhausted.
            let node = NonNull::new(head)?;
            // SAFETY: `head` is a valid `FreeNode` pointer — maintained by the
            // constructor and the `dealloc` push path.
            let next = unsafe { (*head).next };
            match self.free_head.compare_exchange_weak(
                head,
                next,
                Ordering::AcqRel,
                Ordering::Acquire,
            ) {
                Ok(_) => return Some(node.cast()),
                Err(actual) => head = actual,
            }
        }
    }

    /// Pushes a block back onto the free-list.
    ///
    /// The `_layout` parameter is ignored; what matters is that `ptr` belongs
    /// to this pool's slab.
    ///
    /// # Safety
    ///
    /// * `ptr` must have been obtained from **this** pool via
    ///   [`alloc`](Allocator::alloc).
    /// * `ptr` must not be accessed after this call.
    /// * Calling `dealloc` twice with the same pointer is undefined behaviour
    ///   (double-free corrupts the free-list).
    unsafe fn dealloc(&self, ptr: NonNull<u8>, _layout: Layout) {
        let node = ptr.as_ptr() as *mut FreeNode;

        // Atomically push the block onto the head of the free-list.
        let mut head = self.free_head.load(Ordering::Relaxed);
        loop {
            // SAFETY: `node` is within the slab so it is large enough for a
            // `FreeNode`. Writing before the CAS succeeds is safe because if
            // the CAS fails we simply overwrite `next` again on the next attempt.
            unsafe { ptr::write(node, FreeNode { next: head }) };
            match self.free_head.compare_exchange_weak(
                head,
                node,
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(actual) => head = actual,
            }
        }
    }
}

impl<A: Allocator> Drop for PoolAllocator<A> {
    /// Returns the entire slab to the backing allocator.
    fn drop(&mut self) {
        // SAFETY: `slab` was obtained from `backing` with `slab_layout` in the
        // constructor, and `backing` is still alive (it is owned by `self`).
        unsafe { self.backing.dealloc(self.slab, self.slab_layout) };
    }
}