// nexus_slab/unbounded.rs

//! Growable slab allocator.
//!
//! This module provides an unbounded (growable) slab allocator.
//! Growth happens by adding independent chunks — no copying.
//!
//! # Example
//!
//! ```
//! use nexus_slab::unbounded::Slab;
//!
//! let slab = Slab::with_chunk_capacity(4096);
//! let slot = slab.alloc(42u64);
//! assert_eq!(*slot, 42);
//! // SAFETY: slot was allocated from this slab
//! unsafe { slab.free(slot) };
//! ```
17
18use std::cell::Cell;
19use std::fmt;
20use std::mem;
21
22use crate::bounded::Slab as BoundedSlab;
23use crate::shared::{RawSlot, SlotCell};
24
25// =============================================================================
26// Claim
27// =============================================================================
28
29/// A claimed slot that has not yet been written to.
30///
31/// Created by [`Slab::claim()`]. Must be consumed via [`write()`](Self::write)
32/// to complete the allocation. If dropped without calling `write()`, the slot
33/// is returned to the freelist.
34///
35/// The `write()` method is `#[inline]`, enabling the compiler to potentially
36/// optimize the value write as a placement new (constructing directly into
37/// the slot memory).
38pub struct Claim<'a, T> {
39 slot_ptr: *mut SlotCell<T>,
40 slab: &'a Slab<T>,
41 chunk_idx: usize,
42}
43
44impl<T> Claim<'_, T> {
45 /// Writes the value to the claimed slot and returns the [`RawSlot`] handle.
46 ///
47 /// This consumes the claim. The value is written directly to the slot's
48 /// memory, which may enable placement new optimization.
49 #[inline]
50 pub fn write(self, value: T) -> RawSlot<T> {
51 let slot_ptr = self.slot_ptr;
52 // SAFETY: We own this slot from claim(), it's valid and vacant
53 unsafe {
54 (*slot_ptr).write_value(value);
55 }
56 // Don't run Drop - we're completing the allocation
57 mem::forget(self);
58 // SAFETY: slot_ptr is valid and now occupied
59 unsafe { RawSlot::from_ptr(slot_ptr) }
60 }
61}
62
63impl<T> fmt::Debug for Claim<'_, T> {
64 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
65 f.debug_struct("Claim")
66 .field("slot_ptr", &self.slot_ptr)
67 .field("chunk_idx", &self.chunk_idx)
68 .finish()
69 }
70}
71
72impl<T> Drop for Claim<'_, T> {
73 fn drop(&mut self) {
74 // Abandoned claim - return slot to the correct chunk's freelist
75 let chunk = self.slab.chunk(self.chunk_idx);
76 let chunk_slab = &*chunk.inner;
77
78 let free_head = chunk_slab.free_head.get();
79 let was_full = free_head.is_null();
80
81 // SAFETY: slot_ptr is valid and still vacant (never written to)
82 unsafe {
83 (*self.slot_ptr).set_next_free(free_head);
84 }
85 chunk_slab.free_head.set(self.slot_ptr);
86
87 // If chunk was full, add it back to the available-space list
88 if was_full {
89 chunk.next_with_space.set(self.slab.head_with_space.get());
90 self.slab.head_with_space.set(self.chunk_idx);
91 }
92 }
93}
94
95// =============================================================================
96// Constants
97// =============================================================================
98
/// Sentinel chunk index terminating the available-space list
/// (meaning: no chunk currently has free slots).
const CHUNK_NONE: usize = usize::MAX;
101
102// =============================================================================
103// ChunkEntry
104// =============================================================================
105
/// Internal wrapper for a chunk in the growable slab.
struct ChunkEntry<T> {
    // Fixed-capacity slab backing this chunk. Boxed so the slot memory keeps
    // a stable address even when the outer `chunks` Vec reallocates — raw
    // slot pointers are held in freelists and RawSlot handles.
    inner: Box<BoundedSlab<T>>,
    // Index of the next chunk with free slots (intrusive singly linked list
    // threaded through the chunk entries), or `CHUNK_NONE` at the end.
    next_with_space: Cell<usize>,
}
111
112// =============================================================================
113// Slab
114// =============================================================================
115
/// Growable slab allocator.
///
/// Uses independent chunks for growth — no copying when the slab grows.
///
/// # Drop Behavior
///
/// Dropping a `Slab` does **not** drop values in occupied slots. `SlotCell` is
/// a union, so the compiler cannot know which slots contain live values. The
/// caller must free all outstanding [`RawSlot`] handles before dropping the
/// slab, or the values will be leaked.
///
/// This is acceptable for the primary use case (TLS via `thread_local!`) where
/// thread exit leaks all TLS storage anyway.
///
/// # Const Construction
///
/// Supports const construction via [`new()`](Self::new) followed by
/// runtime initialization via [`init()`](Self::init). This enables use with
/// `thread_local!` using the `const { }` block syntax for zero-overhead TLS access.
///
/// ```ignore
/// thread_local! {
///     static SLAB: Slab<MyType> = const { Slab::new() };
/// }
///
/// // Later, at runtime:
/// SLAB.with(|s| s.init(4096));
/// ```
///
/// For direct usage, prefer [`with_chunk_capacity()`](Self::with_chunk_capacity).
pub struct Slab<T> {
    // Chunk storage. UnsafeCell because the API takes `&self` (required for
    // the const-init + TLS pattern) while grow() must mutate the Vec; the
    // Cell fields below make the type !Sync, so access is single-threaded.
    chunks: std::cell::UnsafeCell<Vec<ChunkEntry<T>>>,
    // Slots per chunk; 0 means "not yet initialized" (see init()).
    chunk_capacity: Cell<usize>,
    // Head of the intrusive list of chunks with free slots, or CHUNK_NONE.
    head_with_space: Cell<usize>,
}
151
152impl<T> Slab<T> {
153 /// Creates an empty, uninitialized slab.
154 ///
155 /// This is a const function that performs no allocation. Call [`init()`](Self::init)
156 /// to configure chunk capacity before use.
157 ///
158 /// For direct usage, prefer [`with_chunk_capacity()`](Self::with_chunk_capacity).
159 ///
160 /// # Example
161 ///
162 /// ```ignore
163 /// // For use with thread_local! const initialization
164 /// thread_local! {
165 /// static SLAB: Slab<u64> = const { Slab::new() };
166 /// }
167 /// ```
168 #[inline]
169 pub const fn new() -> Self {
170 Self {
171 chunks: std::cell::UnsafeCell::new(Vec::new()),
172 chunk_capacity: Cell::new(0),
173 head_with_space: Cell::new(CHUNK_NONE),
174 }
175 }
176
177 /// Creates a new slab with the given chunk capacity.
178 ///
179 /// Chunks are allocated on-demand when slots are requested.
180 ///
181 /// # Panics
182 ///
183 /// Panics if chunk_capacity is zero.
184 #[inline]
185 pub fn with_chunk_capacity(chunk_capacity: usize) -> Self {
186 let slab = Self::new();
187 slab.init(chunk_capacity);
188 slab
189 }
190
191 /// Initializes the slab with the given chunk capacity.
192 ///
193 /// This configures the chunk parameters. Chunks are allocated on-demand
194 /// when slots are requested. Must be called exactly once before any allocations.
195 ///
196 /// # Panics
197 ///
198 /// - Panics if the slab is already initialized (chunk_capacity > 0)
199 /// - Panics if chunk_capacity is zero
200 pub fn init(&self, chunk_capacity: usize) {
201 assert!(self.chunk_capacity.get() == 0, "Slab already initialized");
202 assert!(chunk_capacity > 0, "chunk_capacity must be non-zero");
203
204 self.chunk_capacity.set(chunk_capacity);
205 }
206
207 /// Returns true if the slab has been initialized.
208 #[inline]
209 pub fn is_initialized(&self) -> bool {
210 self.chunk_capacity.get() > 0
211 }
212
213 /// Returns the total capacity across all chunks.
214 #[inline]
215 pub fn capacity(&self) -> usize {
216 self.chunks().len() * self.chunk_capacity.get()
217 }
218
    /// Returns the configured per-chunk capacity (0 if not yet initialized).
    #[inline]
    pub fn chunk_capacity(&self) -> usize {
        self.chunk_capacity.get()
    }
224
    /// Shared view of the chunk vector.
    #[inline]
    fn chunks(&self) -> &Vec<ChunkEntry<T>> {
        // SAFETY: !Sync prevents shared access across threads.
        // Only one thread can hold &self at a time. Callers must not hold
        // this borrow across a call to grow() (which takes &mut).
        unsafe { &*self.chunks.get() }
    }
231
    /// Exclusive view of the chunk vector; used only by `grow()`.
    #[inline]
    #[allow(clippy::mut_from_ref)]
    fn chunks_mut(&self) -> &mut Vec<ChunkEntry<T>> {
        // SAFETY: !Sync prevents shared access across threads.
        // Only one thread can hold &self at a time, and no &Vec from
        // chunks() is live while this exclusive borrow is used.
        unsafe { &mut *self.chunks.get() }
    }
239
    /// Returns the chunk at `chunk_idx`, skipping the release-mode bounds
    /// check.
    ///
    /// Callers pass indices taken from the available-space list or from a
    /// live `Claim`; chunks are never removed, so these stay in range.
    fn chunk(&self, chunk_idx: usize) -> &ChunkEntry<T> {
        let chunks = self.chunks();
        debug_assert!(chunk_idx < chunks.len());
        // SAFETY: chunk_idx originates from internal bookkeeping (see doc
        // comment); in-bounds by invariant, checked above in debug builds.
        unsafe { chunks.get_unchecked(chunk_idx) }
    }
245
246 /// Returns the number of allocated chunks.
247 #[inline]
248 pub fn chunk_count(&self) -> usize {
249 self.chunks().len()
250 }
251
252 /// Returns `true` if `ptr` falls within any chunk's slot array.
253 ///
254 /// O(chunks) scan. Typically 1–5 chunks. Used in `debug_assert!`
255 /// to validate provenance.
256 #[doc(hidden)]
257 pub fn contains_ptr(&self, ptr: *const ()) -> bool {
258 let chunks = self.chunks();
259 for chunk in chunks {
260 let chunk_slab = &*chunk.inner;
261 if chunk_slab.contains_ptr(ptr) {
262 return true;
263 }
264 }
265 false
266 }
267
268 /// Ensures at least `count` chunks are allocated.
269 ///
270 /// No-op if the slab already has `count` or more chunks. Only allocates
271 /// the difference.
272 pub fn reserve_chunks(&self, count: usize) {
273 let current = self.chunks().len();
274 for _ in current..count {
275 self.grow();
276 }
277 }
278
279 /// Grows the slab by adding a single new chunk.
280 fn grow(&self) {
281 let chunks = self.chunks_mut();
282 let chunk_idx = chunks.len();
283 let inner = Box::new(BoundedSlab::with_capacity(self.chunk_capacity.get()));
284
285 let entry = ChunkEntry {
286 inner,
287 next_with_space: Cell::new(self.head_with_space.get()),
288 };
289
290 chunks.push(entry);
291 self.head_with_space.set(chunk_idx);
292 }
293
294 // =========================================================================
295 // Allocation API
296 // =========================================================================
297
298 /// Claims a slot from the freelist without writing a value.
299 ///
300 /// Always succeeds — grows the slab if needed. The returned [`Claim`]
301 /// must be consumed via [`Claim::write()`] to complete the allocation.
302 ///
303 /// This two-phase allocation enables placement new optimization: the
304 /// value can be constructed directly into the slot memory.
305 ///
306 /// # Example
307 ///
308 /// ```
309 /// use nexus_slab::unbounded::Slab;
310 ///
311 /// let slab = Slab::with_chunk_capacity(16);
312 /// let claim = slab.claim();
313 /// let slot = claim.write(42u64);
314 /// assert_eq!(*slot, 42);
315 /// // SAFETY: slot was allocated from this slab
316 /// unsafe { slab.free(slot) };
317 /// ```
318 #[inline]
319 pub fn claim(&self) -> Claim<'_, T> {
320 let (slot_ptr, chunk_idx) = self.claim_ptr();
321 Claim {
322 slot_ptr,
323 slab: self,
324 chunk_idx,
325 }
326 }
327
    /// Claims a slot from the freelist, returning the raw pointer and the
    /// index of the chunk that owns it.
    ///
    /// Always succeeds — grows the slab if needed. This is a low-level API for
    /// macro-generated code that needs to escape TLS closures.
    ///
    /// # Safety Contract
    ///
    /// The caller MUST either:
    /// - Write a value to the slot and use it as an allocated slot, OR
    /// - Return the pointer to the freelist via `free_ptr()` if abandoning
    #[doc(hidden)]
    #[inline]
    pub fn claim_ptr(&self) -> (*mut SlotCell<T>, usize) {
        // Ensure we have space (grow if needed)
        if self.head_with_space.get() == CHUNK_NONE {
            self.grow();
        }

        // Get the chunk with space — the front of the available-space list.
        let chunk_idx = self.head_with_space.get();
        let chunk = self.chunk(chunk_idx);
        let chunk_slab = &*chunk.inner;

        // Pop the head of this chunk's slot freelist.
        let slot_ptr = chunk_slab.free_head.get();
        debug_assert!(!slot_ptr.is_null(), "chunk on freelist has no free slots");

        // SAFETY: slot_ptr came from the freelist. Slot is vacant, so next_free is active.
        let next_free = unsafe { (*slot_ptr).get_next_free() };

        // Update chunk's freelist head
        chunk_slab.free_head.set(next_free);

        // If chunk is now full, remove it from the slab's available-chunk
        // list; free_ptr() / Claim's Drop will relink it when a slot frees.
        if next_free.is_null() {
            self.head_with_space.set(chunk.next_with_space.get());
        }

        (slot_ptr, chunk_idx)
    }
368
369 /// Allocates a slot and writes the value.
370 ///
371 /// Always succeeds — grows the slab if needed.
372 #[inline]
373 pub fn alloc(&self, value: T) -> RawSlot<T> {
374 let (slot_ptr, _chunk_idx) = self.claim_ptr();
375
376 // SAFETY: Slot is claimed from freelist, we have exclusive access
377 unsafe {
378 (*slot_ptr).write_value(value);
379 }
380
381 // SAFETY: slot_ptr is valid and occupied
382 unsafe { RawSlot::from_ptr(slot_ptr) }
383 }
384
385 /// Frees a slot, dropping the value and returning storage to the freelist.
386 ///
387 /// # Performance
388 ///
389 /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
390 ///
391 /// # Safety
392 ///
393 /// - `slot` must have been allocated from **this** slab
394 /// - No references to the slot's value may exist
395 #[inline]
396 #[allow(clippy::needless_pass_by_value)]
397 pub unsafe fn free(&self, slot: RawSlot<T>) {
398 let slot_ptr = slot.into_ptr();
399 debug_assert!(
400 self.contains_ptr(slot_ptr as *const ()),
401 "slot was not allocated from this slab"
402 );
403 // SAFETY: Caller guarantees slot is valid and occupied
404 unsafe {
405 (*slot_ptr).drop_value_in_place();
406 self.free_ptr(slot_ptr);
407 }
408 }
409
410 /// Frees a slot and returns the value without dropping it.
411 ///
412 /// # Performance
413 ///
414 /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
415 ///
416 /// # Safety
417 ///
418 /// - `slot` must have been allocated from **this** slab
419 /// - No references to the slot's value may exist
420 #[inline]
421 #[allow(clippy::needless_pass_by_value)]
422 pub unsafe fn take(&self, slot: RawSlot<T>) -> T {
423 let slot_ptr = slot.into_ptr();
424 debug_assert!(
425 self.contains_ptr(slot_ptr as *const ()),
426 "slot was not allocated from this slab"
427 );
428 // SAFETY: Caller guarantees slot is valid and occupied
429 unsafe {
430 let value = (*slot_ptr).read_value();
431 self.free_ptr(slot_ptr);
432 value
433 }
434 }
435
    /// Returns a slot to the freelist by pointer.
    ///
    /// Does NOT drop the value — caller must drop before calling.
    /// Finds the owning chunk via linear scan (typically 1-5 chunks).
    ///
    /// # Safety
    ///
    /// - `slot_ptr` must point to a slot within this slab
    /// - Value must already be dropped or moved out
    #[doc(hidden)]
    pub unsafe fn free_ptr(&self, slot_ptr: *mut SlotCell<T>) {
        let chunks = self.chunks();
        let cap = self.chunk_capacity.get();

        // Find which chunk owns this pointer by range-testing against each
        // chunk's slot array [base, base + cap).
        for (chunk_idx, chunk) in chunks.iter().enumerate() {
            let chunk_slab = &*chunk.inner;
            let base = chunk_slab.slots_ptr();
            // wrapping_add: `end` is one-past-the-end and only compared,
            // never dereferenced.
            let end = base.wrapping_add(cap);

            if slot_ptr >= base && slot_ptr < end {
                let free_head = chunk_slab.free_head.get();
                // A null head means the chunk was full and therefore off
                // the slab's available-space list.
                let was_full = free_head.is_null();

                // SAFETY: slot_ptr is within this chunk's range
                unsafe {
                    (*slot_ptr).set_next_free(free_head);
                }
                chunk_slab.free_head.set(slot_ptr);

                // Relink a previously-full chunk into the available list.
                if was_full {
                    chunk.next_with_space.set(self.head_with_space.get());
                    self.head_with_space.set(chunk_idx);
                }
                return;
            }
        }

        // Per the safety contract the pointer always belongs to some chunk.
        unreachable!("free_ptr: slot_ptr not found in any chunk");
    }
476}
477
478impl<T> Default for Slab<T> {
479 fn default() -> Self {
480 Self::new()
481 }
482}
483
484impl<T> fmt::Debug for Slab<T> {
485 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
486 f.debug_struct("Slab")
487 .field("capacity", &self.capacity())
488 .finish()
489 }
490}
491
492// =============================================================================
493// Tests
494// =============================================================================
495
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::{Borrow, BorrowMut};

    /// Alloc, read back, free.
    #[test]
    fn slab_basic() {
        let slab = Slab::<u64>::with_chunk_capacity(16);

        let handle = slab.alloc(42);
        assert_eq!(*handle, 42);
        // SAFETY: handle was allocated from this slab
        unsafe { slab.free(handle) };
    }

    /// Allocating past one chunk's capacity grows the slab.
    #[test]
    fn slab_grows() {
        let slab = Slab::<u64>::with_chunk_capacity(4);

        let handles: Vec<_> = (0..10).map(|i| slab.alloc(i)).collect();

        assert!(slab.capacity() >= 10);

        for handle in handles {
            // SAFETY: each handle was allocated from this slab
            unsafe { slab.free(handle) };
        }
    }

    /// Mutation through the slot handle's DerefMut.
    #[test]
    fn slot_deref_mut() {
        let slab = Slab::<String>::with_chunk_capacity(16);
        let mut handle = slab.alloc(String::from("hello"));
        handle.push_str(" world");
        assert_eq!(&*handle, "hello world");
        // SAFETY: handle was allocated from this slab
        unsafe { slab.free(handle) };
    }

    /// take() moves the value out instead of dropping it.
    #[test]
    fn slot_dealloc_take() {
        let slab = Slab::<String>::with_chunk_capacity(16);
        let handle = slab.alloc(String::from("hello"));

        // SAFETY: handle was allocated from this slab
        let recovered = unsafe { slab.take(handle) };
        assert_eq!(recovered, "hello");
    }

    /// A full chunk returns to the available list after a free.
    #[test]
    fn chunk_freelist_maintenance() {
        let slab = Slab::<u64>::with_chunk_capacity(2);

        // Fill the first chunk...
        let a = slab.alloc(1);
        let b = slab.alloc(2);
        // ...and force growth into a second one.
        let c = slab.alloc(3);

        // Freeing from the full first chunk should relink it.
        // SAFETY: a was allocated from this slab
        unsafe { slab.free(a) };

        // The freed slot should be reused.
        let d = slab.alloc(4);

        // SAFETY: all remaining handles came from this slab
        unsafe {
            slab.free(b);
            slab.free(c);
            slab.free(d);
        }
    }

    /// RawSlot stays pointer-sized.
    #[test]
    fn slot_size() {
        assert_eq!(std::mem::size_of::<RawSlot<u64>>(), 8);
    }

    /// Borrow / BorrowMut pass through to the slot's value.
    #[test]
    fn borrow_traits() {
        let slab = Slab::<u64>::with_chunk_capacity(16);
        let mut handle = slab.alloc(42);

        let shared: &u64 = handle.borrow();
        assert_eq!(*shared, 42);

        let exclusive: &mut u64 = handle.borrow_mut();
        *exclusive = 100;
        assert_eq!(*handle, 100);

        // SAFETY: handle was allocated from slab
        unsafe { slab.free(handle) };
    }
}
594}