// nexus_slab/unbounded.rs
1//! Growable slab allocator.
2//!
3//! This module provides an unbounded (growable) slab allocator.
4//! Growth happens by adding independent chunks — no copying.
5//!
6//! # Example
7//!
8//! ```
9//! use nexus_slab::unbounded::Slab;
10//!
11//! // SAFETY: caller guarantees slab contract (see struct docs)
12//! let slab = unsafe { Slab::with_chunk_capacity(4096) };
13//! let slot = slab.alloc(42u64);
14//! assert_eq!(*slot, 42);
15//! slab.free(slot);
16//! ```
17
18use core::cell::Cell;
19use core::fmt;
20use core::mem;
21
22use alloc::boxed::Box;
23use alloc::vec::Vec;
24
25use crate::bounded::Slab as BoundedSlab;
26use crate::shared::{Slot, SlotCell};
27
28// =============================================================================
29// Claim
30// =============================================================================
31
/// A claimed slot that has not yet been written to.
///
/// Created by [`Slab::claim()`]. Must be consumed via [`write()`](Self::write)
/// to complete the allocation. If dropped without calling `write()`, the slot
/// is returned to the freelist.
///
/// The `write()` method is `#[inline]`, enabling the compiler to potentially
/// optimize the value write as a placement new (constructing directly into
/// the slot memory).
pub struct Claim<'a, T> {
    /// Pointer to the claimed (still vacant) slot.
    slot_ptr: *mut SlotCell<T>,
    /// Owning slab; `Drop` uses it to return the slot on abandonment.
    slab: &'a Slab<T>,
    /// Index of the chunk that owns `slot_ptr`, letting `Drop` reach the
    /// right chunk freelist without a linear scan over chunks.
    chunk_idx: usize,
}
46
47impl<T> Claim<'_, T> {
48    /// Writes the value to the claimed slot and returns the [`Slot`] handle.
49    ///
50    /// This consumes the claim. The value is written directly to the slot's
51    /// memory, which may enable placement new optimization.
52    #[inline]
53    pub fn write(self, value: T) -> Slot<T> {
54        let slot_ptr = self.slot_ptr;
55        // SAFETY: We own this slot from claim(), it's valid and vacant
56        unsafe {
57            (*slot_ptr).write_value(value);
58        }
59        // Don't run Drop - we're completing the allocation
60        mem::forget(self);
61        // SAFETY: slot_ptr is valid and now occupied
62        unsafe { Slot::from_ptr(slot_ptr) }
63    }
64}
65
66impl<T> fmt::Debug for Claim<'_, T> {
67    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
68        f.debug_struct("Claim")
69            .field("slot_ptr", &self.slot_ptr)
70            .field("chunk_idx", &self.chunk_idx)
71            .finish()
72    }
73}
74
impl<T> Drop for Claim<'_, T> {
    // Runs only for an abandoned claim: `write()` calls `mem::forget(self)`
    // on the success path, so reaching Drop means the slot was never written.
    fn drop(&mut self) {
        // Abandoned claim - return slot to the correct chunk's freelist
        let chunk = self.slab.chunk(self.chunk_idx);
        let chunk_slab = &*chunk.inner;

        let free_head = chunk_slab.free_head.get();
        // A null freelist head means the chunk had no free slots, i.e. it is
        // currently NOT on the slab's available-space list.
        let was_full = free_head.is_null();

        // Push the slot onto the chunk's freelist (LIFO).
        // SAFETY: slot_ptr is valid and still vacant (never written to)
        unsafe {
            (*self.slot_ptr).set_next_free(free_head);
        }
        chunk_slab.free_head.set(self.slot_ptr);

        // If chunk was full, add it back to the available-space list
        if was_full {
            chunk.next_with_space.set(self.slab.head_with_space.get());
            self.slab.head_with_space.set(self.chunk_idx);
        }
    }
}
97
98// =============================================================================
99// Constants
100// =============================================================================
101
/// Sentinel index terminating the chunks-with-space list.
/// `usize::MAX` can never be a valid index into the chunk `Vec`.
const CHUNK_NONE: usize = usize::MAX;
104
105// =============================================================================
106// ChunkEntry
107// =============================================================================
108
/// Internal wrapper for a chunk in the growable slab.
struct ChunkEntry<T> {
    /// The fixed-capacity slab backing this chunk. Boxed so slot addresses
    /// stay stable when the outer `Vec<ChunkEntry>` reallocates on growth.
    inner: Box<BoundedSlab<T>>,
    /// Intrusive singly-linked list: index of the next chunk that has free
    /// slots, or `CHUNK_NONE` at the tail.
    next_with_space: Cell<usize>,
}
114
115// =============================================================================
116// Slab
117// =============================================================================
118
/// Growable slab allocator.
///
/// Uses independent chunks for growth — no copying when the slab grows.
///
/// Construction is `unsafe` — by creating a slab, you accept the contract:
///
/// - **Free everything you allocate.** Dropping the slab does NOT drop
///   values in occupied slots. Unfree'd slots leak silently.
/// - **Free from the same slab.** Passing a [`Slot`] to a different
///   slab's `free()` corrupts the freelist.
/// - **Don't share across threads.** The slab is `!Send` and `!Sync`.
///
/// # Const Construction
///
/// ```no_run
/// use nexus_slab::unbounded::Slab;
///
/// struct MyType(u64);
///
/// // SAFETY: single slab per type, freed before thread exit
/// thread_local! {
///     static SLAB: Slab<MyType> = const { unsafe { Slab::new() } };
/// }
/// SLAB.with(|s| unsafe { s.init(4096) });
/// ```
///
/// For direct usage, prefer [`with_chunk_capacity()`](Self::with_chunk_capacity).
pub struct Slab<T> {
    /// Chunk storage. `UnsafeCell` permits growth through `&self`;
    /// single-threaded access makes this sound (the type is `!Sync`).
    chunks: core::cell::UnsafeCell<Vec<ChunkEntry<T>>>,
    /// Slots per chunk; `0` means "not yet initialized" (see `init()`).
    chunk_capacity: Cell<usize>,
    /// Head of the intrusive list of chunks with free slots,
    /// or `CHUNK_NONE` when every chunk is full (or none exist).
    head_with_space: Cell<usize>,
}
151
152impl<T> Slab<T> {
153    /// Creates an empty, uninitialized slab.
154    ///
155    /// This is a const function that performs no allocation. Call [`init()`](Self::init)
156    /// to configure chunk capacity before use.
157    ///
158    /// For direct usage, prefer [`with_chunk_capacity()`](Self::with_chunk_capacity).
159    ///
160    /// # Safety
161    ///
162    /// See [struct-level safety contract](Self).
163    ///
164    /// # Example
165    ///
166    /// ```no_run
167    /// use nexus_slab::unbounded::Slab;
168    ///
169    /// // SAFETY: single slab per type, freed before thread exit
170    /// thread_local! {
171    ///     static SLAB: Slab<u64> = const { unsafe { Slab::new() } };
172    /// }
173    /// ```
174    #[inline]
175    pub const unsafe fn new() -> Self {
176        Self {
177            chunks: core::cell::UnsafeCell::new(Vec::new()),
178            chunk_capacity: Cell::new(0),
179            head_with_space: Cell::new(CHUNK_NONE),
180        }
181    }
182
183    /// Creates a new slab with the given chunk capacity.
184    ///
185    /// Chunks are allocated on-demand when slots are requested.
186    ///
187    /// # Panics
188    ///
189    /// # Safety
190    ///
191    /// See [struct-level safety contract](Self).
192    ///
193    /// # Panics
194    ///
195    /// Panics if chunk_capacity is zero.
196    #[inline]
197    pub unsafe fn with_chunk_capacity(chunk_capacity: usize) -> Self {
198        // SAFETY: caller upholds the slab contract
199        let slab = unsafe { Self::new() };
200        // SAFETY: caller upholds the slab contract
201        unsafe { slab.init(chunk_capacity) };
202        slab
203    }
204
205    /// Initializes the slab with the given chunk capacity.
206    ///
207    /// This configures the chunk parameters. Chunks are allocated on-demand
208    /// when slots are requested. Must be called exactly once before any allocations.
209    ///
210    /// # Safety
211    ///
212    /// See [struct-level safety contract](Self).
213    ///
214    /// # Panics
215    ///
216    /// - Panics if the slab is already initialized (chunk_capacity > 0)
217    /// - Panics if chunk_capacity is zero
218    pub unsafe fn init(&self, chunk_capacity: usize) {
219        assert!(self.chunk_capacity.get() == 0, "Slab already initialized");
220        assert!(chunk_capacity > 0, "chunk_capacity must be non-zero");
221
222        self.chunk_capacity.set(chunk_capacity);
223    }
224
225    /// Returns true if the slab has been initialized.
226    #[inline]
227    pub fn is_initialized(&self) -> bool {
228        self.chunk_capacity.get() > 0
229    }
230
231    /// Returns the total capacity across all chunks.
232    #[inline]
233    pub fn capacity(&self) -> usize {
234        self.chunks().len() * self.chunk_capacity.get()
235    }
236
237    /// Returns the chunk capacity.
238    #[inline]
239    pub fn chunk_capacity(&self) -> usize {
240        self.chunk_capacity.get()
241    }
242
243    #[inline]
244    fn chunks(&self) -> &Vec<ChunkEntry<T>> {
245        // SAFETY: !Sync prevents shared access across threads.
246        // Only one thread can hold &self at a time.
247        unsafe { &*self.chunks.get() }
248    }
249
250    #[inline]
251    #[allow(clippy::mut_from_ref)]
252    fn chunks_mut(&self) -> &mut Vec<ChunkEntry<T>> {
253        // SAFETY: !Sync prevents shared access across threads.
254        // Only one thread can hold &self at a time.
255        unsafe { &mut *self.chunks.get() }
256    }
257
258    fn chunk(&self, chunk_idx: usize) -> &ChunkEntry<T> {
259        let chunks = self.chunks();
260        debug_assert!(chunk_idx < chunks.len());
261        unsafe { chunks.get_unchecked(chunk_idx) }
262    }
263
264    /// Returns the number of allocated chunks.
265    #[inline]
266    pub fn chunk_count(&self) -> usize {
267        self.chunks().len()
268    }
269
270    /// Returns `true` if `ptr` falls within any chunk's slot array.
271    ///
272    /// O(chunks) scan. Typically 1–5 chunks. Used in `debug_assert!`
273    /// to validate provenance.
274    #[doc(hidden)]
275    pub fn contains_ptr(&self, ptr: *const ()) -> bool {
276        let chunks = self.chunks();
277        for chunk in chunks {
278            let chunk_slab = &*chunk.inner;
279            if chunk_slab.contains_ptr(ptr) {
280                return true;
281            }
282        }
283        false
284    }
285
286    /// Ensures at least `count` chunks are allocated.
287    ///
288    /// No-op if the slab already has `count` or more chunks. Only allocates
289    /// the difference.
290    pub fn reserve_chunks(&self, count: usize) {
291        let current = self.chunks().len();
292        for _ in current..count {
293            self.grow();
294        }
295    }
296
297    /// Grows the slab by adding a single new chunk.
298    fn grow(&self) {
299        let chunks = self.chunks_mut();
300        let chunk_idx = chunks.len();
301        // SAFETY: The outer slab's construction was unsafe, so the caller
302        // already accepted the slab contract. Inner chunks inherit that contract.
303        let inner = Box::new(unsafe { BoundedSlab::with_capacity(self.chunk_capacity.get()) });
304
305        let entry = ChunkEntry {
306            inner,
307            next_with_space: Cell::new(self.head_with_space.get()),
308        };
309
310        chunks.push(entry);
311        self.head_with_space.set(chunk_idx);
312    }
313
314    // =========================================================================
315    // Allocation API
316    // =========================================================================
317
318    /// Claims a slot from the freelist without writing a value.
319    ///
320    /// Always succeeds — grows the slab if needed. The returned [`Claim`]
321    /// must be consumed via [`Claim::write()`] to complete the allocation.
322    ///
323    /// This two-phase allocation enables placement new optimization: the
324    /// value can be constructed directly into the slot memory.
325    ///
326    /// # Example
327    ///
328    /// ```
329    /// use nexus_slab::unbounded::Slab;
330    ///
331    /// // SAFETY: caller guarantees slab contract (see struct docs)
332    /// let slab = unsafe { Slab::with_chunk_capacity(16) };
333    /// let claim = slab.claim();
334    /// let slot = claim.write(42u64);
335    /// assert_eq!(*slot, 42);
336    /// slab.free(slot);
337    /// ```
338    #[inline]
339    pub fn claim(&self) -> Claim<'_, T> {
340        let (slot_ptr, chunk_idx) = self.claim_ptr();
341        Claim {
342            slot_ptr,
343            slab: self,
344            chunk_idx,
345        }
346    }
347
348    /// Claims a slot from the freelist, returning the raw pointer and chunk index.
349    ///
350    /// Always succeeds — grows the slab if needed. This is a low-level API for
351    /// macro-generated code that needs to escape TLS closures.
352    ///
353    /// # Safety Contract
354    ///
355    /// The caller MUST either:
356    /// - Write a value to the slot and use it as an allocated slot, OR
357    /// - Return the pointer to the freelist via `free_ptr()` if abandoning
358    #[doc(hidden)]
359    #[inline]
360    pub(crate) fn claim_ptr(&self) -> (*mut SlotCell<T>, usize) {
361        // Ensure we have space (grow if needed)
362        if self.head_with_space.get() == CHUNK_NONE {
363            self.grow();
364        }
365
366        // Get the chunk with space
367        let chunk_idx = self.head_with_space.get();
368        let chunk = self.chunk(chunk_idx);
369        let chunk_slab = &*chunk.inner;
370
371        // Load freelist head pointer from chunk
372        let slot_ptr = chunk_slab.free_head.get();
373        debug_assert!(!slot_ptr.is_null(), "chunk on freelist has no free slots");
374
375        // SAFETY: slot_ptr came from the freelist. Slot is vacant, so next_free is active.
376        let next_free = unsafe { (*slot_ptr).get_next_free() };
377
378        // Update chunk's freelist head
379        chunk_slab.free_head.set(next_free);
380
381        // If chunk is now full, remove from slab's available-chunk list
382        if next_free.is_null() {
383            self.head_with_space.set(chunk.next_with_space.get());
384        }
385
386        (slot_ptr, chunk_idx)
387    }
388
389    /// Allocates a slot and writes the value.
390    ///
391    /// Always succeeds — grows the slab if needed.
392    #[inline]
393    pub fn alloc(&self, value: T) -> Slot<T> {
394        self.claim().write(value)
395    }
396
397    /// Frees a slot, dropping the value and returning storage to the freelist.
398    ///
399    /// Consumes the handle — the slot cannot be used after this call.
400    ///
401    /// # Performance
402    ///
403    /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
404    #[inline]
405    #[allow(clippy::needless_pass_by_value)]
406    pub fn free(&self, slot: Slot<T>) {
407        let slot_ptr = slot.into_raw();
408        debug_assert!(
409            self.contains_ptr(slot_ptr as *const ()),
410            "slot was not allocated from this slab"
411        );
412        // SAFETY: Caller guarantees slot is valid and occupied
413        unsafe {
414            (*slot_ptr).drop_value_in_place();
415            self.free_ptr(slot_ptr);
416        }
417    }
418
419    /// Frees a slot and returns the value without dropping it.
420    ///
421    /// Consumes the handle — the slot cannot be used after this call.
422    ///
423    /// # Performance
424    ///
425    /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
426    #[inline]
427    #[allow(clippy::needless_pass_by_value)]
428    pub fn take(&self, slot: Slot<T>) -> T {
429        let slot_ptr = slot.into_raw();
430        debug_assert!(
431            self.contains_ptr(slot_ptr as *const ()),
432            "slot was not allocated from this slab"
433        );
434        // SAFETY: Caller guarantees slot is valid and occupied
435        unsafe {
436            let value = (*slot_ptr).read_value();
437            self.free_ptr(slot_ptr);
438            value
439        }
440    }
441
442    /// Returns a slot to the freelist by pointer.
443    ///
444    /// Does NOT drop the value — caller must drop before calling.
445    /// Finds the owning chunk via linear scan (typically 1-5 chunks).
446    ///
447    /// # Safety
448    ///
449    /// - `slot_ptr` must point to a slot within this slab
450    /// - Value must already be dropped or moved out
451    #[doc(hidden)]
452    pub(crate) unsafe fn free_ptr(&self, slot_ptr: *mut SlotCell<T>) {
453        let chunks = self.chunks();
454        let cap = self.chunk_capacity.get();
455
456        // Find which chunk owns this pointer
457        for (chunk_idx, chunk) in chunks.iter().enumerate() {
458            let chunk_slab = &*chunk.inner;
459            let base = chunk_slab.slots_ptr();
460            let end = base.wrapping_add(cap);
461
462            if slot_ptr >= base && slot_ptr < end {
463                let free_head = chunk_slab.free_head.get();
464                let was_full = free_head.is_null();
465
466                // SAFETY: slot_ptr is within this chunk's range
467                unsafe {
468                    (*slot_ptr).set_next_free(free_head);
469                }
470                chunk_slab.free_head.set(slot_ptr);
471
472                if was_full {
473                    chunk.next_with_space.set(self.head_with_space.get());
474                    self.head_with_space.set(chunk_idx);
475                }
476                return;
477            }
478        }
479
480        unreachable!("free_ptr: slot_ptr not found in any chunk");
481    }
482}
483
484impl<T> fmt::Debug for Slab<T> {
485    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
486        f.debug_struct("Slab")
487            .field("capacity", &self.capacity())
488            .finish()
489    }
490}
491
492// =============================================================================
493// Tests
494// =============================================================================
495
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::{Borrow, BorrowMut};

    #[test]
    fn slab_basic() {
        // SAFETY: test-local slab; the single allocation is freed below.
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(16) };

        let handle = slab.alloc(42);
        assert_eq!(*handle, 42);
        slab.free(handle);
    }

    #[test]
    fn slab_grows() {
        // SAFETY: test-local slab; all allocations are freed below.
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(4) };

        // Ten allocations against a chunk capacity of four forces growth.
        let handles: Vec<_> = (0..10).map(|i| slab.alloc(i)).collect();

        assert!(slab.capacity() >= 10);

        for handle in handles {
            slab.free(handle);
        }
    }

    #[test]
    fn slot_deref_mut() {
        // SAFETY: test-local slab; the single allocation is freed below.
        let slab = unsafe { Slab::<String>::with_chunk_capacity(16) };
        let mut handle = slab.alloc("hello".to_string());
        handle.push_str(" world");
        assert_eq!(&*handle, "hello world");
        slab.free(handle);
    }

    #[test]
    fn slot_dealloc_take() {
        // SAFETY: test-local slab; take() frees the slot while extracting the value.
        let slab = unsafe { Slab::<String>::with_chunk_capacity(16) };
        let handle = slab.alloc("hello".to_string());

        assert_eq!(slab.take(handle), "hello");
    }

    #[test]
    fn chunk_freelist_maintenance() {
        // SAFETY: test-local slab; every allocation is freed below.
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(2) };

        // Fill the first chunk entirely...
        let a = slab.alloc(1);
        let b = slab.alloc(2);
        // ...then force a second chunk to be allocated.
        let c = slab.alloc(3);

        // Freeing from the full first chunk must re-link it onto the
        // available-space list...
        slab.free(a);

        // ...so the next allocation can reuse the freed slot.
        let d = slab.alloc(4);

        slab.free(b);
        slab.free(c);
        slab.free(d);
    }

    #[test]
    fn slot_size() {
        // A Slot handle is a single pointer wide.
        assert_eq!(std::mem::size_of::<Slot<u64>>(), 8);
    }

    #[test]
    fn borrow_traits() {
        // SAFETY: test-local slab; the single allocation is freed below.
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(16) };
        let mut handle = slab.alloc(42);

        let shared: &u64 = handle.borrow();
        assert_eq!(*shared, 42);

        let exclusive: &mut u64 = handle.borrow_mut();
        *exclusive = 100;
        assert_eq!(*handle, 100);

        slab.free(handle);
    }
}