// nexus_slab/unbounded.rs
1//! Growable slab allocator.
2//!
3//! This module provides an unbounded (growable) slab allocator.
4//! Growth happens by adding independent chunks — no copying.
5//!
6//! # Example
7//!
8//! ```
9//! use nexus_slab::unbounded::Slab;
10//!
11//! // SAFETY: caller guarantees slab contract (see struct docs)
12//! let slab = unsafe { Slab::with_chunk_capacity(4096) };
13//! let slot = slab.alloc(42u64);
14//! assert_eq!(*slot, 42);
15//! slab.free(slot);
16//! ```
17
18use core::cell::Cell;
19use core::fmt;
20use core::mem;
21
22use alloc::boxed::Box;
23use alloc::vec::Vec;
24
25use crate::bounded::Slab as BoundedSlab;
26use crate::shared::{Slot, SlotCell};
27
28// =============================================================================
29// Claim
30// =============================================================================
31
32/// A claimed slot that has not yet been written to.
33///
34/// Created by [`Slab::claim()`]. Must be consumed via [`write()`](Self::write)
35/// to complete the allocation. If dropped without calling `write()`, the slot
36/// is returned to the freelist.
37///
38/// The `write()` method is `#[inline]`, enabling the compiler to potentially
39/// optimize the value write as a placement new (constructing directly into
40/// the slot memory).
pub struct Claim<'a, T> {
    /// Pointer to the claimed (still-vacant) slot cell.
    slot_ptr: *mut SlotCell<T>,
    /// Back-reference to the owning slab, used to return the slot to the
    /// correct chunk's freelist if the claim is dropped unwritten.
    slab: &'a Slab<T>,
    /// Index of the chunk that owns `slot_ptr`, so the drop path is O(1).
    chunk_idx: usize,
}
46
impl<T> Claim<'_, T> {
    /// Writes the value to the claimed slot and returns the [`Slot`] handle.
    ///
    /// This consumes the claim. The value is written directly to the slot's
    /// memory, which may enable placement new optimization.
    #[inline]
    pub fn write(self, value: T) -> Slot<T> {
        let slot_ptr = self.slot_ptr;
        // SAFETY: We own this slot from claim(), it's valid and vacant
        unsafe {
            (*slot_ptr).write_value(value);
        }
        // Don't run Drop - we're completing the allocation
        // (Drop would push the now-occupied slot back onto the freelist).
        mem::forget(self);
        // SAFETY: slot_ptr is valid and now occupied
        unsafe { Slot::from_ptr(slot_ptr) }
    }

    /// Extract the raw slot pointer and chunk index, consuming the claim.
    ///
    /// Transfers ownership to the caller — the slot will NOT be returned
    /// to the freelist on drop. The caller must either write a value and
    /// eventually free it, or return the slot via `free_ptr()`.
    #[inline]
    pub(crate) fn into_ptr(self) -> (*mut SlotCell<T>, usize) {
        let ptr = self.slot_ptr;
        let chunk_idx = self.chunk_idx;
        // Suppress Drop so the (still-vacant) slot is not re-queued here;
        // responsibility for the slot transfers to the caller.
        mem::forget(self);
        (ptr, chunk_idx)
    }
}
78
79impl<T> fmt::Debug for Claim<'_, T> {
80    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
81        f.debug_struct("Claim")
82            .field("slot_ptr", &self.slot_ptr)
83            .field("chunk_idx", &self.chunk_idx)
84            .finish()
85    }
86}
87
impl<T> Drop for Claim<'_, T> {
    /// Returns the never-written slot to its chunk's freelist.
    ///
    /// Runs only when a claim is abandoned without calling `write()` —
    /// both `write()` and `into_ptr()` suppress this via `mem::forget`.
    fn drop(&mut self) {
        // Abandoned claim - return slot to the correct chunk's freelist
        let chunk = self.slab.chunk(self.chunk_idx);
        let chunk_slab = &*chunk.inner;

        let free_head = chunk_slab.free_head.get();
        // A null free_head means the chunk had no vacant slots, so it is
        // currently absent from the slab's available-space list.
        let was_full = free_head.is_null();

        // SAFETY: slot_ptr is valid and still vacant (never written to)
        unsafe {
            (*self.slot_ptr).set_next_free(free_head);
        }
        // Push the slot onto the chunk's freelist.
        chunk_slab.free_head.set(self.slot_ptr);

        // If chunk was full, add it back to the available-space list
        if was_full {
            chunk.next_with_space.set(self.slab.head_with_space.get());
            self.slab.head_with_space.set(self.chunk_idx);
        }
    }
}
110
111// =============================================================================
112// Constants
113// =============================================================================
114
/// Sentinel chunk index meaning "no chunk" — terminates the intrusive
/// `head_with_space` / `next_with_space` available-chunk list.
const CHUNK_NONE: usize = usize::MAX;
117
118// =============================================================================
119// ChunkEntry
120// =============================================================================
121
/// Internal wrapper for a chunk in the growable slab.
struct ChunkEntry<T> {
    /// The fixed-capacity chunk storing the actual slots. Boxed so the slot
    /// storage keeps a stable address when the outer `Vec` of entries
    /// reallocates on growth (outstanding `Slot`s point into it).
    inner: Box<BoundedSlab<T>>,
    /// Next chunk index in the slab's available-space list;
    /// `CHUNK_NONE` terminates the list.
    next_with_space: Cell<usize>,
}
127
128// =============================================================================
129// Slab
130// =============================================================================
131
132/// Growable slab allocator.
133///
134/// Uses independent chunks for growth — no copying when the slab grows.
135///
136/// # Safety Contract
137///
138/// Construction is `unsafe` because it opts you into manual memory
139/// management. By creating a slab, you accept these invariants:
140///
141/// - **Free from the correct slab.** Passing a [`Slot`] to a different
142///   slab's `free()` is undefined behavior — it corrupts the freelist.
143///   In debug builds, this is caught by `debug_assert!`.
144/// - **Free everything you allocate.** Dropping the slab does NOT drop
145///   values in occupied slots. Unfreed slots leak silently.
146/// - **Single-threaded.** The slab is `!Send` and `!Sync`.
147///
148/// ## Why `free()` is safe
149///
150/// The safety contract is accepted once, at construction. After that:
151/// - [`Slot`] is move-only (no `Copy`, no `Clone`) — double-free is
152///   prevented by the type system.
153/// - `free()` consumes the `Slot` — the handle cannot be used after.
154/// - Cross-slab misuse is the only remaining hazard, and it was
155///   accepted as the caller's responsibility at construction time.
pub struct Slab<T> {
    /// Chunk table. `UnsafeCell` lets `grow()` push through `&self`;
    /// single-threaded access is guaranteed because the type is `!Sync`
    /// (it contains `Cell` fields).
    chunks: core::cell::UnsafeCell<Vec<ChunkEntry<T>>>,
    /// Capacity of every chunk; set once at construction.
    chunk_capacity: Cell<usize>,
    /// Head of the intrusive list of chunks that still have free slots
    /// (`CHUNK_NONE` when no chunk has space or none exist yet).
    head_with_space: Cell<usize>,
}
161
162impl<T> Slab<T> {
163    /// Creates a new slab with the given chunk capacity.
164    ///
165    /// Chunks are allocated on-demand when slots are requested.
166    ///
167    /// # Safety
168    ///
169    /// See [struct-level safety contract](Self).
170    ///
171    /// # Panics
172    ///
173    /// Panics if `chunk_capacity` is zero.
174    #[inline]
175    pub unsafe fn with_chunk_capacity(chunk_capacity: usize) -> Self {
176        // SAFETY: caller upholds the slab contract
177        unsafe { Builder::new().chunk_capacity(chunk_capacity).build() }
178    }
179
180    /// Returns the total capacity across all chunks.
181    #[inline]
182    pub fn capacity(&self) -> usize {
183        self.chunks().len() * self.chunk_capacity.get()
184    }
185
186    /// Returns the chunk capacity.
187    #[inline]
188    pub fn chunk_capacity(&self) -> usize {
189        self.chunk_capacity.get()
190    }
191
192    #[inline]
193    fn chunks(&self) -> &Vec<ChunkEntry<T>> {
194        // SAFETY: !Sync prevents shared access across threads.
195        // Only one thread can hold &self at a time.
196        unsafe { &*self.chunks.get() }
197    }
198
199    #[inline]
200    #[allow(clippy::mut_from_ref)]
201    fn chunks_mut(&self) -> &mut Vec<ChunkEntry<T>> {
202        // SAFETY: !Sync prevents shared access across threads.
203        // Only one thread can hold &self at a time.
204        unsafe { &mut *self.chunks.get() }
205    }
206
207    fn chunk(&self, chunk_idx: usize) -> &ChunkEntry<T> {
208        let chunks = self.chunks();
209        debug_assert!(chunk_idx < chunks.len());
210        unsafe { chunks.get_unchecked(chunk_idx) }
211    }
212
213    /// Returns the number of allocated chunks.
214    #[inline]
215    pub fn chunk_count(&self) -> usize {
216        self.chunks().len()
217    }
218
219    /// Returns `true` if `ptr` falls within any chunk's slot array.
220    ///
221    /// O(chunks) scan. Typically 1–5 chunks. Used in `debug_assert!`
222    /// to validate provenance.
223    #[doc(hidden)]
224    pub fn contains_ptr(&self, ptr: *const ()) -> bool {
225        let chunks = self.chunks();
226        for chunk in chunks {
227            let chunk_slab = &*chunk.inner;
228            if chunk_slab.contains_ptr(ptr) {
229                return true;
230            }
231        }
232        false
233    }
234
235    /// Ensures at least `count` chunks are allocated.
236    ///
237    /// No-op if the slab already has `count` or more chunks. Only allocates
238    /// the difference.
239    pub fn reserve_chunks(&self, count: usize) {
240        let current = self.chunks().len();
241        for _ in current..count {
242            self.grow();
243        }
244    }
245
246    /// Grows the slab by adding a single new chunk.
247    fn grow(&self) {
248        let chunks = self.chunks_mut();
249        let chunk_idx = chunks.len();
250        // SAFETY: The outer slab's construction was unsafe, so the caller
251        // already accepted the slab contract. Inner chunks inherit that contract.
252        let inner = Box::new(unsafe { BoundedSlab::with_capacity(self.chunk_capacity.get()) });
253
254        let entry = ChunkEntry {
255            inner,
256            next_with_space: Cell::new(self.head_with_space.get()),
257        };
258
259        chunks.push(entry);
260        self.head_with_space.set(chunk_idx);
261    }
262
263    // =========================================================================
264    // Allocation API
265    // =========================================================================
266
267    /// Claims a slot from the freelist without writing a value.
268    ///
269    /// Always succeeds — grows the slab if needed. The returned [`Claim`]
270    /// must be consumed via [`Claim::write()`] to complete the allocation.
271    ///
272    /// This two-phase allocation enables placement new optimization: the
273    /// value can be constructed directly into the slot memory.
274    ///
275    /// # Example
276    ///
277    /// ```
278    /// use nexus_slab::unbounded::Slab;
279    ///
280    /// // SAFETY: caller guarantees slab contract (see struct docs)
281    /// let slab = unsafe { Slab::with_chunk_capacity(16) };
282    /// let claim = slab.claim();
283    /// let slot = claim.write(42u64);
284    /// assert_eq!(*slot, 42);
285    /// slab.free(slot);
286    /// ```
287    #[inline]
288    pub fn claim(&self) -> Claim<'_, T> {
289        let (slot_ptr, chunk_idx) = self.claim_ptr();
290        Claim {
291            slot_ptr,
292            slab: self,
293            chunk_idx,
294        }
295    }
296
297    /// Claims a slot from the freelist, returning the raw pointer and chunk index.
298    ///
299    /// Always succeeds — grows the slab if needed. This is a low-level API for
300    /// macro-generated code that needs to escape TLS closures.
301    ///
302    /// # Safety Contract
303    ///
304    /// The caller MUST either:
305    /// - Write a value to the slot and use it as an allocated slot, OR
306    /// - Return the pointer to the freelist via `free_ptr()` if abandoning
307    #[doc(hidden)]
308    #[inline]
309    pub(crate) fn claim_ptr(&self) -> (*mut SlotCell<T>, usize) {
310        // Ensure we have space (grow if needed)
311        if self.head_with_space.get() == CHUNK_NONE {
312            self.grow();
313        }
314
315        // Get the chunk with space
316        let chunk_idx = self.head_with_space.get();
317        let chunk = self.chunk(chunk_idx);
318        let chunk_slab = &*chunk.inner;
319
320        // Load freelist head pointer from chunk
321        let slot_ptr = chunk_slab.free_head.get();
322        debug_assert!(!slot_ptr.is_null(), "chunk on freelist has no free slots");
323
324        // SAFETY: slot_ptr came from the freelist. Slot is vacant, so next_free is active.
325        let next_free = unsafe { (*slot_ptr).get_next_free() };
326
327        // Update chunk's freelist head
328        chunk_slab.free_head.set(next_free);
329
330        // If chunk is now full, remove from slab's available-chunk list
331        if next_free.is_null() {
332            self.head_with_space.set(chunk.next_with_space.get());
333        }
334
335        (slot_ptr, chunk_idx)
336    }
337
338    /// Allocates a slot and writes the value.
339    ///
340    /// Always succeeds — grows the slab if needed.
341    #[inline]
342    pub fn alloc(&self, value: T) -> Slot<T> {
343        self.claim().write(value)
344    }
345
346    /// Frees a slot, dropping the value and returning storage to the freelist.
347    ///
348    /// Consumes the handle — the slot cannot be used after this call.
349    ///
350    /// # Performance
351    ///
352    /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
353    #[inline]
354    #[allow(clippy::needless_pass_by_value)]
355    pub fn free(&self, slot: Slot<T>) {
356        let slot_ptr = slot.into_raw();
357        debug_assert!(
358            self.contains_ptr(slot_ptr as *const ()),
359            "slot was not allocated from this slab"
360        );
361        // SAFETY: Caller guarantees slot is valid and occupied
362        unsafe {
363            (*slot_ptr).drop_value_in_place();
364            self.free_ptr(slot_ptr);
365        }
366    }
367
368    /// Frees a slot and returns the value without dropping it.
369    ///
370    /// Consumes the handle — the slot cannot be used after this call.
371    ///
372    /// # Performance
373    ///
374    /// O(n) where n = chunk count, due to chunk lookup. Typically 1-5 chunks.
375    #[inline]
376    #[allow(clippy::needless_pass_by_value)]
377    pub fn take(&self, slot: Slot<T>) -> T {
378        let slot_ptr = slot.into_raw();
379        debug_assert!(
380            self.contains_ptr(slot_ptr as *const ()),
381            "slot was not allocated from this slab"
382        );
383        // SAFETY: Caller guarantees slot is valid and occupied
384        unsafe {
385            let value = (*slot_ptr).read_value();
386            self.free_ptr(slot_ptr);
387            value
388        }
389    }
390
391    /// Returns a slot to the freelist by pointer, given the chunk index.
392    ///
393    /// O(1) — goes directly to the correct chunk's freelist.
394    /// Does NOT drop the value — caller must drop before calling.
395    ///
396    /// # Safety
397    ///
398    /// - `slot_ptr` must point to a slot within chunk `chunk_idx`
399    /// - Value must already be dropped or moved out
400    #[doc(hidden)]
401    pub(crate) unsafe fn free_ptr_in_chunk(&self, slot_ptr: *mut SlotCell<T>, chunk_idx: usize) {
402        let chunk = self.chunk(chunk_idx);
403        let chunk_slab = &*chunk.inner;
404
405        let free_head = chunk_slab.free_head.get();
406        let was_full = free_head.is_null();
407
408        unsafe {
409            (*slot_ptr).set_next_free(free_head);
410        }
411        chunk_slab.free_head.set(slot_ptr);
412
413        if was_full {
414            chunk.next_with_space.set(self.head_with_space.get());
415            self.head_with_space.set(chunk_idx);
416        }
417    }
418
419    /// Returns a slot to the freelist by pointer.
420    ///
421    /// Does NOT drop the value — caller must drop before calling.
422    /// Finds the owning chunk via linear scan (typically 1-5 chunks).
423    ///
424    /// # Safety
425    ///
426    /// - `slot_ptr` must point to a slot within this slab
427    /// - Value must already be dropped or moved out
428    #[doc(hidden)]
429    pub(crate) unsafe fn free_ptr(&self, slot_ptr: *mut SlotCell<T>) {
430        let chunks = self.chunks();
431        let cap = self.chunk_capacity.get();
432
433        // Find which chunk owns this pointer
434        for (chunk_idx, chunk) in chunks.iter().enumerate() {
435            let chunk_slab = &*chunk.inner;
436            let base = chunk_slab.slots_ptr();
437            let end = base.wrapping_add(cap);
438
439            if slot_ptr >= base && slot_ptr < end {
440                let free_head = chunk_slab.free_head.get();
441                let was_full = free_head.is_null();
442
443                // SAFETY: slot_ptr is within this chunk's range
444                unsafe {
445                    (*slot_ptr).set_next_free(free_head);
446                }
447                chunk_slab.free_head.set(slot_ptr);
448
449                if was_full {
450                    chunk.next_with_space.set(self.head_with_space.get());
451                    self.head_with_space.set(chunk_idx);
452                }
453                return;
454            }
455        }
456
457        unreachable!("free_ptr: slot_ptr not found in any chunk");
458    }
459}
460
461// =============================================================================
462// Builder
463// =============================================================================
464
465/// Builder for [`Slab`].
466///
467/// Configures chunk capacity and optional pre-allocation before constructing
468/// the slab. The type parameter only appears at the terminal [`build()`](Self::build)
469/// call.
470///
471/// # Example
472///
473/// ```
474/// use nexus_slab::unbounded::Builder;
475///
476/// // SAFETY: caller guarantees slab contract (see Slab docs)
477/// let slab = unsafe {
478///     Builder::new()
479///         .chunk_capacity(4096)
480///         .initial_chunks(4)
481///         .build::<u64>()
482/// };
483/// let slot = slab.alloc(42);
484/// assert_eq!(*slot, 42);
485/// slab.free(slot);
486/// ```
pub struct Builder {
    /// Capacity of each chunk; must be non-zero by `build()` time.
    chunk_capacity: usize,
    /// Number of chunks to pre-allocate at `build()` (0 = lazy growth).
    initial_chunks: usize,
}
491
492impl Builder {
493    /// Creates a new builder with default settings.
494    ///
495    /// Defaults: `chunk_capacity = 256`, `initial_chunks = 0` (lazy growth).
496    #[inline]
497    pub fn new() -> Self {
498        Self {
499            chunk_capacity: 256,
500            initial_chunks: 0,
501        }
502    }
503
504    /// Sets the capacity of each chunk.
505    ///
506    /// # Panics
507    ///
508    /// Panics at [`build()`](Self::build) if zero.
509    #[inline]
510    pub fn chunk_capacity(mut self, cap: usize) -> Self {
511        self.chunk_capacity = cap;
512        self
513    }
514
515    /// Sets the number of chunks to pre-allocate.
516    ///
517    /// Default is 0 (lazy growth — chunks allocated on first use).
518    #[inline]
519    pub fn initial_chunks(mut self, n: usize) -> Self {
520        self.initial_chunks = n;
521        self
522    }
523
524    /// Builds the slab.
525    ///
526    /// # Safety
527    ///
528    /// See [`Slab`] safety contract.
529    ///
530    /// # Panics
531    ///
532    /// Panics if `chunk_capacity` is zero.
533    #[inline]
534    pub unsafe fn build<T>(self) -> Slab<T> {
535        assert!(self.chunk_capacity > 0, "chunk_capacity must be non-zero");
536
537        let slab = Slab {
538            chunks: core::cell::UnsafeCell::new(Vec::new()),
539            chunk_capacity: Cell::new(self.chunk_capacity),
540            head_with_space: Cell::new(CHUNK_NONE),
541        };
542
543        for _ in 0..self.initial_chunks {
544            slab.grow();
545        }
546        slab
547    }
548}
549
550impl Default for Builder {
551    fn default() -> Self {
552        Self::new()
553    }
554}
555
556impl fmt::Debug for Builder {
557    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
558        f.debug_struct("Builder")
559            .field("chunk_capacity", &self.chunk_capacity)
560            .field("initial_chunks", &self.initial_chunks)
561            .finish()
562    }
563}
564
565impl<T> fmt::Debug for Slab<T> {
566    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
567        f.debug_struct("Slab")
568            .field("capacity", &self.capacity())
569            .finish()
570    }
571}
572
573// =============================================================================
574// Tests
575// =============================================================================
576
#[cfg(test)]
mod tests {
    use super::*;
    use std::borrow::{Borrow, BorrowMut};

    /// Smoke test: alloc, read through Deref, free.
    #[test]
    fn slab_basic() {
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(16) };

        let slot = slab.alloc(42);
        assert_eq!(*slot, 42);
        slab.free(slot);
    }

    /// Allocating past one chunk's capacity must grow the slab.
    #[test]
    fn slab_grows() {
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(4) };

        let mut slots = Vec::new();
        for i in 0..10 {
            slots.push(slab.alloc(i));
        }

        assert!(slab.capacity() >= 10);

        for slot in slots {
            slab.free(slot);
        }
    }

    /// Mutation through the slot handle's DerefMut.
    #[test]
    fn slot_deref_mut() {
        let slab = unsafe { Slab::<String>::with_chunk_capacity(16) };
        let mut slot = slab.alloc("hello".to_string());
        slot.push_str(" world");
        assert_eq!(&*slot, "hello world");
        slab.free(slot);
    }

    /// `take()` returns the value by move instead of dropping it.
    #[test]
    fn slot_dealloc_take() {
        let slab = unsafe { Slab::<String>::with_chunk_capacity(16) };
        let slot = slab.alloc("hello".to_string());

        let value = slab.take(slot);
        assert_eq!(value, "hello");
    }

    /// Exercises the full→has-space transition on the available-chunk list.
    #[test]
    fn chunk_freelist_maintenance() {
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(2) };

        // Fill first chunk
        let s1 = slab.alloc(1);
        let s2 = slab.alloc(2);
        // Triggers growth
        let s3 = slab.alloc(3);

        // Free from first chunk — should add it back to available list
        slab.free(s1);

        // Should reuse the freed slot
        let s4 = slab.alloc(4);

        slab.free(s2);
        slab.free(s3);
        slab.free(s4);
    }

    /// Slot handle should stay pointer-sized (niche-friendly).
    #[test]
    fn slot_size() {
        assert_eq!(std::mem::size_of::<Slot<u64>>(), 8);
    }

    /// Borrow/BorrowMut delegate to the stored value.
    #[test]
    fn borrow_traits() {
        let slab = unsafe { Slab::<u64>::with_chunk_capacity(16) };
        let mut slot = slab.alloc(42);

        let borrowed: &u64 = slot.borrow();
        assert_eq!(*borrowed, 42);

        let borrowed_mut: &mut u64 = slot.borrow_mut();
        *borrowed_mut = 100;
        assert_eq!(*slot, 100);

        slab.free(slot);
    }

    // =========================================================================
    // Builder tests
    // =========================================================================

    /// Default builder: capacity 256, no chunks until first alloc.
    #[test]
    fn builder_defaults() {
        let slab = unsafe { Builder::new().build::<u64>() };
        assert_eq!(slab.chunk_capacity(), 256);
        assert_eq!(slab.chunk_count(), 0);

        let slot = slab.alloc(42);
        assert_eq!(*slot, 42);
        slab.free(slot);
    }

    /// Custom chunk capacity is reflected in total capacity after growth.
    #[test]
    fn builder_custom_chunk_capacity() {
        let slab = unsafe { Builder::new().chunk_capacity(64).build::<u64>() };
        assert_eq!(slab.chunk_capacity(), 64);

        let slot = slab.alloc(1);
        assert_eq!(slab.capacity(), 64);
        slab.free(slot);
    }

    /// Pre-allocation creates exactly the requested chunks up front.
    #[test]
    fn builder_initial_chunks() {
        let slab = unsafe {
            Builder::new()
                .chunk_capacity(32)
                .initial_chunks(4)
                .build::<u64>()
        };
        assert_eq!(slab.chunk_count(), 4);
        assert_eq!(slab.capacity(), 128);
    }

    /// Zero chunk capacity must be rejected at build time.
    #[test]
    #[should_panic(expected = "chunk_capacity must be non-zero")]
    fn builder_zero_chunk_capacity_panics() {
        let _slab = unsafe { Builder::new().chunk_capacity(0).build::<u64>() };
    }
}