nexus_slab/lib.rs
//! # nexus-slab
//!
//! A high-performance slab allocator for **stable memory addresses** without heap
//! allocation overhead.
//!
//! # What Is This?
//!
//! `nexus-slab` is a **custom allocator pattern**—not a replacement for Rust's global
//! allocator, but a specialized allocator for:
//!
//! - **Stable memory addresses** - pointers remain valid until explicitly freed
//! - **Box-like semantics without Box** - RAII ownership with pre-allocated storage
//! - **Node-based data structures** - linked lists, trees, graphs with internal pointers
//! - **Predictable tail latency** - no reallocation spikes during growth
//!
//! Think of `Slot<T>` as analogous to `Box<T>`: an owning handle that provides access
//! to a value and deallocates on drop. The difference is that `Box` goes through the
//! heap allocator on every allocation, while `Slot` takes a slot from a pre-allocated
//! slab—O(1) with no syscalls.
//!
//! # Quick Start
//!
//! Use [`create_allocator!`] to create a type-safe allocator with 8-byte RAII slots:
//!
//! ```
//! use nexus_slab::create_allocator;
//!
//! // Define an allocator for your type
//! create_allocator!(order_alloc, u64);
//!
//! // Initialize at startup (bounded = fixed capacity)
//! order_alloc::init().bounded(1024).build();
//!
//! // Insert returns an RAII Slot (8 bytes)
//! let slot = order_alloc::insert(42);
//! assert_eq!(*slot, 42);
//!
//! // Slot auto-deallocates on drop
//! drop(slot);
//! assert_eq!(order_alloc::len(), 0);
//! ```
//!
//! # Performance
//!
//! All measurements in CPU cycles (see `BENCHMARKS.md` for methodology):
//!
//! | Operation | Slot API | slab crate | Notes |
//! |-----------|----------|------------|-------|
//! | GET p50 | **2** | 3 | Direct pointer, no lookup |
//! | GET_MUT p50 | **2** | 3 | Direct pointer |
//! | INSERT p50 | 8 | **4** | +4 cycles TLS overhead |
//! | REMOVE p50 | 4 | **3** | TLS overhead |
//! | REPLACE p50 | **2** | 4 | Direct pointer, no lookup |
//!
//! The TLS lookup adds a few cycles to INSERT and REMOVE (+4 and +1 at p50,
//! respectively), but access operations have zero overhead because `Slot` caches the
//! pointer. For access-heavy workloads, this is a net win. Full lifecycle cost is
//! +1 cycle versus the direct API.
//!
//! # Bounded vs Unbounded
//!
//! ```
//! use nexus_slab::create_allocator;
//!
//! create_allocator!(bounded_alloc, u64);
//! create_allocator!(unbounded_alloc, u64);
//!
//! // Bounded: fixed capacity, returns None when full
//! bounded_alloc::init().bounded(100).build();
//!
//! // Unbounded: grows by adding chunks (no copying)
//! unbounded_alloc::init()
//!     .unbounded()
//!     .chunk_capacity(4096)
//!     .capacity(10_000) // pre-allocate
//!     .build();
//! ```
//!
//! # Key-based Access
//!
//! Leak a slot to get a [`Key`] for external storage:
//!
//! ```
//! use nexus_slab::create_allocator;
//!
//! create_allocator!(my_alloc, String);
//! my_alloc::init().bounded(100).build();
//!
//! let slot = my_alloc::insert("hello".to_string());
//! let key = slot.leak(); // Slot forgotten, data stays alive
//!
//! assert!(my_alloc::contains_key(key));
//!
//! // Access via key (unsafe - caller ensures validity)
//! let value = unsafe { my_alloc::get_unchecked(key) };
//! assert_eq!(value, "hello");
//! ```
//!
//! # Architecture
//!
//! ## Two-Level Freelist
//!
//! ```text
//! slabs_head ─► Slab 2 ─► Slab 0 ─► NONE
//!                 │          │
//!                 ▼          ▼
//!              [slots]    [slots]      Slab 1 (full, not on freelist)
//! ```
//!
//! - **Slab freelist**: Which slabs have available space (O(1) lookup)
//! - **Slot freelist**: Which slots within a slab are free (per-slab, LIFO)
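//!
//! A minimal sketch of the idea (types and field names here are illustrative, not
//! the real internals): each slab keeps a LIFO list of its own free slot indices,
//! and the slab-level list links only slabs that still have room:
//!
//! ```ignore
//! struct Slab<T> {
//!     slots: Box<[SlotCell<T>]>, // per-slot storage (stamp + value)
//!     free_head: u32,            // head of this slab's slot freelist (SLOT_NONE if full)
//!     next_free_slab: u32,       // next slab with space (SLOT_NONE terminates the chain)
//! }
//!
//! struct Slabs<T> {
//!     slabs: Vec<Slab<T>>,
//!     slabs_head: u32,           // first slab with at least one free slot
//! }
//! ```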
//!
//! ## Slot Design
//!
//! Each `Slot` handle is 8 bytes (a single pointer). The VTable for slab operations
//! is stored in thread-local storage, enabling:
//!
//! - Minimal slot size (cache-friendly)
//! - RAII semantics (drop returns slot to freelist)
//! - Type safety via macro-generated modules
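//!
//! A minimal sketch of the shape this takes (illustrative only; the real `Slot` and
//! vtable wiring are generated by [`create_allocator!`]):
//!
//! ```ignore
//! pub struct Slot<T> {
//!     ptr: std::ptr::NonNull<T>, // single pointer: 8 bytes on 64-bit targets
//! }
//!
//! impl<T> std::ops::Deref for Slot<T> {
//!     type Target = T;
//!     fn deref(&self) -> &T {
//!         unsafe { self.ptr.as_ref() } // direct access, no slab lookup
//!     }
//! }
//!
//! impl<T> Drop for Slot<T> {
//!     fn drop(&mut self) {
//!         // Find this type's VTable in thread-local storage and
//!         // hand the slot back to its slab's freelist.
//!         VTABLE.with(|vt| unsafe { vt.deallocate(self.ptr.as_ptr()) });
//!     }
//! }
//! ```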
//!
//! ## Stamp Encoding
//!
//! Each slot within a slab has a `stamp: u64` that encodes state and key:
//!
//! - **Bits 63-32**: State (vacant flag + next_free index)
//! - **Bits 31-0**: Key (valid regardless of state)
//!
//! Freelists are **intra-slab only** - chains never cross slab boundaries.
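//!
//! A sketch of how such a stamp can be packed and unpacked (the exact bit split
//! below is illustrative and may differ from the crate's actual encoding):
//!
//! ```ignore
//! // Bits: [63] vacant flag, [62:32] next_free, [31:0] key (illustrative split)
//! fn pack(vacant: bool, next_free: u32, key: u32) -> u64 {
//!     (if vacant { 1u64 << 63 } else { 0 })
//!         | ((next_free as u64 & 0x7FFF_FFFF) << 32)
//!         | key as u64
//! }
//!
//! fn key_of(stamp: u64) -> u32 {
//!     stamp as u32 // bits 31-0 are the key, valid in any state
//! }
//!
//! fn is_vacant(stamp: u64) -> bool {
//!     stamp >> 63 == 1
//! }
//! ```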

#![warn(missing_docs)]

#[doc(hidden)]
pub mod bounded;
#[doc(hidden)]
pub mod shared;
#[doc(hidden)]
pub mod unbounded;

#[doc(hidden)]
pub mod macros;

// Note: create_allocator! is automatically exported at crate root via #[macro_export]

// Re-export sentinel for Key::NONE
pub use shared::SLOT_NONE;

// Re-export SlotCell for direct slot access (used by nexus-collections)
pub use shared::SlotCell;

// Re-export VTable for direct vtable access (used by nexus-collections)
pub use shared::VTable;

// =============================================================================
// Key
// =============================================================================

/// Opaque handle to an allocated slot.
///
/// A `Key` is simply an index into the slab. It does not contain a generation
/// counter or any other validation mechanism.
///
/// # Design Rationale: No Generational Indices
///
/// This slab intentionally omits generational indices (ABA protection). Why?
///
/// **The slab is dumb storage, not a source of truth.**
///
/// In real systems, your data has authoritative external identifiers:
/// - Exchange order IDs in trading systems
/// - Database primary keys in web services
/// - Session tokens in connection managers
///
/// When you receive a message referencing an entity, you must validate against
/// the authoritative identifier anyway:
///
/// ```ignore
/// fn on_fill(fill: Fill, key: Key) {
///     let Some(order) = slab.get(key) else { return };
///
///     // This check is REQUIRED regardless of generational indices
///     if order.exchange_id != fill.exchange_id {
///         panic!("order mismatch");
///     }
///
///     // Process...
/// }
/// ```
///
/// Generational indices would catch the same bug that domain validation catches,
/// but at a cost of ~8 cycles per operation. Since domain validation is
/// unavoidable, generations provide no additional safety—only overhead.
///
/// **If a stale key reaches the slab, your architecture has a bug.** The fix is
/// to correct the architecture (clear ownership, proper state machines), not to
/// add runtime checks that mask the underlying problem.
///
/// # Sentinel
///
/// [`Key::NONE`] represents an invalid/absent key, useful for optional key
/// fields without `Option` overhead.
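///
/// For example, a node type can embed a `Key` field directly and use the sentinel
/// in place of `Option<Key>` (sketch; `Node` is illustrative):
///
/// ```ignore
/// struct Node {
///     value: u64,
///     next: Key, // Key::NONE means "no next node"
/// }
///
/// let tail = Node { value: 7, next: Key::NONE };
/// assert!(tail.next.is_none());
/// ```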
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[repr(transparent)]
pub struct Key(u32);

impl Key {
    /// Sentinel value representing no key / invalid key.
    ///
    /// Equivalent to `SLOT_NONE`. Check with [`is_none`](Self::is_none).
    pub const NONE: Self = Key(SLOT_NONE);

    /// Creates a new key from an index.
    ///
    /// This is primarily for internal use by the allocator macro.
    #[doc(hidden)]
    #[inline]
    pub const fn new(index: u32) -> Self {
        Key(index)
    }

    /// Returns the slot index.
    ///
    /// For bounded slabs, this is the direct slot index.
    /// For unbounded slabs, this encodes chunk and local index via
    /// power-of-2 arithmetic.
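    ///
    /// A sketch of that decomposition, assuming a power-of-two `chunk_capacity`
    /// (illustrative; the actual math is internal to the unbounded slab):
    ///
    /// ```ignore
    /// let chunk = key.index() >> chunk_capacity.trailing_zeros(); // which chunk
    /// let local = key.index() & (chunk_capacity - 1);             // slot within that chunk
    /// ```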
    #[inline]
    pub const fn index(self) -> u32 {
        self.0
    }

    /// Returns `true` if this is the [`Key::NONE`] sentinel.
    #[inline]
    pub const fn is_none(self) -> bool {
        self.0 == SLOT_NONE
    }

    /// Returns `true` if this is a valid key (not [`Key::NONE`]).
    #[inline]
    pub const fn is_some(self) -> bool {
        self.0 != SLOT_NONE
    }

    /// Returns the raw `u32` representation.
    ///
    /// Useful for serialization or FFI.
    #[inline]
    pub const fn into_raw(self) -> u32 {
        self.0
    }

    /// Constructs a key from a raw `u32` value.
    ///
    /// No safety invariants—any `u32` is valid. However, looking up a key that was
    /// not returned by this slab's `insert` will yield `None` or unrelated data.
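    ///
    /// For example, round-tripping a key through its raw representation:
    ///
    /// ```
    /// # use nexus_slab::Key;
    /// let key = Key::from_raw(7);
    /// assert_eq!(key.into_raw(), 7);
    /// assert!(key.is_some());
    /// ```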
    #[inline]
    pub const fn from_raw(value: u32) -> Self {
        Key(value)
    }
}

impl std::fmt::Debug for Key {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if self.is_none() {
            f.write_str("Key::NONE")
        } else {
            write!(f, "Key({})", self.0)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // =========================================================================
    // Key tests
    // =========================================================================

    #[test]
    fn key_new_and_index() {
        let key = Key::new(12345);
        assert_eq!(key.index(), 12345);
    }

    #[test]
    fn key_zero_index() {
        let key = Key::new(0);
        assert_eq!(key.index(), 0);
        assert!(key.is_some());
    }

    #[test]
    fn key_max_valid_index() {
        // Max valid index is SLOT_NONE - 1
        let key = Key::new(SLOT_NONE - 1);
        assert_eq!(key.index(), SLOT_NONE - 1);
        assert!(key.is_some());
    }

    #[test]
    fn key_none_sentinel() {
        assert!(Key::NONE.is_none());
        assert!(!Key::NONE.is_some());
        assert_eq!(Key::NONE.index(), SLOT_NONE);
    }

    #[test]
    fn key_valid_is_some() {
        let key = Key::new(42);
        assert!(key.is_some());
        assert!(!key.is_none());
    }

    #[test]
    fn key_raw_roundtrip() {
        let key = Key::new(999);
        let raw = key.into_raw();
        let restored = Key::from_raw(raw);
        assert_eq!(key, restored);
        assert_eq!(restored.index(), 999);
    }

    #[test]
    fn key_none_raw_roundtrip() {
        let raw = Key::NONE.into_raw();
        assert_eq!(raw, SLOT_NONE);
        let restored = Key::from_raw(raw);
        assert!(restored.is_none());
    }

    #[test]
    fn key_debug_format() {
        let key = Key::new(42);
        let debug = format!("{:?}", key);
        assert_eq!(debug, "Key(42)");

        let none_debug = format!("{:?}", Key::NONE);
        assert_eq!(none_debug, "Key::NONE");
    }

    #[test]
    fn key_equality() {
        let a = Key::new(100);
        let b = Key::new(100);
        let c = Key::new(200);

        assert_eq!(a, b);
        assert_ne!(a, c);
        assert_eq!(Key::NONE, Key::NONE);
    }

    #[test]
    fn key_size() {
        assert_eq!(std::mem::size_of::<Key>(), 4);
    }
}