nexus_slot/lib.rs
//! High-performance conflation slots for latest-value-wins scenarios.
//!
//! Two variants based on reader topology:
//!
//! - [`spsc`] — Single producer, single consumer. Lowest overhead.
//! - [`spmc`] — Single producer, multiple consumers. [`SharedReader`](spmc::SharedReader) is `Clone`.
//!
//! Both use a seqlock internally: the writer increments a sequence counter,
//! copies data via word-at-a-time atomics, and increments again. Readers
//! speculatively copy and retry if the sequence changed.
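//!
//! A minimal sketch of that protocol for a single-word payload (illustrative
//! only: `TinySlot` is hypothetical and everything uses `SeqCst` for brevity,
//! whereas the real slots copy whole `Pod` values with `Relaxed` stores and
//! rely on fences for ordering):
//!
//! ```rust
//! use std::sync::atomic::{AtomicU64, Ordering};
//!
//! struct TinySlot { seq: AtomicU64, data: AtomicU64 }
//!
//! impl TinySlot {
//!     fn write(&self, value: u64) {
//!         let s = self.seq.load(Ordering::SeqCst);
//!         self.seq.store(s + 1, Ordering::SeqCst);  // odd: write in progress
//!         self.data.store(value, Ordering::SeqCst); // copy the payload
//!         self.seq.store(s + 2, Ordering::SeqCst);  // even again: published
//!     }
//!
//!     fn read(&self) -> u64 {
//!         loop {
//!             let before = self.seq.load(Ordering::SeqCst);
//!             let value = self.data.load(Ordering::SeqCst);
//!             let after = self.seq.load(Ordering::SeqCst);
//!             if before == after && before % 2 == 0 {
//!                 return value; // no write overlapped the copy
//!             }
//!             // a writer was active during the copy: retry
//!         }
//!     }
//! }
//!
//! let slot = TinySlot { seq: AtomicU64::new(0), data: AtomicU64::new(0) };
//! slot.write(42);
//! assert_eq!(slot.read(), 42);
//! ```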
//!
//! # The `Pod` Trait
//!
//! Types must implement [`Pod`] (Plain Old Data) — no heap allocations,
//! no drop glue, byte-copyable. Any `Copy` type implements `Pod` automatically.
//!
//! ```rust
//! use nexus_slot::Pod;
//!
//! #[repr(C)]
//! struct OrderBook {
//!     bids: [f64; 20],
//!     asks: [f64; 20],
//!     sequence: u64,
//! }
//!
//! // SAFETY: OrderBook is just bytes — no heap allocations
//! unsafe impl Pod for OrderBook {}
//! ```
//!
//! # Examples
//!
//! ```rust
//! #[derive(Copy, Clone, Default)]
//! struct Quote { bid: f64, ask: f64, seq: u64 }
//!
//! // SPSC — single reader
//! let (mut writer, mut reader) = nexus_slot::spsc::slot::<Quote>();
//! writer.write(Quote { bid: 100.0, ask: 100.05, seq: 1 });
//! assert_eq!(reader.read().unwrap().seq, 1);
//! ```
//!
//! ```rust
//! #[derive(Copy, Clone, Default)]
//! struct Quote { bid: f64, ask: f64, seq: u64 }
//!
//! // SPMC — multiple readers
//! let (mut writer, mut reader1) = nexus_slot::spmc::shared_slot::<Quote>();
//! let mut reader2 = reader1.clone();
//!
//! writer.write(Quote { bid: 100.0, ask: 100.05, seq: 1 });
//! assert!(reader1.read().is_some());
//! assert!(reader2.read().is_some()); // independent consumption
//! ```

pub mod spmc;
pub mod spsc;

use std::mem::{MaybeUninit, align_of, size_of};
use std::sync::atomic::{AtomicU8, AtomicUsize, Ordering};

/// Marker trait for types safe to use in a conflated slot.
///
/// # Safety
///
/// Implementor guarantees:
///
/// 1. **No heap allocations**: No `Vec`, `String`, `Box`, `Arc`, etc.
/// 2. **No owned resources**: No `File`, `TcpStream`, `Mutex`, etc.
/// 3. **No drop glue**: `std::mem::needs_drop::<Self>()` returns false.
/// 4. **Byte-copyable**: Safe to memcpy without cleanup.
///
/// Essentially: the type could be `Copy`, but chooses not to.
///
/// # Example
///
/// ```rust
/// use nexus_slot::Pod;
///
/// #[repr(C)]
/// struct OrderBook {
///     bids: [f64; 20],
///     asks: [f64; 20],
///     bid_count: u8,
///     ask_count: u8,
///     sequence: u64,
/// }
///
/// // SAFETY: Just bytes, no heap
/// unsafe impl Pod for OrderBook {}
/// ```
pub unsafe trait Pod: Sized {
    const _ASSERT_NO_DROP: () = {
        assert!(
            !std::mem::needs_drop::<Self>(),
            "Pod types must not require drop"
        );
    };
}
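
// Note: `_ASSERT_NO_DROP` only fires where it is actually evaluated for a
// concrete type. Forcing it, e.g. with `let _ = <T as Pod>::_ASSERT_NO_DROP;`
// in a slot constructor (hypothetical usage), turns a drop-glue mistake into
// a compile-time error.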

// Any `Copy` type is `Pod`: it cannot have drop glue or own heap allocations.
unsafe impl<T: Copy> Pod for T {}

/// Atomically stores `size_of::<T>()` bytes into shared memory.
///
/// Uses word-at-a-time `AtomicUsize` stores when alignment permits, falling
/// back to `AtomicU8` for trailing bytes and for types aligned below `usize`.
/// All stores use `Relaxed` ordering — caller provides fences (see the
/// write-side sketch below).
///
/// # Safety
///
/// - `dst` must be valid for `size_of::<T>()` bytes
/// - `dst` must be aligned to `align_of::<T>()`
/// - `dst` must be derived from `UnsafeCell` (shared-mutable provenance)
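///
/// # Write-side sketch
///
/// How a seqlock writer might combine this with the sequence counter and
/// fences (hypothetical field names, not the actual `spsc`/`spmc` code):
///
/// ```ignore
/// // Assumed slot layout: `seq: AtomicUsize`, `data: UnsafeCell<T>`.
/// let s = self.seq.load(Ordering::Relaxed);
/// self.seq.store(s.wrapping_add(1), Ordering::Relaxed); // odd: write in progress
/// std::sync::atomic::fence(Ordering::Release);          // seq bump visible before the copy
/// unsafe { atomic_store(self.data.get(), &value) };     // Relaxed word-by-word copy
/// self.seq.store(s.wrapping_add(2), Ordering::Release); // even again: publish
/// ```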
#[inline]
pub(crate) unsafe fn atomic_store<T: Pod>(dst: *mut T, src: &T) {
    unsafe {
        let dst = dst.cast::<u8>();
        let src = (src as *const T).cast::<u8>();
        let size = size_of::<T>();

        if align_of::<T>() >= align_of::<usize>() {
            let words = size / size_of::<usize>();
            let tail = size % size_of::<usize>();

            // Copy whole machine words first.
            for i in 0..words {
                let atom = &*(dst.add(i * size_of::<usize>()) as *const AtomicUsize);
                // Non-atomic read of the source is fine; only `dst` is the shared side.
                let val = src.add(i * size_of::<usize>()).cast::<usize>().read();
                atom.store(val, Ordering::Relaxed);
            }

            // Then any remaining tail bytes, one at a time.
            let base = words * size_of::<usize>();
            for i in 0..tail {
                let atom = &*(dst.add(base + i) as *const AtomicU8);
                atom.store(*src.add(base + i), Ordering::Relaxed);
            }
        } else {
            // `T` is aligned below `usize`: copy byte-by-byte.
            for i in 0..size {
                let atom = &*(dst.add(i) as *const AtomicU8);
                atom.store(*src.add(i), Ordering::Relaxed);
            }
        }
    }
}

/// Atomically loads `size_of::<T>()` bytes from shared memory.
///
/// Uses word-at-a-time `AtomicUsize` loads when alignment permits, falling
/// back to `AtomicU8` for trailing bytes and for types aligned below `usize`.
/// All loads use `Relaxed` ordering — caller provides fences (see the
/// read-side sketch below).
///
/// # Safety
///
/// - `src` must be valid for `size_of::<T>()` bytes
/// - `src` must be aligned to `align_of::<T>()`
/// - `src` must be derived from `UnsafeCell` (shared-mutable provenance)
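///
/// # Read-side sketch
///
/// How a seqlock reader might wrap this in the retry loop (hypothetical field
/// names, not the actual `spsc`/`spmc` code):
///
/// ```ignore
/// // Assumed slot layout: `seq: AtomicUsize`, `data: UnsafeCell<T>`.
/// loop {
///     let before = self.seq.load(Ordering::Acquire);
///     let value = unsafe { atomic_load(self.data.get()) }; // may observe a torn copy
///     std::sync::atomic::fence(Ordering::Acquire);         // copy ordered before the re-check
///     let after = self.seq.load(Ordering::Relaxed);
///     if before == after && before % 2 == 0 {
///         return Some(value); // no write overlapped the copy
///     }
///     // a writer was active: the copy may be torn, so retry
/// }
/// ```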
#[inline]
pub(crate) unsafe fn atomic_load<T: Pod>(src: *const T) -> T {
    unsafe {
        let mut buf = MaybeUninit::<T>::uninit();
        let dst = buf.as_mut_ptr().cast::<u8>();
        let src = src.cast::<u8>();
        let size = size_of::<T>();

        if align_of::<T>() >= align_of::<usize>() {
            let words = size / size_of::<usize>();
            let tail = size % size_of::<usize>();

            // Copy whole machine words first.
            for i in 0..words {
                let atom = &*(src.add(i * size_of::<usize>()) as *const AtomicUsize);
                let val = atom.load(Ordering::Relaxed);
                dst.add(i * size_of::<usize>()).cast::<usize>().write(val);
            }

            // Then any remaining tail bytes, one at a time.
            let base = words * size_of::<usize>();
            for i in 0..tail {
                let atom = &*(src.add(base + i) as *const AtomicU8);
                *dst.add(base + i) = atom.load(Ordering::Relaxed);
            }
        } else {
            // `T` is aligned below `usize`: copy byte-by-byte.
            for i in 0..size {
                let atom = &*(src.add(i) as *const AtomicU8);
                *dst.add(i) = atom.load(Ordering::Relaxed);
            }
        }

        // Every byte of `buf` was written above.
        buf.assume_init()
    }
}