// atomic_matrix/handlers.rs
//! # Matrix High-Level API Handles
//!
//! This module encapsulates the matrix raw primitives into a more ergonomic API
//! that abstracts a lot of manual and repetitive work that has to be executed in
//! order to correctly interact with the matrix, as well as some safe pre-baked
//! functions that add more extensibility over what can be done generally.
//!
//! # Abstraction Layers
//!
//! ```text
//! [ internals -> Matrix Internal Frameworks ] + iter, workers, tables, ...
//! * builds on
//! [ MatrixHandler ] + typed blocks, lifecycle, sharing
//! * escape hatch
//! [ AtomicMatrix ] + raw offsets, sizes, bytes
//! * maps onto
//! [ /dev/shm ] * physical shared memory
//! ```
//!
//! # Handler Scope
//!
//! The handler owns the SHM mapping and provides:
//! - Typed block allocation (`allocate<T>`) and deallocation (`free<T>`)
//! - Raw byte allocation for unknown types (`allocate_raw`)
//! - Zero-copy typed read and write on allocated blocks
//! - User-defined lifecycle state management (states 49+)
//! - Atomic state transitions with user-defined ordering
//! - Thread sharing via [`SharedHandler`]
//! - Escape hatches to the raw matrix and base pointer
//!
//! > Any high-level datasets and operators will be implemented in the **internals**
//! > folder.
//!
//! # Lifecycle States
//!
//! States 0–48 are reserved for internal matrix operations:
//! - `0` — `STATE_FREE`
//! - `1` — `STATE_ALLOCATED`
//! - `2` — `STATE_ACKED`
//! - `3` — `STATE_COALESCING`
//!
//! States 49 and above are available for user-defined lifecycles.
//! The matrix coalescing engine ignores any state beyond the ones described above —
//! a block in state 112 is never reclaimed automatically. Call `free()` explicitly
//! when done.
//!
//! **Note:** States 4–48 are reserved for future internal state management
//! implementations that have not been planned yet. Better safe than sorry.
//!
//! # Thread Sharing
//!
//! [`MatrixHandler`] owns the mmap and is not `Clone`. Use `share()` to produce a
//! [`SharedHandler`] that can be sent to other threads. The original handler must
//! outlive all shared handles derived from it.

56use std::sync::atomic::Ordering;
57use crate::matrix::core::{ AtomicMatrix, BlockHeader, RelativePtr };
58use memmap2::MmapMut;
59
/// Minimum state value available for user-defined lifecycles.
/// States 0–48 are reserved for internal matrix and future framework use.
/// Currently only 0–3 are assigned — the remaining range (4–48) is reserved
/// for future internal lifecycle states without breaking user code.
///
/// Enforced by `set_state` and `transition_state`, which reject any target
/// state below this value with [`HandlerError::ReservedStatus`].
pub const USER_STATE_MIN: u32 = 49;
65
/// Errors produced by [`MatrixHandler`] and [`SharedHandler`] operations.
///
/// Implements [`std::fmt::Display`] and [`std::error::Error`] so it composes
/// with `?`, `Box<dyn Error>`, and error-chain inspection via `source()`.
#[derive(Debug)]
pub enum HandlerError {
    /// The allocator could not find a free block. Either OOM or contention.
    AllocationFailed(String),
    /// Caller attempted to set or transition to a reserved internal state (0–48).
    ReservedStatus(u32),
    /// Atomic state transition failed — block was not in the expected state.
    /// Contains the actual state found.
    TransitionFailed(u32),
    /// The block offset is outside the valid segment range.
    InvalidOffset(u32),
    /// Failed to unlink the main SHM file for unpredicted reasons.
    SeppukuFailed { path: String, reason: std::io::Error },
}

impl std::fmt::Display for HandlerError {
    /// Human-readable rendering for logs and wrapped error reports.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            HandlerError::AllocationFailed(msg) => {
                write!(f, "allocation failed: {}", msg)
            }
            HandlerError::ReservedStatus(state) => {
                write!(f, "attempted to use reserved internal state {}", state)
            }
            HandlerError::TransitionFailed(actual) => {
                write!(f, "state transition failed: block was in state {}", actual)
            }
            HandlerError::InvalidOffset(offset) => {
                write!(f, "offset {} is outside the valid segment range", offset)
            }
            HandlerError::SeppukuFailed { path, reason } => {
                write!(f, "failed to unlink SHM file {}: {}", path, reason)
            }
        }
    }
}

impl std::error::Error for HandlerError {
    /// Exposes the underlying I/O error of `SeppukuFailed` so callers can
    /// walk the error-source chain; all other variants are leaf errors.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            HandlerError::SeppukuFailed { reason, .. } => Some(reason),
            _ => None,
        }
    }
}
81
/// A typed handle to an allocated block in the matrix.
///
/// Since the matrix operates entirely on raw pointer addresses and internal
/// types, `Block<T>` is provided at the API level to wrap allocations into
/// a typed, ergonomic handle. The raw [`RelativePtr`] returned by the matrix
/// is reinterpreted as `T` and wrapped in `Block<T>` to maintain type
/// information at the surface layer. All pointer arithmetic is delegated to
/// the inner [`RelativePtr<T>`], referred to as **pointer**.
///
/// # Validity
///
/// A `Block<T>` is valid as long as:
/// - The originating [`MatrixHandler`] (and its mmap) is alive.
/// - The block has not been freed via `handler.free()`.
///
/// Blocks carry no lifetime parameter. The caller is responsible for not using
/// a block after freeing it or after the handler is dropped.
#[derive(Debug)]
pub struct Block<T> {
    /// Payload offset from SHM base — points past the `BlockHeader`.
    // NOTE(review): `T` is presumably phantom inside `RelativePtr<T>` (no value
    // of `T` is stored here) — confirm against `matrix::core::RelativePtr`.
    pub pointer: RelativePtr<T>,
}
104
/// A lightweight reflection of the original handler that can be safely sent
/// across threads.
///
/// Produced by [`MatrixHandler::share()`]. Holds raw pointers into the SHM
/// segment. The originating [`MatrixHandler`] **must** outlive all
/// `SharedHandler` instances derived from it.
///
/// `SharedHandler` exposes the same allocation, I/O, lifecycle, and escape
/// hatch API as [`MatrixHandler`] via the [`HandlerFunctions`] trait —
/// it does not own the mmap.
pub struct SharedHandler {
    // Address of the shared `AtomicMatrix`, stored as usize (not a reference)
    // so the handle carries no lifetime and can be moved across threads.
    matrix_addr: usize,
    // Address of the SHM mapping base in this process, as usize for the same reason.
    base_addr: usize,
    // Total segment size in bytes, copied from the owning handler at `share()` time.
    segment_size: u32,
    // Offset of the first data block, copied from the owning handler.
    first_block_offset: u32,
}
121
/// The primary interface for interacting with an [`AtomicMatrix`].
///
/// Owns the SHM mapping. Cannot be cloned — use [`share()`] to produce a
/// [`SharedHandler`] for other threads.
///
/// See module documentation for the full abstraction layer diagram.
pub struct MatrixHandler {
    // Exclusive reference to the matrix header living inside the mapping.
    // `'static` is a promise upheld by keeping `mmap` alive in the same struct.
    matrix: &'static mut AtomicMatrix,
    // Owned memory mapping — dropping the handler unmaps the segment.
    mmap: MmapMut,
    // Offset of the first data block; exposed through `HandlerFunctions`.
    first_block_offset: u32,
}
133
134impl<T> Block<T> {
135 /// Constructs a `Block<T>` from a raw payload offset.
136 ///
137 /// The offset must point past the [`BlockHeader`] (i.e. `header_offset + 32`).
138 /// Type `T` is introduced here — the matrix has no knowledge of it.
139 pub(crate) fn from_offset(offset: u32) -> Self {
140 Self { pointer: RelativePtr::new(offset) }
141 }
142}
143
144impl MatrixHandler {
145 /// Internal constructor. Called exclusively by [`AtomicMatrix::bootstrap`].
146 pub(crate) fn new(
147 matrix: &'static mut AtomicMatrix,
148 mmap: MmapMut,
149 first_block_offset: u32
150 ) -> Self {
151 Self { matrix, mmap, first_block_offset }
152 }
153
154 /// Produces a lightweight [`SharedHandler`] that can be sent to other threads.
155 ///
156 /// [`SharedHandler`] holds raw pointers into the SHM segment. This handler
157 /// **must** outlive all shared handles derived from it — Rust cannot enforce
158 /// this lifetime relationship automatically because `SharedHandler` uses raw
159 /// pointers. Violating this contract is undefined behaviour.
160 pub fn share(&self) -> SharedHandler {
161 SharedHandler {
162 matrix_addr: self.matrix as *const AtomicMatrix as usize,
163 base_addr: self.base_ptr() as usize,
164 segment_size: self.segment_size(),
165 first_block_offset: self.first_block_offset,
166 }
167 }
168
169 /// Removes the SHM file from the system.
170 ///
171 /// Should be called before the application exits to prevent the SHM file
172 /// from persisting in `/dev/shm/` across runs. This is an explicit, opt-
173 /// in cleanup - if omitted, the file survives until the system reboots
174 /// or it's manually removed.
175 ///
176 /// Existing mapping remains valid until the handlers are dropped; this
177 /// only removes the filesystem entry, preventing new attachments.
178 pub fn die(&self) -> Result<(), HandlerError> {
179 let id = self.matrix().id;
180 let path = format!("/dev/shm/{}", id);
181
182 match std::fs::remove_file(&path) {
183 Ok(_) => {},
184 Err(e) if e.kind() == std::io::ErrorKind::NotFound => {},
185 Err(e) => {
186 return Err(HandlerError::SeppukuFailed {
187 path,
188 reason: e,
189 });
190 },
191 };
192
193 Ok(())
194 }
195}
196
// Primitive accessors backing the trait's default methods — the owning handler
// answers directly from its mmap and stored fields.
impl HandlerFunctions for MatrixHandler {
    fn base_ptr(&self) -> *const u8 { self.mmap.as_ptr() }
    fn matrix(&self) -> &AtomicMatrix { self.matrix }
    fn first_block_offset(&self) -> u32 { self.first_block_offset }
    // NOTE(review): `mmap.len()` is usize — the `as u32` cast silently truncates
    // for segments >= 4 GiB. Offsets are u32 throughout, so this is presumably
    // bounded by construction; confirm in `AtomicMatrix::bootstrap`.
    fn segment_size(&self) -> u32 { self.mmap.len() as u32 }
}
203
// Safety: AtomicMatrix uses only atomic operations internally.
// Caller guarantees the originating MatrixHandler outlives all SharedHandlers.
// The raw addresses inside SharedHandler are read-only bookkeeping; all
// mutation of shared memory goes through atomics in AtomicMatrix.
unsafe impl Send for SharedHandler {}
unsafe impl Sync for SharedHandler {}
208
// Primitive accessors backing the trait's default methods — the shared handle
// answers from the addresses captured in `MatrixHandler::share()`.
impl HandlerFunctions for SharedHandler {
    fn base_ptr(&self) -> *const u8 { self.base_addr as *const u8 }
    fn matrix(&self) -> &AtomicMatrix {
        // SAFETY: `matrix_addr` was derived from a live `&AtomicMatrix` in
        // `share()`; the caller contract (originating handler outlives this
        // handle) keeps that mapping — and therefore this address — valid.
        unsafe { &*(self.matrix_addr as *const AtomicMatrix) }
    }
    fn first_block_offset(&self) -> u32 { self.first_block_offset }
    fn segment_size(&self) -> u32 { self.segment_size }
}
217
/// Defines the core interaction surface for any matrix handle.
///
/// Implemented by both [`MatrixHandler`] and [`SharedHandler`]. All matrix
/// operations — allocation, I/O, lifecycle management, and escape hatches —
/// are provided through this trait so that framework code in `internals` can
/// operate generically over either handle type via `impl HandlerFunctions`.
///
/// # Implementing this trait
///
/// Implementors must provide four primitive accessors:
/// - [`base_ptr()`] — the SHM base pointer for this process's mapping
/// - [`matrix()`] — reference to the underlying [`AtomicMatrix`]
/// - [`first_block_offset()`] — offset of the first data block in the segment
/// - [`segment_size()`] — total segment size in bytes
///
/// All other methods have default implementations built on these four.
pub trait HandlerFunctions {
    /// Returns the SHM base pointer for this process's mapping.
    fn base_ptr(&self) -> *const u8;

    /// Returns a reference to the underlying [`AtomicMatrix`].
    fn matrix(&self) -> &AtomicMatrix;

    /// Returns the offset of the first data block in the segment.
    /// Used by `internals` iterators as the physical chain walk start point.
    fn first_block_offset(&self) -> u32;

    /// Returns the total segment size in bytes.
    fn segment_size(&self) -> u32;

    /// Allocates a block sized to hold `T`.
    ///
    /// Size is computed from `size_of::<T>()` and rounded up to the 16-byte
    /// minimum payload if necessary. The matrix remains typeless — type
    /// information exists only in the returned [`Block<T>`].
    ///
    /// # Errors
    /// Returns [`HandlerError::AllocationFailed`] if the matrix is out of
    /// memory or under contention after 512 retries.
    fn allocate<T>(&self) -> Result<Block<T>, HandlerError> {
        // NOTE(review): `as u32` truncates for types >= 4 GiB — practically
        // unreachable, but a `try_into` would make the bound explicit.
        // The 16-byte minimum rounding is presumably done inside
        // `AtomicMatrix::allocate`; it is not visible here — confirm.
        let size = std::mem::size_of::<T>() as u32;
        self.matrix()
            .allocate(self.base_ptr(), size)
            .map(|ptr| Block::from_offset(ptr.offset()))
            .map_err(HandlerError::AllocationFailed)
    }

    /// Allocates a raw byte block of the given size.
    ///
    /// Returns a [`RelativePtr<u8>`] directly — use when the payload type is
    /// not known at allocation time, or when building `internals` framework
    /// primitives that operate on raw offsets. The caller is responsible for
    /// all casting and interpretation of the memory.
    ///
    /// # Errors
    /// Returns [`HandlerError::AllocationFailed`] if OOM or contention.
    fn allocate_raw(&self, size: u32) -> Result<RelativePtr<u8>, HandlerError> {
        self.matrix()
            .allocate(self.base_ptr(), size)
            .map_err(HandlerError::AllocationFailed)
    }

    /// Writes a value of type `T` into an allocated block.
    ///
    /// # Safety
    /// - `block` must be in `STATE_ALLOCATED`.
    /// - `block` must have been allocated with sufficient size to hold `T`.
    ///   This is guaranteed if the block was produced by [`allocate::<T>()`].
    /// - No other thread may be reading or writing this block concurrently.
    ///   The caller is responsible for all synchronization beyond the atomic
    ///   state transitions provided by [`set_state`] and [`transition_state`].
    unsafe fn write<T>(&self, block: &mut Block<T>, value: T) {
        // SAFETY: forwarded caller contract — block is live, sized for T,
        // and not aliased by concurrent readers or writers (see above).
        unsafe { block.pointer.write(self.base_ptr(), value) }
    }

    /// Reads a shared reference to `T` from an allocated block.
    ///
    /// # Safety
    /// - `block` must be in `STATE_ALLOCATED`.
    /// - A value of type `T` must have been previously written via [`write`].
    /// - The returned reference is valid as long as the SHM mapping is alive
    ///   and the block has not been freed. It is **not** tied to the lifetime
    ///   of the [`Block<T>`] handle — the caller must ensure the block is not
    ///   freed while the reference is in use.
    /// - No other thread may be writing to this block concurrently.
    unsafe fn read<'a, T>(&self, block: &Block<T>) -> &'a T {
        // SAFETY: forwarded caller contract. Note the deliberately
        // unconstrained lifetime 'a — the reference outlives the borrow of
        // self and block; validity is the caller's responsibility (see above).
        unsafe { block.pointer.resolve(self.base_ptr()) }
    }

    /// Reads a mutable reference to `T` from an allocated block.
    ///
    /// # Safety
    /// - `block` must be in `STATE_ALLOCATED`.
    /// - A value of type `T` must have been previously written via [`write`].
    /// - The returned reference is valid as long as the SHM mapping is alive
    ///   and the block has not been freed. It is **not** tied to the lifetime
    ///   of the [`Block<T>`] handle — the caller must ensure the block is not
    ///   freed while the reference is in use.
    /// - No other thread may be reading or writing this block concurrently.
    ///   Two simultaneous `read_mut` calls on the same block is undefined behaviour.
    unsafe fn read_mut<'a, T>(&self, block: &Block<T>) -> &'a mut T {
        // SAFETY: forwarded caller contract — exclusive access required; the
        // unconstrained lifetime 'a makes the caller solely responsible for
        // not holding this reference past free() or handler drop.
        unsafe { block.pointer.resolve_mut(self.base_ptr()) }
    }

    /// Frees a typed block.
    ///
    /// Marks the block `STATE_ACKED` and immediately triggers coalescing.
    /// The block is invalid after this call — using it in any way is
    /// undefined behaviour.
    fn free<T>(&self, block: Block<T>) {
        // `pointer` holds the payload offset; the header sits 32 bytes before
        // it (`header_offset + 32`, per `Block::from_offset`). NOTE(review):
        // the literal 32 is the BlockHeader size — would be safer as a shared
        // constant next to `BlockHeader` in `matrix::core`.
        let header_ptr = RelativePtr::<BlockHeader>::new(block.pointer.offset() - 32);
        self.matrix().ack(&header_ptr, self.base_ptr());
    }

    /// Frees a block by its header offset directly.
    ///
    /// Used by `internals` framework code that operates on raw offsets
    /// rather than typed [`Block<T>`] handles. `header_offset` must point
    /// to a valid [`BlockHeader`] within the segment.
    fn free_at(&self, header_offset: u32) {
        let header_ptr = RelativePtr::<BlockHeader>::new(header_offset);
        self.matrix().ack(&header_ptr, self.base_ptr());
    }

    /// Sets a user-defined lifecycle state on a block.
    ///
    /// The state must be >= [`USER_STATE_MIN`] (49). Attempting to set an
    /// internal state (0–48) returns [`HandlerError::ReservedStatus`].
    ///
    /// User states are invisible to the coalescing engine — a block in any
    /// user state will never be automatically reclaimed. Call [`free`]
    /// explicitly when the lifecycle is complete.
    ///
    /// # Errors
    /// Returns [`HandlerError::ReservedStatus`] if `state < USER_STATE_MIN`.
    fn set_state<T>(&self, block: &Block<T>, state: u32) -> Result<(), HandlerError> {
        // Guard the reserved range (0–48) before touching shared memory.
        if state < USER_STATE_MIN {
            return Err(HandlerError::ReservedStatus(state));
        }
        // SAFETY: relies on `block` referring to a live allocation so the
        // header it resolves to is valid — caller contract per the Validity
        // section on `Block<T>`. The store itself is atomic (Release).
        unsafe {
            block.pointer
                .resolve_header_mut(self.base_ptr())
                .state
                .store(state, Ordering::Release);
        }
        Ok(())
    }

    /// Returns the current state of a block.
    ///
    /// `order` controls the memory ordering of the atomic load. Use
    /// `Ordering::Acquire` for the general case. Use `Ordering::Relaxed`
    /// only if you do not need to synchronize with writes to the block's
    /// payload.
    fn get_state<T>(&self, block: &Block<T>, order: Ordering) -> u32 {
        // SAFETY: relies on `block` referring to a live allocation so the
        // header it resolves to is valid — caller contract per `Block<T>`.
        unsafe {
            block.pointer
                .resolve_header(self.base_ptr())
                .state
                .load(order)
        }
    }

    /// Atomically transitions a block from one state to another.
    ///
    /// Succeeds only if the block is currently in `expected`. `next` must
    /// be >= [`USER_STATE_MIN`] — transitioning into an internal state is
    /// not permitted.
    ///
    /// `success_order` controls the memory ordering on success. Use
    /// `Ordering::AcqRel` for the general case. The failure ordering is
    /// always `Ordering::Relaxed`.
    ///
    /// Returns `Ok(expected)` on success — the value that was replaced.
    ///
    /// # Errors
    /// - [`HandlerError::ReservedStatus`] if `next < USER_STATE_MIN`.
    /// - [`HandlerError::TransitionFailed(actual)`] if the block was not
    ///   in `expected` — `actual` is the state that was observed instead.
    fn transition_state<T>(
        &self,
        block: &Block<T>,
        expected: u32,
        next: u32,
        success_order: Ordering
    ) -> Result<u32, HandlerError> {
        // Guard the reserved range; note `expected` is NOT validated — a CAS
        // out of an internal state is permitted, only the target is restricted.
        if next < USER_STATE_MIN {
            return Err(HandlerError::ReservedStatus(next));
        }
        // SAFETY: relies on `block` referring to a live allocation so the
        // header it resolves to is valid — caller contract per `Block<T>`.
        // The CAS itself is atomic; failure carries the observed state.
        unsafe {
            block.pointer
                .resolve_header_mut(self.base_ptr())
                .state
                .compare_exchange(expected, next, success_order, Ordering::Relaxed)
                .map_err(HandlerError::TransitionFailed)
        }
    }

    /// Returns a raw reference to the underlying [`AtomicMatrix`].
    ///
    /// For `internals` framework authors who need allocator primitives
    /// directly. Bypasses all handler abstractions — use with care.
    fn raw_matrix(&self) -> &AtomicMatrix {
        self.matrix()
    }

    /// Returns the raw SHM base pointer for this process's mapping.
    ///
    /// Use alongside [`raw_matrix()`] when building `internals` that need
    /// direct access to block memory beyond what the typed API provides.
    fn raw_base_ptr(&self) -> *const u8 {
        self.base_ptr()
    }
}
433}