byte_slab/slab_box.rs
//! An owned allocation from a `BSlab`
//!
//! A `SlabBox` may be read or written to (exclusively) by the owner.
//! A `SlabBox` may also be converted into a `SlabArc` in order to be shared.
//! The underlying memory is freed for reuse automatically when the box has
//! been dropped.
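//!
//! ## Example
//!
//! A minimal sketch of the allocate/drop/reuse cycle, assuming a slab of
//! four 128-byte blocks:
//!
//! ```rust
//! use byte_slab::BSlab;
//!
//! static SLAB: BSlab<4, 128> = BSlab::new();
//!
//! fn main() {
//!     SLAB.init().unwrap();
//!
//!     // Take an owned allocation and write to it exclusively
//!     let mut boxed = SLAB.alloc_box().unwrap();
//!     boxed.iter_mut().for_each(|b| *b = 0xAF);
//!
//!     // Dropping the box returns the block to the slab for reuse
//!     drop(boxed);
//!     let _again = SLAB.alloc_box().unwrap();
//! }
//! ```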

use core::ops::DerefMut;
use core::ops::Deref;
use core::{
    mem::forget,
    sync::atomic::Ordering,
};

use crate::byte_slab::BSlab;
use crate::slab_arc::SlabArc;

// TODO: This doesn't HAVE to be 'static, but it makes my life easier
// if you want not-that, I guess open an issue and let me know?
/// An owned, `BSlab`-allocated chunk of bytes.
///
/// `SlabBox`es implement the `Deref` and `DerefMut` traits for access to
/// the underlying allocation.
///
/// ## Example
/// ```rust
/// use byte_slab::BSlab;
/// use std::thread::spawn;
///
/// static SLAB: BSlab<4, 128> = BSlab::new();
///
/// fn main() {
///     // Initialize the byte slab
///     SLAB.init().unwrap();
///
///     let mut box_1 = SLAB.alloc_box().unwrap();
///
///     // Fill the box with data
///     assert_eq!(box_1.len(), 128);
///     box_1.iter_mut().enumerate().for_each(|(i, x)| *x = i as u8);
///
///     // We can now send the box to another thread
///     let hdl = spawn(move || {
///         box_1.iter().enumerate().for_each(|(i, x)| assert_eq!(i as u8, *x));
///     });
///
///     hdl.join();
/// }
/// ```
pub struct SlabBox<const N: usize, const SZ: usize> {
    pub(crate) slab: &'static BSlab<N, SZ>,
    pub(crate) idx: usize,
}

impl<const N: usize, const SZ: usize> Drop for SlabBox<N, SZ> {
    fn drop(&mut self) {
        let arc = unsafe { self.slab.get_idx_unchecked(self.idx).arc };

        // Drop the refcount. An owned box implies a refcount of exactly one,
        // so the exchange from 1 to 0 must succeed.
        let zero = arc.compare_exchange(1, 0, Ordering::SeqCst, Ordering::SeqCst);
        // TODO: Make debug assert?
        assert!(zero.is_ok());

        // TODO: Why is this necessary?
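        // Return the freed block's index to the slab's free queue so the
        // block can be handed out again; spin until the enqueue succeeds.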
        if let Ok(q) = self.slab.get_q() {
            while let Err(_) = q.enqueue(self.idx) {}
        }

        // TODO: Zero on drop? As option?
    }
}

impl<const N: usize, const SZ: usize> Deref for SlabBox<N, SZ> {
    type Target = [u8; SZ];

    fn deref(&self) -> &Self::Target {
        let buf = unsafe { self.slab.get_idx_unchecked(self.idx).buf };

        unsafe { &*buf.get() }
    }
}

impl<const N: usize, const SZ: usize> DerefMut for SlabBox<N, SZ> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        let buf = unsafe { self.slab.get_idx_unchecked(self.idx).buf };

        unsafe { &mut *buf.get() }
    }
}

impl<const N: usize, const SZ: usize> SlabBox<N, SZ> {
    /// Convert the `SlabBox` into a `SlabArc`.
    ///
    /// This loses the ability to mutate the data within the allocation, but
    /// the allocation may now be shared to multiple locations using reference
    /// counting.
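    ///
    /// ## Example
    ///
    /// A minimal sketch of trading an owned box for a shared handle; this
    /// assumes that `SlabArc` implements `Clone` for reference counted
    /// sharing:
    ///
    /// ```rust
    /// use byte_slab::BSlab;
    ///
    /// static SLAB: BSlab<4, 128> = BSlab::new();
    ///
    /// fn main() {
    ///     SLAB.init().unwrap();
    ///
    ///     let mut boxed = SLAB.alloc_box().unwrap();
    ///     boxed.iter_mut().for_each(|b| *b = 42);
    ///
    ///     // Trade exclusive, mutable access for shared, read-only access
    ///     let arc_1 = boxed.into_arc();
    ///     let _arc_2 = arc_1.clone();
    /// }
    /// ```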
    pub fn into_arc(self) -> SlabArc<N, SZ> {
        let arc = unsafe { self.slab.get_idx_unchecked(self.idx).arc };

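        // An owned box implies exclusive access, so the stored refcount must
        // still be exactly one before shared handles are created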
        let refct = arc.load(Ordering::SeqCst);
        assert_eq!(1, refct);

        let new_arc = SlabArc {
            slab: self.slab,
            idx: self.idx,
        };

        // Forget the box to avoid running its destructor, which would free
        // the block while the new arc still refers to it
        forget(self);

        new_arc
    }
}

// SAFETY:
//
// SlabBoxes may be sent safely, as the underlying BSlab is Sync
unsafe impl<const N: usize, const SZ: usize> Send for SlabBox<N, SZ> {}