spsc_ringbuf_core/
ringbuf_ref.rs

1//! Fixed capacity Single Producer Single Consumer Ringbuffer with no mutex protection.
2//! Implementation based on https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/
3
4use core::mem::MaybeUninit;
5use core::{cell::Cell, cell::UnsafeCell};
6
/// Internal index type encapsulating the masking and wrapping operations
/// for a ring buffer of capacity N. We deliberately use u32 so an index
/// occupies 4 bytes; the compile-time asserts below bound the supported
/// capacity accordingly.
///
/// Invariant: for non-power-of-two N the stored value is kept in
/// [0, 2*N-1]. For power-of-two N the natural u32 overflow wrap is
/// congruent to the 2*N wrap, so the raw value is allowed to free-run.
#[derive(Eq, PartialEq)]
pub struct Index<const RANGE: usize> {
    cell: Cell<u32>,
}

/// Error codes returned by the ring buffer operations.
#[derive(Debug)]
pub enum ErrCode {
    BufFull,
    BufEmpty,
}

impl<const N: usize> Index<N> {

    // Compile-time guards, evaluated when `new()` is instantiated:
    // - N == 0 would compile but underflow on the `2 * n - 1` style
    //   expressions at runtime, so reject it here as well (RingBufRef has
    //   its own guard, but Index is usable on its own).
    // - N must leave headroom for the [0, 2*N-1] wrap arithmetic in u32.
    const OK: () = {
        assert!(N > 0, "Index capacity must be larger than 0!");
        assert!(
            N < (u32::MAX / 2) as usize,
            "Ringbuf capacity must be < u32::MAX/2"
        );
    };

    /// Increment by one, keeping the stored value wrapped in [0, 2*N-1]
    /// (non-power-of-two N only; power-of-two N free-runs).
    #[inline(always)]
    pub fn wrap_inc(&self) {
        let n = N as u32;
        // Wrapping increment by 1 first
        let mut val = self.cell.get().wrapping_add(1);

        // For power-of-two N the natural u32 overflow already wraps
        // congruently to 2*N, so the manual wrap below is only needed
        // for non-power-of-two N.
        if !n.is_power_of_two() && val >= 2 * n {
            // val = val - 2*N
            val = val.wrapping_sub(2 * n);
        }
        self.cell.set(val);
    }

    /// Distance from `val` to `self` modulo 2*N, i.e. the number of
    /// wrap_inc steps separating the two indices.
    #[inline(always)]
    pub fn wrap_dist(&self, val: &Index<N>) -> u32 {
        let n = N as u32;
        let raw = self.cell.get().wrapping_sub(val.get());
        // If N is power of two, the natural two's-complement wrap of the
        // subtraction is already correct.
        if n.is_power_of_two() {
            return raw;
        }
        // Otherwise re-normalize into [0, 2*N-1]. Assumes the raw
        // difference corresponds to a value in [-2*N, 4*N-1], which holds
        // because both operands are Index values maintained in [0, 2*N-1].
        // Not asserting here since we only take Index, which cannot be
        // incremented beyond 2*N-1.
        if (raw as i32) < 0 {
            raw.wrapping_add(2 * n)
        } else if raw >= 2 * n {
            raw.wrapping_sub(2 * n)
        } else {
            raw
        }
    }

    /// Mask the stored value into the storage range [0, N-1].
    #[inline(always)]
    pub fn mask(&self) -> u32 {
        let n = N as u32;
        let val = self.cell.get();
        if n.is_power_of_two() {
            val & (n - 1)
        } else if val >= n {
            // Stored value is in [0, 2*N-1]; fold the upper half down.
            val - n
        } else {
            val
        }
    }

    /// Raw (unmasked) index value.
    #[inline(always)]
    pub fn get(&self) -> u32 {
        self.cell.get()
    }

    /// Const constructor. Evaluating OK here forces the compile-time
    /// capacity checks for every instantiated N.
    #[allow(clippy::let_unit_value)]
    #[inline(always)]
    pub const fn new(val: u32) -> Self {
        let _: () = Index::<N>::OK;
        Index {
            cell: Cell::new(val),
        }
    }
}
92
/// A ring buffer of capacity N holding items of type T.
/// Non power-of-two N is supported but less efficient.
///
/// Single Producer Single Consumer (SPSC): exactly one side calls
/// `writer_front`/`commit`/`push`, the other calls
/// `reader_front`/`reader_front_mut`/`pop`.
pub struct RingBufRef<T, const N: usize> {
    // this is from where we dequeue items
    rd_idx: Index<N>,
    //  where we enqueue new items
    wr_idx: Index<N>,
    // this is the backend array; slots hold MaybeUninit and are only
    // initialized once written through push()/writer_front()
    buffer_ucell: [UnsafeCell<MaybeUninit<T>>; N],
}
// Declare this as thread safe due to the owner protection
// sequence (producer -> consumer, consumer -> owner).
// NOTE(review): the indices are plain `Cell<u32>` — non-atomic loads and
// stores with no ordering/visibility guarantees. For cross-core SPSC use
// this relies entirely on external synchronization or target-specific
// memory behavior; confirm, or switch the indices to atomics.
unsafe impl<T, const N: usize> Sync for RingBufRef<T, N> {}
106
impl<T, const N: usize> RingBufRef<T, N> {
    // Need to prevent N = 0 instances since the code would compile but crash
    // on the 2*N-1 usize subtracts
    // https://users.rust-lang.org/t/how-do-i-static-assert-a-property-of-a-generic-u32-parameter/76307/2
    const OK: () = assert!(N > 0, "Ringbuf capacity must be larger than 0!");

    // Per-slot initializer: an uninitialized cell (no T is constructed here).
    const INIT_U: UnsafeCell<MaybeUninit<T>> = UnsafeCell::new(MaybeUninit::uninit());
    // Const initial value, usable to build static instances (see tests).
    pub const INIT_0: RingBufRef<T, N> = Self::new();

    /// Const constructor: both indices at 0, i.e. an empty buffer.
    #[allow(clippy::let_unit_value)]
    #[inline]
    pub const fn new() -> Self {
        // This dummy statement evaluates the assert to prevent 0 sized RingBufRef
        // from being compiled.
        let _: () = RingBufRef::<T, N>::OK;
        RingBufRef {
            rd_idx: Index::new(0),
            wr_idx: Index::new(0),
            buffer_ucell: [Self::INIT_U; N],
        }
    }

    /// True when no items are queued (read index has caught up with write).
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.rd_idx == self.wr_idx
    }

    /// Number of items currently queued, in [0, N].
    #[inline(always)]
    pub fn len(&self) -> u32 {
        // returns the number of elements between read and write pointer
        // use wrapping sub
        self.wr_idx.wrap_dist(&self.rd_idx)
    }
    /// True when all N slots are occupied.
    #[inline(always)]
    pub fn is_full(&self) -> bool {
        self.len() as usize == N
    }

    /// Fixed capacity N of the buffer.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        N
    }

    /// Returns the write index location as mutable reference.
    /// The Option<> return enforces handling of the "buffer full" case:
    /// if the user does not check for success, the compiler
    /// generates warnings.
    /// Calling stage twice without commit in between results in the same
    /// location written! We could add some protection by remembering this
    /// during alloc but this will incur runtime cost
    ///
    /// NOTE(review): this hands out `&mut T` to a slot that may still be
    /// an uninitialized `MaybeUninit<T>`. Such a reference is only sound
    /// to write through, never to read before a write; returning
    /// `&mut MaybeUninit<T>` would make that explicit — confirm intended
    /// usage with callers.
    #[inline(always)]
    pub fn writer_front(&self) -> Option<&mut T> {
        if !self.is_full() {
            // buffer_ucell contains UnsafeCell<MaybeUninit<T>>
            // UnsafeCell's get is defined as "fn get(&self) -> *mut T"
            let m: *mut MaybeUninit<T> = self.buffer_ucell[self.wr_idx.mask() as usize].get();
            let t: &mut T = unsafe { &mut *(m as *mut T) };
            Some(t)
        } else {
            None
        }
    }
    /// Commit whatever is at the write index location by moving the write
    /// index forward one slot. Fails with BufFull when all N slots are used.
    #[inline(always)]
    pub fn commit(&self) -> Result<(), ErrCode> {
        if !self.is_full() {
            self.wr_idx.wrap_inc();
            Ok(())
        } else {
            Err(ErrCode::BufFull)
        }
    }

    /// Alloc and commit in one step by providing the value T to be written.
    /// val's ownership is moved. (Question: it seems if T implements Clone,
    /// compiler copies T)
    #[inline(always)]
    pub fn push(&self, val: T) -> Result<(), ErrCode> {
        if !self.is_full() {
            // buffer_ucell contains UnsafeCell<MaybeUninit<T>>
            // UnsafeCell's get is defined as "fn get(&self) -> *mut T"
            // * (* mut T) deference allows the MaybeUninit.write() to be called to
            // Set the value. write() does NOT drop any previous content,
            // which is correct here since the slot is logically empty.
            unsafe {
                (*self.buffer_ucell[self.wr_idx.mask() as usize].get()).write(val);
            }
            self.wr_idx.wrap_inc();
            Ok(())
        } else {
            Err(ErrCode::BufEmpty)
        }
    }
    /// Returns an Option of reference to location at read index.
    /// None when the buffer is empty.
    #[inline(always)]
    pub fn reader_front(&self) -> Option<&T> {
        if self.is_empty() {
            None
        } else {
            let x: *mut MaybeUninit<T> = self.buffer_ucell[self.rd_idx.mask() as usize].get();
            let t: &T = unsafe { &*(x as *const T) };
            Some(t)
        }
    }
    /// Returns an Option of mutable reference to location at read index.
    /// None when the buffer is empty.
    #[inline(always)]
    pub fn reader_front_mut(&self) -> Option<&mut T> {
        if self.is_empty() {
            None
        } else {
            let x: *mut MaybeUninit<T> = self.buffer_ucell[self.rd_idx.mask() as usize].get();
            let t: &mut T = unsafe { &mut *(x as *mut T) };
            Some(t)
        }
    }

    /// Consume the item at rd_idx by advancing the read index.
    /// NOTE(review): the slot is not dropped in place, so a T with a
    /// non-trivial Drop impl is leaked here — confirm T is expected to be
    /// plain data (the visible tests only use u32/plain structs).
    #[inline(always)]
    pub fn pop(&self) -> Result<(), ErrCode> {
        if !self.is_empty() {
            self.rd_idx.wrap_inc();
            Ok(())
        } else {
            Err(ErrCode::BufEmpty)
        }
    }
}
233
#[cfg(test)]
mod tests {
    use super::*;

    impl<T, const N: usize> RingBufRef<T, N> {

        // Test only method for testing wraparound
        // at extremes: force both indices to an arbitrary raw value.
        pub fn test_init_wr_rd(&self, val: u32) {
            self.wr_idx.cell.set(val);
            self.rd_idx.cell.set(val);
        }
    }

    // Test for static allocation
    // A 4-deep ring buffer
    const CMD_Q_DEPTH: usize = 4;
    pub struct SomeStruct {
        id: u32,
    }
    pub struct Interface {
        cmd_q: RingBufRef<SomeStruct, CMD_Q_DEPTH>,
    }

    // Set up 2 entries of interfaces
    const NUM_INTFS: usize = 2;
    // Init value, suppress the clippy warning for declaring const interior mutable "A “non-constant”
    // const item is a legacy way to supply an initialized value to downstream"
    #[allow(clippy::declare_interior_mutable_const)]
    const INTF_INIT: Interface = Interface {
        cmd_q: RingBufRef::INIT_0,
    };

    // Final instantiation as global
    static SHARED_INTF: [Interface; NUM_INTFS] = [INTF_INIT; NUM_INTFS];

    /// Generic exercise of one buffer: `iter` rounds of interleaved
    /// write/commit/read/pop (drives the indices through wraparound),
    /// then fill to full, verify full rejection, drain half, refill half.
    fn test_operations<const N: usize>(rbufr1: RingBufRef<u32, N>, iter: usize) {

        for i in 0..iter {
            let loc = rbufr1.writer_front();

            if let Some(v) = loc {
                *v = i as u32;
            }
            assert!(rbufr1.commit().is_ok());
            if let Some(v) = rbufr1.reader_front() {
                assert!(*v == i as u32);
                assert!(rbufr1.pop().is_ok());
            }
        }

        // Buffer drained by the loop above; reader must see empty.
        assert!(rbufr1.reader_front().is_none());

        // Fill all N slots.
        for _ in 0..N {
            assert!(rbufr1.writer_front().is_some());
            assert!(rbufr1.commit().is_ok());
        }
        // should fail
        assert!(rbufr1.writer_front().is_none());
        assert!(rbufr1.commit().is_err());

        //println!("wr {} rd {}, len {}",
        //    rbufr1.wr_idx.cell.get(),
        //    rbufr1.rd_idx.cell.get(),
        //    rbufr1.len());

        // pop half
        for _ in 0..N / 2 {
            assert!(rbufr1.pop().is_ok());
        }
        //println!("wr {} rd {}, len {}",
        //    rbufr1.wr_idx.cell.get(),
        //    rbufr1.rd_idx.cell.get(),
        //    rbufr1.len());

        // alloc half
        for _ in 0..N / 2 {
            assert!(rbufr1.writer_front().is_some());
            assert!(rbufr1.commit().is_ok());
        }
    }

    /// Struct layout check: two 4-byte indices plus the backing array,
    /// with no padding for these element types.
    #[test]
    fn validate_size() {
        // 4 bytes of wr_idx, 4 bytes of rd_idx, 16*4 for buffer
        assert!(core::mem::size_of::<RingBufRef<u32, 16>>() == (4 + 4 + 16*4));

        // 4 bytes of wr_idx, 4 bytes of rd_idx, 16*2 for buffer
        assert!(core::mem::size_of::<RingBufRef<u16, 16>>() == (4 + 4 + 16*2));

        // 4 bytes of wr_idx, 4 bytes of rd_idx, 32*1 for buffer
        assert!(core::mem::size_of::<RingBufRef<u8, 32>>() == (4 + 4 + 32));
    }

    #[test]
    fn power_of_two_len() {
        let rbufr1: RingBufRef<u32, 16> = RingBufRef::new();
        test_operations::<16>(rbufr1, 2 * 16 - 1 + 16 / 2);
    }
    // Smallest multi-slot case.
    #[test]
    fn ping_pong() {
        let rbufr1: RingBufRef<u32, 2> = RingBufRef::new();
        test_operations::<2>(rbufr1, 2 * 2 - 1 + 2 / 2);
    }
    // Degenerate single-slot buffer.
    #[test]
    fn single() {
        let rbufr1: RingBufRef<u32, 1> = RingBufRef::new();
        test_operations::<1>(rbufr1, 7);
    }
    #[test]
    fn non_power_of_two_len() {
        let rbufr1: RingBufRef<u32, 15> = RingBufRef::new();
        test_operations::<15>(rbufr1, 2 * 15 - 1 + 15 / 2);
    }
    #[test]
    fn power_of_two_len_wrap() {
        // Test wr and rd near wraparound of u32
        let rbufr1: RingBufRef<u32, {(u16::MAX) as usize + 1}> = RingBufRef::new();
        // Caution - direct initialization must guarantee the value is valid
        // i.e. any value if N is power of 2; 
        // otherwise must be [0, 2*N-1]
        rbufr1.test_init_wr_rd(u32::MAX-2);
        test_operations::<{(u16::MAX) as usize + 1}>(rbufr1, 32768);
    }
    // Demonstrates the intended static-allocation usage pattern.
    #[test]
    fn static_instance_example() {
        let intf: &'static Interface = &SHARED_INTF[0];

        let alloc_res = intf.cmd_q.writer_front();

        if let Some(cmd) = alloc_res {
            cmd.id = 42;

            assert!(intf.cmd_q.commit().is_ok());
        }

        let cmd = intf.cmd_q.reader_front();

        assert!(cmd.is_some());
        assert!(cmd.unwrap().id == 42);
    }
    //#[test]
    //fn zero_len() {
    //    test_operations::<0>();
    //}
}