defmt_persist/ring_buffer.rs

//! A single-producer, single-consumer (SPSC) lock-free queue.

use core::{
    cell::UnsafeCell,
    mem::MaybeUninit,
    ops::Range,
    ptr, slice,
    sync::atomic::{AtomicU32, Ordering, compiler_fence, fence},
};

/// A single-producer, single-consumer (SPSC) lock-free queue storing up to `len-1` bytes,
/// where `len` is the size of the memory region left over after the [`RingBuffer`] header
/// itself.
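///
/// For example, a 1 KiB region leaves `1024 - size_of::<RingBuffer>()` bytes for the
/// ring, one of which is always kept free to distinguish a full queue from an empty one.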
///
/// # ECC Flush
///
/// On MCUs with 32-bit or 64-bit ECC-protected RAM (e.g., STM32H7/H5), writes are cached
/// until a full ECC word is written. A reset before the cache is flushed can lose data.
///
/// Enable the `ecc` feature to add an `_ecc_flush` field. After each write operation,
/// a single byte is written to this field, which flushes the ECC write cache by performing
/// an unaligned access to a different SRAM word.
///
/// Note: The struct layout changes with this feature, so the MAGIC value differs to
/// force reinitialization when switching between configurations.
///
/// # CPU Data Cache
///
/// On Cortex-M7 and other cores with a data cache, ensure the persist memory region is
/// configured as non-cacheable via the MPU. Otherwise, data may be lost in the CPU cache
/// on reset, even with ECC flushing enabled. Cortex-M0/M0+/M3/M4 do not have a data cache.
#[repr(C)]
pub struct RingBuffer {
    /// If the value is [`MAGIC`], the struct is initialized.
    ///
    /// In particular, this means that the reader-owned part of the buffer
    /// contains real data.
    header: u128,
    /// Where the next read starts.
    ///
    /// The RingBuffer always guarantees `read < len`.
    read: AtomicU32,
    /// Where the next write starts.
    ///
    /// The RingBuffer always guarantees `write < len`.
    write: AtomicU32,
    /// Writing a single byte to this field flushes the ECC write cache.
    /// An unaligned write to a different SRAM word forces the cache to commit.
    #[cfg(feature = "ecc")]
    _ecc_flush: UnsafeCell<u64>,
}

/// Writes data into the buffer.
pub struct Producer<'a> {
    header: &'a RingBuffer,
    buf: &'a [UnsafeCell<MaybeUninit<u8>>],
}

/// Reads data previously written to the buffer.
///
/// Returned by [`crate::init`]. Use [`Consumer::read`] to get a [`GrantR`] for accessing
/// the buffered data, then call [`GrantR::release`] to mark bytes as consumed.
///
/// With the `async-await` feature, use [`Consumer::wait_for_data`] to asynchronously
/// wait for new data to be available.
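///
/// A minimal usage sketch (not compiled as a doc-test; assumes a `Consumer` obtained
/// from [`crate::init`] and a hypothetical `process` handler for the bytes):
///
/// ```ignore
/// let grant = consumer.read();
/// let (head, tail) = grant.bufs();
/// process(head); // data up to the end of the ring
/// process(tail); // wrapped data at the start of the ring, possibly empty
/// grant.release_all();
/// ```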
pub struct Consumer<'a> {
    header: &'a RingBuffer,
    buf: &'a [UnsafeCell<MaybeUninit<u8>>],
}

// SAFETY: Consumer can be safely sent to another thread because:
// - Only one Consumer exists per queue (single-consumer invariant enforced by split())
// - Atomic operations on header.read/write synchronize with the Producer
// - The UnsafeCell slice is only accessed through methods that maintain the SPSC invariant
unsafe impl Send for Consumer<'_> {}

/// Value used to indicate that the queue is initialized.
///
/// Replace this if the layout or field semantics change in a backwards-incompatible way.
/// The `ecc` layout uses a different magic to force reinitialization when switching.
#[cfg(not(feature = "ecc"))]
const MAGIC: u128 = 0xb528_c25f_90c6_16af_cbc1_502c_09c1_fd6e;
#[cfg(feature = "ecc")]
const MAGIC: u128 = 0x1dff_2060_27b9_f2b4_a194_1013_69cd_3c6c;

/// Field offsets for corruption testing.
#[cfg(feature = "qemu-test")]
pub mod offsets {
    use super::RingBuffer;
    use core::mem::{offset_of, size_of};
    use core::sync::atomic::AtomicU32;

    /// Offset of the header field.
    pub const HEADER: usize = offset_of!(RingBuffer, header);
    /// Offset of the read index field.
    pub const READ: usize = offset_of!(RingBuffer, read);
    /// Offset of the write index field.
    pub const WRITE: usize = offset_of!(RingBuffer, write);
    /// Size of an index field.
    pub const INDEX_SIZE: usize = size_of::<AtomicU32>();
}

impl RingBuffer {
    #[cfg(test)]
    pub(crate) fn new(read: u32, write: u32) -> Self {
        RingBuffer {
            header: MAGIC,
            read: AtomicU32::new(read),
            write: AtomicU32::new(write),
            #[cfg(feature = "ecc")]
            _ecc_flush: UnsafeCell::new(0),
        }
    }

    /// Flush the ECC write cache by writing a single byte to the flush field.
    ///
    /// No-op when `ecc` feature is disabled.
    #[inline]
    fn flush_ecc(&self) {
        #[cfg(feature = "ecc")]
        {
            // Ensure previous writes are emitted before the volatile write.
            compiler_fence(Ordering::SeqCst);
            // SAFETY: Writing a single byte to our own `UnsafeCell` field is safe.
            // This unaligned access to a different SRAM word flushes the ECC cache.
            // Concurrent writes from Producer and Consumer are safe because:
            // - Single-byte writes are atomic on all supported platforms.
            // - The value written is always 0; we don't care about the result.
            unsafe {
                let ptr: *mut u8 = self._ecc_flush.get().cast();
                ptr.write_volatile(0);
            }
        }
    }

    /// Creates a `RingBuffer` or recovers previous state if available.
    ///
    /// # Safety
    ///
    /// - `memory.start` must be aligned to `align_of::<RingBuffer>()`.
    /// - `memory.len()` must be greater than `size_of::<RingBuffer>()`.
    /// - Buffer size (`memory.len() - size_of::<RingBuffer>()`) must be less than
    ///   `i32::MAX / 4` to avoid overflow in pointer arithmetic.
    /// - This takes logical ownership of the provided `memory` for the
    ///   `'static` lifetime. Make sure that any previous owner is no longer
    ///   live, for example by only ever having one application running at a
    ///   time and only one call to this function in the application's lifetime.
    ///
    /// It is, however, not a problem for both a bootloader and its booted
    /// application to call this function, provided the bootloader program
    /// ends when it boots into the application and cannot resume execution
    /// afterwards.
    ///
    /// There is always a risk that corrupt memory is accepted as
    /// valid. While this function checks for direct memory safety problems,
    /// it cannot vet the data in a non-empty buffer. Treat it as external
    /// input and do not rely on its value for memory safety.
    pub(crate) unsafe fn recover_or_reinitialize(
        memory: Range<usize>,
    ) -> (Producer<'static>, Consumer<'static>) {
        let v: *mut Self = ptr::with_exposed_provenance_mut(memory.start);
        let buf_len = memory.len() - size_of::<RingBuffer>();

        // SAFETY:
        // - Alignment is guaranteed by the caller.
        // - Size is guaranteed by the caller.
        // - All fields (`u128`, `AtomicU32`, `UnsafeCell<u64>`, `[UnsafeCell<MaybeUninit<u8>>; X]`)
        //   are valid for any bit pattern, so interpreting the raw memory as this
        //   type and buffer is sound. As the memory is initialized outside the Rust abstract
        //   machine (of the running program), we consider the caveats of non-fixed
        //   bit patterns from `MaybeUninit` mitigated.
        // - The caller guarantees this function is called at most once during
        //   program execution for any given `memory`, ensuring no aliasing
        //   references exist for the `'static` lifetime.
        let v = unsafe { &mut *v };
        let header = ptr::from_mut(&mut v.header);
        // SAFETY: The pointer comes from a valid, aligned reference, so it is valid for
        // reads. A regular read from v.header would be correct here, but it could be
        // optimized away; the volatile read forces memory to actually be inspected.
        if unsafe { header.read_volatile() } != MAGIC {
            v.read.store(0, Ordering::Relaxed);
            // The intermediate state doesn't matter until header == MAGIC
            v.write.store(0, Ordering::Relaxed);
            v.flush_ecc();

            fence(Ordering::SeqCst);
            // SAFETY: The pointer comes from a valid, aligned reference, so it is valid
            // for writes. A regular assignment to v.header would be correct here, but it
            // is not guaranteed to actually update memory; the volatile write forces it.
            unsafe { header.write_volatile(MAGIC) };
        } else {
            // The header claims the struct is initialized, but we don't trust the
            // stored indexes for the safety of our pointer offsets.
            let write = v.write.load(Ordering::Relaxed) as usize;
            let read = v.read.load(Ordering::Relaxed) as usize;
            let read_ok = read < buf_len;
            let write_ok = write < buf_len;
            // Since `header` is already marked as valid, some extra care is taken here
            // to avoid any window of time in which both indexes are in bounds but not
            // valid. Otherwise a poorly timed reset could leave the queue in a state
            // that appears valid and non-empty.
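            // If either index is out of bounds, the queue is collapsed to empty: the
            // invalid index is snapped to the surviving one (or both are reset to 0),
            // so no stale bytes between `read` and `write` are ever exposed as data.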
            match (read_ok, write_ok) {
                (true, true) => {}
                (true, false) => v.write.store(read as u32, Ordering::Relaxed),
                (false, true) => v.read.store(write as u32, Ordering::Relaxed),
                (false, false) => {
                    v.read.store(0, Ordering::Relaxed);
                    // write is still invalid between these operations
                    v.write.store(0, Ordering::Relaxed);
                }
            };
            v.flush_ecc();
        }
        fence(Ordering::SeqCst);

        // SAFETY:
        // - The caller guarantees at least 1 byte of space left in the allocated area.
        // - There are no alignment requirements on the values in the buffer.
        // - There is no mutable aliasing: all slices made from this buffer are immutable,
        //   and writes go through the `UnsafeCell`s' interior mutability.
        let buf: &[UnsafeCell<MaybeUninit<u8>>] = unsafe {
            slice::from_raw_parts(
                ptr::with_exposed_provenance(memory.start + size_of::<RingBuffer>()),
                buf_len,
            )
        };

        // SAFETY: The caller guarantees buf.len() < i32::MAX / 4.
        unsafe { v.split(buf) }
    }

    /// Splits the queue into producer and consumer given a memory area.
    ///
    /// # Safety
    ///
    /// `buf.len()` must be less than `i32::MAX / 4` to avoid overflow in pointer arithmetic.
    #[inline]
    pub const unsafe fn split<'a>(
        &'a mut self,
        buf: &'a [UnsafeCell<MaybeUninit<u8>>],
    ) -> (Producer<'a>, Consumer<'a>) {
        (
            Producer { header: self, buf },
            Consumer { header: self, buf },
        )
    }
}

impl Producer<'_> {
    /// How much space is left in the buffer?
    #[inline]
    fn available(&self, read: usize, write: usize) -> usize {
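        // Worked example: with `buf.len() == 4` and `read == write == 2`, the second
        // branch gives 4 - 2 - 1 + 2 = 3 free bytes; one slot always stays free so a
        // full queue can be told apart from an empty one.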
        if read > write {
            read - write - 1
        } else {
            self.buf.len() - write - 1 + read
        }
    }

    /// Appends `data` to the buffer.
    ///
    /// If there is not enough space, the last bytes are silently discarded.
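    ///
    /// For example (mirroring the `overfill` test below), with a 4-byte ring and
    /// `read == write == 0`, at most 3 bytes fit: writing `[1, 2, 3, 4, 5, 6, 7]`
    /// stores `[1, 2, 3]` and discards the rest.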
    #[inline]
    pub fn write(&mut self, data: &[u8]) {
        // Relaxed: stale `read` is safe (underestimates available space).
        let read = self.header.read.load(Ordering::Relaxed) as usize;
        // Relaxed: producer owns `write`, no cross-thread synchronization needed.
        let write = self.header.write.load(Ordering::Relaxed) as usize;
        let buf: *mut u8 = self.buf.as_ptr().cast_mut().cast();
        let len = data.len().min(self.available(read, write));
        if len == 0 {
            return;
        }

        // There are `ptr::copy_nonoverlapping` and `pointer::add` calls below.
        // The common safety arguments are:
        //
        // For `copy_nonoverlapping`:
        // - src valid: sub-slice of `data`, which is valid for reads.
        // - dst valid: sub-slice of the producer-owned part of `buf`, which is valid for writes.
        // - aligned: u8 slices have alignment 1.
        // - nonoverlapping: The caller-provided `data` cannot overlap with the part of `buf` owned
        //   by the producer, because only the consumer gives slices to external code.
        //
        // For `pointer::add`:
        // - offset in bytes fits in `isize`: the only constructor `RingBuffer::split`
        //   passes this requirement on to its caller.
        // - entire memory range inside the same allocation: we stay within `buf`, which is a
        //   single allocation.
        //
        // What remains to show for each use is that src and dst ranges are valid sub-slices of
        // `data` and the producer-owned part of `buf`, respectively.

        if write + len > self.buf.len() {
            // Wrapping case: the write crosses the end of the buffer.
            // This can only happen when write >= read: if write < read, then
            // available = read - write - 1, so
            // write + len <= write + (read - write - 1) = read - 1 < buf.len(), a contradiction.
            let pivot = self.buf.len() - write;
            // SAFETY:
            // - First copy: data[0..pivot] -> buf[write..buf.len()]
            //   - src: pivot < len <= data.len() (since write + len > buf.len()
            //     implies len > buf.len() - write = pivot).
            //   - dst: write < buf.len() by field invariant, and
            //     write + pivot = buf.len(), so dst is buf[write..buf.len()].
            unsafe { ptr::copy_nonoverlapping(data.as_ptr(), buf.add(write), pivot) };
            // SAFETY:
            // - Second copy: data[pivot..len] -> buf[0..len-pivot]
            //   - src: pivot..len is in bounds since pivot < len <= data.len().
            //   - dst: len - pivot <= available - pivot. With write >= read,
            //     available = buf.len() - write - 1 + read, so
            //     len - pivot <= buf.len() - write - 1 + read - (buf.len() - write)
            //     = read - 1 < read. Thus buf[0..len-pivot] does not overlap
            //     with consumer-owned memory starting at read.
            unsafe { ptr::copy_nonoverlapping(data.as_ptr().add(pivot), buf, len - pivot) };
        } else {
            // Non-wrapping case: the entire write fits before the end.
            // SAFETY:
            // - src: data[0..len] is valid since len <= data.len().
            // - dst: buf[write..write+len]. write < buf.len() by field
            //   invariant, and write + len <= buf.len() by the else branch
            //   condition. len <= available ensures we don't write into
            //   consumer-owned memory.
            unsafe { ptr::copy_nonoverlapping(data.as_ptr(), buf.add(write), len) };
        }

        // Flush data before updating index. With 32-bit ECC, the index store may flush
        // immediately while data is still cached. This ensures the index never points
        // to uncommitted data.
        self.header.flush_ecc();

        self.header.write.store(
            (write.wrapping_add(len) % self.buf.len()) as u32,
            Ordering::Release,
        );
        self.header.flush_ecc();
    }
}

impl Consumer<'_> {
    /// Returns `true` if there is no data available to read.
    #[inline]
    pub fn is_empty(&self) -> bool {
        // Acquire: synchronizes with producer's Release store to see written data.
        let write = self.header.write.load(Ordering::Acquire) as usize;
        // Relaxed: consumer owns `read`, no cross-thread synchronization needed.
        let read = self.header.read.load(Ordering::Relaxed) as usize;

        write == read
    }

    /// Read data from the buffer.
    ///
    /// If the data available to read crosses the end of the ring, the returned
    /// grant exposes it as two slices (the part up to the end of the ring and
    /// the wrapped part at its start); see [`GrantR::bufs`].
    #[inline]
    #[must_use]
    pub fn read(&mut self) -> GrantR<'_, '_> {
        // Acquire: synchronizes with producer's Release store, ensuring we see the written data.
        let write = self.header.write.load(Ordering::Acquire) as usize;
        // Relaxed: consumer owns `read`, no cross-thread synchronization needed.
        let read = self.header.read.load(Ordering::Relaxed) as usize;
        let buf: *mut u8 = self.buf.as_ptr().cast_mut().cast();

        let (len1, len2) = if write < read {
            (self.buf.len() - read, write)
        } else {
            (write - read, 0)
        };

        // SAFETY:
        // For `slice::from_raw_parts`:
        // - Non-null, valid, aligned: it is a sub-slice of `buf`,
        //   relying on the invariants on `read` and `write`.
        // - Properly initialized values: The memory owned by the consumer
        //   has been initialized by the producer. When recovering the data
        //   from a previous run, we instead rely on the ability of u8 to
        //   accept any (fixed) bit pattern. Since the recovery procedure
        //   produces the value from memory outside the Rust abstract machine,
        //   the hazards of uninitialized memory should be mitigated.
        // - Not mutated for the lifetime: only the producer modifies
        //   `buf`, but the consumer owns this memory until the read pointer
        //   is updated. The read pointer is only updated in the function
        //   that drops the slice.
        // - Total size in bytes < i32::MAX: we stay inside `buf`
        //   and the only constructor `RingBuffer::split` requires of its caller
        //   that no in-bounds buffer is too big.
        //
        // For `pointer::add`:
        // - offset in bytes fits in `isize`: the offset is `read`, and `read < buf.len()`,
        //   which is small enough by the requirement `RingBuffer::split` places on its
        //   caller.
        // - entire memory range inside the same allocation: `read + len1 <= buf.len()`
        //   in both branches above, so the range stays inside `buf`.
        let slice1 = unsafe { slice::from_raw_parts(buf.add(read), len1) };
        // SAFETY:
        // For `slice::from_raw_parts`, the same arguments as for `slice1` apply:
        // - Non-null, valid, aligned: `slice2` is a sub-slice of `buf` starting at its
        //   first element.
        // - Properly initialized values: written by the producer, or recovered bytes
        //   for which any fixed bit pattern is a valid u8.
        // - Not mutated for the lifetime: the consumer owns this memory until the read
        //   pointer is updated, which only happens when the grant is released.
        // - Total size in bytes < i32::MAX: `RingBuffer::split` requires this of its caller.
        // No `pointer::add` is needed here: `len2` is either 0 or `write`, which is in
        // bounds by the field invariant `write < buf.len()`.
        let slice2 = unsafe { slice::from_raw_parts(buf, len2) };
        GrantR {
            consumer: self,
            slice1,
            slice2,
            original_read: read,
        }
    }

    #[cfg(feature = "async-await")]
    /// Waits until there is data in the [`Consumer`].
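    ///
    /// A minimal sketch of a drain loop (not compiled as a doc-test; assumes an async
    /// task owning the `Consumer` and a hypothetical `forward` sink for the bytes):
    ///
    /// ```ignore
    /// loop {
    ///     consumer.wait_for_data().await;
    ///     let grant = consumer.read();
    ///     let (head, tail) = grant.bufs();
    ///     forward(head).await; // hypothetical
    ///     forward(tail).await; // hypothetical
    ///     grant.release_all();
    /// }
    /// ```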
    pub async fn wait_for_data(&mut self) {
        core::future::poll_fn(|cx| {
            super::logger::WAKER.register(cx.waker());

            if self.is_empty() {
                core::task::Poll::Pending
            } else {
                core::task::Poll::Ready(())
            }
        })
        .await
    }
}

/// A read grant providing access to buffered data.
///
/// Obtained from [`Consumer::read`]. The grant exposes the available data as two
/// slices via [`GrantR::bufs`]: the part up to the end of the ring, and the wrapped
/// part at its start. When done reading, call [`GrantR::release`] to mark bytes
/// as consumed and free space for new writes.
///
/// If the grant is dropped without calling `release`, no data is consumed.
pub struct GrantR<'a, 'c> {
    consumer: &'a Consumer<'c>,
    slice1: &'a [u8],
    slice2: &'a [u8],
    original_read: usize,
}

// SAFETY: GrantR can be safely sent to another thread because:
// - Only one GrantR can exist at a time (Consumer::read takes &mut self)
// - The slices are regular &[u8]s pointing to consumer-owned memory that the producer
//   won't modify until release() updates the read pointer
// - release() only performs atomic stores to header.read (and `_ecc_flush` for ECC)
// - The underlying UnsafeCell in Consumer::buf is not directly accessed through GrantR;
//   the slices were materialized in Consumer::read before GrantR was created
unsafe impl Send for GrantR<'_, '_> {}

impl<'a, 'c> GrantR<'a, 'c> {
    /// Finish the read, marking `used` bytes as consumed.
    ///
    /// This frees up the `used` space for future writes; values larger than the
    /// grant's total length are clamped.
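    ///
    /// For example (mirroring the `release_crossing_end` test below), if the grant
    /// starts at `read == 2` in a 4-byte ring and covers slices of 2 and 1 bytes,
    /// `release(3)` wraps the read index around to `3 - 2 = 1`.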
    #[inline]
    pub fn release(self, used: usize) {
        let used = used.min(self.slice1.len() + self.slice2.len());
        // Non-atomic read-modify-write is ok here because there can
        // never be more than one active GrantR at a time.
        let read = self.original_read;
        let new_read = if read + used < self.consumer.buf.len() {
            read + used
        } else {
            used - self.slice1.len()
        };
        self.consumer
            .header
            .read
            .store(new_read as u32, Ordering::Release);
        self.consumer.header.flush_ecc();
    }

    /// Finish the read, marking all bytes as used.
    ///
    /// This is equivalent to calling [`GrantR::release`] with the combined length
    /// of both slices returned by [`GrantR::bufs`].
    #[inline]
    pub fn release_all(self) {
        self.release(usize::MAX);
    }

    /// Returns the bytes that this grant is allowed to read.
    #[inline]
    pub fn bufs(&self) -> (&[u8], &[u8]) {
        (self.slice1, self.slice2)
    }
}

#[cfg(test)]
mod test {

    use super::*;

    #[test]
    fn touching_no_boundaries() {
        let mut b = RingBuffer::new(1, 1);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[][..]));
        r.release(2);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn fill_simple() {
        let mut b = RingBuffer::new(0, 0);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2, 3]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2, 3][..], &[][..]));
        r.release(3);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn fill_crossing_end() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2, 3]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[3][..]));
        r.release(2);
        let r = c.read();
        assert_eq!(r.bufs(), (&[3][..], &[][..]));
        r.release(1);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn release_crossing_end() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2, 3]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[3][..]));
        r.release(3);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn underfill_crossing_end() {
        let mut b = RingBuffer::new(3, 3);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1][..], &[2][..]));
        r.release(1);
        let r = c.read();
        assert_eq!(r.bufs(), (&[2][..], &[][..]));
        r.release(1);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn overfill() {
        let mut b = RingBuffer::new(0, 0);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2, 3, 4, 5, 6, 7]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2, 3][..], &[][..]));
        r.release(3);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn stop_at_end() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[][..]));
        r.release(2);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn stop_before_end() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1][..], &[][..]));
        r.release(1);
        let r = c.read();
        assert_eq!(r.bufs(), (&[][..], &[][..]));
    }

    #[test]
    fn zero_release() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[][..]));
        r.release(0);
        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[][..]));
    }

    #[test]
    fn partial_release() {
        let mut b = RingBuffer::new(2, 2);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };
        p.write(&[1, 2]);

        let r = c.read();
        assert_eq!(r.bufs(), (&[1, 2][..], &[][..]));
        r.release(1);
        let r = c.read();
        assert_eq!(r.bufs(), (&[2][..], &[][..]));
    }
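
    // An additional sketch exercising the wrapping paths repeatedly: it assumes the same
    // 4-byte test buffer as the tests above and writes 3 bytes per round, so the ring
    // wraps on most iterations.
    #[test]
    fn repeated_wrapping() {
        let mut b = RingBuffer::new(0, 0);
        let buf = &[const { UnsafeCell::new(MaybeUninit::uninit()) }; 4];
        // SAFETY: Test buffer is 4 bytes, well under i32::MAX / 4.
        let (mut p, mut c) = unsafe { b.split(buf) };

        for i in 0..8u8 {
            let data = [i, i.wrapping_add(1), i.wrapping_add(2)];
            p.write(&data);

            let r = c.read();
            let (head, tail) = r.bufs();
            // Together, the two slices return exactly what was written, in order.
            assert_eq!(head.len() + tail.len(), 3);
            assert_eq!(head, &data[..head.len()]);
            assert_eq!(tail, &data[head.len()..]);
            r.release_all();

            assert!(c.is_empty());
        }
    }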
}