// bbqueue/bbbuffer.rs

use crate::{
    framed::{FrameConsumer, FrameProducer},
    Error, Result,
};
use core::{
    cell::UnsafeCell,
    cmp::min,
    marker::PhantomData,
    mem::{forget, transmute, MaybeUninit},
    ops::{Deref, DerefMut},
    ptr::NonNull,
    result::Result as CoreResult,
    slice::from_raw_parts_mut,
    sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{AcqRel, Acquire, Release},
    },
};
/// A backing structure for a BBQueue. Can be used to create either
/// a BBQueue or a split Producer/Consumer pair
#[derive(Debug)]
pub struct BBBuffer<const N: usize> {
    buf: UnsafeCell<MaybeUninit<[u8; N]>>,

    /// Where the next byte will be written
    write: AtomicUsize,

    /// Where the next byte will be read from
    read: AtomicUsize,

    /// Used in the inverted case to mark the end of the
    /// readable streak. Otherwise it will equal `N`, the size of
    /// the buffer. The writer is responsible for placing this at
    /// the correct position when entering an inverted condition,
    /// and the reader is responsible for moving it back to `N`
    /// when exiting the inverted condition
    last: AtomicUsize,

    /// Used by the writer to remember which bytes are currently
    /// allowed to be written to, but are not yet ready to be
    /// read from
    reserve: AtomicUsize,

    /// Is there an active read grant?
    read_in_progress: AtomicBool,

    /// Is there an active write grant?
    write_in_progress: AtomicBool,

    /// Have we already split?
    already_split: AtomicBool,
}

unsafe impl<const A: usize> Sync for BBBuffer<A> {}

impl<'a, const N: usize> BBBuffer<N> {
    /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the
    /// buffer. If the buffer has already been split, an error will be returned.
    ///
    /// NOTE: When splitting, the underlying buffer will be explicitly initialized
    /// to zero. This may take a measurable amount of time, depending on the size
    /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
    /// is placed at `static` scope within the `.bss` region, the explicit initialization
    /// will be elided (as it is already performed as part of memory initialization)
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical section
    /// while splitting.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split().is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn try_split(&'a self) -> Result<(Producer<'a, N>, Consumer<'a, N>)> {
        if atomic::swap(&self.already_split, true, AcqRel) {
            return Err(Error::AlreadySplit);
        }

        unsafe {
            // Explicitly zero the data to avoid undefined behavior.
            // This is required because we hand out references to the buffer,
            // and creating references to uninitialized memory is technically UB for now
            let mu_ptr = self.buf.get();
            (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1);

            let nn1 = NonNull::new_unchecked(self as *const _ as *mut _);
            let nn2 = NonNull::new_unchecked(self as *const _ as *mut _);

            Ok((
                Producer {
                    bbq: nn1,
                    pd: PhantomData,
                },
                Consumer {
                    bbq: nn2,
                    pd: PhantomData,
                },
            ))
        }
    }

    /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves
    /// to gain access to the buffer. If the buffer has already been split, an error
    /// will be returned.
    ///
    /// NOTE: When splitting, the underlying buffer will be explicitly initialized
    /// to zero. This may take a measurable amount of time, depending on the size
    /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
    /// is placed at `static` scope within the `.bss` region, the explicit initialization
    /// will be elided (as it is already performed as part of memory initialization)
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while splitting.
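    ///
    /// A minimal usage sketch, mirroring the `try_split` example above:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer in framed mode
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split_framed().is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```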
    pub fn try_split_framed(&'a self) -> Result<(FrameProducer<'a, N>, FrameConsumer<'a, N>)> {
        let (producer, consumer) = self.try_split()?;
        Ok((FrameProducer { producer }, FrameConsumer { consumer }))
    }

    /// Attempt to release the Producer and Consumer
    ///
    /// This re-initializes the buffer so it may be split in a different mode at a later
    /// time. There must be no read or write grants active, or an error will be returned.
    ///
    /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will
    /// be returned.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split().is_err());
    ///
    /// // Release the producer and consumer
    /// assert!(buffer.try_release(prod, cons).is_ok());
    ///
    /// // Split the buffer in framed mode
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn try_release(
        &'a self,
        prod: Producer<'a, N>,
        cons: Consumer<'a, N>,
    ) -> CoreResult<(), (Producer<'a, N>, Consumer<'a, N>)> {
        // Note: Re-entrancy is not possible because we require ownership
        // of the producer and consumer, which are not cloneable. We also
        // can assume the buffer has been split, because the halves can
        // only be obtained by calling `try_split()`

        // Are these our producers and consumers?
        let our_prod = prod.bbq.as_ptr() as *const Self == self;
        let our_cons = cons.bbq.as_ptr() as *const Self == self;

        if !(our_prod && our_cons) {
            // Can't release, not our producer and consumer
            return Err((prod, cons));
        }

        let wr_in_progress = self.write_in_progress.load(Acquire);
        let rd_in_progress = self.read_in_progress.load(Acquire);

        if wr_in_progress || rd_in_progress {
            // Can't release, active grant(s) in progress
            return Err((prod, cons));
        }

        // Drop the producer and consumer halves
        drop(prod);
        drop(cons);

        // Re-initialize the buffer (not totally needed, but nice to do)
        self.write.store(0, Release);
        self.read.store(0, Release);
        self.reserve.store(0, Release);
        self.last.store(0, Release);

        // Mark the buffer as ready to be split
        self.already_split.store(false, Release);

        Ok(())
    }

    /// Attempt to release the Producer and Consumer in Framed mode
    ///
    /// This re-initializes the buffer so it may be split in a different mode at a later
    /// time. There must be no read or write grants active, or an error will be returned.
    ///
    /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error
    /// will be returned.
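    ///
    /// A usage sketch mirroring the `try_release` example above:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer in framed mode
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    ///
    /// // Release the framed producer and consumer
    /// assert!(buffer.try_release_framed(fprod, fcons).is_ok());
    ///
    /// // The buffer may now be split again, in either mode
    /// let (prod, cons) = buffer.try_split().unwrap();
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```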
    pub fn try_release_framed(
        &'a self,
        prod: FrameProducer<'a, N>,
        cons: FrameConsumer<'a, N>,
    ) -> CoreResult<(), (FrameProducer<'a, N>, FrameConsumer<'a, N>)> {
        self.try_release(prod.producer, cons.consumer)
            .map_err(|(producer, consumer)| {
                // Restore the wrapper types
                (FrameProducer { producer }, FrameConsumer { consumer })
            })
    }
}

impl<const A: usize> BBBuffer<A> {
    /// Create a new `BBBuffer`
    ///
    /// NOTE: This is a `const fn`, so it may also be used to create a `BBBuffer`
    /// at `static` scope.
    ///
    /// ```rust,no_run
    /// use bbqueue::BBBuffer;
    ///
    /// static BUF: BBBuffer<6> = BBBuffer::new();
    ///
    /// fn main() {
    ///    let (prod, cons) = BUF.try_split().unwrap();
    /// }
    /// ```
    pub const fn new() -> Self {
        Self {
            // This will not be initialized until we split the buffer
            buf: UnsafeCell::new(MaybeUninit::uninit()),

            // Owned by the writer
            write: AtomicUsize::new(0),

            // Owned by the reader
            read: AtomicUsize::new(0),

            // Cooperatively owned
            //
            // NOTE: This should generally be initialized as size_of::<self.buf>(), however
            // this would prevent the structure from being entirely zero-initialized,
            // and can cause the .data section to be much larger than necessary. By
            // forcing the `last` pointer to be zero initially, we place the structure
            // in an "inverted" condition, which will be resolved on the first committed
            // bytes that are written to the structure.
            //
            // When read == last == write, no bytes will be allowed to be read (good), but
            // write grants can be given out (also good).
            last: AtomicUsize::new(0),

            // Owned by the Writer, "private"
            reserve: AtomicUsize::new(0),

            // Owned by the Reader, "private"
            read_in_progress: AtomicBool::new(false),

            // Owned by the Writer, "private"
            write_in_progress: AtomicBool::new(false),

            // We haven't split at the start
            already_split: AtomicBool::new(false),
        }
    }
}

/// `Producer` is the primary interface for pushing data into a `BBBuffer`.
/// There are various methods for obtaining a grant to write to the buffer, with
/// different potential tradeoffs. As all grants are required to be a contiguous
/// range of data, different strategies are sometimes useful when making the decision
/// between maximizing usage of the buffer, and ensuring a given grant is successful.
///
/// As a short summary of currently possible grants:
///
/// * `grant_exact(N)`
///   * User will receive a grant `sz == N` (or receive an error)
///   * This may cause a wraparound if a grant of size N is not available
///       at the end of the ring.
///   * If this grant caused a wraparound, the bytes that were "skipped" at the
///       end of the ring will not be available until the reader reaches them,
///       regardless of whether the grant committed any data or not.
///   * Maximum possible waste due to skipping: `N - 1` bytes
/// * `grant_max_remaining(N)`
///   * User will receive a grant `0 < sz <= N` (or receive an error)
///   * This will only cause a wrap to the beginning of the ring if exactly
///       zero bytes are available at the end of the ring.
///   * Maximum possible waste due to skipping: 0 bytes
///
/// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a
/// discussion of grant methods that could be added in the future.
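///
/// A short sketch contrasting the two strategies, with sizes chosen so that the
/// ring has exactly two free bytes left at its end:
///
/// ```rust
/// # // bbqueue test shim!
/// # fn bbqtest() {
/// use bbqueue::BBBuffer;
///
/// // Create and split a new buffer of 6 elements
/// let buffer: BBBuffer<6> = BBBuffer::new();
/// let (mut prod, mut cons) = buffer.try_split().unwrap();
///
/// // Commit and release four bytes, leaving two free at the end of the ring
/// prod.grant_exact(4).unwrap().commit(4);
/// cons.read().unwrap().release(4);
///
/// // `grant_max_remaining` hands out the two bytes at the end...
/// let mut grant = prod.grant_max_remaining(3).unwrap();
/// assert_eq!(grant.buf().len(), 2);
/// drop(grant); // dropped without committing
///
/// // ...while `grant_exact` wraps around, skipping those two bytes
/// let mut grant = prod.grant_exact(3).unwrap();
/// assert_eq!(grant.buf().len(), 3);
/// # // bbqueue test shim!
/// # }
/// #
/// # fn main() {
/// # #[cfg(not(feature = "thumbv6"))]
/// # bbqtest();
/// # }
/// ```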
pub struct Producer<'a, const N: usize> {
    bbq: NonNull<BBBuffer<N>>,
    pd: PhantomData<&'a ()>,
}

unsafe impl<'a, const N: usize> Send for Producer<'a, N> {}

impl<'a, const N: usize> Producer<'a, N> {
    /// Request a writable, contiguous section of memory of exactly
    /// `sz` bytes. If the buffer size requested is not available,
    /// an error will be returned.
    ///
    /// This method may cause the buffer to wrap around early if the
    /// requested space is not available at the end of the buffer, but
    /// is available at the beginning.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Try to obtain a grant of three bytes
    /// assert!(prod.grant_exact(3).is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_exact(&mut self, sz: usize) -> Result<GrantW<'a, N>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = N;
        let already_inverted = write < read;

        let start = if already_inverted {
            if (write + sz) < read {
                // Inverted, room is still available
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            if write + sz <= max {
                // Non inverted condition
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check sz < read, NOT <=, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if sz < read {
                    // Invertible situation
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell` and `MaybeUninit` are
        // both `#[repr(transparent)]`
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }

    /// Request a writable, contiguous section of memory of up to
    /// `sz` bytes. If a buffer of size `sz` is not available without
    /// wrapping, but some space (0 < available < sz) is available without
    /// wrapping, then a grant will be given for the remaining size at the
    /// end of the buffer. If no space is available for writing, an error
    /// will be returned.
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Release the four initial committed bytes
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.release(4);
    ///
    /// // Try to obtain a grant of three bytes, get two bytes
    /// let mut grant = prod.grant_max_remaining(3).unwrap();
    /// assert_eq!(grant.buf().len(), 2);
    /// grant.commit(2);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result<GrantW<'a, N>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = N;

        let already_inverted = write < read;

        let start = if already_inverted {
            // In the inverted case, read is always > write
            let remain = read - write - 1;

            if remain != 0 {
                sz = min(remain, sz);
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            if write != max {
                // Some (or all) room remaining in un-inverted case
                sz = min(max - write, sz);
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check read > 1, NOT read >= 1, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if read > 1 {
                    sz = min(read - 1, sz);
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell` and `MaybeUninit` are
        // both `#[repr(transparent)]`
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }
}

/// `Consumer` is the primary interface for reading data from a `BBBuffer`.
pub struct Consumer<'a, const N: usize> {
    bbq: NonNull<BBBuffer<N>>,
    pd: PhantomData<&'a ()>,
}

unsafe impl<'a, const N: usize> Send for Consumer<'a, N> {}

impl<'a, const N: usize> Consumer<'a, N> {
    /// Obtains a contiguous slice of committed bytes. This slice may not
    /// contain ALL available bytes, if the writer has wrapped around. The
    /// remaining bytes will be available after all readable bytes are
    /// released
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn read(&mut self) -> Result<GrantR<'a, N>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //     no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        let sz = if write < read {
            // Inverted, only believe last
            last
        } else {
            // Not inverted, only believe write
            write
        } - read;

        if sz == 0 {
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell` and `MaybeUninit` are
        // both `#[repr(transparent)]`
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz) };

        Ok(GrantR {
            buf: grant_slice,
            bbq: self.bbq,
            to_release: 0,
        })
    }

    /// Obtains two disjoint slices of committed bytes, each of them contiguous.
    /// Combined, these contain all previously committed data.
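    ///
    /// A worked sketch in which the committed data has wrapped around the end
    /// of the ring, so both slices are non-empty:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Commit four bytes, and release the first two
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    /// cons.read().unwrap().release(2);
    ///
    /// // Fill the end of the ring, then wrap around to the front
    /// let mut grant = prod.grant_exact(2).unwrap();
    /// grant.buf().copy_from_slice(&[5, 6]);
    /// grant.commit(2);
    /// let mut grant = prod.grant_exact(1).unwrap();
    /// grant.buf().copy_from_slice(&[7]);
    /// grant.commit(1);
    ///
    /// // Both parts of the wrapped data are visible in one grant
    /// let grant = cons.split_read().unwrap();
    /// let (first, second) = grant.bufs();
    /// assert_eq!(first, &[3, 4, 5, 6]);
    /// assert_eq!(second, &[7]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```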
    pub fn split_read(&mut self) -> Result<SplitGrantR<'a, N>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //     no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        let (sz1, sz2) = if write < read {
            // Inverted, only believe last
            (last - read, write)
        } else {
            // Not inverted, only believe write
            (write - read, 0)
        };

        if sz1 == 0 {
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell` and `MaybeUninit` are
        // both `#[repr(transparent)]`
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice1 =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz1) };
        let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) };

        Ok(SplitGrantR {
            buf1: grant_slice1,
            buf2: grant_slice2,
            bbq: self.bbq,
            to_release: 0,
        })
    }
}

impl<const N: usize> BBBuffer<N> {
    /// Returns the size of the backing storage.
    ///
    /// This is the maximum number of bytes that can be stored in this queue.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// assert_eq!(buffer.capacity(), 6);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub const fn capacity(&self) -> usize {
        N
    }
}

/// A structure representing a contiguous region of memory that
/// may be written to, and potentially "committed" to the queue.
///
/// NOTE: If the grant is dropped without explicitly committing
/// the contents, or without setting the number of bytes to
/// automatically be committed with `to_commit()`, then no bytes
/// will be committed for writing.
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without committing it takes a short critical section.
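///
/// A small sketch of the drop-without-commit behavior:
///
/// ```rust
/// # // bbqueue test shim!
/// # fn bbqtest() {
/// use bbqueue::BBBuffer;
///
/// let buffer: BBBuffer<6> = BBBuffer::new();
/// let (mut prod, mut cons) = buffer.try_split().unwrap();
///
/// // Write into a grant, but drop it without committing
/// let mut grant = prod.grant_exact(4).unwrap();
/// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
/// drop(grant);
///
/// // Nothing was committed, so there is nothing to read
/// assert!(cons.read().is_err());
/// # // bbqueue test shim!
/// # }
/// #
/// # fn main() {
/// # #[cfg(not(feature = "thumbv6"))]
/// # bbqtest();
/// # }
/// ```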
#[derive(Debug, PartialEq)]
pub struct GrantW<'a, const N: usize> {
    pub(crate) buf: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_commit: usize,
}

unsafe impl<'a, const N: usize> Send for GrantW<'a, N> {}

/// A structure representing a contiguous region of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
///
/// NOTE: If the grant is dropped without explicitly releasing
/// the contents, or without setting the number of bytes to automatically
/// be released with `to_release()`, then no bytes will be released
/// as read.
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without releasing it takes a short critical section.
#[derive(Debug, PartialEq)]
pub struct GrantR<'a, const N: usize> {
    pub(crate) buf: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_release: usize,
}

/// A structure representing up to two contiguous regions of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
#[derive(Debug, PartialEq)]
pub struct SplitGrantR<'a, const N: usize> {
    pub(crate) buf1: &'a mut [u8],
    pub(crate) buf2: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_release: usize,
}

unsafe impl<'a, const N: usize> Send for GrantR<'a, N> {}

unsafe impl<'a, const N: usize> Send for SplitGrantR<'a, N> {}

impl<'a, const N: usize> GrantW<'a, N> {
    /// Finalizes a writable grant given by `grant_exact()` or `grant_max_remaining()`.
    /// This makes the data available to be read via `read()`. This consumes
    /// the grant.
    ///
    /// If `used` is larger than the given grant, the maximum amount will
    /// be committed
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while committing.
    pub fn commit(mut self, used: usize) {
        self.commit_inner(used);
        forget(self);
    }

    /// Obtain access to the inner buffer for writing
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&mut self) -> &mut [u8] {
        self.buf
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// You MUST guarantee that the reference returned here never outlives
    /// the grant itself. Once the grant has been released, referencing the data
    /// contained within WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that no other reference to this data is
    /// created, e.g. through `DerefMut` or the `buf()` method of this grant.
    pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] {
        transmute::<&mut [u8], &'static mut [u8]>(self.buf)
    }

    #[inline(always)]
    pub(crate) fn commit_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.write_in_progress.load(Acquire) {
            return;
        }

        // Writer component. Must never write to READ,
        // be careful writing to LAST

        // Saturate the grant commit
        let len = self.buf.len();
        let used = min(len, used);

        let write = inner.write.load(Acquire);
        atomic::fetch_sub(&inner.reserve, len - used, AcqRel);

        let max = N;
        let last = inner.last.load(Acquire);
        let new_write = inner.reserve.load(Acquire);

        if (new_write < write) && (write != max) {
            // We have already wrapped, but we are skipping some bytes at the end of the ring.
            // Mark `last` where the write pointer used to be to hold the line here
            inner.last.store(write, Release);
        } else if new_write > last {
            // We're about to pass the last pointer, which was previously the artificial
            // end of the ring. Now that we've passed it, we can "unlock" the section
            // that was previously skipped.
            //
            // Since new_write is strictly larger than last, it is safe to move this as
            // the other thread will still be halted by the (about to be updated) write
            // value
            inner.last.store(max, Release);
        }
        // else: If new_write == last, either:
        // * last == max, so no need to write, OR
        // * If we write in the end chunk again, we'll update last to max next time
        // * If we write to the start chunk in a wrap, we'll update last when we
        //     move write backwards

        // Write must be updated AFTER last, otherwise read could think it was
        // time to invert early!
        inner.write.store(new_write, Release);

        // Allow subsequent grants
        inner.write_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be committed on drop.
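    ///
    /// A short sketch of committing on drop (compare with the explicit
    /// `commit()` calls in the examples above):
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.to_commit(4);
    ///
    /// // The four bytes are committed when the grant is dropped
    /// drop(grant);
    /// assert_eq!(cons.read().unwrap().buf(), &[1, 2, 3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```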
    pub fn to_commit(&mut self, amt: usize) {
        self.to_commit = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> GrantR<'a, N> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.buf.len(), used);

        self.release_inner(used);
        forget(self);
    }

    pub(crate) fn shrink(&mut self, len: usize) {
        let mut new_buf: &mut [u8] = &mut [];
        core::mem::swap(&mut self.buf, &mut new_buf);
        let (new, _) = new_buf.split_at_mut(len);
        self.buf = new;
    }

    /// Obtain access to the inner buffer for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant, and copy to a buffer
    /// let mut grant = cons.read().unwrap();
    /// let mut buf = [0u8; 4];
    /// buf.copy_from_slice(grant.buf());
    /// assert_eq!(&buf, &[1, 2, 3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&self) -> &[u8] {
        self.buf
    }

    /// Obtain mutable access to the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
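    ///
    /// A sketch of an in-place transform (here a simple XOR standing in for
    /// real decryption):
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[0x10, 0x11, 0x12, 0x13]);
    /// grant.commit(4);
    ///
    /// // "Decrypt" the payload in place before handing it on
    /// let mut grant = cons.read().unwrap();
    /// for b in grant.buf_mut() {
    ///     *b ^= 0x10;
    /// }
    /// assert_eq!(grant.buf(), &[0x00, 0x01, 0x02, 0x03]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```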
    pub fn buf_mut(&mut self) -> &mut [u8] {
        self.buf
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// You MUST guarantee that the reference returned here never outlives
    /// the grant itself. Once the grant has been released, referencing the data
    /// contained within WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that no other reference to this data is
    /// created, e.g. through `Deref` or the `buf()` method of this grant.
    pub unsafe fn as_static_buf(&self) -> &'static [u8] {
        transmute::<&[u8], &'static [u8]>(self.buf)
    }

    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.buf.len());

        // This should be fine, purely incrementing
        let _ = atomic::fetch_add(&inner.read, used, Release);

        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
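    ///
    /// A short sketch of releasing on drop:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Release the first two bytes when the grant is dropped
    /// let mut grant = cons.read().unwrap();
    /// grant.to_release(2);
    /// drop(grant);
    ///
    /// // The remaining two bytes are still readable
    /// assert_eq!(cons.read().unwrap().buf(), &[3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```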
    pub fn to_release(&mut self, amt: usize) {
        self.to_release = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> SplitGrantR<'a, N> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.combined_len(), used);

        self.release_inner(used);
        forget(self);
    }

    /// Obtain access to both inner buffers for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a split read grant; the data did not wrap around,
    /// // so only the first buffer is filled
    /// let grant = cons.split_read().unwrap();
    /// let (first, second) = grant.bufs();
    /// assert_eq!(first, &[1, 2, 3, 4]);
    /// assert!(second.is_empty());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn bufs(&self) -> (&[u8], &[u8]) {
        (self.buf1, self.buf2)
    }

    /// Obtain mutable access to both parts of the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
    pub fn bufs_mut(&mut self) -> (&mut [u8], &mut [u8]) {
        (self.buf1, self.buf2)
    }

    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.combined_len());

        if used <= self.buf1.len() {
            // This should be fine, purely incrementing
            let _ = atomic::fetch_add(&inner.read, used, Release);
        } else {
            // Also release parts of the second buffer
            inner.read.store(used - self.buf1.len(), Release);
        }

        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
    pub fn to_release(&mut self, amt: usize) {
        self.to_release = self.combined_len().min(amt);
    }

    /// The combined length of both buffers
    pub fn combined_len(&self) -> usize {
        self.buf1.len() + self.buf2.len()
    }
}

impl<'a, const N: usize> Drop for GrantW<'a, N> {
    fn drop(&mut self) {
        self.commit_inner(self.to_commit)
    }
}

impl<'a, const N: usize> Drop for GrantR<'a, N> {
    fn drop(&mut self) {
        self.release_inner(self.to_release)
    }
}

impl<'a, const N: usize> Drop for SplitGrantR<'a, N> {
    fn drop(&mut self) {
        self.release_inner(self.to_release)
    }
}

impl<'a, const N: usize> Deref for GrantW<'a, N> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buf
    }
}

impl<'a, const N: usize> DerefMut for GrantW<'a, N> {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.buf
    }
}

impl<'a, const N: usize> Deref for GrantR<'a, N> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buf
    }
}

impl<'a, const N: usize> DerefMut for GrantR<'a, N> {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.buf
    }
}

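/// Helpers that emulate atomic read-modify-write operations for `thumbv6`
/// targets (ARMv6-M cores such as the Cortex-M0), which only support atomic
/// loads and stores. Each helper performs the load and store inside a short
/// `cortex_m` interrupt-free critical section, making it atomic with respect
/// to interrupts on a single core.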
#[cfg(feature = "thumbv6")]
mod atomic {
    use core::sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{self, Acquire, Release},
    };
    use cortex_m::interrupt::free;

    #[inline(always)]
    pub fn fetch_add(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_add(val), Release);
            prev
        })
    }

    #[inline(always)]
    pub fn fetch_sub(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_sub(val), Release);
            prev
        })
    }

    #[inline(always)]
    pub fn swap(atomic: &AtomicBool, val: bool, _order: Ordering) -> bool {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(val, Release);
            prev
        })
    }
}

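/// Thin pass-throughs to the native atomic read-modify-write operations, used
/// on all targets that support them.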
#[cfg(not(feature = "thumbv6"))]
mod atomic {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

    #[inline(always)]
    pub fn fetch_add(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize {
        atomic.fetch_add(val, order)
    }

    #[inline(always)]
    pub fn fetch_sub(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize {
        atomic.fetch_sub(val, order)
    }

    #[inline(always)]
    pub fn swap(atomic: &AtomicBool, val: bool, order: Ordering) -> bool {
        atomic.swap(val, order)
    }
}