bbqueue_ng/bbbuffer.rs

use crate::{
    framed::{FrameConsumer, FrameProducer},
    Error, Result,
};
use core::{
    cell::UnsafeCell,
    cmp::min,
    marker::PhantomData,
    mem::{forget, transmute, MaybeUninit},
    ops::{Deref, DerefMut},
    ptr::NonNull,
    result::Result as CoreResult,
    slice::from_raw_parts_mut,
    sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{AcqRel, Acquire, Release},
    },
};

/// A backing structure for a BBQueue. Can be used to create either
/// a BBQueue or a split Producer/Consumer pair
pub struct BBBuffer<const N: usize> {
    buf: UnsafeCell<MaybeUninit<[u8; N]>>,

    /// Where the next byte will be written
    write: AtomicUsize,

    /// Where the next byte will be read from
    read: AtomicUsize,

    /// Used in the inverted case to mark the end of the
    /// readable streak. Otherwise will == sizeof::<self.buf>().
    /// Writer is responsible for placing this at the correct
    /// place when entering an inverted condition, and Reader
    /// is responsible for moving it back to sizeof::<self.buf>()
    /// when exiting the inverted condition
    last: AtomicUsize,

    /// Used by the Writer to remember what bytes are currently
    /// allowed to be written to, but are not yet ready to be
    /// read from
    reserve: AtomicUsize,

    /// Is there an active read grant?
    read_in_progress: AtomicBool,

    /// Is there an active write grant?
    write_in_progress: AtomicBool,

    /// Have we already split?
    already_split: AtomicBool,
}

unsafe impl<const A: usize> Sync for BBBuffer<{ A }> {}

impl<'a, const N: usize> BBBuffer<{ N }> {
    /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the
    /// buffer. If the buffer has already been split, an error will be returned.
    ///
    /// NOTE: When splitting, the underlying buffer will be explicitly initialized
    /// to zero. This may take a measurable amount of time, depending on the size
    /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
    /// is placed at `static` scope within the `.bss` region, the explicit initialization
    /// will be elided (as it is already performed as part of memory initialization)
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical section
    /// while splitting.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split().is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn try_split(&'a self) -> Result<(Producer<'a, { N }>, Consumer<'a, { N }>)> {
        if atomic::swap(&self.already_split, true, AcqRel) {
            return Err(Error::AlreadySplit);
        }

        unsafe {
            // Explicitly zero the data to avoid undefined behavior.
            // This is required because we hand out references to the buffer,
            // which means that creating those references from uninitialized
            // memory would technically be UB for now.
            //
            // NOTE: `write_bytes(0u8, 1)` writes one whole `[u8; N]`, i.e. it
            // zeroes all `N` bytes of the backing storage.
            let mu_ptr = self.buf.get();
            (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1);

            let nn1 = NonNull::new_unchecked(self as *const _ as *mut _);
            let nn2 = NonNull::new_unchecked(self as *const _ as *mut _);

            Ok((
                Producer {
                    bbq: nn1,
                    pd: PhantomData,
                },
                Consumer {
                    bbq: nn2,
                    pd: PhantomData,
                },
            ))
        }
    }

    /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves
    /// to gain access to the buffer. If the buffer has already been split, an error
    /// will be returned.
    ///
    /// NOTE: When splitting, the underlying buffer will be explicitly initialized
    /// to zero. This may take a measurable amount of time, depending on the size
    /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
    /// is placed at `static` scope within the `.bss` region, the explicit initialization
    /// will be elided (as it is already performed as part of memory initialization)
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while splitting.
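    ///
    /// A minimal usage sketch, following the doc-test shim pattern used by the
    /// other examples in this file:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer in framed mode
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split_framed().is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```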
    pub fn try_split_framed(
        &'a self,
    ) -> Result<(FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> {
        let (producer, consumer) = self.try_split()?;
        Ok((FrameProducer { producer }, FrameConsumer { consumer }))
    }

    /// Attempt to release the Producer and Consumer
    ///
    /// This re-initializes the buffer so it may be split in a different mode at a later
    /// time. There must be no read or write grants active, or an error will be returned.
    ///
    /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will
    /// be returned.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Not possible to split twice
    /// assert!(buffer.try_split().is_err());
    ///
    /// // Release the producer and consumer
    /// assert!(buffer.try_release(prod, cons).is_ok());
    ///
    /// // Split the buffer in framed mode
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn try_release(
        &'a self,
        prod: Producer<'a, { N }>,
        cons: Consumer<'a, { N }>,
    ) -> CoreResult<(), (Producer<'a, { N }>, Consumer<'a, { N }>)> {
        // Note: Re-entrancy is not possible because we require ownership
        // of the producer and consumer, which are not cloneable. We also
        // can assume the buffer has been split, because the only way to
        // obtain a Producer/Consumer pair is through a successful split.

        // Are these our producers and consumers?
        let our_prod = prod.bbq.as_ptr() as *const Self == self;
        let our_cons = cons.bbq.as_ptr() as *const Self == self;

        if !(our_prod && our_cons) {
            // Can't release, not our producer and consumer
            return Err((prod, cons));
        }

        let wr_in_progress = self.write_in_progress.load(Acquire);
        let rd_in_progress = self.read_in_progress.load(Acquire);

        if wr_in_progress || rd_in_progress {
            // Can't release, active grant(s) in progress
            return Err((prod, cons));
        }

        // Drop the producer and consumer halves
        drop(prod);
        drop(cons);

        // Re-initialize the buffer (not totally needed, but nice to do)
        self.write.store(0, Release);
        self.read.store(0, Release);
        self.reserve.store(0, Release);
        self.last.store(0, Release);

        // Mark the buffer as ready to be split
        self.already_split.store(false, Release);

        Ok(())
    }

    /// Attempt to release the Producer and Consumer in Framed mode
    ///
    /// This re-initializes the buffer so it may be split in a different mode at a later
    /// time. There must be no read or write grants active, or an error will be returned.
    ///
    /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error
    /// will be returned.
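    ///
    /// A minimal usage sketch, mirroring the `try_release` example above:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer in framed mode
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
    ///
    /// // Release the framed producer and consumer
    /// assert!(buffer.try_release_framed(fprod, fcons).is_ok());
    ///
    /// // The buffer may now be split again, e.g. in non-framed mode
    /// let (prod, cons) = buffer.try_split().unwrap();
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```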
    pub fn try_release_framed(
        &'a self,
        prod: FrameProducer<'a, { N }>,
        cons: FrameConsumer<'a, { N }>,
    ) -> CoreResult<(), (FrameProducer<'a, { N }>, FrameConsumer<'a, { N }>)> {
        self.try_release(prod.producer, cons.consumer)
            .map_err(|(producer, consumer)| {
                // Restore the wrapper types
                (FrameProducer { producer }, FrameConsumer { consumer })
            })
    }
}

impl<const A: usize> BBBuffer<{ A }> {
    /// Create a new `BBBuffer`.
    ///
    /// NOTE: This is a `const fn`, so it may also be used to create a
    /// `BBBuffer` at `static` scope.
    ///
    /// ```rust,no_run
    /// use bbqueue_ng::BBBuffer;
    ///
    /// static BUF: BBBuffer<6> = BBBuffer::new();
    ///
    /// fn main() {
    ///     let (prod, cons) = BUF.try_split().unwrap();
    /// }
    /// ```
    pub const fn new() -> Self {
        Self {
            // This will not be initialized until we split the buffer
            buf: UnsafeCell::new(MaybeUninit::uninit()),

            // Owned by the writer
            write: AtomicUsize::new(0),

            // Owned by the reader
            read: AtomicUsize::new(0),

            // Cooperatively owned
            //
            // NOTE: This should generally be initialized as size_of::<self.buf>(), however
            // this would prevent the structure from being entirely zero-initialized,
            // and can cause the .data section to be much larger than necessary. By
            // forcing the `last` pointer to be zero initially, we place the structure
            // in an "inverted" condition, which will be resolved on the first committed
            // bytes that are written to the structure.
            //
            // When read == last == write, no bytes will be allowed to be read (good), but
            // write grants can be given out (also good).
            last: AtomicUsize::new(0),

            // Owned by the Writer, "private"
            reserve: AtomicUsize::new(0),

            // Owned by the Reader, "private"
            read_in_progress: AtomicBool::new(false),

            // Owned by the Writer, "private"
            write_in_progress: AtomicBool::new(false),

            // We haven't split at the start
            already_split: AtomicBool::new(false),
        }
    }
}

/// `Producer` is the primary interface for pushing data into a `BBBuffer`.
/// There are various methods for obtaining a grant to write to the buffer, with
/// different potential tradeoffs. As all grants are required to be a contiguous
/// range of data, different strategies are sometimes useful when making the decision
/// between maximizing usage of the buffer, and ensuring a given grant is successful.
///
/// As a short summary of currently possible grants:
///
/// * `grant_exact(N)`
///   * User will receive a grant `sz == N` (or receive an error)
///   * This may cause a wraparound if a grant of size N is not available
///       at the end of the ring.
///   * If this grant caused a wraparound, the bytes that were "skipped" at the
///       end of the ring will not be available until the reader reaches them,
///       regardless of whether the grant committed any data or not.
///   * Maximum possible waste due to skipping: `N - 1` bytes
/// * `grant_max_remaining(N)`
///   * User will receive a grant `0 < sz <= N` (or receive an error)
///   * This will only cause a wrap to the beginning of the ring if exactly
///       zero bytes are available at the end of the ring.
///   * Maximum possible waste due to skipping: 0 bytes
///
/// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a
/// discussion of grant methods that could be added in the future.
pub struct Producer<'a, const N: usize> {
    bbq: NonNull<BBBuffer<N>>,
    pd: PhantomData<&'a ()>,
}

unsafe impl<'a, const N: usize> Send for Producer<'a, { N }> {}

impl<'a, const N: usize> Producer<'a, { N }> {
    /// Request a writable, contiguous section of memory of exactly
    /// `sz` bytes. If the buffer size requested is not available,
    /// an error will be returned.
    ///
    /// This method may cause the buffer to wrap around early if the
    /// requested space is not available at the end of the buffer, but
    /// is available at the beginning
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Try to obtain a grant of three bytes
    /// assert!(prod.grant_exact(3).is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_exact(&mut self, sz: usize) -> Result<GrantW<'a, { N }>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = N;
        let already_inverted = write < read;

        let start = if already_inverted {
            if (write + sz) < read {
                // Inverted, room is still available
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            if write + sz <= max {
                // Non inverted condition
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check sz < read, NOT <=, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if sz < read {
                    // Invertible situation
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell` and `MaybeUninit` are both
        // `#[repr(transparent)]`, so the inner pointer is also a valid
        // pointer to the start of the byte buffer
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }

    /// Request a writable, contiguous section of memory of up to
    /// `sz` bytes. If a buffer of size `sz` is not available without
    /// wrapping, but some space (0 < available < sz) is available without
    /// wrapping, then a grant will be given for the remaining size at the
    /// end of the buffer. If no space is available for writing, an error
    /// will be returned.
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Release the four initially committed bytes
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.release(4);
    ///
    /// // Try to obtain a grant of three bytes, get two bytes
    /// let mut grant = prod.grant_max_remaining(3).unwrap();
    /// assert_eq!(grant.buf().len(), 2);
    /// grant.commit(2);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result<GrantW<'a, { N }>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = N;

        let already_inverted = write < read;

        let start = if already_inverted {
            // In inverted case, read is always > write
            let remain = read - write - 1;

            if remain != 0 {
                sz = min(remain, sz);
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            if write != max {
                // Some (or all) room remaining in un-inverted case
                sz = min(max - write, sz);
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check read > 1, NOT read >= 1, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if read > 1 {
                    sz = min(read - 1, sz);
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell` and `MaybeUninit` are both
        // `#[repr(transparent)]`, so the inner pointer is also a valid
        // pointer to the start of the byte buffer
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(start as isize), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }
}

/// `Consumer` is the primary interface for reading data from a `BBBuffer`.
pub struct Consumer<'a, const N: usize> {
    bbq: NonNull<BBBuffer<N>>,
    pd: PhantomData<&'a ()>,
}

unsafe impl<'a, const N: usize> Send for Consumer<'a, { N }> {}

impl<'a, const N: usize> Consumer<'a, { N }> {
    /// Obtains a contiguous slice of committed bytes. This slice may not
    /// contain ALL available bytes, if the writer has wrapped around. The
    /// remaining bytes will be available after all readable bytes are
    /// released
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn read(&mut self) -> Result<GrantR<'a, { N }>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //     no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        let sz = if write < read {
            // Inverted, only believe last
            last
        } else {
            // Not inverted, only believe write
            write
        } - read;

        if sz == 0 {
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell` and `MaybeUninit` are both
        // `#[repr(transparent)]`, so the inner pointer is also a valid
        // pointer to the start of the byte buffer
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz) };

        Ok(GrantR {
            buf: grant_slice,
            bbq: self.bbq,
            to_release: 0,
        })
    }

    /// Obtains two disjoint slices of committed bytes, each of which is
    /// contiguous. Combined, they contain all previously committed data.
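    ///
    /// The sketch below (a doc test following the shim pattern used elsewhere
    /// in this file) forces the committed data to wrap around the end of the
    /// ring, so that both slices are non-empty:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Commit 4 bytes, then release the first 2
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    /// let grant = cons.read().unwrap();
    /// grant.release(2);
    ///
    /// // Fill the end of the ring, then wrap around to the start
    /// let mut grant = prod.grant_exact(2).unwrap();
    /// grant.buf().copy_from_slice(&[5, 6]);
    /// grant.commit(2);
    /// let mut grant = prod.grant_exact(1).unwrap();
    /// grant.buf().copy_from_slice(&[7]);
    /// grant.commit(1);
    ///
    /// // The committed data is now split across the wrap point
    /// let grant = cons.split_read().unwrap();
    /// let (first, second) = grant.bufs();
    /// assert_eq!(first, &[3, 4, 5, 6]);
    /// assert_eq!(second, &[7]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```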
    pub fn split_read(&mut self) -> Result<SplitGrantR<'a, { N }>> {
        let inner = unsafe { &self.bbq.as_ref() };

        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //     no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        let (sz1, sz2) = if write < read {
            // Inverted, only believe last
            (last - read, write)
        } else {
            // Not inverted, only believe write
            (write - read, 0)
        };

        if sz1 == 0 {
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell` and `MaybeUninit` are both
        // `#[repr(transparent)]`, so the inner pointer is also a valid
        // pointer to the start of the byte buffer
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice1 =
            unsafe { from_raw_parts_mut(start_of_buf_ptr.offset(read as isize), sz1) };
        let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) };

        Ok(SplitGrantR {
            buf1: grant_slice1,
            buf2: grant_slice2,
            bbq: self.bbq,
            to_release: 0,
        })
    }
}

impl<const N: usize> BBBuffer<{ N }> {
    /// Returns the size of the backing storage.
    ///
    /// This is the maximum number of bytes that can be stored in this queue.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// assert_eq!(buffer.capacity(), 6);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn capacity(&self) -> usize {
        N
    }
}

/// A structure representing a contiguous region of memory that
/// may be written to, and potentially "committed" to the queue.
///
/// NOTE: If the grant is dropped without explicitly committing
/// the contents, or without setting the number of bytes to
/// automatically be committed with `to_commit()`, then no bytes
/// will be committed for writing.
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without committing it takes a short critical section.
#[derive(Debug, PartialEq)]
pub struct GrantW<'a, const N: usize> {
    pub(crate) buf: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_commit: usize,
}

unsafe impl<'a, const N: usize> Send for GrantW<'a, { N }> {}


/// A structure representing a contiguous region of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
///
/// NOTE: If the grant is dropped without explicitly releasing
/// the contents, or without setting the number of bytes to
/// automatically be released with `to_release()`, then no bytes
/// will be released as read.
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without releasing it takes a short critical section.
#[derive(Debug, PartialEq)]
pub struct GrantR<'a, const N: usize> {
    pub(crate) buf: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_release: usize,
}

/// A structure representing up to two contiguous regions of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
#[derive(Debug, PartialEq)]
pub struct SplitGrantR<'a, const N: usize> {
    pub(crate) buf1: &'a mut [u8],
    pub(crate) buf2: &'a mut [u8],
    bbq: NonNull<BBBuffer<N>>,
    pub(crate) to_release: usize,
}

unsafe impl<'a, const N: usize> Send for GrantR<'a, { N }> {}

unsafe impl<'a, const N: usize> Send for SplitGrantR<'a, { N }> {}

impl<'a, const N: usize> GrantW<'a, { N }> {
    /// Finalizes a writable grant given by `grant_exact()` or `grant_max_remaining()`.
    /// This makes the data available to be read via `read()`. This consumes
    /// the grant.
    ///
    /// If `used` is larger than the given grant, the maximum amount will
    /// be committed
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while committing.
    pub fn commit(mut self, used: usize) {
        self.commit_inner(used);
        forget(self);
    }

    /// Obtain access to the inner buffer for writing
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&mut self) -> &mut [u8] {
        self.buf
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// You MUST guarantee that in no cases, the reference that is returned here outlives
    /// the grant itself. Once the grant has been committed or dropped, referencing the
    /// data contained WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that a separate reference to this data is not
    /// created, e.g. using `DerefMut` or the `buf()` method of this grant.
    pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] {
        transmute::<&mut [u8], &'static mut [u8]>(self.buf)
    }

    #[inline(always)]
    pub(crate) fn commit_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.write_in_progress.load(Acquire) {
            return;
        }

        // Writer component. Must never write to READ,
        // be careful writing to LAST

        // Saturate the grant commit
        let len = self.buf.len();
        let used = min(len, used);

        let write = inner.write.load(Acquire);
        atomic::fetch_sub(&inner.reserve, len - used, AcqRel);

        let max = N;
        let last = inner.last.load(Acquire);
        let new_write = inner.reserve.load(Acquire);

        if (new_write < write) && (write != max) {
            // We have already wrapped, but we are skipping some bytes at the end of the ring.
            // Mark `last` where the write pointer used to be to hold the line here
            inner.last.store(write, Release);
        } else if new_write > last {
            // We're about to pass the last pointer, which was previously the artificial
            // end of the ring. Now that we've passed it, we can "unlock" the section
            // that was previously skipped.
            //
            // Since new_write is strictly larger than last, it is safe to move this as
            // the other thread will still be halted by the (about to be updated) write
            // value
            inner.last.store(max, Release);
        }
        // else: If new_write == last, either:
        // * last == max, so no need to write, OR
        // * If we write in the end chunk again, we'll update last to max next time
        // * If we write to the start chunk in a wrap, we'll update last when we
        //     move write backwards

        // Write must be updated AFTER last, otherwise read could think it was
        // time to invert early!
        inner.write.store(new_write, Release);

        // Allow subsequent grants
        inner.write_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be committed on drop.
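    ///
    /// A minimal sketch (a doc test following the shim pattern used elsewhere
    /// in this file) showing that the configured amount is committed when the
    /// grant is dropped:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Instead of calling `commit(4)`, mark the bytes to be
    /// // committed when the grant goes out of scope
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.to_commit(4);
    /// drop(grant);
    ///
    /// // The four bytes are now readable
    /// let grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```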
    pub fn to_commit(&mut self, amt: usize) {
        self.to_commit = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> GrantR<'a, { N }> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.buf.len(), used);

        self.release_inner(used);
        forget(self);
    }

    // Shrink the readable grant to `len` bytes, discarding the tail of the slice
    pub(crate) fn shrink(&mut self, len: usize) {
        // Swap the slice out of `self` first, so it can be split without
        // fighting the borrow checker
        let mut new_buf: &mut [u8] = &mut [];
        core::mem::swap(&mut self.buf, &mut new_buf);
        let (new, _) = new_buf.split_at_mut(len);
        self.buf = new;
    }

    /// Obtain access to the inner buffer for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant, and copy to a buffer
    /// let mut grant = cons.read().unwrap();
    /// let mut buf = [0u8; 4];
    /// buf.copy_from_slice(grant.buf());
    /// assert_eq!(&buf, &[1, 2, 3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&self) -> &[u8] {
        self.buf
    }

    /// Obtain mutable access to the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
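    ///
    /// A minimal sketch (a doc test following the shim pattern used elsewhere
    /// in this file), transforming the granted bytes in place; the XOR stands
    /// in for a real operation such as decryption:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Commit four bytes
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Transform the bytes in place before releasing them
    /// let mut grant = cons.read().unwrap();
    /// for byte in grant.buf_mut() {
    ///     *byte ^= 0xFF;
    /// }
    /// assert_eq!(grant.buf(), &[254, 253, 252, 251]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```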
    pub fn buf_mut(&mut self) -> &mut [u8] {
        self.buf
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// You MUST guarantee that in no cases, the reference that is returned here outlives
    /// the grant itself. Once the grant has been released, referencing the data contained
    /// WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that a separate reference to this data is not
    /// created, e.g. using `Deref` or the `buf()` method of this grant.
    pub unsafe fn as_static_buf(&self) -> &'static [u8] {
        transmute::<&[u8], &'static [u8]>(self.buf)
    }

    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.buf.len());

        // This should be fine, purely incrementing
        let _ = atomic::fetch_add(&inner.read, used, Release);

        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
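    ///
    /// A minimal sketch (a doc test following the shim pattern used elsewhere
    /// in this file) showing that the configured amount is released when the
    /// grant is dropped:
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    /// prod.grant_exact(4).unwrap().commit(4);
    ///
    /// // Instead of calling `release(2)`, mark two bytes to be
    /// // released when the grant goes out of scope
    /// let mut grant = cons.read().unwrap();
    /// grant.to_release(2);
    /// drop(grant);
    ///
    /// // Two of the four bytes remain readable
    /// let grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 2);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```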
    pub fn to_release(&mut self, amt: usize) {
        self.to_release = self.buf.len().min(amt);
    }
}

impl<'a, const N: usize> SplitGrantR<'a, { N }> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.combined_len(), used);

        self.release_inner(used);
        forget(self);
    }

    /// Obtain access to both inner buffers for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue_ng::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer: BBBuffer<6> = BBBuffer::new();
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a split read grant. Since the data has not wrapped,
    /// // all four bytes are in the first slice
    /// let grant = cons.split_read().unwrap();
    /// let (first, second) = grant.bufs();
    /// assert_eq!(first, &[1, 2, 3, 4]);
    /// assert_eq!(second.len(), 0);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn bufs(&self) -> (&[u8], &[u8]) {
        (self.buf1, self.buf2)
    }

    /// Obtain mutable access to both parts of the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
    pub fn bufs_mut(&mut self) -> (&mut [u8], &mut [u8]) {
        (self.buf1, self.buf2)
    }

    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.combined_len());

        if used <= self.buf1.len() {
            // This should be fine, purely incrementing
            let _ = atomic::fetch_add(&inner.read, used, Release);
        } else {
            // Also release parts of the second buffer, by moving `read`
            // back to the front of the ring
            inner.read.store(used - self.buf1.len(), Release);
        }

        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
    pub fn to_release(&mut self, amt: usize) {
        self.to_release = self.combined_len().min(amt);
    }

    /// The combined length of both buffers
    pub fn combined_len(&self) -> usize {
        self.buf1.len() + self.buf2.len()
    }
}

impl<'a, const N: usize> Drop for GrantW<'a, N> {
    fn drop(&mut self) {
        self.commit_inner(self.to_commit)
    }
}

impl<'a, const N: usize> Drop for GrantR<'a, N> {
    fn drop(&mut self) {
        self.release_inner(self.to_release)
    }
}

impl<'a, const N: usize> Deref for GrantW<'a, N> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buf
    }
}

impl<'a, const N: usize> DerefMut for GrantW<'a, N> {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.buf
    }
}

impl<'a, const N: usize> Deref for GrantR<'a, N> {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.buf
    }
}

impl<'a, const N: usize> DerefMut for GrantR<'a, N> {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.buf
    }
}

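// On `thumbv6` (ARMv6-M) targets there are no atomic read-modify-write
// instructions, so `fetch_add`/`fetch_sub`/`swap` are emulated below with a
// plain load + store inside a `cortex_m` critical section (interrupts
// disabled). On all other targets, the native atomic RMW operations are used.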
#[cfg(feature = "thumbv6")]
mod atomic {
    use core::sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{self, Acquire, Release},
    };
    use cortex_m::interrupt::free;

    #[inline(always)]
    pub fn fetch_add(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_add(val), Release);
            prev
        })
    }

    #[inline(always)]
    pub fn fetch_sub(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_sub(val), Release);
            prev
        })
    }

    #[inline(always)]
    pub fn swap(atomic: &AtomicBool, val: bool, _order: Ordering) -> bool {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(val, Release);
            prev
        })
    }
}

#[cfg(not(feature = "thumbv6"))]
mod atomic {
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

    #[inline(always)]
    pub fn fetch_add(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize {
        atomic.fetch_add(val, order)
    }

    #[inline(always)]
    pub fn fetch_sub(atomic: &AtomicUsize, val: usize, order: Ordering) -> usize {
        atomic.fetch_sub(val, order)
    }

    #[inline(always)]
    pub fn swap(atomic: &AtomicBool, val: bool, order: Ordering) -> bool {
        atomic.swap(val, order)
    }
}