// bbqueue_heap/bbbuffer.rs
1use crate::{
2 framed::{FrameConsumer, FrameProducer},
3 Error, Result,
4};
5use core::{
6 cell::UnsafeCell,
7 cmp::min,
8 marker::PhantomData,
9 mem::{forget, transmute},
10 ops::{Deref, DerefMut},
11 ptr::NonNull,
12 result::Result as CoreResult,
13 slice::from_raw_parts_mut,
14 sync::atomic::{
15 AtomicBool, AtomicUsize,
16 Ordering::{AcqRel, Acquire, Release},
17 },
18};
19
#[derive(Debug)]
/// A backing structure for a BBQueue. Can be used to create either
/// a BBQueue or a split Producer/Consumer pair
pub struct BBBuffer {
    // Heap-allocated backing storage. Wrapped in an `UnsafeCell` because
    // the producer and consumer halves mutate it concurrently through
    // raw pointers obtained from `get()`.
    buf: Box<UnsafeCell<[u8]>>,

    /// The size of the bbbuffer.
    /// We prefer to use this one than the one from
    /// the Box as the Box is accessed concurrently.
    capacity: usize,

    /// Where the next byte will be written
    write: AtomicUsize,

    /// Where the next byte will be read from
    read: AtomicUsize,

    /// Used in the inverted case to mark the end of the
    /// readable streak. Otherwise will == the buffer capacity.
    /// Writer is responsible for placing this at the correct
    /// place when entering an inverted condition, and Reader
    /// is responsible for moving it back to the buffer capacity
    /// when exiting the inverted condition
    last: AtomicUsize,

    /// Used by the Writer to remember what bytes are currently
    /// allowed to be written to, but are not yet ready to be
    /// read from
    reserve: AtomicUsize,

    /// Is there an active read grant?
    read_in_progress: AtomicBool,

    /// Is there an active write grant?
    write_in_progress: AtomicBool,

    /// Have we already split?
    already_split: AtomicBool,
}
59
// SAFETY: all cross-thread state (`write`, `read`, `last`, `reserve`, and the
// in-progress/split flags) is accessed through atomics, and the byte regions
// handed out to the producer and consumer halves never overlap.
unsafe impl Sync for BBBuffer {}
61
62impl<'a> BBBuffer {
63 /// Attempt to split the `BBBuffer` into `Consumer` and `Producer` halves to gain access to the
64 /// buffer. If buffer has already been split, an error will be returned.
65 ///
66 /// NOTE: When splitting, the underlying buffer will be explicitly initialized
67 /// to zero. This may take a measurable amount of time, depending on the size
68 /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
69 /// is placed at `static` scope within the `.bss` region, the explicit initialization
70 /// will be elided (as it is already performed as part of memory initialization)
71 ///
72 /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical section
73 /// while splitting.
74 ///
75 /// ```rust
76 /// # // bbqueue test shim!
77 /// # fn bbqtest() {
78 /// use bbqueue::BBBuffer;
79 ///
80 /// // Create and split a new buffer
81 /// let buffer: BBBuffer<6> = BBBuffer::new();
82 /// let (prod, cons) = buffer.try_split().unwrap();
83 ///
84 /// // Not possible to split twice
85 /// assert!(buffer.try_split().is_err());
86 /// # // bbqueue test shim!
87 /// # }
88 /// #
89 /// # fn main() {
90 /// # #[cfg(not(feature = "thumbv6"))]
91 /// # bbqtest();
92 /// # }
93 /// ```
94 pub fn try_split(&'a self) -> Result<(Producer<'a>, Consumer<'a>)> {
95 if atomic::swap(&self.already_split, true, AcqRel) {
96 return Err(Error::AlreadySplit);
97 }
98
99 unsafe {
100 // Explicitly zero the data to avoid undefined behavior.
101 // This is required, because we hand out references to the buffers,
102 // which mean that creating them as references is technically UB for now
103 let mu_ptr = self.buf.get();
104 (*mu_ptr).as_mut_ptr().write_bytes(0u8, 1);
105
106 let nn1 = NonNull::new_unchecked(self as *const _ as *mut _);
107 let nn2 = NonNull::new_unchecked(self as *const _ as *mut _);
108
109 Ok((
110 Producer {
111 bbq: nn1,
112 pd: PhantomData,
113 },
114 Consumer {
115 bbq: nn2,
116 pd: PhantomData,
117 },
118 ))
119 }
120 }
121
122 /// Attempt to split the `BBBuffer` into `FrameConsumer` and `FrameProducer` halves
123 /// to gain access to the buffer. If buffer has already been split, an error
124 /// will be returned.
125 ///
126 /// NOTE: When splitting, the underlying buffer will be explicitly initialized
127 /// to zero. This may take a measurable amount of time, depending on the size
128 /// of the buffer. This is necessary to prevent undefined behavior. If the buffer
129 /// is placed at `static` scope within the `.bss` region, the explicit initialization
130 /// will be elided (as it is already performed as part of memory initialization)
131 ///
132 /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
133 /// section while splitting.
134 pub fn try_split_framed(&'a self) -> Result<(FrameProducer<'a>, FrameConsumer<'a>)> {
135 let (producer, consumer) = self.try_split()?;
136 Ok((FrameProducer { producer }, FrameConsumer { consumer }))
137 }
138
139 /// Attempt to release the Producer and Consumer
140 ///
141 /// This re-initializes the buffer so it may be split in a different mode at a later
142 /// time. There must be no read or write grants active, or an error will be returned.
143 ///
144 /// The `Producer` and `Consumer` must be from THIS `BBBuffer`, or an error will
145 /// be returned.
146 ///
147 /// ```rust
148 /// # // bbqueue test shim!
149 /// # fn bbqtest() {
150 /// use bbqueue::BBBuffer;
151 ///
152 /// // Create and split a new buffer
153 /// let buffer: BBBuffer<6> = BBBuffer::new();
154 /// let (prod, cons) = buffer.try_split().unwrap();
155 ///
156 /// // Not possible to split twice
157 /// assert!(buffer.try_split().is_err());
158 ///
159 /// // Release the producer and consumer
160 /// assert!(buffer.try_release(prod, cons).is_ok());
161 ///
162 /// // Split the buffer in framed mode
163 /// let (fprod, fcons) = buffer.try_split_framed().unwrap();
164 /// # // bbqueue test shim!
165 /// # }
166 /// #
167 /// # fn main() {
168 /// # #[cfg(not(feature = "thumbv6"))]
169 /// # bbqtest();
170 /// # }
171 /// ```
172 pub fn try_release(
173 &'a self,
174 prod: Producer<'a>,
175 cons: Consumer<'a>,
176 ) -> CoreResult<(), (Producer<'a>, Consumer<'a>)> {
177 // Note: Re-entrancy is not possible because we require ownership
178 // of the producer and consumer, which are not cloneable. We also
179 // can assume the buffer has been split, because
180
181 // Are these our producers and consumers?
182 let our_prod = prod.bbq.as_ptr() as *const Self == self;
183 let our_cons = cons.bbq.as_ptr() as *const Self == self;
184
185 if !(our_prod && our_cons) {
186 // Can't release, not our producer and consumer
187 return Err((prod, cons));
188 }
189
190 let wr_in_progress = self.write_in_progress.load(Acquire);
191 let rd_in_progress = self.read_in_progress.load(Acquire);
192
193 if wr_in_progress || rd_in_progress {
194 // Can't release, active grant(s) in progress
195 return Err((prod, cons));
196 }
197
198 // Drop the producer and consumer halves
199 // Clippy's hint are ignored because this is done intentionnaly to prevent any use after
200 // the release fo the lock.
201 #[allow(clippy::drop_non_drop)]
202 drop(prod);
203 #[allow(clippy::drop_non_drop)]
204 drop(cons);
205
206 // Re-initialize the buffer (not totally needed, but nice to do)
207 self.write.store(0, Release);
208 self.read.store(0, Release);
209 self.reserve.store(0, Release);
210 self.last.store(0, Release);
211
212 // Mark the buffer as ready to be split
213 self.already_split.store(false, Release);
214
215 Ok(())
216 }
217
218 /// Attempt to release the Producer and Consumer in Framed mode
219 ///
220 /// This re-initializes the buffer so it may be split in a different mode at a later
221 /// time. There must be no read or write grants active, or an error will be returned.
222 ///
223 /// The `FrameProducer` and `FrameConsumer` must be from THIS `BBBuffer`, or an error
224 /// will be returned.
225 pub fn try_release_framed(
226 &'a self,
227 prod: FrameProducer<'a>,
228 cons: FrameConsumer<'a>,
229 ) -> CoreResult<(), (FrameProducer<'a>, FrameConsumer<'a>)> {
230 self.try_release(prod.producer, cons.consumer)
231 .map_err(|(producer, consumer)| {
232 // Restore the wrapper types
233 (FrameProducer { producer }, FrameConsumer { consumer })
234 })
235 }
236}
237
238impl BBBuffer {
239 /// Create a new constant inner portion of a `BBBuffer`.
240 ///
241 /// NOTE: This is only necessary to use when creating a `BBBuffer` at static
242 /// scope, and is generally never used directly. This process is necessary to
243 /// work around current limitations in `const fn`, and will be replaced in
244 /// the future.
245 ///
246 /// ```rust,no_run
247 /// use bbqueue::BBBuffer;
248 ///
249 /// static BUF: BBBuffer<6> = BBBuffer::new();
250 ///
251 /// fn main() {
252 /// let (prod, cons) = BUF.try_split().unwrap();
253 /// }
254 /// ```
255 pub fn new(cap: usize) -> Self {
256 Self {
257 buf: unsafe {
258 std::mem::transmute::<Box<[u8]>, Box<UnsafeCell<[u8]>>>(
259 vec![0; cap].into_boxed_slice(),
260 )
261 },
262
263 capacity: cap,
264
265 // Owned by the writer
266 write: AtomicUsize::new(0),
267
268 // Owned by the reader
269 read: AtomicUsize::new(0),
270
271 // Cooperatively owned
272 //
273 // NOTE: This should generally be initialized as size_of::<self.buf>(), however
274 // this would prevent the structure from being entirely zero-initialized,
275 // and can cause the .data section to be much larger than necessary. By
276 // forcing the `last` pointer to be zero initially, we place the structure
277 // in an "inverted" condition, which will be resolved on the first commited
278 // bytes that are written to the structure.
279 //
280 // When read == last == write, no bytes will be allowed to be read (good), but
281 // write grants can be given out (also good).
282 last: AtomicUsize::new(0),
283
284 // Owned by the Writer, "private"
285 reserve: AtomicUsize::new(0),
286
287 // Owned by the Reader, "private"
288 read_in_progress: AtomicBool::new(false),
289
290 // Owned by the Writer, "private"
291 write_in_progress: AtomicBool::new(false),
292
293 // We haven't split at the start
294 already_split: AtomicBool::new(false),
295 }
296 }
297}
298
/// `Producer` is the primary interface for pushing data into a `BBBuffer`.
/// There are various methods for obtaining a grant to write to the buffer, with
/// different potential tradeoffs. As all grants are required to be a contiguous
/// range of data, different strategies are sometimes useful when making the decision
/// between maximizing usage of the buffer, and ensuring a given grant is successful.
///
/// As a short summary of currently possible grants:
///
/// * `grant_exact(N)`
///   * User will receive a grant `sz == N` (or receive an error)
///   * This may cause a wraparound if a grant of size N is not available
///     at the end of the ring.
///   * If this grant caused a wraparound, the bytes that were "skipped" at the
///     end of the ring will not be available until the reader reaches them,
///     regardless of whether the grant committed any data or not.
///   * Maximum possible waste due to skipping: `N - 1` bytes
/// * `grant_max_remaining(N)`
///   * User will receive a grant `0 < sz <= N` (or receive an error)
///   * This will only cause a wrap to the beginning of the ring if exactly
///     zero bytes are available at the end of the ring.
///   * Maximum possible waste due to skipping: 0 bytes
///
/// See [this github issue](https://github.com/jamesmunns/bbqueue/issues/38) for a
/// discussion of grant methods that could be added in the future.
pub struct Producer<'a> {
    // Shared back-pointer to the buffer; aliasing with the Consumer is
    // coordinated through the buffer's atomic fields.
    bbq: NonNull<BBBuffer>,
    // Ties this half's lifetime to the `BBBuffer` borrow taken in `try_split`.
    pd: PhantomData<&'a ()>,
}
327
// SAFETY: the Producer only touches the shared `BBBuffer` through atomics,
// so moving it to another thread is sound (single-producer is preserved
// because `Producer` is not `Clone`).
unsafe impl<'a> Send for Producer<'a> {}
329
impl<'a> Producer<'a> {
    /// Request a writable, contiguous section of memory of exactly
    /// `sz` bytes. If the buffer size requested is not available,
    /// an error will be returned.
    ///
    /// This method may cause the buffer to wrap around early if the
    /// requested space is not available at the end of the buffer, but
    /// is available at the beginning
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// let (mut prod, cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_exact(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Try to obtain a grant of three bytes
    /// assert!(prod.grant_exact(3).is_err());
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_exact(&mut self, sz: usize) -> Result<GrantW<'a>> {
        let inner = unsafe { &self.bbq.as_ref() };

        // Only one write grant may be active at a time.
        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = inner.capacity();
        // "Inverted" == the writer has wrapped and now trails the reader
        // in storage order.
        let already_inverted = write < read;

        let start = if already_inverted {
            if (write + sz) < read {
                // Inverted, room is still available
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            #[allow(clippy::collapsible_if)]
            if write + sz <= max {
                // Non inverted condition
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check sz < read, NOT <=, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if sz < read {
                    // Invertible situation
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell<[u8]>` is `#[repr(transparent)]`
        // over `[u8]`, so a pointer to the cell is a pointer to the bytes.
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(start), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }

    /// Request a writable, contiguous section of memory of up to
    /// `sz` bytes. If a buffer of size `sz` is not available without
    /// wrapping, but some space (0 < available < sz) is available without
    /// wrapping, then a grant will be given for the remaining size at the
    /// end of the buffer. If no space is available for writing, an error
    /// will be returned.
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Release the four initial committed bytes
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.release(4);
    ///
    /// // Try to obtain a grant of three bytes, get two bytes
    /// let mut grant = prod.grant_max_remaining(3).unwrap();
    /// assert_eq!(grant.buf().len(), 2);
    /// grant.commit(2);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn grant_max_remaining(&mut self, mut sz: usize) -> Result<GrantW<'a>> {
        let inner = unsafe { &self.bbq.as_ref() };

        // Only one write grant may be active at a time.
        if atomic::swap(&inner.write_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        // Writer component. Must never write to `read`,
        // be careful writing to `last`
        let write = inner.write.load(Acquire);
        let read = inner.read.load(Acquire);
        let max = inner.capacity();

        let already_inverted = write < read;

        let start = if already_inverted {
            // In inverted case, read is always > write
            // (`- 1` keeps write from ever catching up to read, which would
            // make the inverted state indistinguishable from the empty state)
            let remain = read - write - 1;

            if remain != 0 {
                sz = min(remain, sz);
                write
            } else {
                // Inverted, no room is available
                inner.write_in_progress.store(false, Release);
                return Err(Error::InsufficientSize);
            }
        } else {
            #[allow(clippy::collapsible_if)]
            if write != max {
                // Some (or all) room remaining in un-inverted case
                sz = min(max - write, sz);
                write
            } else {
                // Not inverted, but need to go inverted

                // NOTE: We check read > 1, NOT read >= 1, because
                // write must never == read in an inverted condition, since
                // we will then not be able to tell if we are inverted or not
                if read > 1 {
                    sz = min(read - 1, sz);
                    0
                } else {
                    // Not invertible, no space
                    inner.write_in_progress.store(false, Release);
                    return Err(Error::InsufficientSize);
                }
            }
        };

        // Safe write, only viewed by this task
        inner.reserve.store(start + sz, Release);

        // This is sound, as `UnsafeCell<[u8]>` is `#[repr(transparent)]`
        // over `[u8]`, so a pointer to the cell is a pointer to the bytes.
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(start), sz) };

        Ok(GrantW {
            buf: grant_slice,
            bbq: self.bbq,
            to_commit: 0,
        })
    }
}
526
/// `Consumer` is the primary interface for reading data from a `BBBuffer`.
pub struct Consumer<'a> {
    // Shared back-pointer to the buffer; aliasing with the Producer is
    // coordinated through the buffer's atomic fields.
    bbq: NonNull<BBBuffer>,
    // Ties this half's lifetime to the `BBBuffer` borrow taken in `try_split`.
    pd: PhantomData<&'a ()>,
}
532
// SAFETY: the Consumer only touches the shared `BBBuffer` through atomics,
// so moving it to another thread is sound (single-consumer is preserved
// because `Consumer` is not `Clone`).
unsafe impl<'a> Send for Consumer<'a> {}
534
impl<'a> Consumer<'a> {
    /// Obtains a contiguous slice of committed bytes. This slice may not
    /// contain ALL available bytes, if the writer has wrapped around. The
    /// remaining bytes will be available after all readable bytes are
    /// released
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant
    /// let mut grant = cons.read().unwrap();
    /// assert_eq!(grant.buf().len(), 4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn read(&mut self) -> Result<GrantR<'a>> {
        let inner = unsafe { &self.bbq.as_ref() };

        // Only one read grant may be active at a time.
        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read:
        // if we've consumed everything up to `last` while the writer has
        // wrapped, jump the read pointer back to the start of the ring.
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //   no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        let sz = if write < read {
            // Inverted, only believe last
            last
        } else {
            // Not inverted, only believe write
            write
        } - read;

        if sz == 0 {
            // Nothing readable; release the grant flag before erroring.
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell<[u8]>` is `#[repr(transparent)]`
        // over `[u8]`, so a pointer to the cell is a pointer to the bytes.
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(read), sz) };

        Ok(GrantR {
            buf: grant_slice,
            bbq: self.bbq,
            to_release: 0,
        })
    }

    /// Obtains two disjoint slices, which are each contiguous of committed bytes.
    /// Combined these contain all previously committed data.
    pub fn split_read(&mut self) -> Result<SplitGrantR<'a>> {
        let inner = unsafe { &self.bbq.as_ref() };

        // Only one read grant may be active at a time.
        if atomic::swap(&inner.read_in_progress, true, AcqRel) {
            return Err(Error::GrantInProgress);
        }

        let write = inner.write.load(Acquire);
        let last = inner.last.load(Acquire);
        let mut read = inner.read.load(Acquire);

        // Resolve the inverted case or end of read (see `read()` above).
        if (read == last) && (write < read) {
            read = 0;
            // This has some room for error, the other thread reads this
            // Impact to Grant:
            //   Grant checks if read < write to see if inverted. If not inverted, but
            //   no space left, Grant will initiate an inversion, but will not trigger it
            // Impact to Commit:
            //   Commit does not check read, but if Grant has started an inversion,
            //   grant could move Last to the prior write position
            // MOVING READ BACKWARDS!
            inner.read.store(0, Release);
        }

        // First slice: from `read` to the readable end; second slice: the
        // wrapped-around bytes at the start of the ring (empty if not inverted).
        let (sz1, sz2) = if write < read {
            // Inverted, only believe last
            (last - read, write)
        } else {
            // Not inverted, only believe write
            (write - read, 0)
        };

        if sz1 == 0 {
            // Nothing readable; release the grant flag before erroring.
            inner.read_in_progress.store(false, Release);
            return Err(Error::InsufficientSize);
        }

        // This is sound, as `UnsafeCell<[u8]>` is `#[repr(transparent)]`
        // over `[u8]`, so a pointer to the cell is a pointer to the bytes.
        let start_of_buf_ptr = inner.buf.get().cast::<u8>();
        let grant_slice1 = unsafe { from_raw_parts_mut(start_of_buf_ptr.add(read), sz1) };
        let grant_slice2 = unsafe { from_raw_parts_mut(start_of_buf_ptr, sz2) };

        Ok(SplitGrantR {
            buf1: grant_slice1,
            buf2: grant_slice2,
            bbq: self.bbq,
            to_release: 0,
        })
    }
}
670
impl BBBuffer {
    /// Returns the size of the backing storage.
    ///
    /// This is the maximum number of bytes that can be stored in this queue.
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// assert_eq!(buffer.capacity(), 6);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub const fn capacity(&self) -> usize {
        self.capacity
    }
}
696
/// A structure representing a contiguous region of memory that
/// may be written to, and potentially "committed" to the queue.
///
/// NOTE: If the grant is dropped without explicitly committing
/// the contents, or by setting the number of bytes to
/// automatically be committed with `to_commit()`, then no bytes
/// will be committed for writing.
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without committing it takes a short critical section.
#[derive(Debug, PartialEq)]
pub struct GrantW<'a> {
    // Writable window into the queue's storage; length == granted size.
    pub(crate) buf: &'a mut [u8],
    // Back-pointer to the owning `BBBuffer`.
    bbq: NonNull<BBBuffer>,
    // Number of bytes committed automatically if the grant is dropped.
    pub(crate) to_commit: usize,
}
713
// SAFETY: the grant owns an exclusive view of its byte range; completion is
// signalled back to the buffer through atomics only.
unsafe impl<'a> Send for GrantW<'a> {}
715
/// A structure representing a contiguous region of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
///
/// NOTE: If the grant is dropped without explicitly releasing
/// the contents, or by setting the number of bytes to automatically
/// be released with `to_release()`, then no bytes will be released
/// as read.
///
///
/// If the `thumbv6` feature is selected, dropping the grant
/// without releasing it takes a short critical section.
#[derive(Debug, PartialEq)]
pub struct GrantR<'a> {
    // Readable window into the queue's storage; length == readable size.
    pub(crate) buf: &'a mut [u8],
    // Back-pointer to the owning `BBBuffer`.
    bbq: NonNull<BBBuffer>,
    // Number of bytes released automatically if the grant is dropped.
    pub(crate) to_release: usize,
}
734
/// A structure representing up to two contiguous regions of memory that
/// may be read from, and potentially "released" (or cleared)
/// from the queue
#[derive(Debug, PartialEq)]
pub struct SplitGrantR<'a> {
    // First readable region (from the read index to the readable end).
    pub(crate) buf1: &'a mut [u8],
    // Second readable region (wrapped bytes at the start of the ring;
    // empty when the buffer is not inverted).
    pub(crate) buf2: &'a mut [u8],
    // Back-pointer to the owning `BBBuffer`.
    bbq: NonNull<BBBuffer>,
    // Number of bytes released automatically if the grant is dropped.
    pub(crate) to_release: usize,
}
745
// SAFETY: the grant owns an exclusive view of its byte range; completion is
// signalled back to the buffer through atomics only.
unsafe impl<'a> Send for GrantR<'a> {}
747
// SAFETY: the grant owns an exclusive view of its (disjoint) byte ranges;
// completion is signalled back to the buffer through atomics only.
unsafe impl<'a> Send for SplitGrantR<'a> {}
749
impl<'a> GrantW<'a> {
    /// Finalizes a writable grant given by `grant()` or `grant_max()`.
    /// This makes the data available to be read via `read()`. This consumes
    /// the grant.
    ///
    /// If `used` is larger than the given grant, the maximum amount will
    /// be committed
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while committing.
    pub fn commit(mut self, used: usize) {
        self.commit_inner(used);
        // Skip Drop; the commit has already been performed.
        forget(self);
    }

    /// Obtain access to the inner buffer for writing
    ///
    /// ```rust
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn buf(&mut self) -> &mut [u8] {
        self.buf
    }

    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static mut [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// # Safety
    ///
    /// You MUST guarantee that in no cases, the reference that is returned here outlives
    /// the grant itself. Once the grant has been released, referencing the data contained
    /// WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that a separate reference to this data is not created
    /// to this data, e.g. using `DerefMut` or the `buf()` method of this grant.
    pub unsafe fn as_static_mut_buf(&mut self) -> &'static mut [u8] {
        unsafe { transmute::<&mut [u8], &'static mut [u8]>(self.buf) }
    }

    #[inline(always)]
    pub(crate) fn commit_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.write_in_progress.load(Acquire) {
            return;
        }

        // Writer component. Must never write to READ,
        // be careful writing to LAST

        // Saturate the grant commit
        let len = self.buf.len();
        let used = min(len, used);

        let write = inner.write.load(Acquire);
        // Give back the unused tail of the reservation.
        atomic::fetch_sub(&inner.reserve, len - used, AcqRel);

        let max = inner.capacity();
        let last = inner.last.load(Acquire);
        let new_write = inner.reserve.load(Acquire);

        if (new_write < write) && (write != max) {
            // We have already wrapped, but we are skipping some bytes at the end of the ring.
            // Mark `last` where the write pointer used to be to hold the line here
            inner.last.store(write, Release);
        } else if new_write > last {
            // We're about to pass the last pointer, which was previously the artificial
            // end of the ring. Now that we've passed it, we can "unlock" the section
            // that was previously skipped.
            //
            // Since new_write is strictly larger than last, it is safe to move this as
            // the other thread will still be halted by the (about to be updated) write
            // value
            inner.last.store(max, Release);
        }
        // else: If new_write == last, either:
        // * last == max, so no need to write, OR
        // * If we write in the end chunk again, we'll update last to max next time
        // * If we write to the start chunk in a wrap, we'll update last when we
        //   move write backwards

        // Write must be updated AFTER last, otherwise read could think it was
        // time to invert early!
        inner.write.store(new_write, Release);

        // Allow subsequent grants
        inner.write_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be committed on drop.
    pub fn to_commit(&mut self, amt: usize) {
        // Saturate at the grant size.
        self.to_commit = self.buf.len().min(amt);
    }
}
867
868impl<'a> GrantR<'a> {
869 /// Release a sequence of bytes from the buffer, allowing the space
870 /// to be used by later writes. This consumes the grant.
871 ///
872 /// If `used` is larger than the given grant, the full grant will
873 /// be released.
874 ///
875 /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
876 /// section while releasing.
877 pub fn release(mut self, used: usize) {
878 // Saturate the grant release
879 let used = min(self.buf.len(), used);
880
881 self.release_inner(used);
882 forget(self);
883 }
884
885 pub(crate) fn shrink(&mut self, len: usize) {
886 let mut new_buf: &mut [u8] = &mut [];
887 core::mem::swap(&mut self.buf, &mut new_buf);
888 let (new, _) = new_buf.split_at_mut(len);
889 self.buf = new;
890 }
891
892 /// Obtain access to the inner buffer for reading
893 ///
894 /// ```
895 /// # // bbqueue test shim!
896 /// # fn bbqtest() {
897 /// use bbqueue::BBBuffer;
898 ///
899 /// // Create and split a new buffer of 6 elements
900 /// let buffer: BBBuffer<6> = BBBuffer::new();
901 /// let (mut prod, mut cons) = buffer.try_split().unwrap();
902 ///
903 /// // Successfully obtain and commit a grant of four bytes
904 /// let mut grant = prod.grant_max_remaining(4).unwrap();
905 /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
906 /// grant.commit(4);
907 ///
908 /// // Obtain a read grant, and copy to a buffer
909 /// let mut grant = cons.read().unwrap();
910 /// let mut buf = [0u8; 4];
911 /// buf.copy_from_slice(grant.buf());
912 /// assert_eq!(&buf, &[1, 2, 3, 4]);
913 /// # // bbqueue test shim!
914 /// # }
915 /// #
916 /// # fn main() {
917 /// # #[cfg(not(feature = "thumbv6"))]
918 /// # bbqtest();
919 /// # }
920 /// ```
921 pub fn buf(&self) -> &[u8] {
922 self.buf
923 }
924
925 /// Obtain mutable access to the read grant
926 ///
927 /// This is useful if you are performing in-place operations
928 /// on an incoming packet, such as decryption
929 pub fn buf_mut(&mut self) -> &mut [u8] {
930 self.buf
931 }
932
    /// Sometimes, it's not possible for the lifetimes to check out. For example,
    /// if you need to hand this buffer to a function that expects to receive a
    /// `&'static [u8]`, it is not possible for the inner reference to outlive the
    /// grant itself.
    ///
    /// # Safety
    ///
    /// You MUST guarantee that in no cases, the reference that is returned here outlives
    /// the grant itself. Once the grant has been released, referencing the data contained
    /// WILL cause undefined behavior.
    ///
    /// Additionally, you must ensure that a separate reference to this data is not created
    /// to this data, e.g. using `Deref` or the `buf()` method of this grant.
    pub unsafe fn as_static_buf(&self) -> &'static [u8] {
        // SAFETY: pure lifetime extension; no layout or type change. Soundness is
        // delegated to the caller per the `# Safety` contract above (the returned
        // reference must not outlive the grant, and no aliasing access is made).
        unsafe { transmute::<&[u8], &'static [u8]>(self.buf) }
    }
949
    /// Shared release path used by both `release()` and the `Drop` impl.
    ///
    /// Advances the ring's `read` index by `used` bytes and clears the
    /// read-in-progress flag so a new read grant may be taken.
    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.buf.len());

        // This should be fine, purely incrementing
        let _ = atomic::fetch_add(&inner.read, used, Release);

        // Release ordering publishes the `read` update before the flag is
        // observed as cleared.
        inner.read_in_progress.store(false, Release);
    }
969
970 /// Configures the amount of bytes to be released on drop.
971 pub fn to_release(&mut self, amt: usize) {
972 self.to_release = self.buf.len().min(amt);
973 }
974}
975
impl<'a> SplitGrantR<'a> {
    /// Release a sequence of bytes from the buffer, allowing the space
    /// to be used by later writes. This consumes the grant.
    ///
    /// If `used` is larger than the given grant, the full grant will
    /// be released.
    ///
    /// NOTE: If the `thumbv6` feature is selected, this function takes a short critical
    /// section while releasing.
    pub fn release(mut self, used: usize) {
        // Saturate the grant release
        let used = min(self.combined_len(), used);

        self.release_inner(used);
        // Skip the Drop impl: release_inner has already run, and running it
        // again on drop would double-release.
        forget(self);
    }

    /// Obtain access to both inner buffers for reading
    ///
    /// ```
    /// # // bbqueue test shim!
    /// # fn bbqtest() {
    /// use bbqueue::BBBuffer;
    ///
    /// // Create and split a new buffer of 6 elements
    /// let buffer = BBBuffer::new(6);
    /// let (mut prod, mut cons) = buffer.try_split().unwrap();
    ///
    /// // Successfully obtain and commit a grant of four bytes
    /// let mut grant = prod.grant_max_remaining(4).unwrap();
    /// grant.buf().copy_from_slice(&[1, 2, 3, 4]);
    /// grant.commit(4);
    ///
    /// // Obtain a read grant, and copy to a buffer
    /// let mut grant = cons.read().unwrap();
    /// let mut buf = [0u8; 4];
    /// buf.copy_from_slice(grant.buf());
    /// assert_eq!(&buf, &[1, 2, 3, 4]);
    /// # // bbqueue test shim!
    /// # }
    /// #
    /// # fn main() {
    /// # #[cfg(not(feature = "thumbv6"))]
    /// # bbqtest();
    /// # }
    /// ```
    pub fn bufs(&self) -> (&[u8], &[u8]) {
        (self.buf1, self.buf2)
    }

    /// Obtain mutable access to both parts of the read grant
    ///
    /// This is useful if you are performing in-place operations
    /// on an incoming packet, such as decryption
    pub fn bufs_mut(&mut self) -> (&mut [u8], &mut [u8]) {
        (self.buf1, self.buf2)
    }

    /// Shared release path used by both `release()` and the `Drop` impl.
    #[inline(always)]
    pub(crate) fn release_inner(&mut self, used: usize) {
        let inner = unsafe { &self.bbq.as_ref() };

        // If there is no grant in progress, return early. This
        // generally means we are dropping the grant within a
        // wrapper structure
        if !inner.read_in_progress.load(Acquire) {
            return;
        }

        // This should always be checked by the public interfaces
        debug_assert!(used <= self.combined_len());

        if used <= self.buf1.len() {
            // This should be fine, purely incrementing
            let _ = atomic::fetch_add(&inner.read, used, Release);
        } else {
            // Also release parts of the second buffer
            // NOTE(review): `read` is set to `used - buf1.len()`, i.e. the byte
            // count consumed from `buf2`. This assumes `buf2` begins at index 0
            // of the storage (the inverted case described on `last`) — confirm
            // against the code that constructs SplitGrantR.
            inner.read.store(used - self.buf1.len(), Release);
        }

        // Release ordering publishes the `read` update before the flag is
        // observed as cleared.
        inner.read_in_progress.store(false, Release);
    }

    /// Configures the amount of bytes to be released on drop.
    pub fn to_release(&mut self, amt: usize) {
        // Clamp to the grant size so drop never over-releases.
        self.to_release = self.combined_len().min(amt);
    }

    /// The combined length of both buffers
    pub fn combined_len(&self) -> usize {
        self.buf1.len() + self.buf2.len()
    }
}
1069
1070impl<'a> Drop for GrantW<'a> {
1071 fn drop(&mut self) {
1072 self.commit_inner(self.to_commit)
1073 }
1074}
1075
1076impl<'a> Drop for GrantR<'a> {
1077 fn drop(&mut self) {
1078 self.release_inner(self.to_release)
1079 }
1080}
1081
1082impl<'a> Drop for SplitGrantR<'a> {
1083 fn drop(&mut self) {
1084 self.release_inner(self.to_release)
1085 }
1086}
1087
1088impl<'a> Deref for GrantW<'a> {
1089 type Target = [u8];
1090
1091 fn deref(&self) -> &Self::Target {
1092 self.buf
1093 }
1094}
1095
1096impl<'a> DerefMut for GrantW<'a> {
1097 fn deref_mut(&mut self) -> &mut [u8] {
1098 self.buf
1099 }
1100}
1101
1102impl<'a> Deref for GrantR<'a> {
1103 type Target = [u8];
1104
1105 fn deref(&self) -> &Self::Target {
1106 self.buf
1107 }
1108}
1109
1110impl<'a> DerefMut for GrantR<'a> {
1111 fn deref_mut(&mut self) -> &mut [u8] {
1112 self.buf
1113 }
1114}
1115
#[cfg(feature = "thumbv6")]
mod atomic {
    //! Emulated read-modify-write atomic operations for the `thumbv6` feature.
    //!
    //! Each operation performs its load/store pair inside
    //! `cortex_m::interrupt::free`, so it cannot be preempted by an interrupt
    //! on a single-core target. The caller's `Ordering` argument is accepted
    //! only for signature parity with the native module below; the critical
    //! section itself provides the needed exclusivity.
    use core::sync::atomic::{
        AtomicBool, AtomicUsize,
        Ordering::{self, Acquire, Release},
    };
    use cortex_m::interrupt::free;

    /// Critical-section equivalent of `AtomicUsize::fetch_add`; returns the
    /// previous value. Wraps on overflow, matching the native operation.
    #[inline(always)]
    pub fn fetch_add(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_add(val), Release);
            prev
        })
    }

    /// Critical-section equivalent of `AtomicUsize::fetch_sub`; returns the
    /// previous value. Wraps on underflow, matching the native operation.
    #[inline(always)]
    pub fn fetch_sub(atomic: &AtomicUsize, val: usize, _order: Ordering) -> usize {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(prev.wrapping_sub(val), Release);
            prev
        })
    }

    /// Critical-section equivalent of `AtomicBool::swap`; returns the
    /// previous value.
    #[inline(always)]
    pub fn swap(atomic: &AtomicBool, val: bool, _order: Ordering) -> bool {
        free(|_| {
            let prev = atomic.load(Acquire);
            atomic.store(val, Release);
            prev
        })
    }
}
1151
#[cfg(not(feature = "thumbv6"))]
mod atomic {
    //! Thin pass-throughs to the native atomic operations, so the rest of the
    //! crate can call a single API whether or not `thumbv6` is enabled.
    use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

    /// Native `AtomicUsize::fetch_add`; returns the previous value.
    #[inline(always)]
    pub fn fetch_add(counter: &AtomicUsize, val: usize, order: Ordering) -> usize {
        counter.fetch_add(val, order)
    }

    /// Native `AtomicUsize::fetch_sub`; returns the previous value.
    #[inline(always)]
    pub fn fetch_sub(counter: &AtomicUsize, val: usize, order: Ordering) -> usize {
        counter.fetch_sub(val, order)
    }

    /// Native `AtomicBool::swap`; returns the previous value.
    #[inline(always)]
    pub fn swap(flag: &AtomicBool, val: bool, order: Ordering) -> bool {
        flag.swap(val, order)
    }
}