slice_ring_buf/
lib.rs

//! A ring buffer implementation optimized for working with slices. It does much
//! the same thing as [`VecDeque`], but adds the ability to index using negative
//! values and to work with buffers allocated on the stack. This crate can also
//! be used without the standard library (`#![no_std]`).
//!
//! This crate has no consumer/producer logic, and is meant to be used for DSP or
//! as a base for other data structures.
//!
//! This data type is optimized for manipulating data in chunks with slices.
//! Indexing one element at a time is slow. If your algorithm indexes elements one
//! at a time and only uses buffers with a length that is a power of two, consider
//! my crate [`bit_mask_ring_buf`].
//!
//! A self-expanding version of this data structure can be found in my crate
//! [`expanding_slice_rb`].
//!
//! ## Example
//! ```rust
//! use core::num::NonZeroUsize;
//! use slice_ring_buf::{SliceRB, SliceRbRefMut};
//!
//! // Create a ring buffer with type u32. The data will be
//! // initialized with the value of `0`.
//! let mut rb = SliceRB::<u32>::new(NonZeroUsize::new(4).unwrap(), 0);
//!
//! // Memcpy data from a slice into the ring buffer at arbitrary
//! // `isize` indexes. Earlier data will not be copied if it will
//! // be overwritten by newer data, avoiding unnecessary memcpys.
//! // The correct placement of the newer data will still be preserved.
//! rb.write_latest(&[0, 2, 3, 4, 1], 0);
//! assert_eq!(rb[0], 1);
//! assert_eq!(rb[1], 2);
//! assert_eq!(rb[2], 3);
//! assert_eq!(rb[3], 4);
//!
//! // Memcpy into slices at arbitrary `isize` indexes and lengths.
//! let mut read_buffer = [0u32; 7];
//! rb.read_into(&mut read_buffer, 2);
//! assert_eq!(read_buffer, [3, 4, 1, 2, 3, 4, 1]);
//!
//! // Read/write by retrieving slices directly.
//! let (s1, s2) = rb.as_slices_len(1, 4);
//! assert_eq!(s1, &[2, 3, 4]);
//! assert_eq!(s2, &[1]);
//!
//! // Read/write to the buffer by indexing. (Note that indexing
//! // one element at a time is slow.)
//! rb[0] = 0;
//! rb[1] = 1;
//! rb[2] = 2;
//! rb[3] = 3;
//!
//! // Indexes wrap when reading/writing outside of bounds.
//! assert_eq!(rb[-1], 3);
//! assert_eq!(rb[10], 2);
//!
//! // Aligned/stack data may also be used.
//! let mut stack_data = [0u32, 1, 2, 3];
//! let mut rb_ref = SliceRbRefMut::new(&mut stack_data);
//! rb_ref[-4] = 5;
//! let (s1, s2) = rb_ref.as_slices_len(0, 3);
//! assert_eq!(s1, &[5, 1, 2]);
//! assert_eq!(s2, &[]);
//! ```
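//!
//! Because the buffer wraps around, any in-bounds range maps to at most two
//! contiguous slices. Iterating those slices is much cheaper than indexing
//! element by element, since every index operation must first wrap the position
//! into range. Below is a minimal sketch contrasting the two approaches, using
//! only the methods shown above:
//! ```rust
//! use core::num::NonZeroUsize;
//! use slice_ring_buf::SliceRB;
//!
//! let mut rb = SliceRB::<u32>::new(NonZeroUsize::new(4).unwrap(), 0);
//! rb.write_latest(&[1, 2, 3, 4], 0);
//!
//! // Slow path: every access performs a wrapping calculation.
//! let mut sum_indexed: u32 = 0;
//! for i in 0..4isize {
//!     sum_indexed += rb[i];
//! }
//!
//! // Fast path: fetch the two contiguous slices and iterate them directly.
//! let (s1, s2) = rb.as_slices_len(2, 4);
//! let sum_slices: u32 = s1.iter().chain(s2.iter()).sum();
//!
//! assert_eq!(sum_indexed, sum_slices);
//! ```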
//!
//! [`VecDeque`]: https://doc.rust-lang.org/std/collections/struct.VecDeque.html
//! [`bit_mask_ring_buf`]: https://crates.io/crates/bit_mask_ring_buf
//! [`expanding_slice_rb`]: https://crates.io/crates/expanding_slice_rb/

#![no_std]

#[cfg(feature = "alloc")]
extern crate alloc;

mod inner;
mod referenced;

// Ring buffer views that borrow an existing slice (e.g. stack data). These
// work without an allocator.
pub use referenced::{SliceRbRef, SliceRbRefMut};

// The owned ring buffer allocates its buffer on the heap, so it is only
// available when the `alloc` feature is enabled.
#[cfg(feature = "alloc")]
mod owned;
#[cfg(feature = "alloc")]
pub use owned::SliceRB;