fring/lib.rs
//! Fast ring buffer intended for no_std targets.
//!
//! `fring` ("fast ring") is a fast and lightweight circular buffer, designed
//! for embedded systems and other no_std targets. ("Circular buffer" means
//! it is a FIFO queue, stored as an array, and the data wraps back to the
//! beginning of the array once it reaches the end.) The memory footprint of
//! a `fring::Buffer` is the buffer itself plus two `usize` indices.
//!
//! The buffer allows a single producer and a single consumer, which may
//! operate concurrently. Memory safety and thread safety are enforced at
//! compile time; the buffer is lock-free at runtime. The buffer length is
//! required to be a power of two, so the only arithmetic a buffer operation
//! performs is addition, subtraction, and bitwise AND.
//!
//! The only way to use a [`Buffer`] is to split it into a [`Producer`] and a
//! [`Consumer`]. Then one may call `Producer::write()` and `Consumer::read()`,
//! or the various other methods provided by `Producer` and `Consumer`.
//! Example of safe threaded use:
//! ```rust
//! # const N: usize = 8;
//! # fn make_data(_: fring::Producer<N>) {}
//! # fn use_data(_: fring::Consumer<N>) {}
//! fn main() {
//!     let mut buffer = fring::Buffer::<N>::new();
//!     let (producer, consumer) = buffer.split();
//!     std::thread::scope(|s| {
//!         s.spawn(|| {
//!             make_data(producer);
//!         });
//!         use_data(consumer);
//!     });
//! }
//! ```
//!
//! Example of static use (requires `unsafe`):
//! ```rust
//! # const N: usize = 8;
//! # fn write_data(_: fring::Producer<N>) {}
//! # fn use_data(_: fring::Consumer<N>) {}
//! static BUFFER: fring::Buffer<N> = fring::Buffer::new();
//!
//! fn interrupt_handler() {
//!     // UNSAFE: this is safe because this is the only place we ever
//!     // call BUFFER.producer(), and interrupt_handler() is not reentrant
//!     let producer = unsafe { BUFFER.producer() };
//!     write_data(producer);
//! }
//!
//! fn main() {
//!     // UNSAFE: this is safe because this is the only place we ever
//!     // call BUFFER.consumer(), and main() is not reentrant
//!     let consumer = unsafe { BUFFER.consumer() };
//!     use_data(consumer);
//! }
//! ```

#![no_std]

use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};

/// A `Buffer<N>` consists of a `[u8; N]` array along with two `usize`
/// indices into the array. `N` must be a power of two. (If you need more
/// flexibility with sizing, consider using a `bbqueue::BBBuffer` instead.)
/// A `Buffer<N>` can hold `N` bytes of data and guarantees FIFO ordering.
/// The only way to use a `Buffer` is to split it into a [`Producer`] and a
/// [`Consumer`], which may then be passed to different threads or contexts.
pub struct Buffer<const N: usize> {
    data: core::cell::UnsafeCell<[u8; N]>,
    head: AtomicUsize, // head = next index to be read
    tail: AtomicUsize, // tail = next index to be written
}
// `head` and `tail` are allowed to increment all the way to `usize::MAX`
// and wrap around. We maintain the invariants `0 <= tail - head <= N` and
// `0 <= N + head - tail <= N` (note that these may be *wrapping* subtractions).
// Indices into `data` are given by `head % N` and `tail % N`. Since `N`
// is a power of 2, these are equal to `head & (N - 1)` and `tail & (N - 1)`.
// When the buffer is empty, `head == tail`. When the buffer is full,
// `head + N == tail` (and note this may be a *wrapping* addition).
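//
// Worked example (numbers chosen arbitrarily, purely for illustration):
// with N = 8, head = 14, and tail = 19, the buffer holds tail - head = 5
// bytes of data at array indices 14 & 7 == 6, 7, 0, 1, 2, and has
// N + head - tail = 3 bytes of free space starting at index 19 & 7 == 3.
// The counters themselves keep growing (eventually wrapping past
// usize::MAX); only the masked values are used to index into `data`.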

/// A `Producer` is a smart pointer to a `Buffer`, which is endowed with
/// the right to add data into the buffer. Only one `Producer` may exist
/// at one time for any given buffer. The methods of a `Producer` are the
/// only way to insert data into a `Buffer`.
pub struct Producer<'a, const N: usize> {
    buffer: &'a Buffer<N>,
    // The Producer is allowed to increment buffer.tail (up to a maximum
    // value of buffer.head + N), but may not modify buffer.head.
}

/// A `Consumer` is a smart pointer to a `Buffer`, which is endowed with
/// the right to remove data from the buffer. Only one `Consumer` may exist
/// at one time for any given buffer. The methods of a `Consumer` are the
/// only way to read data out of a `Buffer`.
pub struct Consumer<'a, const N: usize> {
    buffer: &'a Buffer<N>,
    // The Consumer is allowed to increment buffer.head (up to a maximum
    // value of buffer.tail), but may not modify buffer.tail.
}

/// A `Region` is a smart pointer to a specific region of data in a [`Buffer`].
/// The `Region` derefs to `[u8]` and may generally be used in the same way as
/// a slice (e.g. `region[i]`, `region.len()`). When a `Region` is dropped,
/// it updates the associated `Buffer` to indicate that this section of the
/// buffer is finished being read or written. If a `Region` is forgotten
/// instead of dropped, the buffer will not be updated and the same region will
/// be re-issued by the next read/write.
///
/// A `Region` holds a mutable (i.e. exclusive) reference to its owner (of type
/// `T`), which is either a `Producer` (for writing to a buffer) or a `Consumer`
/// (for reading from a buffer). Therefore, for a given buffer, at most
/// one region for reading (referring to the consumer) and one region for writing
/// (referring to the producer) can exist at any time. This is the mechanism by
/// which thread safety for the ring buffer is enforced at compile time.
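///
/// A minimal sketch of the drop/forget behavior (buffer size and contents
/// here are arbitrary):
/// ```rust
/// let mut buffer = fring::Buffer::<8>::new();
/// let (mut producer, consumer) = buffer.split();
/// let w = producer.write(4);
/// core::mem::forget(w);                       // nothing is committed
/// assert_eq!(consumer.data_size(), 0);
/// producer.write(4).copy_from_slice(b"spam"); // the same 4 bytes, re-issued
/// assert_eq!(consumer.data_size(), 4);        // dropping the region committed them
/// ```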
pub struct Region<'b, T> {
    region: &'b mut [u8],                // points to a subslice of Buffer.data
    index_to_increment: &'b AtomicUsize, // points to Buffer.head or Buffer.tail
    _owner: &'b mut T,                   // points to a Producer or Consumer
}

impl<const N: usize> Buffer<N> {
    const SIZE_CHECK: () = assert!(
        (N != 0) && ((N - 1) & N == 0),
        "buffer size must be a power of 2"
    );
    /// Return a new, empty buffer. The memory backing the buffer is zero-initialized.
    pub const fn new() -> Self {
        // Force a compile-time failure if N is not a power of 2.
        let _ = Self::SIZE_CHECK;
        Buffer {
            data: core::cell::UnsafeCell::new([0; N]),
            head: AtomicUsize::new(0),
            tail: AtomicUsize::new(0),
        }
    }
    /// Split the `Buffer` into a `Producer` and a `Consumer`. This function is the
    /// only safe way to create a `Producer` or a `Consumer`. This function requires
    /// a mutable (i.e. exclusive) reference to the buffer, and the lifetime of that
    /// reference is equal to the lifetimes of the producer and consumer which are
    /// returned. Therefore, for a given buffer, only one producer and one consumer
    /// can exist at one time.
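    ///
    /// A minimal round trip as a sketch (the size and contents are arbitrary):
    /// ```rust
    /// let mut buffer = fring::Buffer::<16>::new();
    /// let (mut producer, mut consumer) = buffer.split();
    /// producer.write(3).copy_from_slice(b"abc"); // region commits when dropped
    /// assert_eq!(&*consumer.read(usize::MAX), b"abc");
    /// ```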
    pub fn split(&mut self) -> (Producer<N>, Consumer<N>) {
        (Producer { buffer: self }, Consumer { buffer: self })
    }
    /// Return a `Producer` associated with this buffer. UNSAFE: the caller must
    /// ensure that at most one `Producer` for this buffer exists at any time.
    pub unsafe fn producer(&self) -> Producer<N> {
        Producer { buffer: self }
    }
    /// Return a `Consumer` associated with this buffer. UNSAFE: the caller must
    /// ensure that at most one `Consumer` for this buffer exists at any time.
    pub unsafe fn consumer(&self) -> Consumer<N> {
        Consumer { buffer: self }
    }
}

impl<const N: usize> Buffer<N> {
    #[inline(always)]
    fn calc_pointers(&self, indices: [usize; 2], target_len: usize) -> (*mut u8, usize, usize) {
        // length calculations which are shared between `slice()` and `split_slice()`
        let [start, end] = indices;
        (
            // points to the element of Buffer.data at position `start`
            unsafe { (self.data.get() as *mut u8).add(start & (N - 1)) },
            // maximum length from `start` which doesn't wrap around
            N - (start & (N - 1)),
            // maximum length <= `target_len` which fits between `start` and `end`
            core::cmp::min(target_len, end.wrapping_sub(start)),
        )
    }
    /// Internal use only. Return a u8 slice extending from `indices[0]` to `indices[1]`,
    /// except that the slice shall not be longer than `target_len`, and the slice shall
    /// not wrap around the end of the buffer. Start and end indices are wrapped to the
    /// buffer length. UNSAFE: caller is responsible for ensuring that overlapping
    /// slices are never created, since we return a mutable (i.e. exclusive) slice.
    unsafe fn slice(&self, indices: [usize; 2], target_len: usize) -> &mut [u8] {
        let (start_ptr, wrap_len, len) = self.calc_pointers(indices, target_len);
        core::slice::from_raw_parts_mut(start_ptr, core::cmp::min(len, wrap_len))
    }
    /// Internal use only. Return a pair of u8 slices which are logically contiguous in
    /// the buffer, extending from `indices[0]` to `indices[1]`, except that the total
    /// length shall not exceed `target_len`. Start and end indices are wrapped to the
    /// buffer length. UNSAFE: caller is responsible for ensuring that overlapping
    /// slices are never created, since we return mutable (i.e. exclusive) slices.
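    ///
    /// For illustration (numbers chosen arbitrarily): with `N == 8`,
    /// `indices == [6, 12]`, and `target_len == 5`, the requested span wraps
    /// around the end of the buffer, so the result is
    /// `[&mut data[6..8], &mut data[0..3]]`.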
    unsafe fn split_slice(&self, indices: [usize; 2], target_len: usize) -> [&mut [u8]; 2] {
        let (start_ptr, wrap_len, len) = self.calc_pointers(indices, target_len);
        if len <= wrap_len {
            [
                core::slice::from_raw_parts_mut(start_ptr, 0),
                core::slice::from_raw_parts_mut(start_ptr, len),
            ]
        } else {
            let data_ptr = self.data.get() as *mut u8;
            [
                core::slice::from_raw_parts_mut(start_ptr, wrap_len),
                core::slice::from_raw_parts_mut(data_ptr, len - wrap_len),
            ]
        }
    }
}

/// `Buffer<N>` is `Send` and `Sync` because accesses to its internal data are
/// only possible via a single `Producer` and a single `Consumer` at any time.
unsafe impl<const N: usize> Send for Buffer<N> {}
unsafe impl<const N: usize> Sync for Buffer<N> {}

impl<'a, const N: usize> Producer<'a, N> {
    fn indices(&self) -> [usize; 2] {
        [
            self.buffer.tail.load(Relaxed),
            self.buffer.head.load(Relaxed).wrapping_add(N),
        ]
    }
    /// Return a `Region` for up to `target_len` bytes to be written into
    /// the buffer. The returned region may be shorter than `target_len`,
    /// but it is never longer; it has length zero if and only if the buffer
    /// is full (or `target_len` is zero). To write the largest possible
    /// length, set `target_len = usize::MAX`.
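    ///
    /// A short sketch of the intended pattern (the buffer size is arbitrary):
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, consumer) = buffer.split();
    /// let mut w = producer.write(usize::MAX); // request as much space as possible
    /// assert_eq!(w.len(), 8);                 // buffer is empty: all 8 bytes offered
    /// w.fill(0x55);
    /// drop(w);                                // dropping the region commits the bytes
    /// assert_eq!(consumer.data_size(), 8);
    /// ```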
    pub fn write<'b>(&'b mut self, target_len: usize) -> Region<'b, Self> {
        Region {
            region: unsafe { self.buffer.slice(self.indices(), target_len) },
            index_to_increment: &self.buffer.tail,
            _owner: self,
        }
    }
    /// If the buffer has room for `*item`, write it into the buffer and return `Ok`.
    /// Otherwise, return `Err`.
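    ///
    /// For example (a sketch; the value is arbitrary and is stored in the
    /// target's native byte order):
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, _consumer) = buffer.split();
    /// assert!(producer.write_ref(&0x12345678u32).is_ok()); // 4 bytes fit
    /// assert!(producer.write_ref(&[0u8; 16]).is_err());    // 16 bytes do not
    /// ```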
    pub fn write_ref<T: ?Sized>(&mut self, item: &T) -> Result<(), ()> {
        let src = unsafe {
            core::slice::from_raw_parts(item as *const _ as *const u8, core::mem::size_of_val(item))
        };
        let dst = unsafe { self.buffer.split_slice(self.indices(), src.len()) };
        if dst[0].len() + dst[1].len() == src.len() {
            let (src0, src1) = src.split_at(dst[0].len());
            dst[0].copy_from_slice(src0);
            dst[1].copy_from_slice(src1);
            self.buffer.tail.fetch_add(src.len(), Relaxed);
            Ok(())
        } else {
            Err(())
        }
    }
    /// Return the amount of empty space currently available in the buffer.
    /// If the consumer is reading concurrently with this call, then the amount
    /// of empty space may increase, but it will not decrease below the value
    /// which is returned.
    pub fn empty_size(&self) -> usize {
        let [start, end] = self.indices();
        end.wrapping_sub(start)
    }
}

impl<'a, const N: usize> Consumer<'a, N> {
    fn indices(&self) -> [usize; 2] {
        [
            self.buffer.head.load(Relaxed),
            self.buffer.tail.load(Relaxed),
        ]
    }
    /// Return a `Region` for up to `target_len` bytes to be read from
    /// the buffer. The returned region may be shorter than `target_len`,
    /// but it is never longer; it has length zero if and only if the buffer
    /// is empty (or `target_len` is zero). To read the largest possible
    /// length, set `target_len = usize::MAX`.
    ///
    /// Even though we are reading from the buffer, the `Region` which is returned
    /// is mutable. Its memory is available for arbitrary use by the caller
    /// for as long as the `Region` remains in scope.
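    ///
    /// A short sketch (sizes and contents are arbitrary); note that the region's
    /// memory may be freely modified while it is held:
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, mut consumer) = buffer.split();
    /// assert_eq!(consumer.read(usize::MAX).len(), 0); // empty buffer: empty region
    /// producer.write(4).copy_from_slice(b"ping");
    /// let mut r = consumer.read(2);        // take at most 2 of the 4 bytes
    /// r.sort_unstable();                   // scratch use of the region's memory
    /// assert_eq!(&*r, b"ip");
    /// drop(r);
    /// assert_eq!(consumer.data_size(), 2); // "ng" is still queued
    /// ```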
    pub fn read<'b>(&'b mut self, target_len: usize) -> Region<'b, Self> {
        Region {
            region: unsafe { self.buffer.slice(self.indices(), target_len) },
            index_to_increment: &self.buffer.head,
            _owner: self,
        }
    }
    /// If the buffer contains enough bytes to make an instance of `T`, then write
    /// them into `*item` and return `Ok`. Otherwise, return `Err`. UNSAFE: the caller
    /// must guarantee that the bytes contained in the buffer constitute a valid
    /// instance of `T`. Consequently, it is always safe to call this function when
    /// `T` is an integer type or an array of integers.
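    ///
    /// A sketch of the integer case (the value and type are arbitrary):
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, mut consumer) = buffer.split();
    /// producer.write_ref(&0x12345678u32).unwrap();
    /// let mut x: u32 = 0;
    /// // Safe: any 4-byte pattern is a valid u32.
    /// unsafe { consumer.read_ref(&mut x) }.unwrap();
    /// assert_eq!(x, 0x12345678);
    /// ```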
    pub unsafe fn read_ref<T: Copy + ?Sized>(&mut self, item: &mut T) -> Result<(), ()> {
        let dst = unsafe {
            core::slice::from_raw_parts_mut(item as *mut _ as *mut u8, core::mem::size_of_val(item))
        };
        let src = unsafe { self.buffer.split_slice(self.indices(), dst.len()) };
        if src[0].len() + src[1].len() == dst.len() {
            let (dst0, dst1) = dst.split_at_mut(src[0].len());
            dst0.copy_from_slice(src[0]);
            dst1.copy_from_slice(src[1]);
            self.buffer.head.fetch_add(dst.len(), Relaxed);
            Ok(())
        } else {
            Err(())
        }
    }
    /// Return the amount of data currently stored in the buffer.
    /// If the producer is writing concurrently with this call,
    /// then the amount of data may increase, but it will not
    /// decrease below the value which is returned.
    pub fn data_size(&self) -> usize {
        let [start, end] = self.indices();
        end.wrapping_sub(start)
    }
    /// Discard all data which is currently stored in the buffer.
    /// If the producer is writing concurrently with this call,
    /// then the producer's newest data may not be discarded.
    pub fn flush(&mut self) {
        self.buffer
            .head
            .store(self.buffer.tail.load(Relaxed), Relaxed);
    }
}

impl<'b, T> Region<'b, T> {
    /// Update the buffer to indicate that the first `num` bytes of this region are
    /// finished being read or written. The start and length of this region will be
    /// updated such that the remaining `region.len() - num` bytes remain in this
    /// region for future reading or writing.
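    ///
    /// A minimal sketch (sizes and contents are arbitrary):
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, mut consumer) = buffer.split();
    /// producer.write(4).copy_from_slice(b"abcd");
    /// let mut r = consumer.read(usize::MAX);
    /// assert_eq!(&r[..2], b"ab");
    /// r.consume(2);           // the first two bytes are done; "cd" remains
    /// assert_eq!(&*r, b"cd");
    /// ```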
    pub fn consume(&mut self, num: usize) {
        assert!(num <= self.region.len());
        self.index_to_increment.fetch_add(num, Relaxed);
        // UNSAFE: this is safe because we are replacing self.region with a subslice
        // of self.region, and it is constrained to keep the same lifetime.
        self.region = unsafe {
            core::slice::from_raw_parts_mut(
                self.region.as_mut_ptr().add(num),
                self.region.len() - num,
            )
        }
    }
    /// Update the buffer to indicate that the first `num` bytes of this region are
    /// finished being read or written, and the remaining `region.len() - num` bytes
    /// will not be used. `region.partial_drop(0)` is equivalent to
    /// `core::mem::forget(region)`.
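    ///
    /// For instance (a sketch; sizes and contents are arbitrary):
    /// ```rust
    /// let mut buffer = fring::Buffer::<8>::new();
    /// let (mut producer, consumer) = buffer.split();
    /// let mut w = producer.write(usize::MAX); // all 8 bytes offered
    /// w[..3].copy_from_slice(b"abc");
    /// w.partial_drop(3);                      // commit only the first 3 bytes
    /// assert_eq!(consumer.data_size(), 3);
    /// ```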
    pub fn partial_drop(self, num: usize) {
        assert!(num <= self.region.len());
        self.index_to_increment.fetch_add(num, Relaxed);
        core::mem::forget(self); // don't run drop() now!
    }
}

impl<'b, T> Drop for Region<'b, T> {
    /// Update the buffer to indicate that the entire region is finished being
    /// read or written. Dropping a `Region` requires a single addition operation
    /// to one field of the `Buffer`.
    fn drop(&mut self) {
        self.index_to_increment.fetch_add(self.region.len(), Relaxed);
    }
}

impl<'b, T> core::ops::Deref for Region<'b, T> {
    type Target = [u8];
    fn deref(&self) -> &[u8] {
        self.region
    }
}

impl<'b, T> core::ops::DerefMut for Region<'b, T> {
    fn deref_mut(&mut self) -> &mut [u8] {
        self.region
    }
}

#[test]
fn index_wraparound() {
    // This can't be tested using the public interface because it would
    // take too long to get `head` and `tail` incremented to usize::MAX.
    let mut b = Buffer::<64>::new();
    b.head.fetch_sub(128, Relaxed);
    b.tail.fetch_sub(128, Relaxed);
    // Now b.head == b.tail == usize::MAX - 127
    let (mut p, mut c) = b.split();
    for _ in 0..4 {
        assert!(p.empty_size() == 64);
        assert!(p.write(32).len() == 32);
        assert!(p.empty_size() == 32);
        assert!(p.write(usize::MAX).len() == 32);
        assert!(p.empty_size() == 0);
        assert!(p.write(usize::MAX).len() == 0);
        assert!(c.data_size() == 64);
        assert!(c.read(32).len() == 32);
        assert!(c.data_size() == 32);
        assert!(c.read(usize::MAX).len() == 32);
        assert!(c.data_size() == 0);
        assert!(c.read(usize::MAX).len() == 0);
    }
    assert!(b.head.load(Relaxed) == 128);
    assert!(b.tail.load(Relaxed) == 128);
}