// embedded_buffer_pool/lib.rs
#![no_std]

//! Fixed-size [`BufferPool`] for [`no_std`] firmware using [`embassy_sync`].
//!
//! The pool holds `N` values of type `T` (**1..=32** slots). A bitmask tracks which
//! slots are free; [`BufferGuard`] and [`MappedBufferGuard`] return a slot to the pool on
//! [`Drop`]. When the pool is empty, [`BufferPool::take`] registers a waker and completes
//! when another task releases a buffer.
//!
//! # Requirements
//!
//! - Acquire APIs take `&'static self`: the pool is intended to live in a `static` item.
//! - Choose a [`RawMutex`] (`M`) compatible with your executor (for example the
//!   critical-section mutex in `embassy_sync`).
//! - `T` must be [`Copy`] because [`BufferPool::new`] is a `const fn`.
//!
//! # Example
//!
//! ```rust,ignore
//! use embedded_buffer_pool::BufferPool;
//! use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
//!
//! /// Shared pool: two 64-byte packet buffers.
//! static POOL: BufferPool<CriticalSectionRawMutex, [u8; 64], 2> =
//!     BufferPool::new([[0u8; 64]; 2]);
//!
//! fn try_fill() -> Option<()> {
//!     let mut guard = POOL.try_take()?;
//!     guard[0] = 0xAA;
//!     Some(())
//! }
//!
//! async fn wait_for_buffer() {
//!     let mut guard = POOL.take().await;
//!     guard[0] = 0xBB;
//! }
//! ```
//!
//! [`embassy_sync`]: https://docs.rs/embassy-sync
//! [`no_std`]: https://doc.rust-lang.org/nomicon/no-std.html
//!
//! For arrays built by repeating an expression (constructors, `const` values, literals),
//! see [`array_new`].

45mod macros;
46
47use core::cell::UnsafeCell;
48use core::future::poll_fn;
49use core::ops::{Deref, DerefMut};
50use core::task::Poll;
51
52use embassy_sync::{
53    blocking_mutex::{Mutex, raw::RawMutex},
54    waitqueue::WakerRegistration,
55};
56
/// Raw pointer to one pooled buffer slot, stored by the guards.
///
/// Newtype over `*mut T` so the crate can opt in to `Send`/`Sync` explicitly
/// instead of inheriting the compiler's automatic `!Send`/`!Sync` for raw
/// pointers.
#[repr(transparent)]
#[derive(Debug)]
struct BufferPtr<T: ?Sized>(*mut T);

// SAFETY: a `BufferPtr` is only created by `BufferPool` after clearing the
// slot's `available` bit, so at most one guard can reach a given slot at a
// time, and the pointee lives in a `'static` pool.
// NOTE(review): these impls are unconditional in `T`. The pool's own
// `Send`/`Sync` impls require `T: Send`, which should cover all current uses —
// confirm no path moves a `BufferPtr<T>` across threads for a non-`Send` `T`.
unsafe impl<T: ?Sized> Send for BufferPtr<T> {}
unsafe impl<T: ?Sized> Sync for BufferPtr<T> {}

/// Shared bookkeeping for a pool, kept behind the pool's blocking mutex.
struct State {
    /// Bitmask of free slots: bit `i` set means slot `i` is available.
    available: u32,
    /// Single registration slot for a task blocked in [`BufferPool::take`];
    /// notified whenever a guard returns a slot in `Drop`.
    waker: WakerRegistration,
}

/// Pool of `N` buffers of type `T`, synchronized with mutex `M`.
///
/// Free slots are tracked with a `u32` bitmask, so **`N` must be in 1..=32**.
pub struct BufferPool<M: RawMutex, T, const N: usize> {
    /// Backing storage; guards hold raw pointers into this array.
    buffer: UnsafeCell<[T; N]>,
    /// Free-slot bitmask and waiter registration, protected by `M`.
    state: Mutex<M, State>,
}

// SAFETY: every access to `buffer` happens under `state`'s mutex, and the
// bitmask guarantees each slot is referenced by at most one guard at a time.
// `T: Send` is required because guards hand out `&mut T` that may be used on
// another thread/task than the one that owns the pool reference.
unsafe impl<M: RawMutex + Send, T: Send, const N: usize> Send for BufferPool<M, T, N> {}
unsafe impl<M: RawMutex + Sync, T: Send, const N: usize> Sync for BufferPool<M, T, N> {}

81impl<M: RawMutex, T: Copy, const N: usize> BufferPool<M, T, N> {
82    /// Creates a pool with the given backing storage; all slots start available.
83    ///
84    /// # Valid `N`
85    ///
86    /// `N` must be in **1..=32**. Otherwise [`BufferPool::new`] panics when run, or fails
87    /// const evaluation if used in a `const` item.
88    pub const fn new(buffer: [T; N]) -> Self {
89        assert!(N > 0 && N <= 32);
90        Self {
91            buffer: UnsafeCell::new(buffer),
92            state: Mutex::new(State {
93                available: u32::MAX >> (32 - N),
94                waker: WakerRegistration::new(),
95            }),
96        }
97    }
98
99    /// Tries to take one buffer without blocking. Returns [`None`] if the pool is empty.
100    pub fn try_take(&'static self) -> Option<BufferGuard<M, T>> {
101        unsafe {
102            self.state.lock_mut(|state| {
103                if state.available == 0 {
104                    return None;
105                }
106                let index = state.available.trailing_zeros() as usize;
107                state.available &= !(1 << index);
108                let buffer = &mut (*self.buffer.get())[index];
109                Some(BufferGuard {
110                    store: &self.state,
111                    ptr: BufferPtr(buffer),
112                    index,
113                })
114            })
115        }
116    }
117
118    /// Waits until a buffer is available, then returns a guard.
119    ///
120    /// If the pool is empty, the current task’s waker is registered; when another task
121    /// drops a [`BufferGuard`] or [`MappedBufferGuard`], waiters are woken.
122    pub fn take(&'static self) -> impl Future<Output = BufferGuard<M, T>> {
123        poll_fn(|cx| unsafe {
124            self.state.lock_mut(|state| {
125                if state.available == 0 {
126                    state.waker.register(cx.waker());
127                    return Poll::Pending;
128                }
129                let index = state.available.trailing_zeros() as usize;
130                state.available &= !(1 << index);
131                let buffer = &mut (*self.buffer.get())[index];
132                Poll::Ready(BufferGuard {
133                    store: &self.state,
134                    ptr: BufferPtr(buffer),
135                    index,
136                })
137            })
138        })
139    }
140}
141
/// Exclusive handle to one pooled `T`. Releases the slot on [`Drop`].
///
/// Derefs to `T` via [`Deref`] / [`DerefMut`]. Use [`BufferGuard::map`] to borrow a
/// subfield or slice while keeping the same pool slot.
pub struct BufferGuard<M: RawMutex + 'static, T> {
    /// The owning pool's state; used on drop to mark `index` free again.
    store: &'static Mutex<M, State>,
    /// Pointer to the claimed slot inside the pool's buffer array.
    ptr: BufferPtr<T>,
    /// Slot number: the bit to set in `State::available` on release.
    index: usize,
}

impl<M: RawMutex + 'static, T> Drop for BufferGuard<M, T> {
    /// Marks the slot free again and notifies the waker registered by
    /// [`BufferPool::take`].
    fn drop(&mut self) {
        // SAFETY: the closure only mutates `state` under the lock and does not
        // re-enter the mutex; the slot pointer is never used after the bit is
        // set.
        unsafe {
            self.store.lock_mut(|state| {
                state.available |= 1 << self.index;
                state.waker.wake();
            });
        }
    }
}

163impl<M: RawMutex + 'static, T> Deref for BufferGuard<M, T> {
164    type Target = T;
165
166    fn deref(&self) -> &Self::Target {
167        unsafe { &*self.ptr.0 }
168    }
169}
170
171impl<M: RawMutex + 'static, T> DerefMut for BufferGuard<M, T> {
172    fn deref_mut(&mut self) -> &mut Self::Target {
173        unsafe { &mut *self.ptr.0 }
174    }
175}
176
impl<M: RawMutex + 'static, T> BufferGuard<M, T> {
    /// Narrows the guard to a `&mut U` derived from the buffer (for example a slice into
    /// a larger `[u8]`). The original guard is consumed without running its [`Drop`]; the
    /// returned [`MappedBufferGuard`] still returns the pool slot when dropped.
    ///
    /// This is an **associated function**, not a method: the guard is passed as `orig`,
    /// not `self`. You call it as `BufferGuard::map(guard, f)`. If the first parameter
    /// were `self`, `guard.map(...)` would always resolve to this routine and **shadow**
    /// a `map` method on `T`; with `orig: Self`, `guard.map(...)` can still go through
    /// [`Deref`] / [`DerefMut`] to `T::map` when you want the inner value’s API.
    pub fn map<U: ?Sized>(
        orig: Self,
        fun: impl FnOnce(&mut T) -> &mut U,
    ) -> MappedBufferGuard<M, U> {
        let store = orig.store;
        let index = orig.index;
        // SAFETY: `orig` owns its slot exclusively, so forming `&mut T` from
        // its pointer cannot alias; the derived `&mut U` points into the same
        // `'static` pool storage and stays valid for the mapped guard.
        let value = fun(unsafe { &mut *orig.ptr.0 });
        // Ownership of the slot is moved to the mapped guard; do not run `BufferGuard::drop`.
        core::mem::forget(orig);
        MappedBufferGuard {
            store,
            value,
            index,
        }
    }
}

/// Like [`BufferGuard`], but derefs to a borrowed `U` (often a slice or inner field).
///
/// Dropping this guard marks the same pool index free as the original [`BufferGuard`].
pub struct MappedBufferGuard<M: RawMutex + 'static, U: ?Sized> {
    /// The owning pool's state; used on drop to mark `index` free again.
    store: &'static Mutex<M, State>,
    /// Slot number inherited from the original [`BufferGuard`].
    index: usize,
    /// Pointer to the mapped view into the pool's `'static` storage.
    value: *mut U,
}

impl<M: RawMutex + 'static, U: ?Sized> Drop for MappedBufferGuard<M, U> {
    /// Marks the original slot free again and notifies the waker registered by
    /// [`BufferPool::take`].
    fn drop(&mut self) {
        // SAFETY: the closure only mutates `state` under the lock and does not
        // re-enter the mutex; `value` is never used after the bit is set.
        unsafe {
            self.store.lock_mut(|state| {
                state.available |= 1 << self.index;
                state.waker.wake();
            });
        }
    }
}

224impl<M: RawMutex + 'static, U: ?Sized> Deref for MappedBufferGuard<M, U> {
225    type Target = U;
226
227    fn deref(&self) -> &Self::Target {
228        unsafe { &*self.value }
229    }
230}
231
232impl<M: RawMutex + 'static, U: ?Sized> DerefMut for MappedBufferGuard<M, U> {
233    fn deref_mut(&mut self) -> &mut Self::Target {
234        unsafe { &mut *self.value }
235    }
236}
237
impl<M: RawMutex + 'static, U: ?Sized> MappedBufferGuard<M, U> {
    /// Chains [`BufferGuard::map`]: re-borrows `&mut U` into `&mut V` without releasing the
    /// pool slot until the final mapped guard is dropped.
    ///
    /// Same as [`BufferGuard::map`]: `orig: Self` instead of `self`, so call
    /// `MappedBufferGuard::map(guard, f)` and keep `guard.map(...)` available for `U::map`
    /// via [`Deref`] / [`DerefMut`].
    pub fn map<V: ?Sized>(
        orig: Self,
        fun: impl FnOnce(&mut U) -> &mut V,
    ) -> MappedBufferGuard<M, V> {
        let store = orig.store;
        let index = orig.index;
        // SAFETY: `orig` owns its slot exclusively, so forming `&mut U` from
        // its pointer cannot alias; the derived `&mut V` points into the same
        // `'static` pool storage.
        let value = fun(unsafe { &mut *orig.value });
        // Ownership of the slot moves to the new guard; skip this guard's `Drop`.
        core::mem::forget(orig);
        MappedBufferGuard {
            store,
            value,
            index,
        }
    }
}