zigzag_alloc/alloc/bump.rs
//! Lock-free bump (linear) allocator.
//!
//! A bump allocator maintains a single monotonically-increasing offset into a
//! fixed backing buffer. Each allocation simply advances the offset by the
//! required (aligned) size.
//!
//! ## Trade-offs
//!
//! | Pro | Con |
//! |-----|-----|
//! | O(1) allocation with zero fragmentation | No individual deallocation |
//! | Thread-safe without a mutex (CAS loop) | Must call [`BumpAllocator::reset`] to reclaim memory |
//! | Minimal memory overhead per allocation | Buffer size fixed at construction |
//!
//! ## Usage
//!
//! ```rust,ignore
//! static mut BUF: [u8; 4096] = [0u8; 4096];
//! let bump = BumpAllocator::new(unsafe { &mut BUF });
//! let vec: ExVec<u32> = ExVec::new(&bump);
//! ```
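//!
//! Allocation and wholesale reuse then follow the usual arena pattern. A
//! sketch continuing the snippet above (note that `reset` requires the
//! allocator binding to be mutable):
//!
//! ```rust,ignore
//! let scratch = bump.alloc_slice(256, 64).unwrap(); // 256 bytes, 64-aligned
//! // ... fill `scratch` for the duration of the frame ...
//! bump.reset(); // reclaims everything; `scratch` must not be touched again
//! ```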

use core::{
    alloc::Layout,
    ptr::NonNull,
    sync::atomic::{AtomicUsize, Ordering},
};

use super::allocator::Allocator;
use crate::simd;

/// A thread-safe, lock-free bump allocator backed by a static byte buffer.
///
/// Allocations are fulfilled by atomically advancing an internal offset;
/// no individual deallocation is possible. Call [`reset`](Self::reset) (or
/// [`reset_zeroed`](Self::reset_zeroed)) to reclaim the entire buffer at once.
///
/// # Thread Safety
///
/// `BumpAllocator` is `Sync` + `Send`. Concurrent `alloc` calls use a
/// compare-and-swap loop to guarantee each thread receives a non-overlapping
/// slice of the backing buffer.
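///
/// A sketch of concurrent use (assuming a `std` environment and a shared
/// `bump` as in the module-level example):
///
/// ```rust,ignore
/// std::thread::scope(|s| {
///     for _ in 0..4 {
///         s.spawn(|| {
///             // Each thread is handed a disjoint 64-byte region.
///             let region = bump.alloc_slice(64, 8).unwrap();
///             region.fill(0xAB);
///         });
///     }
/// });
/// ```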
///
/// # Invariants
///
/// * `start` always points to the first byte of the backing buffer.
/// * `0 <= offset <= size` at all times.
/// * Memory in `[start, start + offset)` has been "handed out" and must not be
///   written by the allocator itself until `reset` is called.
pub struct BumpAllocator {
    /// Pointer to the first byte of the backing buffer.
    start: *mut u8,
    /// Total size of the backing buffer in bytes.
    size: usize,
    /// Monotonically-increasing byte offset; updated atomically.
    offset: AtomicUsize,
}

// SAFETY: The bump allocator never aliases its own backing buffer after handing
// out a pointer, and the CAS loop ensures no two threads receive the same range.
// Therefore it is safe to share (`Sync`) and transfer (`Send`) across threads.
unsafe impl Sync for BumpAllocator {}
unsafe impl Send for BumpAllocator {}

impl BumpAllocator {
    /// Creates a new bump allocator that owns the given static buffer.
    ///
    /// The buffer must outlive the allocator (enforced by the `'static` bound).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// static mut BUF: [u8; 65536] = [0u8; 65536];
    /// let bump = BumpAllocator::new(unsafe { &mut BUF });
    /// ```
    pub fn new(buf: &'static mut [u8]) -> Self {
        Self {
            start: buf.as_mut_ptr(),
            size: buf.len(),
            offset: AtomicUsize::new(0),
        }
    }

    /// Returns the number of bytes that have been allocated from the buffer.
    #[inline]
    pub fn used(&self) -> usize { self.offset.load(Ordering::Relaxed) }

    /// Returns the number of bytes still available for allocation.
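    ///
    /// Together with [`used`](Self::used) and [`capacity`](Self::capacity),
    /// this satisfies the identity `used() + remaining() == capacity()`,
    /// because `offset` never exceeds `size`:
    ///
    /// ```rust,ignore
    /// assert_eq!(bump.used() + bump.remaining(), bump.capacity());
    /// ```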
    #[inline]
    pub fn remaining(&self) -> usize { self.size.saturating_sub(self.used()) }

    /// Returns the total capacity of the backing buffer in bytes.
    #[inline]
    pub fn capacity(&self) -> usize { self.size }

    /// Resets the allocator, making the entire buffer available again.
    ///
    /// # Safety
    ///
    /// All pointers previously returned by [`alloc`](Allocator::alloc) or
    /// [`alloc_slice`](Self::alloc_slice) become **invalid** after this call.
    /// Any subsequent access to those pointers is undefined behaviour.
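    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// // A sketch of the per-frame arena pattern `reset` enables.
    /// let frame = bump.alloc_slice(1024, 16).unwrap();
    /// // ... use `frame` ...
    /// bump.reset(); // `frame` is now dangling and must not be used
    /// assert_eq!(bump.used(), 0);
    /// ```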
    #[inline]
    pub fn reset(&mut self) {
        // Relaxed ordering is sufficient here because `reset` takes `&mut self`,
        // ensuring exclusive access; no concurrent `alloc` can be in progress.
        self.offset.store(0, Ordering::Relaxed);
    }

    /// Resets the allocator and zero-fills the entire backing buffer with SIMD.
    ///
    /// Useful when the buffer may contain sensitive data that should not persist.
    ///
    /// # Safety
    ///
    /// Same as [`reset`](Self::reset): all previously returned pointers are
    /// invalidated.
    pub fn reset_zeroed(&mut self) {
        self.offset.store(0, Ordering::Relaxed);
        // SAFETY: `start` is valid for `size` bytes because it was obtained
        // from a `&'static mut [u8]` of exactly that length.
        unsafe { simd::fill_bytes(self.start, 0, self.size) };
    }

    /// Allocates a mutable byte slice of `size` bytes with the given `align`.
    ///
    /// Returns `None` if the remaining capacity is insufficient, or if `size`
    /// and `align` do not form a valid [`Layout`] (e.g. `align` is not a
    /// power of two).
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// let slice: &mut [u8] = bump.alloc_slice(64, 16).unwrap();
    /// ```
    pub fn alloc_slice(&self, size: usize, align: usize) -> Option<&mut [u8]> {
        let layout = Layout::from_size_align(size, align).ok()?;
        let ptr = self.alloc_raw(layout)?;
        // SAFETY: `alloc_raw` returns a pointer valid for exactly `size` bytes
        // with the requested alignment. No aliasing can occur because `alloc_raw`
        // atomically claimed this range.
        Some(unsafe { core::slice::from_raw_parts_mut(ptr.as_ptr(), size) })
    }

    /// Core lock-free allocation primitive.
    ///
    /// Attempts to claim `layout.size()` bytes (suitably aligned) from the
    /// backing buffer by atomically advancing the offset.
    ///
    /// Returns `None` if there is not enough space.
    ///
    /// # Implementation Notes
    ///
    /// The CAS loop (`compare_exchange_weak`) is the only synchronisation
    /// primitive used. On failure it retries with the latest observed value,
    /// which is correct because `offset` only ever increases between resets.
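    ///
    /// A worked example of the round-up step: for a candidate address
    /// `base + current = 0x1005` and `align = 8`, `(0x1005 + 7) & !7 == 0x1008`,
    /// so the allocation begins three bytes past the current position.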
    fn alloc_raw(&self, layout: Layout) -> Option<NonNull<u8>> {
        let size = layout.size();
        // `Layout` guarantees `align` is a non-zero power of two.
        let align = layout.align();
        let base = self.start as usize;
        let mut current = self.offset.load(Ordering::Relaxed);
        loop {
            // Round the *absolute address* `base + current` up to the required
            // alignment. Rounding the offset alone would only yield an aligned
            // pointer if the backing buffer itself happened to be aligned.
            let addr = base.checked_add(current)?;
            let aligned = (addr.checked_add(align - 1)? & !(align - 1)) - base;
            let end = aligned.checked_add(size)?;
            if end > self.size { return None; }

            // Atomically claim the range [aligned, end). Atomicity of the CAS
            // alone guarantees that no two threads claim overlapping ranges;
            // `AcqRel` is deliberately conservative (nothing has been written
            // to the range yet, so the CAS itself publishes no data).
            match self.offset.compare_exchange_weak(
                current, end,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // SAFETY: `aligned + size <= self.size`, so `start + aligned`
                    // is in bounds of the backing buffer and aligned to
                    // `layout.align()`.
                    return NonNull::new(unsafe { self.start.add(aligned) });
                }
                Err(actual) => current = actual,
            }
        }
    }
}

impl Allocator for BumpAllocator {
    /// Allocates a block satisfying `layout` from the backing buffer.
    ///
    /// # Safety
    ///
    /// * `layout.size()` must be greater than zero.
    /// * The returned pointer is valid until [`reset`](Self::reset) is called
    ///   or the allocator is dropped (though dropping does not invalidate the
    ///   underlying `'static` buffer).
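    ///
    /// A usage sketch:
    ///
    /// ```rust,ignore
    /// let ptr = unsafe { bump.alloc(Layout::new::<u64>()) }.unwrap();
    /// unsafe { ptr.cast::<u64>().as_ptr().write(42) };
    /// ```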
    unsafe fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        self.alloc_raw(layout)
    }

    /// No-op. Individual deallocation is not supported by a bump allocator.
    ///
    /// Use [`reset`](Self::reset) to reclaim all memory at once.
    ///
    /// # Safety
    ///
    /// Even though this method is a no-op, callers must not use `ptr` after
    /// calling `dealloc`: doing so becomes a use-after-free once `reset` is
    /// eventually called.
    unsafe fn dealloc(&self, _: NonNull<u8>, _: Layout) {}
}
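
// A minimal test sketch (not part of the public API): it assumes the crate
// links `std` in its test profile, e.g. via `#![cfg_attr(not(test), no_std)]`,
// so `Box::leak` can provide the `&'static mut [u8]` that `new` requires.
#[cfg(test)]
mod tests {
    use super::*;

    /// Leak a heap buffer to satisfy the `&'static mut [u8]` bound of `new`.
    fn leaked_buf(len: usize) -> &'static mut [u8] {
        std::boxed::Box::leak(std::vec![0u8; len].into_boxed_slice())
    }

    #[test]
    fn alignment_and_accounting() {
        let bump = BumpAllocator::new(leaked_buf(128));
        assert_eq!(bump.alloc_slice(10, 1).unwrap().len(), 10);
        // The second allocation must land on a 16-byte boundary even though
        // the first one left the offset at an odd position.
        let b = bump.alloc_slice(16, 16).unwrap();
        assert_eq!(b.as_ptr() as usize % 16, 0);
        assert_eq!(bump.used() + bump.remaining(), bump.capacity());
    }

    #[test]
    fn exhaustion_returns_none() {
        let bump = BumpAllocator::new(leaked_buf(32));
        assert!(bump.alloc_slice(33, 1).is_none()); // larger than the buffer
        assert!(bump.alloc_slice(32, 1).is_some()); // exactly fills it
        assert!(bump.alloc_slice(1, 1).is_none()); // nothing left
    }
}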