stack_allocator/
lib.rs

#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc = include_str!("../README.md")]
#![cfg_attr(nightly, feature(allocator_api))]
use core::cell::UnsafeCell;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;

extern crate alloc;

#[cfg(not(nightly))]
use allocator_api2::alloc::{AllocError, Allocator, Layout};
#[cfg(nightly)]
use core::alloc::{AllocError, Allocator, Layout};

/// A simple bump-allocator that lives on the stack (or in static memory).
///
/// It tries to reuse freed memory only if it is the most recently allocated block.
///
/// `N` is the size of the backing buffer in bytes.
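///
/// # Example
///
/// A minimal sketch of direct use (not compiled here, since the `Allocator`
/// trait comes from either `allocator_api2` or the nightly `allocator_api`,
/// as selected by the imports above):
///
/// ```ignore
/// let alloc = StackAllocator::<1024>::new();
/// let layout = Layout::from_size_align(64, 1).unwrap();
///
/// let block = alloc.allocate(layout).expect("fits in the 1 KiB buffer");
/// assert_eq!(alloc.current_offset(), 64);
///
/// // Freeing the most recent block rolls the bump pointer back.
/// unsafe { alloc.deallocate(block.cast(), layout) };
/// assert_eq!(alloc.current_offset(), 0);
/// ```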
pub struct StackAllocator<const N: usize> {
    /// The buffer that backs all allocations.
    buf: UnsafeCell<MaybeUninit<[u8; N]>>,
    /// Offset of the next free byte inside `buf`.
    offset: AtomicUsize,
}

impl<const N: usize> Default for StackAllocator<N> {
    fn default() -> Self {
        Self::new()
    }
}

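// SAFETY: the only state mutated through a shared reference is `offset`, which
// is atomic. The bytes of `buf` are only accessed through pointers returned by
// `allocate`, and synchronizing access to those is the caller's responsibility.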
unsafe impl<const N: usize> Send for StackAllocator<N> {}
unsafe impl<const N: usize> Sync for StackAllocator<N> {}

impl<const N: usize> StackAllocator<N> {
    /// Create a fresh allocator.  The buffer starts empty.
    pub const fn new() -> Self {
        Self {
            buf: UnsafeCell::new(MaybeUninit::uninit()),
            offset: AtomicUsize::new(0),
        }
    }

    /// Reset the allocator, discarding all previously allocated memory.
    ///
    /// # Safety
    /// The caller must guarantee that no allocation created by this allocator
    /// is still in use.
    pub unsafe fn reset(&mut self) {
        self.offset.store(0, Ordering::Release);
    }

    /// Align `addr` upwards to `align`.  `align` must be a power of two.
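    /// For example, `align_up(13, 8)` returns `16`, and `align_up(16, 8)` returns `16`.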
    #[inline]
    const fn align_up(addr: usize, align: usize) -> usize {
        (addr + align - 1) & !(align - 1)
    }

    /// Get the current offset of the allocator.
    pub fn current_offset(&self) -> usize {
        self.offset.load(Ordering::Acquire)
    }
}

unsafe impl<const N: usize> Allocator for StackAllocator<N> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.buf.get() as usize;
        let mut current = self.offset.load(Ordering::Acquire);
        let mut start;
        loop {
            // Compute the aligned pointer address, then get the offset.
            let current_ptr = base + current;
            let aligned_ptr = Self::align_up(current_ptr, layout.align());
            start = aligned_ptr - base;
            let end = start.checked_add(layout.size()).ok_or(AllocError)?;

            // Ensure we stay inside the buffer.
            if end > N {
                return Err(AllocError);
            }

            // Update the bump pointer.
            if self
                .offset
                .compare_exchange(current, end, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
            current = self.offset.load(Ordering::Acquire);
        }
        // SAFETY: `start..end` is inside `self.buf` and properly aligned.
        let ptr = unsafe { self.buf.get().cast::<u8>().add(start) };
        Ok(NonNull::slice_from_raw_parts(
            NonNull::new(ptr).unwrap(),
            layout.size(),
        ))
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Compute the start offset of the allocation.
        let base = self.buf.get() as usize;
        let start = ptr.as_ptr() as usize - base;
        let end = start + layout.size();

        // If this block is the most recent allocation, move the bump pointer back.
        let _ = self
            .offset
            .compare_exchange(end, start, Ordering::Release, Ordering::Relaxed);
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // `grow` can only succeed in place when the block being grown is the
        // most recent allocation.
        // Compute the start offset of the existing allocation.
        let base = self.buf.get() as usize;
        let old_start = ptr.as_ptr() as usize - base;

        // Verify that the allocator's current offset matches the end of this allocation.
        let expected_offset = old_start + old_layout.size();
        let current_offset = self.offset.load(Ordering::Acquire);
        if current_offset != expected_offset {
            return Err(AllocError);
        }

        // The new layout must be at least as large as the old one.
        if new_layout.size() < old_layout.size() {
            return Err(AllocError);
        }

        // Compute the new end of the allocation, checking for overflow and buffer limits.
        let new_end = old_start.checked_add(new_layout.size()).ok_or(AllocError)?;
        if new_end > N {
            return Err(AllocError);
        }
        // Attempt to bump the allocator's offset forward to cover the new size.
        if self
            .offset
            .compare_exchange(
                expected_offset,
                new_end,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_err()
        {
            // Another allocation raced us, so the block can no longer grow in
            // place. Allocate a fresh block and copy the old contents into it,
            // as the `Allocator::grow` contract requires.
            let new_ptr = self.allocate(new_layout)?;
            // SAFETY: the old block is still live, so the bump offset cannot
            // move below its end; the new block therefore does not overlap it,
            // and both regions lie inside `self.buf`.
            core::ptr::copy_nonoverlapping(
                ptr.as_ptr(),
                new_ptr.cast::<u8>().as_ptr(),
                old_layout.size(),
            );
            // Hand the old block back; it is only reclaimed if it is still the
            // most recent allocation.
            self.deallocate(ptr, old_layout);
            return Ok(new_ptr);
        }
        // Return the same pointer, now representing a slice of the larger size.
        Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // Compute the start offset of the existing allocation.
        let base = self.buf.get() as usize;
        let old_start = ptr.as_ptr() as usize - base;

        // Verify that the allocator's current offset matches the end of this allocation.
        let expected_offset = old_start + old_layout.size();
        let current_offset = self.offset.load(Ordering::Acquire);
        if current_offset != expected_offset {
            return Err(AllocError);
        }

        // The new layout must be no larger than the old one.
        if new_layout.size() > old_layout.size() {
            return Err(AllocError);
        }

        // Compute the new end of the allocation.
        let new_end = old_start + new_layout.size();

        // Attempt to move the bump pointer backwards.
        // We simply don't care if this fails, as it only means that some other
        // allocation happened in the meantime.
        // In that case, the memory will be reclaimed later when `reset` is called.
        let _ = self.offset.compare_exchange(
            expected_offset,
            new_end,
            Ordering::Release,
            Ordering::Relaxed,
        );

        // Return the same pointer, now representing a slice of the smaller size.
        Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
    }

    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}

/// A hybrid allocator that first tries to allocate from a stack-backed bump
/// allocator and falls back to a user-provided allocator.
///
/// `N` - size of the stack buffer in bytes.
/// `F` - the fallback allocator type (e.g. `std::alloc::Global` or any custom
/// allocator that implements `Allocator`).
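///
/// # Example
///
/// A sketch of the intended usage (not compiled here; it assumes a nightly
/// toolchain with `allocator_api` so that `Vec::with_capacity_in` and
/// `std::alloc::Global` are available):
///
/// ```ignore
/// let alloc: HybridAllocator<256, _> = HybridAllocator::new(std::alloc::Global);
///
/// // Small allocations are served from the 256-byte stack buffer...
/// let small: Vec<u8, _> = Vec::with_capacity_in(16, &alloc);
/// assert_eq!(alloc.current_offset(), 16);
///
/// // ...while anything that does not fit goes to the fallback allocator.
/// let big: Vec<u8, _> = Vec::with_capacity_in(1024, &alloc);
/// assert_eq!(alloc.current_offset(), 16);
/// ```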
pub struct HybridAllocator<const N: usize, F: Allocator> {
    stack_alloc: StackAllocator<N>,
    fallback: F,
}

#[cfg(all(not(nightly), feature = "alloc"))]
use allocator_api2::alloc::Global;
#[cfg(all(nightly, feature = "alloc"))]
use alloc::alloc::Global;

#[cfg(feature = "alloc")]
impl<const N: usize> Default for HybridAllocator<N, Global> {
    fn default() -> Self {
        Self::new(Global)
    }
}

impl<const N: usize, F: Allocator> HybridAllocator<N, F> {
    /// Create a new hybrid allocator.
    ///
    /// The caller supplies the fallback allocator that will be used when the
    /// stack buffer cannot satisfy a request.
    pub const fn new(fallback: F) -> Self {
        Self {
            stack_alloc: StackAllocator::new(),
            fallback,
        }
    }

    /// Reset the allocator, discarding all previously allocated memory.
    ///
    /// # Safety
    /// The caller must guarantee that no allocation created by this allocator
    /// is still in use.
    pub unsafe fn reset(&mut self) {
        self.stack_alloc.reset();
    }

    /// Get the current offset of the stack allocator.
    pub fn current_offset(&self) -> usize {
        self.stack_alloc.current_offset()
    }

    /// Get a reference to the fallback allocator.
    pub fn fallback(&self) -> &F {
        &self.fallback
    }

    /// Check whether the stack buffer is completely full.
    ///
    /// If this returns `true`, every further allocation will be served by the
    /// fallback allocator until [`reset`](Self::reset) is called.
    pub fn is_stack_exhausted(&self) -> bool {
        self.current_offset() >= N
    }
}

unsafe impl<const N: usize, F: Allocator> Allocator for HybridAllocator<N, F> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Try the stack allocator first; on failure delegate to the fallback.
        match self.stack_alloc.allocate(layout) {
            ok @ Ok(_) => ok,
            Err(_) => self.fallback.allocate(layout),
        }
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Determine whether the pointer belongs to the stack buffer.
        let base = self.stack_alloc.buf.get() as usize;
        let end = base + N;
        let addr = ptr.as_ptr() as usize;

        if (base..end).contains(&addr) {
            self.stack_alloc.deallocate(ptr, layout);
        } else {
            self.fallback.deallocate(ptr, layout);
        }
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;

        if (base..base + N).contains(&addr) {
            // Attempt to grow in place using the stack allocator.
            match self.stack_alloc.grow(ptr, old_layout, new_layout) {
                Ok(res) => return Ok(res),
                Err(_) => {
                    // Growing inside the stack buffer failed; allocate a new
                    // block from the fallback and copy the data over.
                    let new_ptr = self.fallback.allocate(new_layout)?;
                    core::ptr::copy_nonoverlapping(
                        ptr.as_ptr(),
                        new_ptr.cast::<u8>().as_ptr(),
                        old_layout.size(),
                    );
                    // Release the old block; the stack allocator only reclaims
                    // it if it is the most recent allocation.
                    self.stack_alloc.deallocate(ptr, old_layout);
                    return Ok(new_ptr);
                }
            }
        }
        // The block came from the fallback allocator; let it handle the grow.
        self.fallback.grow(ptr, old_layout, new_layout)
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;

        if (base..base + N).contains(&addr) {
            // The block lives in the stack buffer, so it must never be handed
            // to the fallback allocator. Shrinking only fails when the block is
            // not the most recent allocation; in that case the block stays where
            // it is and we simply report the smaller size.
            return match self.stack_alloc.shrink(ptr, old_layout, new_layout) {
                Ok(res) => Ok(res),
                Err(_) => Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size())),
            };
        }
        // The block came from the fallback allocator; let it handle the shrink.
        self.fallback.shrink(ptr, old_layout, new_layout)
    }

    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}
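
// A few sketch tests for the LIFO-reuse behaviour of `StackAllocator`. They
// rely only on the items defined above (the `Allocator`/`Layout` imports are
// picked up through `use super::*`).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn most_recent_block_is_reclaimed() {
        let alloc = StackAllocator::<64>::new();
        let layout = Layout::from_size_align(16, 1).unwrap();

        let block = alloc.allocate(layout).unwrap();
        assert_eq!(alloc.current_offset(), 16);

        // Freeing the top block rolls the bump pointer back to its start.
        unsafe { alloc.deallocate(block.cast(), layout) };
        assert_eq!(alloc.current_offset(), 0);
    }

    #[test]
    fn older_blocks_are_not_reclaimed() {
        let alloc = StackAllocator::<64>::new();
        let layout = Layout::from_size_align(16, 1).unwrap();

        let first = alloc.allocate(layout).unwrap();
        let _second = alloc.allocate(layout).unwrap();
        assert_eq!(alloc.current_offset(), 32);

        // Freeing a block that is not on top leaves the offset untouched;
        // its space is only recovered by `reset`.
        unsafe { alloc.deallocate(first.cast(), layout) };
        assert_eq!(alloc.current_offset(), 32);
    }
}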