stack_allocator/
lib.rs

#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
#![doc = include_str!("../README.md")]
#![cfg_attr(nightly, feature(allocator_api))]
use core::cell::UnsafeCell;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;

extern crate alloc;

#[cfg(not(nightly))]
use allocator_api2::alloc::{AllocError, Allocator, Layout};
#[cfg(nightly)]
use core::alloc::{AllocError, Allocator, Layout};

/// A simple bump allocator that lives on the stack (or in static memory).
///
/// It tries to reuse freed memory only if it is the most recently allocated block.
///
/// `N` is the size of the backing buffer in bytes.
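///
/// # Example
///
/// A minimal usage sketch (the crate name `stack_allocator` and the direct use
/// of the `Allocator` trait here are assumptions, not taken from the original
/// docs):
///
/// ```ignore
/// use stack_allocator::StackAllocator;
/// use allocator_api2::alloc::{Allocator, Layout};
///
/// let alloc = StackAllocator::<64>::new();
/// let layout = Layout::new::<u64>();
/// let block = alloc.allocate(layout).expect("fits in the 64-byte buffer");
/// assert_eq!(alloc.current_offset(), layout.size());
/// // Freeing the most recent block makes its bytes reusable.
/// unsafe { alloc.deallocate(block.cast(), layout) };
/// assert_eq!(alloc.current_offset(), 0);
/// ```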
pub struct StackAllocator<const N: usize> {
    /// The buffer that backs all allocations.
    buf: UnsafeCell<MaybeUninit<[u8; N]>>,
    /// Offset of the next free byte inside `buf`.
    offset: AtomicUsize,
}

impl<const N: usize> Default for StackAllocator<N> {
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: the bump offset is only updated through atomic operations, and
// concurrent `allocate` calls hand out disjoint ranges of `buf`.
unsafe impl<const N: usize> Send for StackAllocator<N> {}
unsafe impl<const N: usize> Sync for StackAllocator<N> {}

impl<const N: usize> StackAllocator<N> {
    /// Create a fresh allocator. The buffer starts empty.
    pub const fn new() -> Self {
        Self {
            buf: UnsafeCell::new(MaybeUninit::uninit()),
            offset: AtomicUsize::new(0),
        }
    }

    /// Reset the allocator, discarding all previously allocated memory.
    ///
    /// # Safety
    /// The caller must guarantee that no allocation created by this allocator
    /// is still in use.
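    ///
    /// # Example
    ///
    /// A sketch of the intended use (the surrounding code is illustrative, not
    /// from the original docs):
    ///
    /// ```ignore
    /// let mut alloc = stack_allocator::StackAllocator::<64>::new();
    /// // ... allocate, use, and drop every block handed out by `alloc` ...
    /// unsafe { alloc.reset() };
    /// assert_eq!(alloc.current_offset(), 0);
    /// ```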
    pub unsafe fn reset(&mut self) {
        self.offset.store(0, Ordering::Release);
    }

    /// Align `addr` upwards to `align`. `align` must be a power of two.
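    ///
    /// For example, `align_up(13, 8)` is `(13 + 7) & !7 = 16`, while
    /// `align_up(16, 8)` stays at `16`.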
    #[inline]
    const fn align_up(addr: usize, align: usize) -> usize {
        (addr + align - 1) & !(align - 1)
    }

    /// Get the current offset of the allocator.
    pub fn current_offset(&self) -> usize {
        self.offset.load(Ordering::Acquire)
    }
}

unsafe impl<const N: usize> Allocator for StackAllocator<N> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        let mut start;
        loop {
            // Compute the start of the allocation respecting alignment.
            start = Self::align_up(self.offset.load(Ordering::Acquire), layout.align());
            let end = start.checked_add(layout.size()).ok_or(AllocError)?;

            // Ensure we stay inside the buffer.
            if end > N {
                return Err(AllocError);
            }

            // Update the bump pointer; retry if another thread raced us.
            if self
                .offset
                .compare_exchange(start, end, Ordering::Release, Ordering::Relaxed)
                .is_ok()
            {
                break;
            }
        }
        // SAFETY: `start..end` is inside `self.buf` and properly aligned.
        let ptr = unsafe { self.buf.get().cast::<u8>().add(start) };
        Ok(NonNull::slice_from_raw_parts(
            NonNull::new(ptr).unwrap(),
            layout.size(),
        ))
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Compute the start offset of the allocation.
        let base = self.buf.get() as usize;
        let start = ptr.as_ptr() as usize - base;
        let end = start + layout.size();

        // If this block is the most recent allocation, move the bump pointer back.
        let _ = self
            .offset
            .compare_exchange(end, start, Ordering::Release, Ordering::Relaxed);
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // In-place growth is only possible when the block being grown is the
        // most recent allocation.
        // Compute the start offset of the existing allocation.
        let base = self.buf.get() as usize;
        let old_start = ptr.as_ptr() as usize - base;

        // Verify that the allocator's current offset matches the end of this allocation.
        let expected_offset = old_start + old_layout.size();
        let current_offset = self.offset.load(Ordering::Acquire);
        if current_offset != expected_offset {
            return Err(AllocError);
        }

        // The new layout must be at least as large as the old one.
        if new_layout.size() < old_layout.size() {
            return Err(AllocError);
        }

        // Compute the new end of the allocation, checking for overflow and buffer limits.
        let new_end = old_start.checked_add(new_layout.size()).ok_or(AllocError)?;
        if new_end > N {
            return Err(AllocError);
        }

        // Attempt to bump the allocator's offset forward. If another allocation
        // slipped in since the check above, we cannot extend in place.
        if self
            .offset
            .compare_exchange(
                expected_offset,
                new_end,
                Ordering::Release,
                Ordering::Relaxed,
            )
            .is_err()
        {
            // We failed to grow in place; allocate a fresh block and copy the
            // old contents into it, as the `grow` contract requires.
            let new_ptr = self.allocate(new_layout)?;
            // SAFETY: the old block lies below the current offset, so it cannot
            // overlap the freshly allocated one.
            core::ptr::copy_nonoverlapping(
                ptr.as_ptr(),
                new_ptr.cast::<u8>().as_ptr(),
                old_layout.size(),
            );
            return Ok(new_ptr);
        }
        // Return the same pointer, now representing a slice of the larger size.
        Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // Compute the start offset of the existing allocation.
        let base = self.buf.get() as usize;
        let old_start = ptr.as_ptr() as usize - base;

        // Verify that the allocator's current offset matches the end of this allocation.
        let expected_offset = old_start + old_layout.size();
        let current_offset = self.offset.load(Ordering::Acquire);
        if current_offset != expected_offset {
            return Err(AllocError);
        }

        // The new layout must be no larger than the old one.
        if new_layout.size() > old_layout.size() {
            return Err(AllocError);
        }

        // Compute the new end of the allocation.
        let new_end = old_start + new_layout.size();

        // Attempt to move the bump pointer backwards.
        // We simply don't care if this fails, as it only means that some other
        // allocation happened in the meantime.
        // In that case, the memory will be reclaimed later when `reset` is called.
        let _ = self.offset.compare_exchange(
            expected_offset,
            new_end,
            Ordering::Release,
            Ordering::Relaxed,
        );

        // Return the same pointer, now representing a slice of the smaller size.
        Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()))
    }
    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}

/// A hybrid allocator that first tries to allocate from a stack-backed bump
/// allocator and falls back to a user-provided allocator.
///
/// `N` - size of the stack buffer in bytes.
/// `F` - the fallback allocator type (e.g. `std::alloc::Global` or any custom
/// allocator that implements `Allocator`).
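///
/// # Example
///
/// A usage sketch (the crate name `stack_allocator` and the `Global` fallback
/// from `allocator_api2` are assumptions, not taken from the original docs):
///
/// ```ignore
/// use stack_allocator::HybridAllocator;
/// use allocator_api2::alloc::{Allocator, Global, Layout};
///
/// let alloc: HybridAllocator<32, Global> = HybridAllocator::new(Global);
/// // Small requests come out of the 32-byte stack buffer...
/// let small = alloc.allocate(Layout::new::<u32>()).unwrap();
/// assert!(alloc.current_offset() > 0);
/// // ...while anything that does not fit is served by `Global`.
/// let big = alloc.allocate(Layout::array::<u8>(128).unwrap()).unwrap();
/// ```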
pub struct HybridAllocator<const N: usize, F: Allocator> {
    stack_alloc: StackAllocator<N>,
    fallback: F,
}

#[cfg(feature = "alloc")]
impl<const N: usize> Default for HybridAllocator<N, alloc::alloc::Global> {
    fn default() -> Self {
        Self::new(alloc::alloc::Global)
    }
}

impl<const N: usize, F: Allocator> HybridAllocator<N, F> {
    /// Create a new hybrid allocator.
    ///
    /// The caller supplies the fallback allocator that will be used when the
    /// stack buffer cannot satisfy a request.
    pub const fn new(fallback: F) -> Self {
        Self {
            stack_alloc: StackAllocator::new(),
            fallback,
        }
    }

    /// Reset the allocator, discarding all previously allocated memory.
    ///
    /// # Safety
    /// The caller must guarantee that no allocation created by this allocator
    /// is still in use.
    pub unsafe fn reset(&mut self) {
        self.stack_alloc.reset();
    }

    /// Get the current offset of the stack allocator.
    pub fn current_offset(&self) -> usize {
        self.stack_alloc.current_offset()
    }

    /// Get a reference to the fallback allocator.
    pub fn fallback(&self) -> &F {
        &self.fallback
    }

    /// Check whether the stack buffer is completely full.
    ///
    /// If this returns `true`, all further (non-zero-sized) allocations will be
    /// served by the fallback allocator until `reset` is called.
    pub fn is_stack_exhausted(&self) -> bool {
        self.current_offset() >= N
    }
}

unsafe impl<const N: usize, F: Allocator> Allocator for HybridAllocator<N, F> {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Try the stack allocator first; on failure delegate to the fallback.
        match self.stack_alloc.allocate(layout) {
            ok @ Ok(_) => ok,
            Err(_) => self.fallback.allocate(layout),
        }
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Determine whether the pointer belongs to the stack buffer.
        let base = self.stack_alloc.buf.get() as usize;
        let end = base + N;
        let addr = ptr.as_ptr() as usize;

        if (base..end).contains(&addr) {
            self.stack_alloc.deallocate(ptr, layout);
        } else {
            self.fallback.deallocate(ptr, layout);
        }
    }

    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;

        if (base..base + N).contains(&addr) {
            // Attempt to grow in place using the stack allocator.
            if let Ok(res) = self.stack_alloc.grow(ptr, old_layout, new_layout) {
                return Ok(res);
            }
            // Growing on the stack failed: allocate a new block from the
            // fallback and copy the data over.
            let new_ptr = self.fallback.allocate(new_layout)?;
            core::ptr::copy_nonoverlapping(
                ptr.as_ptr(),
                new_ptr.cast::<u8>().as_ptr(),
                old_layout.size(),
            );
            // Release the old stack block (a no-op unless it is the topmost allocation).
            self.stack_alloc.deallocate(ptr, old_layout);
            return Ok(new_ptr);
        }
        // The pointer was produced by the fallback allocator; let it handle the grow.
        self.fallback.grow(ptr, old_layout, new_layout)
    }

    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        let base = self.stack_alloc.buf.get() as usize;
        let addr = ptr.as_ptr() as usize;

        if (base..base + N).contains(&addr) {
            // Attempt to shrink in place using the stack allocator.
            if let Ok(res) = self.stack_alloc.shrink(ptr, old_layout, new_layout) {
                return Ok(res);
            }
            // The block is not the most recent stack allocation, so the bump
            // pointer cannot move back. The memory is still valid, so return the
            // same pointer with the smaller length; the space is reclaimed when
            // `reset` is called. It must not be passed to the fallback allocator,
            // which does not own it.
            return Ok(NonNull::slice_from_raw_parts(ptr, new_layout.size()));
        }
        // The pointer was produced by the fallback allocator; let it handle the shrink.
        self.fallback.shrink(ptr, old_layout, new_layout)
    }

    fn by_ref(&self) -> &Self
    where
        Self: Sized,
    {
        self
    }
}
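
// A minimal smoke test sketching the LIFO reuse behaviour documented on
// `StackAllocator`. The buffer size, layout, and assertions below are
// illustrative choices, not part of the original test suite.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn lifo_reuse_moves_the_offset_back() {
        let alloc = StackAllocator::<64>::new();
        let layout = Layout::from_size_align(16, 8).unwrap();

        let a = alloc.allocate(layout).unwrap();
        let b = alloc.allocate(layout).unwrap();
        assert_eq!(alloc.current_offset(), 32);

        // Freeing the most recent block moves the bump pointer back...
        unsafe { alloc.deallocate(b.cast(), layout) };
        assert_eq!(alloc.current_offset(), 16);

        // ...and freeing the new topmost block empties the buffer again.
        unsafe { alloc.deallocate(a.cast(), layout) };
        assert_eq!(alloc.current_offset(), 0);
    }
}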