// silly_alloc/bump/mod.rs

/*!

Bump allocators.

Bump allocators work on a linear chunk of memory and only store a pointer where the next available byte is. New allocations are made by moving that pointer forwards, which is easy and fast. The downside is that memory cannot be freed and reused, so it should be used for short-lived programs.

# Examples

## Using an array or slice as heap

```rust
use silly_alloc::SliceBumpAllocator;

const ARENA_SIZE: usize = 64 * 1024 * 1024;
static ARENA: [u8; ARENA_SIZE] = [0u8; ARENA_SIZE];

#[global_allocator]
static ALLOCATOR: SliceBumpAllocator = SliceBumpAllocator::with_slice(ARENA.as_slice());
```

## Using the entire WebAssembly Memory as heap

```rust
use silly_alloc::WasmBumpAllocator;

#[global_allocator]
static ALLOCATOR: WasmBumpAllocator = WasmBumpAllocator::with_memory();
```

Note that `WasmBumpAllocator` respects the heap start address that is provided by the linker, making sure `static`s and other data doesn’t get corrupted by runtime allocations.

*/
33use core::{
34    alloc::{GlobalAlloc, Layout},
35    cell::UnsafeCell,
36    fmt::Debug,
37    marker::PhantomData,
38    ptr::null_mut,
39};
40
41pub mod head;
42pub use head::{Head, SingleThreadedHead, ThreadSafeHead};
43
44#[cfg(target_arch = "wasm32")]
45pub mod wasm;
46#[cfg(target_arch = "wasm32")]
47pub use wasm::{ThreadsafeWasmBumpAllocator, WasmBumpAllocator};
48
/// Trait to model a consecutive chunk of linear memory for bump allocators.
pub trait BumpAllocatorArena {
    /// Returns the first pointer in the arena.
    fn start(&self) -> *const u8;
    /// Returns the current size of the arena in bytes.
    fn size(&self) -> usize;
    /// Ensures that the arena is at least `min_size` bytes big, or returns an error if that is not possible.
    fn ensure_min_size(&self, min_size: usize) -> BumpAllocatorArenaResult<usize>;
    /// Returns the number of bytes `ptr` is pointing past the end of the arena.
    /// Returns `None` while `ptr` is still within the arena.
    ///
    /// Note that `Some(0)` means `ptr` points at the first byte *after* the
    /// arena, i.e. it is already out of bounds.
    fn past_end(&self, ptr: *const u8) -> Option<usize> {
        // Plain address arithmetic instead of `offset()`/`offset_from()`:
        // the pointer-based version was limited to `isize` and panicked
        // (via `try_into().unwrap()`) for arena sizes above `isize::MAX`.
        // `end` is one past the last valid byte; since the arena is a live
        // allocation, this address cannot overflow `usize`.
        let end = self.start() as usize + self.size();
        let addr = ptr as usize;
        if addr < end {
            None
        } else {
            Some(addr - end)
        }
    }
}

/// Errors an arena can report.
#[derive(Clone, Debug)]
pub enum BumpAllocatorArenaError {
    /// The arena could not be grown to the requested size.
    GrowthFailed,
    /// An unspecified failure.
    Unknown,
}

/// Convenience alias for results produced by [`BumpAllocatorArena`] methods.
pub type BumpAllocatorArenaResult<T> = core::result::Result<T, BumpAllocatorArenaError>;
80
81impl BumpAllocatorArena for &[u8] {
82    fn start(&self) -> *const u8 {
83        self.as_ptr()
84    }
85
86    fn size(&self) -> usize {
87        self.len()
88    }
89
90    fn ensure_min_size(&self, _min_size: usize) -> BumpAllocatorArenaResult<usize> {
91        Err(BumpAllocatorArenaError::GrowthFailed)
92    }
93}
94
/// A generic bump allocator.
///
/// The bump allocator works on memory `M`, tracking where the remaining
/// free memory starts using head `H`.
pub struct BumpAllocator<'a, M: BumpAllocatorArena = &'a [u8], H: Head = SingleThreadedHead> {
    // Head bookkeeping (bytes handed out so far). Wrapped in `UnsafeCell`
    // because `GlobalAlloc` methods only receive `&self`.
    head: UnsafeCell<H>,
    // The arena allocations are served from.
    memory: M,
    // Ties the allocator to the lifetime of a borrowed arena such as `&'a [u8]`.
    lifetime: PhantomData<&'a u8>,
}
104
105/// A `BumpAllocator` that uses the given byte slice as the arena.
106pub type SliceBumpAllocator<'a> = BumpAllocator<'a, &'a [u8], SingleThreadedHead>;
107
108impl<'a> SliceBumpAllocator<'a> {
109    pub const fn with_slice(arena: &'a [u8]) -> SliceBumpAllocator<'a> {
110        BumpAllocator {
111            memory: arena,
112            head: UnsafeCell::new(SingleThreadedHead::new()),
113            lifetime: PhantomData,
114        }
115    }
116}
117
118/// A `BumpAllocator` that uses the given slice as the arena.
119pub type ThreadsafeSliceBumpAllocator<'a> = BumpAllocator<'a, &'a [u8], ThreadSafeHead>;
120
121impl<'a> ThreadsafeSliceBumpAllocator<'a> {
122    pub const fn with_slice(arena: &'a [u8]) -> ThreadsafeSliceBumpAllocator<'a> {
123        BumpAllocator {
124            memory: arena,
125            head: UnsafeCell::new(ThreadSafeHead::new()),
126            lifetime: PhantomData,
127        }
128    }
129}
130
131impl<'a, M: BumpAllocatorArena, H: Head + Default> BumpAllocator<'a, M, H> {
132    pub const fn new(memory: M, head: H) -> Self {
133        BumpAllocator {
134            memory,
135            head: UnsafeCell::new(head),
136            lifetime: PhantomData,
137        }
138    }
139
140    pub unsafe fn reset(&self) {
141        if let Some(head) = self.try_as_head_mut() {
142            head.set(self.memory.start() as usize);
143        }
144    }
145
146    fn get_head_ptr(&self) -> Option<*const u8> {
147        let offset: isize = self.as_head_mut().num_bytes_used().try_into().ok()?;
148        unsafe { Some(self.memory.start().offset(offset)) }
149    }
150
151    fn try_as_head_mut(&self) -> Option<&mut H> {
152        unsafe { self.head.get().as_mut() }
153    }
154
155    fn as_head_mut(&self) -> &mut H {
156        self.try_as_head_mut().unwrap()
157    }
158
159    pub fn arena(&self) -> &dyn BumpAllocatorArena {
160        &self.memory
161    }
162}
163
164impl<'a, M: BumpAllocatorArena, H: Head + Default> Debug for BumpAllocator<'a, M, H> {
165    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
166        let head: i64 = unsafe { self.head.get().as_ref() }
167            .unwrap()
168            .num_bytes_used() as i64;
169        let size = self.memory.size();
170        f.debug_struct("BumpAllocator")
171            .field("head", &head)
172            .field("size", &size)
173            .finish()
174    }
175}
176
// SAFETY: NOTE(review): this declares *every* `BumpAllocator` shareable
// across threads, including ones backed by `SingleThreadedHead`. Soundness
// appears to rely on callers choosing `ThreadSafeHead` for multi-threaded
// use — confirm this trade-off is documented at the crate level.
unsafe impl<'a, M: BumpAllocatorArena, H: Head> Sync for BumpAllocator<'a, M, H> {}
178
179unsafe impl<'a, M: BumpAllocatorArena, H: Head + Default> GlobalAlloc for BumpAllocator<'a, M, H> {
180    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
181        let align = layout.align();
182        let size = layout.size();
183        let ptr = match self.get_head_ptr() {
184            Some(ptr) => ptr,
185            _ => return null_mut(),
186        };
187        let offset = ptr.align_offset(align);
188        let head = self.as_head_mut();
189        let last_byte_of_new_allocation = self.memory.start().offset(
190            (head.num_bytes_used() + offset + size - 1)
191                .try_into()
192                .unwrap(),
193        );
194        if let Some(needed_bytes) = self.memory.past_end(last_byte_of_new_allocation) {
195            match self
196                .memory
197                .ensure_min_size(self.memory.size() + needed_bytes)
198            {
199                Err(_) => return null_mut(),
200                _ => {}
201            };
202        }
203
204        head.bump(offset + size);
205        ptr.offset(offset.try_into().unwrap()) as *mut u8
206    }
207
208    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
209}
210
#[cfg(test)]
mod tests {
    use super::*;
    use std::vec::Vec;
    use xorshift;

    /// Consecutive same-layout allocations advance the head by exactly one
    /// aligned slot.
    #[test]
    fn increment() {
        let arena = [0u8; 1024];
        {
            let allocator = SliceBumpAllocator::with_slice(arena.as_slice());
            unsafe {
                let ptr1 = allocator.alloc(Layout::from_size_align(3, 4).unwrap()) as usize;
                assert!(ptr1 % 4 == 0);
                let ptr2 = allocator.alloc(Layout::from_size_align(3, 4).unwrap()) as usize;
                assert!(ptr2 % 4 == 0);
                assert!(
                    ptr1 + 4 == ptr2,
                    "Expected ptr2 to be 4 bytes after ptr1, got ptr1=0x{:08x} ptr2=0x{:08x}",
                    ptr1,
                    ptr2
                );
            }
        }
    }

    /// An exhausted slice arena cannot grow, so the allocator must signal
    /// failure with a null pointer.
    #[test]
    fn null() {
        let arena = [0u8; 4];
        {
            let allocator = SliceBumpAllocator::with_slice(arena.as_slice());
            unsafe {
                let ptr1 = allocator.alloc(Layout::from_size_align(4, 4).unwrap()) as usize;
                assert_eq!(ptr1 % 4, 0);
                let ptr2 = allocator.alloc(Layout::from_size_align(4, 4).unwrap()) as usize;
                assert_eq!(ptr2, 0);
            }
        }
    }

    /// The very last byte of the arena must still be allocatable.
    #[test]
    fn use_last_byte() {
        let arena = [0u8; 4];
        {
            let allocator = SliceBumpAllocator::with_slice(arena.as_slice());
            unsafe {
                let ptr1 = allocator.alloc(Layout::from_size_align(3, 4).unwrap()) as usize;
                assert_eq!(ptr1 % 4, 0);
                let ptr2 = allocator.alloc(Layout::from_size_align(1, 1).unwrap()) as usize;
                assert_eq!(ptr2, arena.as_ptr().offset(3) as usize);
            }
        }
    }

    /// Randomized smoke test: pointers stay non-null, strictly increase and
    /// respect the requested alignment.
    #[test]
    fn minifuzz() {
        const SIZE: usize = 1024 * 1024;

        use xorshift::{Rng, SeedableRng};
        // Fixed seed so the test is deterministic.
        let mut rng = xorshift::Xoroshiro128::from_seed(&[1u64, 2, 3, 4]);

        for _attempts in 1..100 {
            let mut arena = Vec::with_capacity(SIZE);
            arena.resize(SIZE, 0);
            let allocator = SliceBumpAllocator::with_slice(arena.as_slice());
            let mut last_ptr: Option<usize> = None;
            for _allocation in 1..10 {
                let size = rng.gen_range(1, 32);
                let alignment = 1 << rng.gen_range(1, 5);
                let layout = Layout::from_size_align(size, alignment).unwrap();
                let ptr = unsafe { allocator.alloc(layout) as usize };
                // At most 10 small allocations in a 1 MiB arena can never
                // exhaust it, so a null pointer would be a bug.
                assert_ne!(ptr, 0, "Allocation unexpectedly failed");
                if let Some(prev) = last_ptr {
                    assert!(ptr > prev, "Pointer didn’t bump");
                }
                last_ptr = Some(ptr);
                assert_eq!(ptr % alignment, 0);
            }
        }
    }
}