stack-arena 0.12.0

A fast, stack-like arena allocator for efficient memory management, implemented in Rust.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
use std::{alloc::Layout, ptr::NonNull};

use crate::{AllocError, Allocator, chunk::Chunk};

/// A buffer-based arena allocator that manages a single memory chunk.
///
/// `BufferArena` is a low-level component used by [`StackArena`](crate::StackArena) to manage individual
/// memory chunks. It provides efficient allocation within a single contiguous memory
/// region and implements the [`Allocator`](crate::Allocator) trait for memory management operations.
///
/// Each `BufferArena` contains a memory chunk and a pointer to the current position
/// in the buffer, allowing for efficient bump allocation (also known as linear allocation).
///
/// # Memory Management
///
/// - Uses a simple bump allocation strategy for maximum performance
/// - Allocates memory sequentially from a single contiguous chunk
/// - Follows LIFO (Last-In-First-Out) deallocation pattern
/// - Provides methods to check remaining capacity and allocation suitability
///
/// # Performance
///
/// `BufferArena` is designed for maximum allocation speed:
///
/// - Allocation is a simple pointer increment operation
/// - No fragmentation within a single chunk
/// - Minimal overhead per allocation
/// - Efficient memory reuse with the LIFO pattern
#[derive(Debug)]
pub struct BufferArena {
    /// The underlying buffer of bytes
    pub(crate) store: Chunk,
    /// Pointer to the first free byte within `store`; everything between the
    /// chunk base and this pointer is currently allocated (bump pointer).
    pub(crate) ptr: NonNull<u8>,
}

impl Default for BufferArena {
    /// Creates an empty arena backed by a default-sized chunk, with the bump
    /// pointer positioned at the chunk's base (nothing allocated yet).
    #[inline(always)]
    fn default() -> Self {
        let chunk = Chunk::default();
        let base = chunk.base();
        Self { store: chunk, ptr: base }
    }
}

impl BufferArena {
    /// Creates a new buffer arena backed by a chunk of the given capacity.
    ///
    /// The capacity will be rounded up to the next power of two if it's not
    /// already a power of two.
    ///
    /// # Parameters
    ///
    /// * `capacity` - The capacity of the buffer in bytes
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::BufferArena;
    ///
    /// let arena = BufferArena::with_capacity(4096);
    /// ```
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let chunk = Chunk::with_capacity(capacity);
        let base = chunk.base();
        Self { store: chunk, ptr: base }
    }

    /// Returns the number of bytes already handed out from the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::BufferArena;
    ///
    /// let arena = BufferArena::with_capacity(4096);
    /// assert_eq!(arena.used(), 0);
    /// ```
    #[inline]
    pub fn used(&self) -> usize {
        // Distance from the chunk base to the current bump pointer.
        self.ptr.as_ptr() as usize - self.store.base().as_ptr() as usize
    }

    /// Returns the number of bytes still available in the buffer.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::BufferArena;
    ///
    /// let arena = BufferArena::with_capacity(4096);
    /// assert_eq!(arena.remaining(), 4096);
    /// ```
    #[inline]
    pub fn remaining(&self) -> usize {
        // Distance from the current bump pointer to the end of the chunk.
        self.store.limit().as_ptr() as usize - self.ptr.as_ptr() as usize
    }

    /// Reports whether `ptr` points into this arena's memory range.
    ///
    /// # Parameters
    ///
    /// * `ptr` - The pointer to check
    ///
    /// # Returns
    ///
    /// `true` if the pointer is within the underlying chunk, `false` otherwise.
    #[inline]
    pub fn contains(&self, ptr: NonNull<u8>) -> bool {
        self.store.contains(ptr)
    }

    /// Reports whether an allocation with `layout` would fit in this buffer.
    ///
    /// Accounts for the padding needed to bring the current bump pointer up to
    /// `layout.align()` before placing `layout.size()` bytes.
    ///
    /// # Parameters
    ///
    /// * `layout` - The memory layout to check
    ///
    /// # Returns
    ///
    /// `true` if the padded allocation fits in the remaining space.
    #[inline]
    pub fn sufficient_for(&self, layout: Layout) -> bool {
        let padding = self.ptr.align_offset(layout.align());
        padding + layout.size() <= self.remaining()
    }
}

/// Implements conversion from `Chunk` to `BufferArena`.
///
/// The arena takes ownership of the chunk and will allocate from its base.
impl From<Chunk> for BufferArena {
    #[inline]
    fn from(chunk: Chunk) -> Self {
        // The bump pointer starts at the base: nothing is allocated yet.
        let base = chunk.base();
        Self { store: chunk, ptr: base }
    }
}

/// Implementation of the `Allocator` trait for `BufferArena`.
///
/// Memory is carved sequentially out of one contiguous chunk with a bump
/// pointer. Blocks are expected to be released in LIFO (Last-In-First-Out)
/// order.
impl Allocator for BufferArena {
    /// Allocates memory with the specified layout.
    ///
    /// The bump pointer is first padded up to `layout.align()`, then advanced
    /// past `layout.size()` bytes; the padded position is returned.
    ///
    /// # Safety
    ///
    /// Returns a raw pointer; the caller must use it without violating memory
    /// safety.
    #[inline]
    unsafe fn allocate(&mut self, layout: Layout) -> Result<NonNull<[u8]>, crate::AllocError> {
        debug_assert!(layout.align() > 0);
        debug_assert!(layout.align() <= 4096);
        if !self.sufficient_for(layout) {
            return Err(AllocError::CapacityExceeded {
                requested: layout.size(),
                remaining: self.remaining(),
            });
        }
        // SAFETY: `sufficient_for` verified that padding + size fit in the chunk.
        let start = unsafe { self.ptr.add(self.ptr.align_offset(layout.align())) };
        self.ptr = unsafe { start.add(layout.size()) };
        Ok(NonNull::slice_from_raw_parts(start, layout.size()))
    }

    /// Deallocates memory previously allocated by this allocator.
    ///
    /// Rewinds the bump pointer to `ptr`, which frees this block and
    /// everything allocated after it (LIFO deallocation).
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by `allocate` on this arena and `layout`
    /// must match the layout used for that allocation.
    #[inline]
    unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) {
        debug_assert!(self.store.contains(ptr.cast()));
        debug_assert!(unsafe { ptr.add(layout.size()) } <= self.store.limit());
        self.ptr = ptr;
    }

    /// Grows a previously allocated memory block to a larger size.
    ///
    /// It **only supports growing the last allocation** (LIFO pattern);
    /// growing any other block trips a debug assertion. Passing a smaller
    /// `new_layout` is a contract violation and panics via `unreachable!`.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by `allocate` on this arena and
    /// `old_layout` must match the layout used for that allocation.
    #[inline]
    unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, crate::AllocError> {
        debug_assert_eq!(
            unsafe { ptr.add(old_layout.size()) },
            self.ptr,
            "last allocation only"
        );
        debug_assert_eq!(old_layout.align(), new_layout.align());
        let (old_size, new_size) = (old_layout.size(), new_layout.size());
        if new_size < old_size {
            unreachable!("use shrink instead");
        }
        if new_size > old_size {
            let new_end = unsafe { ptr.add(new_size) };
            if new_end > self.store.limit() {
                return Err(AllocError::CapacityExceeded {
                    requested: new_size - old_size,
                    remaining: self.remaining(),
                });
            }
            // Extend in place: the block stays put, only its end moves.
            self.ptr = new_end;
        }
        // Equal sizes fall through: the block is returned unchanged.
        Ok(NonNull::slice_from_raw_parts(ptr, new_size))
    }

    /// Shrinks a previously allocated memory block to a smaller size.
    ///
    /// Only the most recently allocated block may be shrunk (LIFO pattern);
    /// shrinking any other block trips a debug assertion. Passing a larger
    /// `new_layout` is a contract violation and panics via `unreachable!`.
    ///
    /// # Safety
    ///
    /// `ptr` must have been returned by `allocate` on this arena and
    /// `old_layout` must match the layout used for that allocation.
    #[inline]
    unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, crate::AllocError> {
        debug_assert_eq!(
            unsafe { ptr.add(old_layout.size()) },
            self.ptr,
            "last allocation only"
        );
        debug_assert_eq!(old_layout.align(), new_layout.align());
        let (old_size, new_size) = (old_layout.size(), new_layout.size());
        if new_size > old_size {
            unreachable!("use grow instead");
        }
        if new_size < old_size {
            // Move the bump pointer back to the new end of the block.
            self.ptr = unsafe { ptr.add(new_size) };
        }
        Ok(NonNull::slice_from_raw_parts(ptr, new_size))
    }
}

/// Implements conversion from `BufferArena` to `Chunk`.
///
/// This allows extracting the underlying chunk from a `BufferArena`.
/// Implementing `From` (rather than a hand-written `Into`) follows the
/// standard-library convention and still provides `Into<Chunk>` for free
/// through the blanket `impl<T, U: From<T>> Into<U> for T`, so existing
/// `arena.into()` call sites keep working.
impl From<BufferArena> for Chunk {
    /// Converts the buffer arena into its underlying chunk, discarding the
    /// bump pointer.
    #[inline]
    fn from(arena: BufferArena) -> Chunk {
        arena.store
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // A freshly created arena reports zero usage and full capacity remaining.
    #[test]
    fn test_buffer_arena_new_and_capacity() {
        let cap = 4096;
        let arena = BufferArena::with_capacity(cap);
        assert_eq!(arena.used(), 0);
        assert_eq!(arena.remaining(), cap);
    }

    // Round-trips Chunk -> BufferArena -> Chunk and checks nothing is lost:
    // the arena starts empty and the recovered chunk keeps its capacity.
    #[test]
    fn test_chunk_conversion() {
        let chunk = Chunk::with_capacity(4096);
        let arena: BufferArena = chunk.into();
        assert_eq!(arena.used(), 0);
        assert_eq!(arena.remaining(), 4096);

        let to_chunk: Chunk = arena.into();
        assert_eq!(to_chunk.capacity(), 4096);
    }

    // A single allocate advances `used` by the layout size and a matching
    // deallocate rewinds it back to zero (LIFO behavior).
    #[test]
    fn test_allocate_and_deallocate() {
        let mut arena = BufferArena::with_capacity(4096);
        let layout = Layout::from_size_align(8, 1).unwrap();
        let ptr = unsafe { arena.allocate(layout).unwrap() };
        assert_eq!(ptr.len(), 8);
        assert_eq!(arena.used(), 8);

        // Deallocate should reset offset if LIFO
        unsafe { arena.deallocate(ptr.cast(), layout) };
        assert_eq!(arena.used(), 0);
    }

    // Allocates sizes 1..=7 with alignments iterated in descending order
    // (4096, 32, 16, 8, 4, 2, 1) and verifies: returned addresses honor the
    // alignment, allocations never overlap, and the padding bytes between
    // them stay zeroed — see the expected byte image asserted below.
    #[test]
    fn test_alignment() {
        let mut arena = BufferArena::with_capacity(4096);
        unsafe { arena.ptr.write_bytes(0, arena.remaining()) };
        let mut prev_end = arena.ptr;

        for (i, align) in [1, 2, 4, 8, 16, 32, 4096].into_iter().rev().enumerate() {
            let size = i + 1;
            let layout = Layout::from_size_align(size, align).unwrap();
            let ptr = unsafe { arena.allocate_zeroed(layout).unwrap() };
            let addr = ptr.cast::<u8>().as_ptr() as usize;
            // Check alignment
            assert_eq!(addr % align, 0, "addr {ptr:?} not aligned to {align}");
            // Write data and verify
            let fill = size as u8;
            unsafe { ptr.cast::<u8>().write_bytes(fill, layout.size()) };
            let data = unsafe { ptr.as_ref() };
            assert_eq!(data, vec![fill; size].as_slice());

            // Ensure allocations do not overlap
            assert!(ptr.cast() >= prev_end, "Allocation overlapped previous");
            prev_end = unsafe { ptr.cast().add(layout.size()) };
        }
        // 79 = final offset after all seven aligned allocations.
        assert_eq!(arena.used(), 79);
        let written =
            unsafe { std::slice::from_raw_parts(arena.store.base().as_ptr(), arena.used()) };
        // Each run of `n` repeated bytes is allocation n; zeros are alignment padding.
        assert_eq!(
            written,
            [
                1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0, 0, 0, 0, 0,
                4, 4, 4, 4, 5, 5, 5, 5, 5, 0, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7
            ]
            .as_ref()
        );
        // Deallocating at the chunk base rewinds everything at once (LIFO).
        unsafe { arena.deallocate(arena.store.base(), Layout::from_size_align_unchecked(8, 1)) };
        assert_eq!(arena.used(), 0);
    }

    // Filling the entire chunk succeeds, and any further allocation —
    // even a single byte — must fail with an error.
    #[test]
    fn test_allocate_full_arena() {
        let mut arena = BufferArena::with_capacity(4096);
        let layout = Layout::from_size_align(4096, 1).unwrap();
        let ptr = unsafe { arena.allocate(layout).unwrap() };
        assert_eq!(ptr.len(), 4096);
        assert_eq!(arena.used(), 4096);

        // Next allocation should fail
        let layout2 = Layout::from_size_align(1, 1).unwrap();
        assert!(unsafe { arena.allocate(layout2) }.is_err());
    }

    // Growing the most recent allocation in place extends it and advances
    // the arena's used count to the new size.
    #[test]
    fn test_grow_allocation() {
        let mut arena = BufferArena::with_capacity(4096);
        let layout = Layout::from_size_align(8, 1).unwrap();
        let ptr = unsafe { arena.allocate(layout).unwrap() };

        let new_layout = Layout::from_size_align(16, 1).unwrap();
        let grown = unsafe { arena.grow(ptr.cast(), layout, new_layout).unwrap() };
        assert_eq!(grown.len(), 16);
        assert_eq!(arena.used(), 16);
    }

    // Shrinking the most recent allocation rewinds the bump pointer so the
    // freed tail becomes available again.
    #[test]
    fn test_shrink_allocation() {
        let mut arena = BufferArena::with_capacity(4096);
        let layout = Layout::from_size_align(16, 1).unwrap();
        let ptr = unsafe { arena.allocate(layout).unwrap() };

        let new_layout = Layout::from_size_align(8, 1).unwrap();
        let shrunk = unsafe { arena.shrink(ptr.cast(), layout, new_layout).unwrap() };
        assert_eq!(shrunk.len(), 8);
        // The used offset should be reduced accordingly
        assert_eq!(arena.used(), 8);
    }

    // Exercises repeated allocate/deallocate cycles plus a batch of
    // allocations released strictly in reverse (LIFO) order, checking
    // that `used` returns to zero each time.
    #[test]
    fn test_multiple_allocate_and_deallocate() {
        use std::alloc::Layout;

        let mut arena = BufferArena::with_capacity(4096);
        let layout = Layout::from_size_align(8, 1).unwrap();

        // Allocate and deallocate 5 times in a row
        for _ in 0..5 {
            let ptr = unsafe { arena.allocate(layout).unwrap() };
            assert_eq!(ptr.len(), 8);
            assert_eq!(arena.used(), 8);
            unsafe { ptr.cast::<u8>().write_bytes(0xAA, layout.size()) };
            assert_eq!(unsafe { ptr.as_ref() }, [0xAA; 8].as_ref());

            // Deallocate should reset offset if LIFO
            unsafe { arena.deallocate(ptr.cast(), layout) };
            assert_eq!(arena.used(), 0);
        }

        // Allocate several, then deallocate in reverse order
        let mut ptrs = Vec::new();
        for _ in 0..4 {
            let ptr = unsafe { arena.allocate(layout).unwrap() };
            unsafe { ptr.cast::<u8>().write_bytes(0xAA, layout.size()) };
            assert_eq!(unsafe { ptr.as_ref() }, [0xAA; 8].as_ref());
            ptrs.push(ptr);
        }
        assert_eq!(arena.used(), 32);

        for ptr in ptrs.into_iter().rev() {
            unsafe { arena.deallocate(ptr.cast(), layout) };
        }
        assert_eq!(arena.used(), 0);
    }

    // End-to-end scenario: allocate, verify contents, grow the last
    // allocation, write to the grown tail, then allocate again — checking
    // `remaining` after each step.
    #[test]
    fn test_multi_alloc() {
        // Create a BufferArena with a very small chunk size
        let mut arena = BufferArena::with_capacity(4096);

        // First allocation - should fit in the chunk
        let layout = Layout::from_size_align(8, 8).unwrap();
        let ptr = unsafe { arena.allocate(layout) }.unwrap();
        unsafe { ptr.cast::<u8>().write_bytes(0xAA, layout.size()) };

        // Verify data
        let data1 = unsafe { ptr.as_ref() };
        assert_eq!(data1, [0xAA; 8].as_slice());

        // Check remaining space
        let remaining = arena.remaining();
        assert_eq!(remaining, 4088);

        // Try to grow the allocation
        // If there's enough space, grow the allocation
        let new_layout = Layout::from_size_align(layout.size() + 4, layout.align()).unwrap();
        let grown_ptr = unsafe { arena.grow(ptr.cast(), layout, new_layout) }.unwrap();

        // Write to the newly grown portion
        unsafe {
            grown_ptr
                .cast::<u8>()
                .add(layout.size())
                .write_bytes(0xBB, 4)
        };

        assert_eq!(arena.remaining(), 4084);

        // Verify the grown data
        let grown_data = unsafe {
            std::slice::from_raw_parts(grown_ptr.as_ptr() as *const u8, new_layout.size())
        };
        let mut expected = vec![0xAA; layout.size()];
        expected.extend_from_slice(&[0xBB; 4]);
        assert_eq!(grown_data, expected.as_slice());

        let layout = unsafe { Layout::from_size_align_unchecked(4, 4) };
        let ptr = unsafe { arena.allocate(layout).unwrap() };
        assert_eq!(ptr.len(), layout.size());
        assert_eq!(arena.remaining(), 4080);
    }
}