stack_arena/stack_arena.rs

//! A fast, stack-like arena allocator for efficient memory management.
//!
//! # Overview
//!
//! `StackArena` provides an efficient memory allocation strategy for scenarios where:
//! - Multiple objects need to be allocated in sequence
//! - Objects follow a stack-like (LIFO) allocation/deallocation pattern
//! - Memory usage needs to be minimized with low per-allocation overhead
//! - Performance is critical, such as in parsers, interpreters, or compilers
//!
//! # Features
//!
//! - Efficient allocation of variable-sized objects with minimal per-allocation overhead
//! - Stack-like (LIFO) allocation and deallocation pattern
//! - Memory is managed in chunks, automatically growing when needed
//! - Implements the [`Allocator`](crate::Allocator) trait for compatibility with allocation APIs
//! - Low-level memory management via `allocate`, `deallocate`, `grow`, and `shrink`
//! - Automatic chunk reuse for improved performance
//! - Significantly faster than the system allocator for small, frequent allocations
//!
//! # Usage Examples
//!
//! Basic usage:
//!
//! ```
//! use stack_arena::{StackArena, Allocator};
//!
//! let mut arena = StackArena::new();
//!
//! // Allocate memory for an object
//! let layout = std::alloc::Layout::from_size_align(10, 1).unwrap();
//! let ptr = unsafe { arena.allocate(layout).unwrap() };
//!
//! // Use the allocated memory
//! unsafe { std::ptr::write_bytes(ptr.as_ptr() as *mut u8, 0xAA, 10) };
//!
//! // Deallocate when done
//! unsafe { arena.deallocate(ptr.cast(), layout) };
//! ```
//!
//! Creating with a custom chunk size:
//!
//! ```
//! use stack_arena::StackArena;
//!
//! // Create an arena with 8KB chunks
//! let mut arena = StackArena::with_chunk_size(8192);
//! assert!(arena.is_empty());
//! ```
//!
//! For higher-level operations, use [`ObjectStack`](crate::ObjectStack) instead:
//!
//! ```
//! use stack_arena::ObjectStack;
//!
//! let mut stack = ObjectStack::new();
//! stack.extend("Hello, ");
//! stack.extend("world!");
//! let ptr = stack.finish(); // Finalizes the object
//! ```
//!
//! # Memory Management
//!
//! `StackArena` manages memory in chunks:
//!
//! - When the current chunk is full, a new larger chunk is allocated
//! - Old chunks are stored for later reuse when objects are deallocated
//! - Memory is allocated contiguously within chunks for better cache locality
//! - The LIFO pattern allows for efficient memory reuse
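//!
//! A minimal sketch of the LIFO deallocation pattern (sizes are illustrative):
//!
//! ```
//! use stack_arena::{StackArena, Allocator};
//! use std::alloc::Layout;
//!
//! let mut arena = StackArena::new();
//! let layout = Layout::from_size_align(32, 1).unwrap();
//!
//! let first = unsafe { arena.allocate(layout).unwrap() };
//! let second = unsafe { arena.allocate(layout).unwrap() };
//!
//! // Deallocate in reverse (LIFO) order so the arena can reuse the space.
//! unsafe { arena.deallocate(second.cast(), layout) };
//! unsafe { arena.deallocate(first.cast(), layout) };
//! assert!(arena.is_empty());
//! ```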
//!
//! # Safety
//!
//! - All returned pointers are valid until the corresponding object is deallocated
//! - Objects are stored in contiguous memory chunks
//! - Unsafe operations are used internally for performance
//! - The library follows a LIFO (Last-In-First-Out) allocation pattern for efficiency
//! - Users must ensure proper memory safety when working with raw pointers

use std::{alloc::Layout, ptr::NonNull};

use crate::{Allocator, BufferArena, chunk::Chunk};

/// A stack-like arena allocator that efficiently manages memory in chunks.
///
/// `StackArena` provides a low-level memory allocation strategy where objects are allocated
/// in a stack-like fashion (Last-In-First-Out). It manages memory in chunks, automatically
/// allocating new chunks when needed, which reduces allocation overhead and improves performance.
///
/// # Memory Management
///
/// - Memory is organized in chunks, with a current active chunk for allocations
/// - When the current chunk is full, a new larger chunk is allocated
/// - Old chunks are stored to keep previous allocations valid until explicitly deallocated
/// - Objects are allocated contiguously within chunks for better cache locality
/// - Follows a LIFO (Last-In-First-Out) allocation/deallocation pattern
///
/// # Use Cases
///
/// `StackArena` is particularly well-suited for:
///
/// - Parsers and compilers that need to allocate many temporary objects
/// - Interpreters that need efficient memory management for short-lived objects
/// - Data processing pipelines with predictable allocation patterns
/// - Any scenario where many small allocations are made in sequence
///
/// # Thread Safety
///
/// `StackArena` is not thread-safe and should not be shared between threads
/// without external synchronization.
///
/// # Performance
///
/// This allocator is optimized for scenarios with many small allocations that follow
/// a stack-like pattern. Benchmarks show it significantly outperforms the system allocator:
///
/// - Up to 10x faster for consecutive small allocations
/// - Minimal overhead for allocation and deallocation
/// - Efficient memory reuse with the LIFO pattern
#[derive(Debug)]
pub struct StackArena {
    /// Stores old chunks to keep previous allocations valid until explicitly deallocated.
    /// This ensures that pointers returned by `allocate` remain valid even after new chunks are allocated.
    store: Vec<Chunk>,

    /// Tracks all allocated objects in LIFO order for proper deallocation.
    stack: Vec<NonNull<[u8]>>,

    /// The current active chunk used for new allocations.
    current: BufferArena,

    /// Default chunk size used when allocating new chunks.
    /// When a new chunk is needed, its size will be at least this value.
    default_chunk_size: usize,
}

impl Default for StackArena {
    fn default() -> Self {
        let default_chunk_size = 1 << 12;
        Self {
            store: Vec::with_capacity(16),
            stack: Vec::with_capacity(256),
            current: Default::default(),
            default_chunk_size,
        }
    }
}

impl StackArena {
    /// Creates a new empty `StackArena` with a default initial capacity.
    ///
    /// The default chunk size is 4096 bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::StackArena;
    ///
    /// let arena = StackArena::new();
    /// assert!(arena.is_empty());
    /// ```
    #[inline(always)]
    pub fn new() -> Self {
        Self::with_chunk_size(4096)
    }

    /// Creates a new empty `StackArena` with a specified chunk size.
    ///
    /// # Parameters
    ///
    /// * `chunk_size` - The size in bytes to use for new chunks
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::StackArena;
    ///
    /// // Create an arena with 4096-byte chunks
    /// let arena = StackArena::with_chunk_size(4096);
    /// assert!(arena.is_empty());
    /// ```
    #[inline]
    pub fn with_chunk_size(chunk_size: usize) -> Self {
        let current = BufferArena::with_capacity(chunk_size);
        Self {
            store: Vec::with_capacity(4),
            stack: Vec::with_capacity(32),
            current,
            default_chunk_size: chunk_size,
        }
    }

    /// Returns the number of objects currently on the stack.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// assert_eq!(arena.len(), 0);
    ///
    /// // Allocate memory for an object
    /// let layout = Layout::from_size_align(5, 1).unwrap();
    /// let ptr = unsafe { arena.allocate(layout).unwrap() };
    /// assert_eq!(arena.len(), 1);
    /// ```
    #[inline]
    pub fn len(&self) -> usize {
        self.stack.len()
    }

    /// Returns `true` if the arena contains no objects.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// assert!(arena.is_empty());
    ///
    /// // Allocate memory for an object
    /// let layout = Layout::from_size_align(5, 1).unwrap();
    /// let ptr = unsafe { arena.allocate(layout).unwrap() };
    /// assert!(!arena.is_empty());
    /// ```
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Removes the most recently pushed object from the stack.
    ///
    /// This method follows the LIFO (Last-In-First-Out) principle.
    /// After popping, any pointers to the popped object become invalid.
    ///
    /// Returns `None` if the stack is empty.
    ///
    /// # Panics
    ///
    /// Panics if there is a partial object being built
    /// (i.e., if `extend` has been called but `finish` has not).
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    ///
    /// // Allocate first object
    /// let layout1 = Layout::from_size_align(5, 1).unwrap();
    /// let ptr1 = unsafe { arena.allocate(layout1).unwrap() };
    ///
    /// // Allocate second object
    /// let layout2 = Layout::from_size_align(5, 1).unwrap();
    /// let ptr2 = unsafe { arena.allocate(layout2).unwrap() };
    /// assert_eq!(arena.len(), 2);
    ///
    /// arena.pop();
    /// assert_eq!(arena.len(), 1);
    /// ```
    #[inline]
    pub fn pop(&mut self) -> Option<NonNull<[u8]>> {
        let ptr = *self.stack.last()?;
        let layout = Layout::for_value(unsafe { ptr.as_ref() });
        unsafe { self.deallocate(ptr.cast(), layout) };
        Some(ptr)
    }

    /// Returns the top object on the stack without removing it.
    ///
    /// This method returns a pointer to the most recently allocated object
    /// on the stack.
    ///
    /// # Returns
    ///
    /// An optional non-null pointer to the top object, or `None` if the stack is empty.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// let layout = Layout::from_size_align(8, 1).unwrap();
    /// let ptr = unsafe { arena.allocate(layout).unwrap() };
    ///
    /// // Get the top object
    /// let top = arena.top();
    /// assert!(top.is_some());
    /// ```
    #[inline]
    pub fn top(&mut self) -> Option<NonNull<[u8]>> {
        self.stack.last().copied()
    }

    /// Rolls back to a specific object, freeing it and all objects allocated after it.
    ///
    /// This method allows for rolling back to a specific point in the allocation
    /// history by providing a reference to an object on the stack.
    ///
    /// # Parameters
    ///
    /// * `data` - A reference to the object to free, along with all objects
    ///   allocated after it.
    ///
    /// # Examples
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    ///
    /// // Allocate some objects
    /// let layout1 = Layout::from_size_align(8, 1).unwrap();
    /// let ptr1 = unsafe { arena.allocate(layout1).unwrap() };
    ///
    /// let layout2 = Layout::from_size_align(16, 1).unwrap();
    /// let ptr2 = unsafe { arena.allocate(layout2).unwrap() };
    ///
    /// // Roll back to the first object
    /// unsafe {
    ///     arena.rollback(ptr1.as_ref());
    /// }
    /// assert_eq!(arena.len(), 0); // All objects are freed
    /// ```
    #[inline]
    pub fn rollback(&mut self, data: &[u8]) {
        unsafe {
            self.deallocate(
                NonNull::new_unchecked(data.as_ptr().cast_mut()),
                Layout::for_value(data),
            )
        };
    }
}

/// Implementation of the `Allocator` trait for `StackArena`.
///
/// This implementation allows `StackArena` to be used with APIs that require
/// an allocator. The implementation follows the stack-like allocation pattern,
/// where memory is allocated in chunks and objects are allocated contiguously.
///
/// # Safety
///
/// The implementation uses unsafe code internally to manage memory efficiently.
/// Users should follow the LIFO (Last-In-First-Out) pattern when deallocating
/// memory to ensure proper behavior.
///
/// # Performance
///
/// This allocator is optimized for scenarios with many small allocations that follow
/// a stack-like pattern. It significantly outperforms the system allocator in these cases,
/// as demonstrated in the benchmarks.
impl Allocator for StackArena {
    /// Allocates memory with the specified layout.
    ///
    /// This method allocates a new object with the given layout in the current chunk.
    /// If the current chunk doesn't have enough space, a new chunk is allocated.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it returns a raw pointer that must be used
    /// carefully to avoid memory safety issues.
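    ///
    /// # Examples
    ///
    /// A minimal sketch; the chunk size and layout below are illustrative:
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::with_chunk_size(64);
    ///
    /// // A request larger than the current chunk transparently allocates a new chunk.
    /// let layout = Layout::from_size_align(256, 1).unwrap();
    /// let _ptr = unsafe { arena.allocate(layout).unwrap() };
    /// assert_eq!(arena.len(), 1);
    /// ```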
    unsafe fn allocate(
        &mut self,
        layout: std::alloc::Layout,
    ) -> Result<std::ptr::NonNull<[u8]>, crate::AllocError> {
        if !self.current.sufficient_for(layout) {
            let capacity = layout.size().max(self.default_chunk_size);
            let prev = std::mem::replace(&mut self.current, BufferArena::with_capacity(capacity));
            self.store.push(prev.into());
        }

        unsafe {
            let object = self.current.allocate(layout)?;
            self.stack.push(object);
            Ok(object)
        }
    }

    /// Deallocates memory previously allocated by `allocate`.
    ///
    /// This method frees the specified object and all objects allocated after it,
    /// following the stack-like (LIFO) deallocation pattern.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it must be called with a pointer returned
    /// by `allocate` and the same layout that was used to allocate it.
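    ///
    /// # Examples
    ///
    /// A minimal sketch showing that freeing an earlier object also frees
    /// everything allocated after it (layouts are illustrative):
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// let layout = Layout::from_size_align(4, 1).unwrap();
    ///
    /// let first = unsafe { arena.allocate(layout).unwrap() };
    /// let _second = unsafe { arena.allocate(layout).unwrap() };
    ///
    /// // Deallocating `first` also frees `_second`.
    /// unsafe { arena.deallocate(first.cast(), layout) };
    /// assert!(arena.is_empty());
    /// ```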
    #[inline]
    unsafe fn deallocate(&mut self, ptr: std::ptr::NonNull<u8>, layout: std::alloc::Layout) {
        let object = NonNull::slice_from_raw_parts(ptr, layout.size());
        let pos = self
            .stack
            .iter()
            .rposition(|item| std::ptr::eq(item.as_ptr(), object.as_ptr()))
            .unwrap();
        self.stack.truncate(pos);
        if !self.current.contains(ptr) {
            let pos = self
                .store
                .iter()
                .rposition(|item| item.contains(ptr))
                .unwrap();
            std::mem::swap(&mut self.current.store, &mut self.store[pos]);
            self.store.truncate(pos);
        }
        debug_assert!(self.current.contains(ptr));
        unsafe { self.current.deallocate(ptr, layout) };
    }

    /// Grows a previously allocated memory block to a larger size.
    ///
    /// This method is used to extend an existing allocation to a larger size.
    /// It's used internally by the `extend` method. It **only supports growing
    /// the last allocation** (following the LIFO pattern). Attempting to grow any
    /// other allocation will trigger a debug assertion failure.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it must be called with a pointer returned
    /// by `allocate` and the same layout that was used to allocate it.
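    ///
    /// # Examples
    ///
    /// A minimal sketch of growing the most recent allocation (sizes are illustrative):
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// let old_layout = Layout::from_size_align(8, 1).unwrap();
    /// let ptr = unsafe { arena.allocate(old_layout).unwrap() };
    ///
    /// // Only the last allocation may be grown.
    /// let new_layout = Layout::from_size_align(16, 1).unwrap();
    /// let _grown = unsafe { arena.grow(ptr.cast(), old_layout, new_layout).unwrap() };
    /// assert_eq!(arena.len(), 1);
    /// ```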
    unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: std::alloc::Layout,
        new_layout: std::alloc::Layout,
    ) -> Result<NonNull<[u8]>, crate::AllocError> {
        let top = self.stack.pop().unwrap();
        debug_assert_eq!(
            top.cast().as_ptr(),
            ptr.as_ptr(),
            "this operation is only supported for the last allocation"
        );
        let object = if self.current.remaining() >= new_layout.size() - old_layout.size() {
            unsafe { self.current.grow(ptr, old_layout, new_layout) }?
        } else {
            let capacity = new_layout.size().max(self.default_chunk_size);
            let prev = std::mem::replace(&mut self.current, BufferArena::with_capacity(capacity));
            self.store.push(prev.into());
            unsafe {
                let object = self.current.allocate(new_layout)?;
                object
                    .cast()
                    .copy_from_nonoverlapping(ptr, old_layout.size());
                object
            }
        };
        self.stack.push(object);
        Ok(object)
    }

    /// Shrinks a previously allocated memory block to a smaller size.
    ///
    /// This method is used to reduce the size of an existing allocation.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it must be called with a pointer returned
    /// by `allocate` and the same layout that was used to allocate it.
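    ///
    /// # Examples
    ///
    /// A minimal sketch of shrinking the most recent allocation (sizes are illustrative):
    ///
    /// ```
    /// use stack_arena::{StackArena, Allocator};
    /// use std::alloc::Layout;
    ///
    /// let mut arena = StackArena::new();
    /// let old_layout = Layout::from_size_align(16, 1).unwrap();
    /// let ptr = unsafe { arena.allocate(old_layout).unwrap() };
    ///
    /// // Shrink the last allocation down to 8 bytes.
    /// let new_layout = Layout::from_size_align(8, 1).unwrap();
    /// let _shrunk = unsafe { arena.shrink(ptr.cast(), old_layout, new_layout).unwrap() };
    /// assert_eq!(arena.len(), 1);
    /// ```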
    #[inline]
    unsafe fn shrink(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, crate::AllocError> {
        let top = self.stack.pop().unwrap();
        debug_assert_eq!(
            top.cast().as_ptr(),
            ptr.as_ptr(),
            "this operation is only supported for the last allocation"
        );
        debug_assert_eq!(self.current.ptr, unsafe { ptr.add(old_layout.size()) });
        let object = unsafe { self.current.shrink(ptr, old_layout, new_layout) }?;
        self.stack.push(object);
        Ok(object)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new() {
        let stack = StackArena::new();
        assert_eq!(stack.len(), 0);
        assert!(stack.is_empty());
    }

    #[test]
    fn test_allocate_deallocate() {
        let mut stack = StackArena::new();

        // Allocate some memory
        let layout = Layout::from_size_align(16, 8).unwrap();
        let ptr = unsafe { stack.allocate(layout) }.unwrap();

        // Write some data
        unsafe { std::ptr::write_bytes(ptr.as_ptr() as *mut u8, 0xAA, 16) };

        // Verify data
        let data = unsafe { std::slice::from_raw_parts(ptr.as_ptr() as *const u8, 16) };
        assert_eq!(data, [0xAA; 16].as_slice());

        // Deallocate
        unsafe { stack.deallocate(ptr.cast(), layout) };
        assert_eq!(stack.len(), 0);
    }

    #[test]
    fn test_multi_allocations() {
        let mut stack = StackArena::new();
        let n = 10;
        let mut allocations = Vec::with_capacity(n);

        // Allocate multiple objects
        for i in 0..n {
            let layout = Layout::array::<u8>(i + 1).unwrap();
            let ptr = unsafe { stack.allocate(layout) }.unwrap();

            // Write some data
            unsafe { std::ptr::write_bytes(ptr.as_ptr() as *mut u8, i as u8, i + 1) };

            allocations.push((ptr, layout));
        }

        assert_eq!(stack.len(), n);

        // Verify data
        for (i, (ptr, _)) in allocations.iter().enumerate() {
            let data = unsafe { std::slice::from_raw_parts(ptr.as_ptr() as *const u8, i + 1) };
            assert_eq!(data, vec![i as u8; i + 1].as_slice());
        }

        // Deallocate in LIFO order
        for (ptr, layout) in allocations.into_iter().rev() {
            unsafe { stack.deallocate(ptr.cast(), layout) };
        }

        assert_eq!(stack.len(), 0);
    }

    #[test]
    fn test_grow_shrink() {
        let mut stack = StackArena::new();

        // Allocate initial memory
        let initial_layout = Layout::from_size_align(8, 8).unwrap();
        let ptr = unsafe { stack.allocate(initial_layout) }.unwrap();

        // Write initial data
        unsafe { std::ptr::write_bytes(ptr.as_ptr() as *mut u8, 0xAA, 8) };

        // Grow the allocation
        let new_layout = Layout::from_size_align(16, 8).unwrap();
        let grown_ptr = unsafe { stack.grow(ptr.cast(), initial_layout, new_layout) }.unwrap();

        // Verify initial data is preserved
        let data = unsafe { std::slice::from_raw_parts(grown_ptr.as_ptr() as *const u8, 8) };
        assert_eq!(data, [0xAA; 8].as_slice());

        // Write to the new space
        unsafe { std::ptr::write_bytes((grown_ptr.as_ptr() as *mut u8).add(8), 0xBB, 8) };

        // Shrink back to original size
        let shrunk_ptr =
            unsafe { stack.shrink(grown_ptr.cast(), new_layout, initial_layout) }.unwrap();

        // Verify data
        let final_data = unsafe { std::slice::from_raw_parts(shrunk_ptr.as_ptr() as *const u8, 8) };
        assert_eq!(final_data, [0xAA; 8].as_slice());

        // Deallocate
        unsafe { stack.deallocate(shrunk_ptr.cast(), initial_layout) };
        assert_eq!(stack.len(), 0);
    }

    /// Helper function to allocate memory and write data to it
    #[inline]
    fn allocate_and_write<A: Allocator>(allocator: &mut A, size: usize) -> (NonNull<[u8]>, Layout) {
        let layout = Layout::from_size_align(size, 8).unwrap();
        let ptr = unsafe { allocator.allocate(layout).unwrap() };

        // Write some data
        unsafe { std::ptr::write_bytes(ptr.as_ptr() as *mut u8, 0xAA, size) };

        (ptr, layout)
    }

    /// Helper function to perform consecutive allocations
    #[inline]
    fn perform_consecutive_allocations<A: Allocator>(
        allocator: &mut A,
        count: usize,
    ) -> Vec<(NonNull<[u8]>, Layout)> {
        let mut ptrs = Vec::with_capacity(count);

        for i in 0..count {
            // Use different small sizes for each allocation
            let size = (i % 8) + 1; // Sizes from 1 to 8 bytes
            let (ptr, layout) = allocate_and_write(allocator, size);
            ptrs.push((ptr, layout));
        }

        ptrs
    }

    #[test]
    fn test_consecutive_allocations() {
        let mut stack = StackArena::new();
        let n = 10000;
        let allocations = perform_consecutive_allocations(&mut stack, n);

        assert_eq!(stack.len(), n);

        // Deallocate in LIFO order
        for (ptr, layout) in allocations.into_iter().rev() {
            unsafe { stack.deallocate(ptr.cast(), layout) };
        }

        assert_eq!(stack.len(), 0);
    }

    #[test]
    fn test_custom_chunk_size() {
        // Create a stack arena with a custom chunk size
        let mut stack = StackArena::with_chunk_size(256);

        // Allocate an object that fits within the chunk
        let small_layout = Layout::from_size_align(64, 8).unwrap();
        let small_ptr = unsafe { stack.allocate(small_layout) }.unwrap();

        // Allocate a larger object that still fits within our custom chunk size (256)
        let medium_layout = Layout::from_size_align(128, 8).unwrap();
        let medium_ptr = unsafe { stack.allocate(medium_layout) }.unwrap();

        // Allocate an object that exceeds our custom chunk size
        let large_layout = Layout::from_size_align(512, 8).unwrap();
        let large_ptr = unsafe { stack.allocate(large_layout) }.unwrap();

        // Verify all allocations succeeded
        assert_eq!(stack.len(), 3);

        // Clean up
        unsafe {
            stack.deallocate(large_ptr.cast(), large_layout);
            stack.deallocate(medium_ptr.cast(), medium_layout);
            stack.deallocate(small_ptr.cast(), small_layout);
        }

        assert_eq!(stack.len(), 0);
    }

    #[test]
    fn test_cross_chunk_allocation_deallocation() {
        // Create a stack arena with a very small chunk size to easily trigger new chunk allocations
        let mut stack = StackArena::with_chunk_size(32);

        // Allocate objects that will span across multiple chunks
        let mut allocations = Vec::new();

        let specs = [
            (16, 8, 0xAA),
            (8, 8, 0xBB),
            (16, 8, 0xCC),
            (8, 8, 0xDD),
            (39, 4, 0xEE),
        ];
        for (size, align, fill) in specs {
            let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
            let ptr = unsafe { stack.allocate(layout).unwrap() };
            unsafe { ptr.cast::<u8>().write_bytes(fill, layout.size()) };
            allocations.push((ptr, layout));
        }

        // Verify all allocations
        assert_eq!(stack.len(), specs.len());

        for ((size, _align, fill), (ptr, _layout)) in std::iter::zip(specs, &allocations) {
            let data = unsafe { ptr.as_ref() };
            let expected = vec![fill; size];
            assert_eq!(data, &expected);
        }

        // Deallocate in LIFO order to test chunk reuse
        for (ptr, layout) in allocations.into_iter().rev() {
            unsafe { stack.deallocate(ptr.cast(), layout) };
        }

        // Verify all memory has been deallocated
        assert_eq!(stack.len(), 0);

        // Allocate again to test chunk reuse
        let layout5 = Layout::from_size_align(24, 8).unwrap();
        let ptr5 = unsafe { stack.allocate(layout5) }.unwrap();
        unsafe { ptr5.cast::<u8>().write_bytes(0xFF, layout5.size()) };

        // Verify the new allocation
        assert_eq!(stack.len(), 1);
        let data5 =
            unsafe { std::slice::from_raw_parts(ptr5.as_ptr() as *const u8, layout5.size()) };
        assert_eq!(data5, [0xFF; 24].as_slice());
    }

    #[test]
    fn test_stack_arena_grow_with_new_chunk() {
        // Create a stack arena with a very small chunk size
        let mut stack = StackArena::with_chunk_size(32);

        // First allocation - should fit in the first chunk
        let layout1 = Layout::from_size_align(8, 8).unwrap();
        let ptr1 = unsafe { stack.allocate(layout1) }.unwrap();
        unsafe { ptr1.cast::<u8>().write_bytes(0xAA, layout1.size()) };

        // Allocate a second object to fill the first chunk
        let layout2 = Layout::from_size_align(16, 8).unwrap();
        let ptr2 = unsafe { stack.allocate(layout2) }.unwrap();
        unsafe { ptr2.cast::<u8>().write_bytes(0xBB, layout2.size()) };

        // Verify data integrity
        let data1 =
            unsafe { std::slice::from_raw_parts(ptr1.as_ptr() as *const u8, layout1.size()) };
        assert_eq!(data1, [0xAA; 8].as_slice());

        let data2 =
            unsafe { std::slice::from_raw_parts(ptr2.as_ptr() as *const u8, layout2.size()) };
        assert_eq!(data2, [0xBB; 16].as_slice());

        // Deallocate the second object
        unsafe { stack.deallocate(ptr2.cast(), layout2) };

        // Now grow the first allocation to trigger a new chunk allocation
        // The new size won't fit in the first chunk, so it should be copied to a new chunk
        let layout1_grown = Layout::from_size_align(24, 8).unwrap();

        let ptr1_grown = unsafe { stack.grow(ptr1.cast(), layout1, layout1_grown) }.unwrap();

        // Write to the newly grown portion
        unsafe {
            ptr1_grown
                .cast::<u8>()
                .add(layout1.size())
                .write_bytes(0xCC, layout1_grown.size() - layout1.size())
        };

        // Verify the grown object has correct data
        let data1_grown = unsafe {
            std::slice::from_raw_parts(ptr1_grown.as_ptr() as *const u8, layout1_grown.size())
        };
        let mut expected1_grown = vec![0xAA; layout1.size()];
        expected1_grown.extend_from_slice(&vec![0xCC; layout1_grown.size() - layout1.size()]);
        assert_eq!(data1_grown, expected1_grown.as_slice());

        // Final cleanup
        unsafe { stack.deallocate(ptr1_grown.cast(), layout1_grown) };
        assert_eq!(stack.len(), 0);
    }

    #[test]
    fn test_ptr_eq() {
        let a: NonNull<[u8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 2);
        let b: NonNull<[u8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 2);
        assert!(std::ptr::eq(a.as_ptr(), b.as_ptr()));
    }
}