// arena_arc — lib.rs
1//! A fast, chunk-based allocator for building contiguous slices and sharing them via handles.
2//!
//! This crate provides [`Allocator`] to allocate variable-length slices from a pre-allocated
//! chunk and return zero-copy [`ArcSlice`] handles ([`ArcSingle`] for single items). The handles
//! share the underlying buffer and are cheap to clone.
6//!
7//! # Safety notes
8//! - **Zombie objects**: When a previously allocated slice is no longer used but the same chunk
9//!   is still held by other handles, that slice’s memory is not reclaimed individually. It is
10//!   only reclaimed when the entire chunk is freed.
11//! - **Extra memory overhead**: Chunks are allocated with a fixed capacity `N`, which can leave
12//!   unused space. When a request exceeds the remaining space, a new chunk is allocated and the
13//!   unused tail of the old chunk becomes additional overhead.
14//! - **Deferred drops**: Elements are dropped only when the last handle for a chunk is released,
15//!   so resource reclamation does not happen per element immediately.
16//! - **Cycles with handles**: If the allocated `T` contains an `ArcSlice`/`ArcSingle` created by
17//!   the same allocator (directly or indirectly), this can form a reference cycle and prevent the
18//!   chunk from ever being freed.
19//!
20//! # Example
21//! ```
22//! use arena_arc::Allocator;
23//!
24//! let mut allocator: Allocator<u32, u32, 16> = Allocator::new();
25//! let handle = allocator.alloc(4, |i| (i * 2) as u32);
26//! assert_eq!(handle.get(), &[0, 2, 4, 6]);
27//! ```
28use std::{
29    cell::UnsafeCell, fmt::Debug, mem::MaybeUninit, ops::Deref, ptr::NonNull,
30    sync::atomic::AtomicUsize,
31};
/// Trait bound for index types used by [`ArcSlice`], [`ArcSingle`] and [`Allocator`].
///
/// Must be convertible to and from `usize` with debug-friendly errors.
/// The provided implementations are plain unsigned integers; a narrower type
/// shrinks the handle structs at the cost of a smaller maximum chunk offset.
pub trait IndexType:
    Copy
    + TryFrom<usize, Error: std::fmt::Debug>
    + TryInto<usize, Error: std::fmt::Debug>
    + PartialEq
    + Eq
    + Debug
{
}
44impl IndexType for u16 {}
45impl IndexType for u32 {}
46impl IndexType for u64 {}
47impl IndexType for u128 {}
48impl IndexType for usize {}
49
#[repr(C)]
struct Header {
    // The reference count for the buffer, used to determine when it can be safely deallocated.
    ref_count: AtomicUsize,
    // The index of the next slot to be allocated; equivalently, the number of
    // initialized elements in the buffer.
    alloc_index: AtomicUsize,
    // The capacity of the buffer, in elements. Immutable after construction.
    capacity: usize,
}
59
// #[repr(C)] prevents the compiler from reordering the fields of the struct,
// so the header always sits at offset 0 in front of the element storage,
// matching the layout computed by `Buffer::layout_for_capacity`.
#[repr(C)]
struct Buffer<T> {
    // The header of the buffer, containing the reference count, allocation index, and capacity.
    header: Header,
    // The buffer data, stored as a trailing unsized array of possibly-uninitialized T.
    // UnsafeCell allows element writes through shared references during allocation.
    data: UnsafeCell<[MaybeUninit<T>]>,
}
69
70impl<T> Buffer<T> {
71    fn layout_for_capacity(capacity: usize) -> std::alloc::Layout {
72        // Layout must match the allocation in ChunkRef::new.
73        let header_layout = std::alloc::Layout::new::<Header>();
74        let array_layout = std::alloc::Layout::array::<MaybeUninit<T>>(capacity)
75            .expect("Failed to create array layout for buffer data");
76        let (layout, _offset) = header_layout
77            .extend(array_layout)
78            .expect("Failed to extend header layout with data layout");
79        layout
80    }
81
82    fn left_space(&self) -> usize {
83        self.header.capacity
84            - self
85                .header
86                .alloc_index
87                .load(std::sync::atomic::Ordering::Acquire)
88    }
89}
90
91impl<T> Drop for Buffer<T> {
92    fn drop(&mut self) {
93        // get the allocated length of the buffer
94        let len = self
95            .header
96            .alloc_index
97            .load(std::sync::atomic::Ordering::Acquire);
98        // SAFETY: We are the only thread that can access the buffer, so it is safe to drop the items in the buffer.
99        unsafe {
100            let data = &mut *self.data.get();
101            data.iter_mut().take(len).for_each(|item| {
102                item.assume_init_drop();
103            });
104        }
105    }
106}
107
/// A reference-counted pointer to an allocation chunk.
///
/// This is an internal building block used by [`ArcSlice`], [`ArcSingle`] and
/// [`Allocator`]. A `None` inner pointer is the sentinel for "no chunk",
/// used by zero-length slices.
struct ChunkRef<T> {
    inner: Option<NonNull<Buffer<T>>>,
}
114
115impl<T> Clone for ChunkRef<T> {
116    fn clone(&self) -> Self {
117        if self.inner.is_none() {
118            return Self { inner: None };
119        }
120        // Increment the reference count of the buffer.
121        let buffer = unsafe { self.inner.as_ref().unwrap().as_ref() };
122        buffer
123            .header
124            .ref_count
125            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
126        Self { inner: self.inner }
127    }
128}
129
impl<T> Drop for ChunkRef<T> {
    /// Releases one reference; frees the buffer when the last reference goes away.
    fn drop(&mut self) {
        if self.inner.is_none() {
            return;
        }
        let inner_ref = self.inner.as_ref().unwrap();
        // Decrement the reference count of the buffer.
        // AcqRel: the Release half publishes this handle's accesses to whichever
        // thread performs the final decrement, and the Acquire half makes all
        // other handles' accesses visible to the thread that frees the buffer.
        let buffer = unsafe { inner_ref.as_ref() };
        if buffer
            .header
            .ref_count
            .fetch_sub(1, std::sync::atomic::Ordering::AcqRel)
            == 1
        {
            // SAFETY: We are the last owner of the buffer, so it is safe to deallocate it.
            // Order matters: the layout is read from the header *before*
            // drop_in_place runs, and it must match the layout used in
            // `ChunkRef::new` for the dealloc call to be valid.
            unsafe {
                let layout = Buffer::<T>::layout_for_capacity(buffer.header.capacity);
                std::ptr::drop_in_place(inner_ref.as_ptr());
                std::alloc::dealloc(inner_ref.as_ptr() as *mut u8, layout);
            }
        }
    }
}
153
154impl<T> ChunkRef<T> {
155    fn new(capacity: usize) -> Self {
156        // SAFETY: We create a layout for the header and the data array.
157        let layout = Buffer::<T>::layout_for_capacity(capacity);
158
159        let ptr = unsafe { std::alloc::alloc(layout) };
160        if ptr.is_null() {
161            std::alloc::handle_alloc_error(layout);
162        }
163
164        // Construct a fat pointer to the buffer with the correct length metadata.
165        let ptr = std::ptr::slice_from_raw_parts_mut(ptr as *mut (), capacity) as *mut Buffer<T>;
166
167        unsafe {
168            // Initialize the header of the buffer.
169            let header = &mut (*ptr).header;
170            header
171                .ref_count
172                .store(1, std::sync::atomic::Ordering::Release);
173            header
174                .alloc_index
175                .store(0, std::sync::atomic::Ordering::Release);
176            header.capacity = capacity;
177        }
178
179        Self {
180            inner: Some(NonNull::new(ptr).expect("Failed to create NonNull pointer")),
181        }
182    }
183
184    unsafe fn buffer(&self) -> &Buffer<T> {
185        unsafe {
186            self.inner
187                .as_ref()
188                .expect("Dereferenced null chunk")
189                .as_ref()
190        }
191    }
192
193    fn is_null(&self) -> bool {
194        self.inner.is_none()
195    }
196
197    fn null() -> Self {
198        Self { inner: None }
199    }
200}
201
/// A read-only handle to a slice allocated from an [`Allocator`].
///
/// Cloning a handle is cheap and shares the underlying buffer.
/// The slice remains valid as long as any handle referencing the same chunk exists.
pub struct ArcSlice<T, L: IndexType = u32> {
    // A pointer to the buffer that this handle is allocated from.
    // Null for zero-length slices (see `ArcSlice::empty`).
    chunk: ChunkRef<T>,
    // The index of the first element of the slice within the buffer.
    index: L,
    // The slice length of the item that this handle points to.
    len: L,
}
214
// SAFETY: ArcSlice only hands out shared `&T` references and maintains its
// reference count with atomics. Sharing across threads therefore needs
// `T: Sync`, and dropping the last handle (which drops the T values on that
// thread) needs `T: Send`.
unsafe impl<T, L: IndexType> Send for ArcSlice<T, L> where T: Send + Sync {}
unsafe impl<T, L: IndexType> Sync for ArcSlice<T, L> where T: Send + Sync {}
217
218impl<T, L: IndexType> Clone for ArcSlice<T, L> {
219    fn clone(&self) -> Self {
220        Self {
221            chunk: self.chunk.clone(),
222            index: self.index,
223            len: self.len,
224        }
225    }
226}
227
228impl<T, L: IndexType> ArcSlice<T, L> {
229    /// Returns the underlying slice.
230    ///
231    /// This is zero-copy and only creates a shared reference into the backing chunk.
232    pub fn get(&self) -> &[T] {
233        if self.chunk.is_null() {
234            // For zero-length allocations, we return a reference to an empty slice without accessing the chunk.
235            return &[];
236        }
237        unsafe {
238            let buffer = self.chunk.buffer();
239            let data = &*buffer.data.get();
240            let start = self
241                .index
242                .try_into()
243                .expect("Index exceeds index type capacity");
244            let len = self
245                .len
246                .try_into()
247                .expect("Length exceeds index type capacity");
248            let slice = &data[start..start + len];
249            // SAFETY: The data in this range has been initialized by the allocator
250            std::mem::transmute::<&[MaybeUninit<T>], &[T]>(slice)
251        }
252    }
253
254    /// Creates an empty slice handle that points to an empty slice without modifying the chunk.
255    pub fn empty() -> Self {
256        Self {
257            chunk: ChunkRef::null(),
258            index: 0
259                .try_into()
260                .expect("Index 0 should be valid for any index type"),
261            len: 0
262                .try_into()
263                .expect("Length 0 should be valid for any index type"),
264        }
265    }
266}
267
impl<T, L: IndexType> Deref for ArcSlice<T, L> {
    type Target = [T];

    // Deref to the slice so handles can be used wherever `&[T]` is expected.
    fn deref(&self) -> &Self::Target {
        self.get()
    }
}
275
/// A read-only handle to a single element allocated from an [`Allocator`].
///
/// Like [`ArcSlice`], cloning is cheap and shares the backing chunk; the value
/// stays alive while any handle to the same chunk exists.
pub struct ArcSingle<T, L: IndexType = u32> {
    // The chunk that owns the element. Never null: construction goes through
    // a length-1 ArcSlice, which always carries a chunk.
    chunk: ChunkRef<T>,
    // The element's slot index within the chunk.
    index: L,
}
280
// SAFETY: same reasoning as for ArcSlice — only shared `&T` access is exposed,
// refcounting is atomic, and the final drop may happen on any thread.
unsafe impl<T, L: IndexType> Send for ArcSingle<T, L> where T: Send + Sync {}
unsafe impl<T, L: IndexType> Sync for ArcSingle<T, L> where T: Send + Sync {}
283
284impl<T, L: IndexType> Clone for ArcSingle<T, L> {
285    fn clone(&self) -> Self {
286        Self {
287            chunk: self.chunk.clone(),
288            index: self.index,
289        }
290    }
291}
292
293impl<T, L: IndexType> ArcSingle<T, L> {
294    /// Returns the referenced item.
295    ///
296    /// This is zero-copy and only creates a shared reference into the backing chunk.
297    pub fn get(&self) -> &T {
298        unsafe {
299            let buffer = self.chunk.buffer();
300            let data = &*buffer.data.get();
301            let start = self
302                .index
303                .try_into()
304                .expect("Index exceeds index type capacity");
305            let slice = &data[start];
306            // SAFETY: The data in this range has been initialized by the allocator
307            std::mem::transmute::<&MaybeUninit<T>, &T>(slice)
308        }
309    }
310
311    /// Creates an [`ArcSingle`] from an [`ArcSlice`] of length 1.
312    ///
313    /// # Panics
314    /// Panics if the slice length is not 1.
315    pub fn from_slice(handle: ArcSlice<T, L>) -> Self {
316        assert_eq!(
317            handle.len,
318            1.try_into().expect("Length exceeds index type capacity"),
319            "ArcSingle can only be created from a slice of length 1"
320        );
321        Self {
322            chunk: handle.chunk.clone(),
323            index: handle.index,
324        }
325    }
326}
327
impl<T, L: IndexType> Deref for ArcSingle<T, L> {
    type Target = T;

    // Deref to the item so handles can be used wherever `&T` is expected.
    fn deref(&self) -> &Self::Target {
        self.get()
    }
}
335
336impl<T, L: IndexType> TryFrom<ArcSlice<T, L>> for ArcSingle<T, L> {
337    type Error = &'static str;
338
339    fn try_from(value: ArcSlice<T, L>) -> Result<Self, Self::Error> {
340        if value.len != 1.try_into().expect("Length exceeds index type capacity") {
341            return Err("ArcSingle can only be created from a slice of length 1");
342        }
343        Ok(Self {
344            chunk: value.chunk,
345            index: value.index,
346        })
347    }
348}
349
350impl<T, L: IndexType> From<ArcSingle<T, L>> for ArcSlice<T, L> {
351    fn from(value: ArcSingle<T, L>) -> Self {
352        Self {
353            chunk: value.chunk,
354            index: value.index,
355            len: 1.try_into().expect("Length exceeds index type capacity"),
356        }
357    }
358}
359
/// Chunk-based allocator for variable-length slices.
///
/// - `T`: element type.
/// - `L`: index type used for slice offsets (defaults to `u32`).
/// - `N`: default chunk capacity, in elements.
pub struct Allocator<T, L: IndexType = u32, const N: usize = 256> {
    // The chunk new allocations are carved from; swapped for a fresh one when
    // it fills up (see `alloc`/`try_alloc`).
    chunk: ChunkRef<T>,
    // `L` only appears in the handle types this allocator produces.
    phantom: std::marker::PhantomData<L>,
}
369
370impl<T, L: IndexType, const N: usize> std::fmt::Debug for Allocator<T, L, N> {
371    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
372        write!(
373            f,
374            "Allocator<T={}, L={}, N={}>",
375            std::any::type_name::<T>(),
376            std::any::type_name::<L>(),
377            N
378        )
379    }
380}
381
impl<T, L: IndexType, const N: usize> Default for Allocator<T, L, N> {
    // Equivalent to `Allocator::new`: starts with one empty chunk of capacity `N`.
    fn default() -> Self {
        Self::new()
    }
}
387
impl<T, L: IndexType, const N: usize> Allocator<T, L, N> {
    /// Create a new allocator with a single chunk of capacity `N`.
    pub fn new() -> Self {
        Self {
            chunk: ChunkRef::new(N),
            phantom: std::marker::PhantomData,
        }
    }

    /// Allocate a slice of length `len` and initialize elements with `init`.
    ///
    /// Returns an [`ArcSlice`] that can be cheaply cloned.
    pub fn alloc<F>(&mut self, len: L, mut init: F) -> ArcSlice<T, L>
    where
        F: FnMut(usize) -> T,
    {
        if len == 0.try_into().expect("Length exceeds index type capacity") {
            // For zero-length allocations, we can return a handle that points to an empty slice without modifying the chunk.
            return ArcSlice {
                chunk: ChunkRef::null(),
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: 0
                    .try_into()
                    .expect("Length 0 should be valid for any index type"),
            };
        }
        let len: usize = len.try_into().expect("Length exceeds index type capacity");
        let left_space = unsafe { self.chunk.buffer().left_space() };
        if N < len {
            // The request exceeds the chunk capacity `N`: give it a dedicated
            // chunk of exactly `len`, leaving the current chunk untouched so
            // later (smaller) allocations can still use its remaining space.
            let chunk: ChunkRef<T> = ChunkRef::new(len);
            unsafe {
                let buffer = chunk.buffer(); // The chunk is not shared yet, so mutating its data is safe.
                // Initialize every slot with `init`.
                let data = &mut *buffer.data.get();
                for (i, item) in data.iter_mut().take(len).enumerate() {
                    item.as_mut_ptr().write(init(i));
                }
                // Publish the number of initialized elements.
                buffer
                    .header
                    .alloc_index
                    .store(len, std::sync::atomic::Ordering::Release);
            }
            return ArcSlice {
                chunk,
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: len.try_into().expect("Length exceeds index type capacity"),
            };
        }
        if left_space < len {
            // The current chunk cannot fit the request: start a fresh chunk of capacity `N`.
            let chunk: ChunkRef<T> = ChunkRef::new(N);
            unsafe {
                let buffer = chunk.buffer(); // The chunk is not shared yet, so mutating its data is safe.
                // Initialize the first `len` slots with `init`.
                let data = &mut *buffer.data.get();
                for (i, item) in data.iter_mut().take(len).enumerate() {
                    item.as_mut_ptr().write(init(i));
                }
                // Publish the number of initialized elements.
                buffer
                    .header
                    .alloc_index
                    .store(len, std::sync::atomic::Ordering::Release);
            }
            if left_space < N - len {
                // Adopt the new chunk only when its remaining space (N - len)
                // beats what the current chunk still offers; otherwise keep
                // the old chunk for future allocations.
                self.chunk = chunk.clone();
            }
            return ArcSlice {
                chunk,
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: len.try_into().expect("Length exceeds index type capacity"),
            };
        }
        // Fast path: the current chunk has room for the request.
        unsafe {
            let buffer = self.chunk.buffer();

            // 1. Read the allocation index. Only `self` (behind `&mut`) ever
            //    advances it, so Relaxed is sufficient for this load.
            let index = buffer
                .header
                .alloc_index
                .load(std::sync::atomic::Ordering::Relaxed);

            // 2. Write through raw pointers to avoid materializing a wide
            //    `&mut [T]` that would alias the shared references held by
            //    existing handles into this chunk.
            let base_ptr = buffer.data.get() as *mut MaybeUninit<T>;

            for i in 0..len {
                // `init(i)` may panic; that only leaks the elements already
                // written, because `alloc_index` has not been advanced yet.
                let val = init(i);
                // `ptr::write` touches a single slot and creates no wide aliasing borrow.
                base_ptr.add(index + i).write(MaybeUninit::new(val));
            }
            // 3. Commit the new allocation index; Release publishes the element writes.
            buffer
                .header
                .alloc_index
                .store(index + len, std::sync::atomic::Ordering::Release);
            ArcSlice {
                chunk: self.chunk.clone(),
                index: index.try_into().expect("Index cap"),
                len: len.try_into().expect("Length cap"),
            }
        }
    }

    /// Allocate a slice with fallible initialization.
    ///
    /// The `init` function can return an error to indicate initialization failure.
    /// In that case, any already-initialized elements will be dropped and the error will be returned.
    pub fn try_alloc<F, E>(&mut self, len: L, mut init: F) -> Result<ArcSlice<T, L>, E>
    where
        F: FnMut(usize) -> Result<T, E>,
    {
        if len == 0.try_into().expect("Length exceeds index type capacity") {
            // For zero-length allocations, we can return a handle that points to an empty slice without modifying the chunk.
            return Ok(ArcSlice {
                chunk: ChunkRef::null(),
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: 0
                    .try_into()
                    .expect("Length 0 should be valid for any index type"),
            });
        }
        let len: usize = len.try_into().expect("Length exceeds index type capacity");
        let left_space = unsafe { self.chunk.buffer().left_space() };
        if N < len {
            // The request exceeds the chunk capacity `N`: give it a dedicated
            // chunk of exactly `len`, leaving the current chunk untouched so
            // later (smaller) allocations can still use its remaining space.
            let chunk: ChunkRef<T> = ChunkRef::new(len);
            unsafe {
                let buffer = chunk.buffer(); // The chunk is not shared yet, so mutating its data is safe.
                // Initialize every slot with `init`.
                let data = &mut *buffer.data.get();
                for (i, item) in data.iter_mut().take(len).enumerate() {
                    match init(i) {
                        Ok(val) => item.as_mut_ptr().write(val),
                        Err(e) => {
                            // Initialization failed: drop the `i` elements
                            // already written, then bail out. The fresh chunk
                            // (alloc_index still 0) is freed when `chunk` drops.
                            for item in data.iter_mut().take(i) {
                                item.assume_init_drop();
                            }
                            return Err(e);
                        }
                    }
                }
                // Publish the number of initialized elements.
                buffer
                    .header
                    .alloc_index
                    .store(len, std::sync::atomic::Ordering::Release);
            }
            return Ok(ArcSlice {
                chunk,
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: len.try_into().expect("Length exceeds index type capacity"),
            });
        }
        if left_space < len {
            // The current chunk cannot fit the request: start a fresh chunk of capacity `N`.
            let chunk: ChunkRef<T> = ChunkRef::new(N);
            unsafe {
                let buffer = chunk.buffer(); // The chunk is not shared yet, so mutating its data is safe.
                // Initialize the first `len` slots with `init`.
                let data = &mut *buffer.data.get();
                for (i, item) in data.iter_mut().take(len).enumerate() {
                    match init(i) {
                        Ok(val) => item.as_mut_ptr().write(val),
                        Err(e) => {
                            // Initialization failed: drop the `i` elements
                            // already written, then bail out. The fresh chunk
                            // (alloc_index still 0) is freed when `chunk` drops.
                            for item in data.iter_mut().take(i) {
                                item.assume_init_drop();
                            }
                            return Err(e);
                        }
                    }
                }
                // Publish the number of initialized elements.
                buffer
                    .header
                    .alloc_index
                    .store(len, std::sync::atomic::Ordering::Release);
            }
            if left_space < N - len {
                // Adopt the new chunk only when its remaining space (N - len)
                // beats what the current chunk still offers; otherwise keep
                // the old chunk for future allocations.
                self.chunk = chunk.clone();
            }
            return Ok(ArcSlice {
                chunk,
                index: 0
                    .try_into()
                    .expect("Index 0 should be valid for any index type"),
                len: len.try_into().expect("Length exceeds index type capacity"),
            });
        }
        // Fast path: the current chunk has room for the request.
        unsafe {
            let buffer = self.chunk.buffer();

            // 1. Read the allocation index. Only `self` (behind `&mut`) ever
            //    advances it, so Relaxed is sufficient for this load.
            let index = buffer
                .header
                .alloc_index
                .load(std::sync::atomic::Ordering::Relaxed);

            // 2. Write through raw pointers to avoid materializing a wide
            //    `&mut [T]` that would alias the shared references held by
            //    existing handles into this chunk.
            let base_ptr = buffer.data.get() as *mut MaybeUninit<T>;

            for i in 0..len {
                // `init(i)` may panic; that only leaks the elements already
                // written, because `alloc_index` has not been advanced yet.
                match init(i) {
                    // `ptr::write` touches a single slot and creates no wide aliasing borrow.
                    Ok(val) => base_ptr.add(index + i).write(MaybeUninit::new(val)),
                    Err(e) => {
                        // Initialization failed: drop the elements written by
                        // this call. `alloc_index` still points at `index`, so
                        // the slots are simply reused by the next allocation.
                        for j in 0..i {
                            std::ptr::drop_in_place(base_ptr.add(index + j) as *mut T);
                        }
                        return Err(e);
                    }
                }
            }
            // 3. Commit the new allocation index; Release publishes the element writes.
            buffer
                .header
                .alloc_index
                .store(index + len, std::sync::atomic::Ordering::Release);
            Ok(ArcSlice {
                chunk: self.chunk.clone(),
                index: index.try_into().expect("Index cap"),
                len: len.try_into().expect("Length cap"),
            })
        }
    }

    /// Allocate a single element and return it as an [`ArcSingle`].
    pub fn alloc_single<F>(&mut self, init: F) -> ArcSingle<T, L>
    where
        F: FnOnce() -> T,
    {
        // `alloc` wants an FnMut; the Option lets the FnOnce be consumed
        // exactly once (the slice has length 1, so the closure runs once).
        let mut init = Some(init);
        let slice = self.alloc(
            1.try_into().expect("Length exceeds index type capacity"),
            |_| init.take().expect("init called more than once")(),
        );
        ArcSingle::from_slice(slice)
    }

    /// Allocate a single element with fallible initialization and return it as an [`ArcSingle`].
    pub fn try_alloc_single<F, E>(&mut self, init: F) -> Result<ArcSingle<T, L>, E>
    where
        F: FnOnce() -> Result<T, E>,
    {
        // Same Option trick as `alloc_single` to adapt FnOnce to FnMut.
        let mut init = Some(init);
        self.try_alloc(
            1.try_into().expect("Length exceeds index type capacity"),
            |_| init.take().expect("init called more than once")(),
        )
        .map(ArcSingle::from_slice)
    }

    /// Allocate a single element from an existing value.
    pub fn alloc_value(&mut self, value: T) -> ArcSingle<T, L> {
        self.alloc_single(|| value)
    }

    /// Allocate a zero-length slice, which returns a handle that points to an empty slice without modifying the chunk.
    pub fn alloc_empty() -> ArcSlice<T, L> {
        ArcSlice::empty()
    }
}
672
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Basic allocation, chunk overflow (20 > N=16), and handle cloning.
    #[test]
    fn test_allocator() {
        let mut allocator: Allocator<u32, u32, 16> = Allocator::new();
        let handle1 = allocator.alloc(10, |i| i as u32);
        let handle2 = allocator.alloc(20, |i| (i + 10) as u32);
        let handle3 = handle1.clone();
        assert_eq!(handle1.get(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        assert_eq!(
            handle2.get(),
            &[
                10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
            ]
        );
        assert_eq!(handle3.get(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
    }

    // Counts drops via a shared atomic so tests can assert exact drop counts.
    #[derive(Debug)]
    struct DropCounter<'a> {
        #[allow(dead_code)]
        value: usize,
        drops: &'a AtomicUsize,
    }

    impl<'a> Drop for DropCounter<'a> {
        fn drop(&mut self) {
            self.drops.fetch_add(1, Ordering::SeqCst);
        }
    }

    #[test]
    fn test_drop_runs_exactly_once_per_item() {
        static DROPS: AtomicUsize = AtomicUsize::new(0);

        {
            let mut allocator: Allocator<DropCounter, u32, 8> = Allocator::new();
            let handle = allocator.alloc(8, |i| DropCounter {
                value: i,
                drops: &DROPS,
            });
            assert_eq!(handle.get().len(), 8);
            // Cloning handle should not affect drop count until all handles are dropped.
            let _handle2 = handle.clone();
            assert_eq!(DROPS.load(Ordering::SeqCst), 0);
        }

        // All 8 items must be dropped exactly once when the chunk is freed.
        assert_eq!(DROPS.load(Ordering::SeqCst), 8);
    }

    // Exercises all three alloc paths: fits (3), overflow to new chunk (2),
    // and oversized dedicated chunk (10 > N=4).
    #[test]
    fn test_multiple_chunks_and_large_allocs() {
        static DROPS: AtomicUsize = AtomicUsize::new(0);

        {
            let mut allocator: Allocator<DropCounter, u32, 4> = Allocator::new();
            let h1 = allocator.alloc(3, |i| DropCounter {
                value: i,
                drops: &DROPS,
            });
            let h2 = allocator.alloc(2, |i| DropCounter {
                value: i + 3,
                drops: &DROPS,
            });
            let h3 = allocator.alloc(10, |i| DropCounter {
                value: i + 5,
                drops: &DROPS,
            });

            assert_eq!(h1.get().len(), 3);
            assert_eq!(h2.get().len(), 2);
            assert_eq!(h3.get().len(), 10);
            let _ = (h1, h2, h3);
        }

        // 3 + 2 + 10 items dropped exactly once.
        assert_eq!(DROPS.load(Ordering::SeqCst), 15);
    }

    #[test]
    fn test_zst_allocations() {
        // Ensure zero-sized types are handled without UB.
        let mut allocator: Allocator<(), u32, 16> = Allocator::new();
        let h1 = allocator.alloc(0, |_| ());
        let h2 = allocator.alloc(8, |_| ());
        assert_eq!(h1.get().len(), 0);
        assert_eq!(h2.get().len(), 8);
    }

    #[test]
    fn test_handle_survives_chunk_rotation() {
        let mut allocator: Allocator<u32, u32, 4> = Allocator::new();
        let h1 = allocator.alloc(4, |i| i as u32);
        // Force new chunk and potentially replace current chunk.
        let _h2 = allocator.alloc(3, |i| (i + 10) as u32);
        let h3 = allocator.alloc(4, |i| (i + 20) as u32);

        assert_eq!(h1.get(), &[0, 1, 2, 3]);
        assert_eq!(h3.get(), &[20, 21, 22, 23]);
    }

    #[test]
    fn test_many_small_allocations() {
        let mut allocator: Allocator<u32, u32, 8> = Allocator::new();
        let mut handles = Vec::new();
        for i in 0..32 {
            let h = allocator.alloc(1, |_| i as u32);
            handles.push(h);
        }
        for (i, h) in handles.iter().enumerate() {
            assert_eq!(h.get(), &[(i as u32)]);
        }
    }

    // These tests only run under `cargo miri test`; they target the unsafe
    // pointer arithmetic and refcounting paths.
    #[cfg(miri)]
    mod miri_tests {
        use super::*;
        use std::cell::Cell;

        #[test]
        fn miri_stress_realloc_and_drop() {
            // Exercise multiple allocation paths under Miri to catch UB.
            let mut allocator: Allocator<u64, u32, 8> = Allocator::new();
            let h1 = allocator.alloc(8, |i| i as u64);
            let h2 = allocator.alloc(1, |i| (i + 100) as u64);
            let h3 = allocator.alloc(16, |i| (i + 200) as u64);
            assert_eq!(h1.get()[0], 0);
            assert_eq!(h2.get()[0], 100);
            assert_eq!(h3.get()[0], 200);
            let _ = (h1, h2, h3);
        }

        #[derive(Debug)]
        struct Poison<'a> {
            alive: &'a Cell<bool>,
        }

        impl<'a> Drop for Poison<'a> {
            fn drop(&mut self) {
                // If this ever becomes false before drop, it indicates a use-after-free.
                self.alive.set(false);
            }
        }

        #[test]
        fn miri_use_after_free_guard() {
            let flag = Cell::new(true);
            {
                let mut allocator: Allocator<Poison, u32, 4> = Allocator::new();
                let handle = allocator.alloc(4, |_| Poison { alive: &flag });
                assert!(flag.get());
                drop(handle);
                // Dropping just the handle should not drop the chunk.
                assert!(flag.get());
                // allocator dropped at end of scope, which should drop the chunk.
            }
            // Flag should be flipped by drops once the chunk is freed.
            assert!(!flag.get());
        }

        #[test]
        fn miri_many_small_allocations() {
            let mut allocator: Allocator<u64, u32, 4> = Allocator::new();
            let mut handles = Vec::new();
            for i in 0..64u64 {
                let h = allocator.alloc(1, |_| i);
                handles.push(h);
            }
            for (i, h) in handles.iter().enumerate() {
                assert_eq!(h.get(), &[(i as u64)]);
            }
        }

        #[test]
        fn miri_clone_stress() {
            let mut allocator: Allocator<u32, u32, 8> = Allocator::new();
            let h = allocator.alloc(8, |i| i as u32);
            let mut clones = Vec::new();
            for _ in 0..32 {
                clones.push(h.clone());
            }
            for c in clones.iter() {
                assert_eq!(c.get()[0], 0);
            }
            drop(h);
            // All clones still valid until dropped.
            for c in clones.iter() {
                assert_eq!(c.get()[7], 7);
            }
        }
    }
}