// cubecl_common/arena.rs

use std::{
    cell::UnsafeCell,
    marker::PhantomData,
    sync::{
        Arc,
        atomic::{AtomicU32, Ordering},
    },
    vec::Vec,
};

/// The raw storage for an item, potentially uninitialized.
///
/// Aligned to 64 bytes (typical cache-line size) to prevent false sharing
/// when different threads access adjacent slots concurrently.
// `Debug` is derived so that `ReservedMemory` can format its
// `Arc<UnsafeCell<Bytes<_>>>` field in its own `Debug` impl.
#[derive(Debug)]
#[repr(C, align(64))]
pub struct Bytes<const MAX_ITEM_SIZE: usize> {
    bytes: [u8; MAX_ITEM_SIZE],
}

/// A circular, allocation-free arena for reusable memory blocks.
///
/// `Arena` manages a fixed-capacity pool of [`Bytes`] buffers, each up to
/// `MAX_ITEM_SIZE` bytes. After the pool is lazily initialized, subsequent
/// allocations scan from an internal cursor to find a free slot, avoiding
/// further heap allocation.
///
/// # Const Parameters
///
/// - `MAX_ITEM_COUNT` — maximum number of buffers in the pool.
/// - `MAX_ITEM_SIZE` — capacity of each individual buffer in bytes.
///
/// # How It Works
///
/// The arena maintains a vector of reference-counted buffer slots. When a
/// caller requests memory, the arena advances its cursor through the pool
/// looking for a slot with no outstanding handles (its backing `Arc` has a
/// strong count of 1), then hands back an [`UninitReservedMemory`] handle
/// to that slot. The cursor wraps around, giving the allocation pattern its
/// circular behavior.
///
/// Because a [`ReservedMemory`] handle can outlive the `Arena` itself (e.g.
/// when the owning thread exits but the handle was sent elsewhere), each
/// slot is wrapped in an `Arc` to keep the underlying storage alive. A
/// separate `AtomicU32` counts the live [`ReservedMemory`] clones
/// independently of the `Arc` strong count, so the last clone to be dropped
/// can run the stored object's destructor even after the arena is gone.
///
/// # Use Case
///
/// This is useful as a replacement for repeated `Arc<dyn Trait>` allocations.
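///
/// # Example
///
/// A minimal usage sketch (the slot count, slot size, and payload type are
/// arbitrary, chosen only for illustration):
///
/// ```ignore
/// let mut arena = Arena::<8, 256>::new();
///
/// // Reserve a free slot and initialize it with a payload that fits.
/// let handle = arena.reserve().expect("a slot is free").init(42u64);
///
/// // Handles are cheap to clone and may be sent to other threads.
/// let other = handle.clone();
/// std::thread::spawn(move || drop(other)).join().unwrap();
///
/// // Once the last clone is dropped, the payload's destructor runs and the
/// // slot becomes reusable.
/// drop(handle);
/// ```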
pub struct Arena<const MAX_ITEM_COUNT: usize, const MAX_ITEM_SIZE: usize> {
    /// Backing storage for each slot. Wrapped in `Arc` so that a [`ReservedMemory`]
    /// handle remains valid even after the `Arena` (and its owning thread)
    /// is dropped.
    buffer: Vec<Arc<UnsafeCell<Bytes<MAX_ITEM_SIZE>>>>,
    /// Logical reference counts, one per slot, counting live [`ReservedMemory`]
    /// clones. Tracked separately from the `Arc` strong count because the arena
    /// may be dropped while outstanding handles still exist — the `Arc` keeps
    /// the memory alive, and this counter lets the last handle know when to run
    /// the stored object's destructor.
    ref_counts: Vec<Arc<AtomicU32>>,
    /// Current scan position in the circular pool. Advanced on each
    /// allocation attempt and wraps at `MAX_ITEM_COUNT`.
    cursor: usize,
}

/// An initialized, immutable handle to a slot in the arena.
///
/// This type is `Send + Sync` and can be cheaply cloned. Each clone
/// increments a logical reference count; when the last clone is dropped,
/// the stored object's destructor runs and the slot becomes available for
/// reuse by the arena.
pub struct ReservedMemory<const MAX_ITEM_SIZE: usize> {
    data: Arc<UnsafeCell<Bytes<MAX_ITEM_SIZE>>>,
    ref_count: Arc<AtomicU32>,
    drop_fn: fn(&mut Bytes<MAX_ITEM_SIZE>),
}

/// An uninitialized handle to a reserved arena slot.
///
/// Obtained from [`Arena::reserve`]. Must be initialized via [`init`](Self::init)
/// to produce a usable [`ReservedMemory`].
///
/// This type is intentionally `!Send` and `!Sync` — it must be initialized on
/// the same thread that reserved it.
pub struct UninitReservedMemory<const MAX_ITEM_SIZE: usize> {
    data: Arc<UnsafeCell<Bytes<MAX_ITEM_SIZE>>>,
    ref_count: Arc<AtomicU32>,
    /// Used to assert the position in the arena.
    #[cfg(test)]
    index: usize,
    // This marker makes sure the type is `!Send` and `!Sync`.
    not_sync: PhantomData<*const ()>,
}

impl<const MAX_ITEM_SIZE: usize> UninitReservedMemory<MAX_ITEM_SIZE> {
    /// Initialize the reserved memory.
    ///
    /// # Panics
    ///
    /// Panics if `O` isn't safe to store in this arena, i.e. its size or
    /// alignment exceeds what a [`Bytes<MAX_ITEM_SIZE>`] slot can hold.
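    ///
    /// # Example
    ///
    /// A sketch of guarding against the panic with [`Arena::accept`] (the
    /// payload type and arena dimensions are arbitrary):
    ///
    /// ```ignore
    /// // Check up front that the payload fits in a slot.
    /// assert!(Arena::<4, 256>::accept::<[u64; 4]>());
    ///
    /// let mut arena = Arena::<4, 256>::new();
    /// let _reserved = arena.reserve().unwrap().init([1u64, 2, 3, 4]);
    /// ```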
    pub fn init<O>(self, obj: O) -> ReservedMemory<MAX_ITEM_SIZE> {
        assert!(
            accept_obj::<O, MAX_ITEM_SIZE>(),
            "Object isn't safe to store in this arena"
        );

        self.init_with_func(
            |bytes| {
                let ptr = core::ptr::from_mut(bytes);
                unsafe {
                    core::ptr::write(ptr as *mut O, obj);
                };
            },
            |bytes| {
                let ptr = core::ptr::from_mut(bytes);
                unsafe {
                    core::ptr::drop_in_place(ptr as *mut O);
                }
            },
        )
    }

    /// Writes to the reserved slot using `init_data` and attaches `drop_fn`
    /// as the destructor to run when the last [`ReservedMemory`] clone is dropped.
    fn init_with_func<F>(
        self,
        init_data: F,
        drop_fn: fn(&mut Bytes<MAX_ITEM_SIZE>),
    ) -> ReservedMemory<MAX_ITEM_SIZE>
    where
        F: FnOnce(&mut Bytes<MAX_ITEM_SIZE>),
    {
        // SAFETY: We access the `UnsafeCell` contents mutably. This is sound
        // because strong_count == 2 means only two owners exist: the arena's
        // buffer slot and this `UninitReservedMemory`. The arena never reads
        // through the `UnsafeCell` — only the holder of `UninitReservedMemory`
        // writes, so there is no data race.
        assert_eq!(
            Arc::strong_count(&self.data),
            2,
            "Slot must be held by exactly two owners (the arena and this \
             UninitReservedMemory) to guarantee exclusive write access."
        );

        let bytes_mut = unsafe { self.data.as_ref().get().as_mut().unwrap() };
        init_data(bytes_mut);

        ReservedMemory {
            data: self.data,
            ref_count: self.ref_count,
            drop_fn,
        }
    }
}

impl<const MAX_ITEM_SIZE: usize> core::fmt::Debug for ReservedMemory<MAX_ITEM_SIZE> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ReservedMemory")
            .field("data", &self.data)
            .field("drop_fn", &self.drop_fn)
            .finish()
    }
}

impl<const MAX_ITEM_SIZE: usize> Clone for ReservedMemory<MAX_ITEM_SIZE> {
    fn clone(&self) -> Self {
        self.ref_count.fetch_add(1, Ordering::Release);

        Self {
            data: self.data.clone(),
            ref_count: self.ref_count.clone(),
            drop_fn: self.drop_fn,
        }
    }
}

impl<const MAX_ITEM_SIZE: usize> Drop for ReservedMemory<MAX_ITEM_SIZE> {
    fn drop(&mut self) {
        // `ref_count` equals the number of live `ReservedMemory` clones.
        //   reserve() → stores 1   (the one clone that `init` will produce)
        //   init()    → consumes UninitReservedMemory, count unchanged at 1
        //   clone()   → fetch_add  (count grows: 2, 3, …)
        //   drop()    → fetch_sub  (count shrinks; previous == 1 means we
        //                           were the last clone, so run the destructor)
        //
        // The arena never touches `ref_count`; slot freeness is tracked via
        // `Arc::strong_count` on the backing buffer instead. So the same
        // logic is correct whether or not the arena is still alive.
        let previous = self.ref_count.fetch_sub(1, Ordering::Release);

        if previous == 1 {
            // Pair the `Release` decrements above with an `Acquire` fence so
            // that everything other clones did with the payload happens-before
            // the destructor runs (the same pattern `Arc` uses in its `Drop`).
            core::sync::atomic::fence(Ordering::Acquire);

            // SAFETY: We are the last user of this slot. The data pointer is valid,
            // initialized, and no other `ReservedMemory` clone exists.
            let bytes_mut = unsafe { self.data.get().as_mut().unwrap() };
            (self.drop_fn)(bytes_mut);
        }
    }
}

// SAFETY: After initialization, the data behind `ReservedMemory` is immutable
// (no `&mut` access is possible while any clone exists). The logical ref_count
// is an `AtomicU32` with proper ordering, and the backing `Arc` guarantees the
// storage outlives all handles. These together satisfy the `Send` and `Sync`
// contracts.
unsafe impl<const MAX_ITEM_SIZE: usize> Send for ReservedMemory<MAX_ITEM_SIZE> {}
unsafe impl<const MAX_ITEM_SIZE: usize> Sync for ReservedMemory<MAX_ITEM_SIZE> {}

impl<const MAX_ITEM_SIZE: usize> std::convert::AsRef<Bytes<MAX_ITEM_SIZE>>
    for ReservedMemory<MAX_ITEM_SIZE>
{
    /// Gets the reserved bytes.
    fn as_ref(&self) -> &Bytes<MAX_ITEM_SIZE> {
        // SAFETY: The pointer is valid and the data is read-only after initialization.
        unsafe { self.data.as_ref().get().as_ref().unwrap() }
    }
}

impl<const MAX_ITEM_COUNT: usize, const MAX_ITEM_SIZE: usize> Default
    for Arena<MAX_ITEM_COUNT, MAX_ITEM_SIZE>
{
    fn default() -> Self {
        Self::new()
    }
}

impl<const MAX_ITEM_COUNT: usize, const MAX_ITEM_SIZE: usize> Arena<MAX_ITEM_COUNT, MAX_ITEM_SIZE> {
    /// Creates a new, empty `Arena`.
    ///
    /// The internal buffer is not allocated until the first call to [`reserve`](Self::reserve).
    pub const fn new() -> Self {
        Self {
            buffer: Vec::new(),
            ref_counts: Vec::new(),
            cursor: 0,
        }
    }

    /// Returns `true` if an object of type `O` fits within a single slot.
    ///
    /// Checks that both the size and alignment of `O` are compatible with
    /// [`Bytes<MAX_ITEM_SIZE>`].
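    ///
    /// # Example
    ///
    /// A quick sketch (the arena dimensions and probe types are arbitrary,
    /// for illustration only):
    ///
    /// ```ignore
    /// // 16 bytes with 8-byte alignment fits a 256-byte, 64-byte-aligned slot.
    /// assert!(Arena::<4, 256>::accept::<[u64; 2]>());
    /// // A 512-byte value does not fit and would make `init` panic.
    /// assert!(!Arena::<4, 256>::accept::<[u8; 512]>());
    /// ```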
    pub const fn accept<O>() -> bool {
        accept_obj::<O, MAX_ITEM_SIZE>()
    }

    /// Attempts to reserve an uninitialized slot in the arena.
    ///
    /// On the first call, the internal buffer is lazily allocated to
    /// `MAX_ITEM_COUNT` slots. Subsequent calls scan from the current cursor
    /// position, wrapping around circularly, looking for a slot whose backing
    /// `Arc` has a strong count of 1 (meaning no outstanding
    /// [`ReservedMemory`] handles reference it).
    ///
    /// # Returns
    ///
    /// - `Some(UninitReservedMemory)` — a handle to the reserved slot, ready
    ///   to be initialized via [`UninitReservedMemory::init`].
    /// - `None` — all slots are currently in use.
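    ///
    /// # Example
    ///
    /// A sketch of the full/reuse behavior with a single-slot arena (the
    /// payload is arbitrary):
    ///
    /// ```ignore
    /// let mut arena = Arena::<1, 256>::new();
    /// let held = arena.reserve().expect("first slot").init(0u32);
    ///
    /// // The only slot is in use, so the next reservation fails.
    /// assert!(arena.reserve().is_none());
    ///
    /// // Dropping the last handle frees the slot for reuse.
    /// drop(held);
    /// assert!(arena.reserve().is_some());
    /// ```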
    pub fn reserve(&mut self) -> Option<UninitReservedMemory<MAX_ITEM_SIZE>> {
        if self.buffer.is_empty() {
            for _ in 0..MAX_ITEM_COUNT {
                self.ref_counts.push(Arc::new(AtomicU32::new(0)));

                // Silence this clippy lint: the slot is deliberately wrapped in an
                // `Arc` even though `UnsafeCell<Bytes<_>>` is not `Send + Sync`,
                // because the bytes might outlive the current arena. Thread safety
                // is provided by the manual `Send`/`Sync` impls on `ReservedMemory`.
                #[allow(clippy::arc_with_non_send_sync)]
                self.buffer.push(Arc::new(UnsafeCell::new(Bytes {
                    bytes: [0; MAX_ITEM_SIZE],
                })));
            }
        }

        for i in 0..MAX_ITEM_COUNT {
            let i = (i + self.cursor) % MAX_ITEM_COUNT;
            let item = &self.buffer[i];

            // SAFETY: `Arc::strong_count` is not synchronized, but this is safe
            // because `reserve` takes `&mut self`, guaranteeing single-threaded
            // access to the arena side. The only concurrent mutation is a
            // `ReservedMemory` being dropped on another thread, which decrements
            // the strong count with `Release` ordering when its `Arc` is dropped.
            // A stale (too-high) read here is harmless — we simply skip a slot
            // that is actually free, and will find it on the next call.
            if Arc::strong_count(item) == 1 {
                self.cursor = (i + 1) % MAX_ITEM_COUNT;
                let data = item.clone();
                let ref_count = self.ref_counts[i].clone();
                ref_count.store(1, Ordering::Release);

                return Some(UninitReservedMemory {
                    data,
                    ref_count,
                    #[cfg(test)]
                    index: i,
                    not_sync: PhantomData,
                });
            }
        }

        None
    }
}

const fn accept_obj<O, const MAX_ITEM_SIZE: usize>() -> bool {
    size_of::<O>() <= size_of::<Bytes<MAX_ITEM_SIZE>>()
        && align_of::<O>() <= align_of::<Bytes<MAX_ITEM_SIZE>>()
}

#[cfg(test)]
mod tests {
    use super::*;

    const MAX_ITEM_SIZE: usize = 2048;

    #[test]
    fn test_lazy_initialization() {
        let mut arena = Arena::<10, MAX_ITEM_SIZE>::new();
        assert_eq!(
            arena.buffer.len(),
            0,
            "Buffer should be empty before first reservation"
        );

        let _ = arena.reserve();

        assert_eq!(
            arena.buffer.len(),
            10,
            "Buffer should be initialized to MAX_ITEM_COUNT slots"
        );
    }

    #[test]
    fn test_sequential_allocation_moves_cursor() {
        let mut arena = Arena::<3, MAX_ITEM_SIZE>::new();

        // First allocation
        let _ = arena.reserve().expect("Should allocate");
        assert_eq!(arena.cursor, 1);

        // Second allocation
        let _ = arena.reserve().expect("Should allocate");
        assert_eq!(arena.cursor, 2);
    }

    #[test]
    fn test_reuse_of_freed_data() {
        let mut arena = Arena::<2, MAX_ITEM_SIZE>::new();

        // Fill the arena
        let data0 = arena.reserve().unwrap();
        let _data1 = arena.reserve().unwrap();

        // Arena is now full (each slot's Arc strong count is 2)
        assert!(arena.reserve().is_none(), "Should be full");

        // Free index 0 by dropping its handle, returning the slot's strong count to 1.
        let data0_index = data0.index;
        core::mem::drop(data0);

        // Should now be able to reserve again, and it should pick up index 0
        let data2 = arena.reserve().expect("Should reuse index 0");
        assert_eq!(data0_index, data2.index);
    }

    #[test]
    fn test_circular_cursor_search() {
        let mut arena = Arena::<3, MAX_ITEM_SIZE>::new();

        // Fill 0, 1, 2
        let _d0 = arena.reserve().unwrap();
        let d1 = arena.reserve().unwrap();
        let _d2 = arena.reserve().unwrap();

        // Free index 1 (the middle)
        core::mem::drop(d1);

        // After filling all three slots the cursor has wrapped back to 0.
        // The search starts at (cursor + i) % size, skips the still-held
        // slot 0, and finds the hole at index 1.
        let _ = arena.reserve().expect("Should find the hole at index 1");
        assert_eq!(arena.cursor, 2);
    }

    #[test]
    fn test_full_arena_returns_none() {
        let mut arena = Arena::<5, MAX_ITEM_SIZE>::new();

        let mut reserved = Vec::new();

        for _ in 0..5 {
            let item = arena.reserve();
            assert!(item.is_some());
            reserved.push(item);
        }

        // Next one should fail
        assert!(arena.reserve().is_none());
    }
}

#[cfg(test)]
mod drop_lifecycle_tests {
    //! These tests verify the drop-timing contract of `ReservedMemory`:
    //! `drop_fn` must run exactly once, and **only when the last clone is
    //! released**.
    //!
    //! Every assertion is expressed through `Arc::strong_count` on a
    //! payload-owned anchor. Each case is runnable under `cargo miri test`
    //! to catch UB.

    use super::*;
    use std::boxed::Box;
    use std::sync::Arc;
    use std::vec::Vec;

    struct Payload {
        _anchor: Arc<()>,
    }

    #[test]
    fn last_clone_runs_destructor_with_one_clone() {
        let anchor = Arc::new(());
        let mut arena = Arena::<4, 256>::new();
        let reserved = arena.reserve().unwrap().init(Payload {
            _anchor: anchor.clone(),
        });
        assert_eq!(Arc::strong_count(&anchor), 2);
        drop(reserved);
        assert_eq!(
            Arc::strong_count(&anchor),
            1,
            "single ReservedMemory must run drop_fn on drop"
        );
    }

    #[test]
    fn destructor_deferred_until_last_of_two_clones() {
        let anchor = Arc::new(());
        let mut arena = Arena::<4, 256>::new();
        let a = arena.reserve().unwrap().init(Payload {
            _anchor: anchor.clone(),
        });
        let b = a.clone();

        drop(a);
        assert_eq!(
            Arc::strong_count(&anchor),
            2,
            "destructor fired prematurely — `b` still owns the payload"
        );
        drop(b);
        assert_eq!(Arc::strong_count(&anchor), 1);
    }

    #[test]
    fn destructor_deferred_until_last_of_many_clones() {
        let anchor = Arc::new(());
        let mut arena = Arena::<4, 256>::new();
        let first = arena.reserve().unwrap().init(Payload {
            _anchor: anchor.clone(),
        });

        const N: usize = 16;
        let clones: Vec<_> = (0..N).map(|_| first.clone()).collect();
        drop(first);
        assert_eq!(Arc::strong_count(&anchor), 2);

        for (i, c) in clones.into_iter().enumerate() {
            drop(c);
            let expected = if i + 1 == N { 1 } else { 2 };
            assert_eq!(
                Arc::strong_count(&anchor),
                expected,
                "premature destructor after dropping clone {i}"
            );
        }
    }

    /// After the destructor has run and the slot is refilled with a new
    /// payload, dropping the new handle must not touch the prior payload or
    /// run its destructor a second time.
    #[test]
    fn destructor_runs_exactly_once_across_refill_cycle() {
        let anchor1 = Arc::new(());
        let anchor2 = Arc::new(());
        let mut arena = Arena::<1, 256>::new();

        let first = arena.reserve().unwrap().init(Payload {
            _anchor: anchor1.clone(),
        });
        drop(first);
        assert_eq!(Arc::strong_count(&anchor1), 1);

        // Slot is free again — reuse it with a brand-new payload.
        let second = arena.reserve().unwrap().init(Payload {
            _anchor: anchor2.clone(),
        });
        assert_eq!(
            Arc::strong_count(&anchor1),
            1,
            "refilling the slot must not touch the prior payload's anchor"
        );
        assert_eq!(Arc::strong_count(&anchor2), 2);
        drop(second);
        assert_eq!(Arc::strong_count(&anchor2), 1);
    }

    /// A payload owning a heap allocation (`Box`) gives Miri something
    /// concrete to complain about if the destructor is missed or runs
    /// twice: a double drop is a double-free.
    #[test]
    fn heap_owning_payload_drops_exactly_once() {
        struct HeapOwner(#[allow(dead_code)] Box<[u64; 8]>);

        let mut arena = Arena::<2, 256>::new();
        let a = arena
            .reserve()
            .unwrap()
            .init(HeapOwner(Box::new([1, 2, 3, 4, 5, 6, 7, 8])));
        let b = a.clone();
        let c = a.clone();
        drop(a);
        drop(b);
        drop(c);
        // Miri would flag a double-free on the Box here if the destructor
        // fired more than once, and its default leak check would flag a
        // leak if it never fired.
    }
}

#[cfg(test)]
mod concurrent_drop_timing_tests {
    //! Concurrent counterparts to `drop_lifecycle_tests`. The existing
    //! `concurrent_tests` module checks that `drop_fn` runs exactly once
    //! under contention but not *when* it runs — a premature destructor
    //! satisfies "exactly once" while still corrupting surviving clones.
    //! These tests bracket drop timing with live observers.

    use super::*;
    use std::sync::{Arc, Barrier};
    use std::thread;

    /// Drop N clones concurrently and verify that the payload's anchor is
    /// released exactly once, after all clones finish. Running under Miri
    /// with `-Zmiri-disable-isolation -Zmiri-preemption-rate=...` flags
    /// is not required — this test observes the post-condition on the
    /// main thread after the join.
    #[test]
    fn concurrent_drops_release_anchor_exactly_once() {
        let anchor = Arc::new(());
        let mut arena = Arena::<4, 256>::new();

        struct Payload {
            #[allow(dead_code)]
            anchor: Arc<()>,
        }

        let reserved = arena.reserve().unwrap().init(Payload {
            anchor: anchor.clone(),
        });

        const N: usize = 8;
        let barrier = Arc::new(Barrier::new(N));
        let mut handles = Vec::with_capacity(N);
        for _ in 0..N - 1 {
            let clone = reserved.clone();
            let b = barrier.clone();
            handles.push(thread::spawn(move || {
                b.wait();
                drop(clone);
            }));
        }
        {
            let b = barrier.clone();
            let original = reserved;
            handles.push(thread::spawn(move || {
                b.wait();
                drop(original);
            }));
        }

        for h in handles {
            h.join().unwrap();
        }

        assert_eq!(
            Arc::strong_count(&anchor),
            1,
            "after all clones drop, payload anchor must be released exactly once"
        );
    }
}

#[cfg(test)]
mod concurrent_tests {
    use super::*;
    use std::sync::{Arc, Barrier, Mutex};
    use std::{thread, vec};

    const MAX_ITEM_SIZE: usize = 2048;

    /// Wraps an arena in a Mutex for shared cross-thread access.
    fn shared_arena<const N: usize>() -> Arc<Mutex<Arena<N, MAX_ITEM_SIZE>>> {
        #[allow(clippy::arc_with_non_send_sync)]
        Arc::new(Mutex::new(Arena::<N, MAX_ITEM_SIZE>::new()))
    }

    /// Verifies that `drop_fn` is called exactly once even when multiple threads
    /// hold clones and release them concurrently.
    #[test]
    fn test_drop_called_exactly_once_under_contention() {
        let drop_count = Arc::new(std::sync::atomic::AtomicUsize::new(0));
        let arena = shared_arena::<4>();

        let uninit = arena.lock().unwrap().reserve().unwrap();

        struct Probe(Arc<std::sync::atomic::AtomicUsize>);
        impl Drop for Probe {
            fn drop(&mut self) {
                self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            }
        }

        let reserved = uninit.init(Probe(drop_count.clone()));

        // Spawn 32 threads; each receives a clone of `ReservedMemory` and
        // drops it at the same time as the others.
        let barrier = Arc::new(Barrier::new(32));
        let mut handles = vec![];

        for _ in 0..32 {
            let r = reserved.clone();
            let b = barrier.clone();
            handles.push(thread::spawn(move || {
                b.wait(); // all threads drop at the same time
                drop(r);
            }));
        }

        drop(reserved); // drop the original too
        for h in handles {
            h.join().unwrap();
        }

        assert_eq!(
            drop_count.load(std::sync::atomic::Ordering::Relaxed),
            1,
            "drop_fn must be called exactly once"
        );
    }

    /// Verifies that a slot becomes available for reuse after all `ReservedMemory`
    /// clones are dropped across threads.
    #[test]
    fn test_slot_reuse_after_concurrent_drop() {
        let arena = shared_arena::<1>();
        let uninit = arena.lock().unwrap().reserve().unwrap();
        let reserved = uninit.init(42u64);

        let barrier = Arc::new(Barrier::new(8));
        let mut handles = vec![];

        for _ in 0..8 {
            let r = reserved.clone();
            let b = barrier.clone();
            handles.push(thread::spawn(move || {
                b.wait();
                drop(r);
            }));
        }

        drop(reserved);
        for h in handles {
            h.join().unwrap();
        }

        // All clones dropped — the single slot should be free again.
        assert!(
            arena.lock().unwrap().reserve().is_some(),
            "Slot should be available after all clones are dropped"
        );
    }

    /// Verifies that `ReservedMemory` clones dropped after the arena is dropped
    /// still correctly run `drop_fn` (the count == 1 case).
    #[test]
    fn test_drop_after_arena_dropped() {
        let drop_count = Arc::new(std::sync::atomic::AtomicUsize::new(0));

        struct Probe(Arc<std::sync::atomic::AtomicUsize>);
        impl Drop for Probe {
            fn drop(&mut self) {
                self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            }
        }

        let reserved = {
            let mut arena = Arena::<4, MAX_ITEM_SIZE>::new();
            let uninit = arena.reserve().unwrap();
            uninit.init(Probe(drop_count.clone()))
            // arena drops here
        };

        // Spawn threads that hold clones past the arena's lifetime.
        let barrier = Arc::new(Barrier::new(8));
        let mut handles = vec![];

        for _ in 0..8 {
            let r = reserved.clone();
            let b = barrier.clone();
            handles.push(thread::spawn(move || {
                b.wait();
                drop(r);
            }));
        }

        drop(reserved);
        for h in handles {
            h.join().unwrap();
        }

        assert_eq!(
            drop_count.load(std::sync::atomic::Ordering::Relaxed),
            1,
            "drop_fn must fire exactly once even when arena is dropped first"
        );
    }
}