// shm_primitives/varslot_pool.rs

1//! VarSlotPool — lock-free shared-memory allocator for large payloads.
2//!
3//! The pool is partitioned into size classes. Each class maintains a
4//! Treiber stack free list over a contiguous array of slots. Slots are
5//! shared across all peers; ownership is tracked via `owner_peer` for
6//! crash recovery.
7//!
8//! Memory layout (relative to the pool's base offset in the segment):
9//!
10//! ```text
11//! [SizeClassHeader × num_classes]          (num_classes × 64 bytes, 64-byte aligned)
12//! For each class i, in order:
13//!   [VarSlotMeta × slot_count[i]]          (16 × slot_count bytes, 64-byte aligned)
14//!   [u8 × slot_size[i] × slot_count[i]]    (data region, 64-byte aligned)
15//! ```
16//!
17//! The same layout is reconstructed on both host and guest side from
18//! the `SizeClassConfig` slice stored in the segment header.
19
20use core::mem::size_of;
21
22use crate::sync::{AtomicU32, AtomicU64, Ordering};
23use crate::{Region, SlotState, VarSlotMeta};
24
25// ── helpers ──────────────────────────────────────────────────────────────────
26
/// Round `n` up to the next multiple of `align`.
///
/// `align` must be a non-zero power of two — the bitmask trick below is
/// only correct under that precondition (every call site uses 64). The
/// `debug_assert!` makes a violation loud in debug builds instead of
/// silently producing a wrong layout.
const fn align_up(n: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (n + align - 1) & !(align - 1)
}
30
/// Sentinel value in the free list meaning "end of list / class exhausted".
///
/// `u32::MAX` can never collide with a real slot index: slot counts are
/// configured per class and are far below `u32::MAX`.
const EMPTY: u32 = u32::MAX;
33
/// Pack a (slot_idx, aba_gen) pair into a single u64 for the Treiber head.
///
/// The ABA generation occupies the high 32 bits, the slot index the low 32;
/// [`unpack`] is the exact inverse.
#[inline]
fn pack(slot_idx: u32, aba_gen: u32) -> u64 {
    let hi = u64::from(aba_gen) << 32;
    let lo = u64::from(slot_idx);
    hi | lo
}
39
/// Unpack a Treiber head back to (slot_idx, aba_gen).
///
/// Inverse of `pack`: the slot index lives in the low 32 bits, the ABA
/// generation in the high 32.
#[inline]
fn unpack(v: u64) -> (u32, u32) {
    let slot_idx = (v & u64::from(u32::MAX)) as u32;
    let aba_gen = (v >> 32) as u32;
    (slot_idx, aba_gen)
}
45
46// ── public types ─────────────────────────────────────────────────────────────
47
/// Configuration for one size class, supplied at pool creation and at attach.
///
/// Host and guest must pass identical config slices so [`VarSlotPool::layout`]
/// reconstructs the same byte offsets on both sides.
///
/// r[impl shm.varslot.classes]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SizeClassConfig {
    /// Size of each slot's data region in bytes.
    pub slot_size: u32,
    /// Number of slots in this class's initial extent.
    pub slot_count: u32,
}
58
/// Reference to an allocated slot — returned by [`VarSlotPool::allocate`].
///
/// This is the value that goes into a `framing` slot-ref entry. It must be
/// passed back verbatim to [`VarSlotPool::free`]; the `generation` field is
/// what lets `free` reject stale or duplicated handles.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SlotRef {
    /// Index into the pool's class array.
    pub class_idx: u8,
    /// Extent index within the class (always 0 until dynamic growth is wired up).
    pub extent_idx: u8,
    /// Slot index within the extent.
    pub slot_idx: u32,
    /// Generation counter at the time of allocation (for double-free detection).
    pub generation: u32,
}
73
74/// Returned by [`VarSlotPool::free`] when the generation check fails.
75#[derive(Debug)]
76pub struct DoubleFreeError {
77    pub slot: SlotRef,
78}
79
80// ── shared-memory header ──────────────────────────────────────────────────────
81
/// Per-class header that lives inside the shared memory region.
///
/// Layout is part of the cross-process ABI: exactly one 64-byte cache line
/// (enforced by the `align(64)` attribute, the explicit padding, and the
/// size assertion below), so headers never share a line.
///
/// r[impl shm.varslot.freelist]
#[repr(C, align(64))]
pub struct SizeClassHeader {
    /// Size of each slot's data region in bytes.
    pub slot_size: u32,
    /// Number of slots in this class (across all current extents).
    pub slot_count: u32,
    /// Treiber stack head: packed `(slot_idx: u32, aba_gen: u32)`.
    /// `slot_idx == EMPTY` means the free list is empty.
    pub free_head: AtomicU64,
    /// Explicit padding out to the full 64-byte line.
    _pad: [u8; 48],
}
96
// Compile-time ABI check: the header must be exactly one cache line.
// Skipped under loom, whose model atomics presumably have a different
// in-memory size than the real ones — confirm if the cfg is ever removed.
#[cfg(not(loom))]
const _: () = assert!(size_of::<SizeClassHeader>() == 64);
99
100// ── VarSlotPool ──────────────────────────────────────────────────────────────
101
/// Per-class runtime view (Rust-side, not in shared memory).
///
/// Holds raw pointers into the mapped region; they remain valid only while
/// the backing `Region` mapping is alive.
struct ClassView {
    /// Pointer to this class's `SizeClassHeader` inside the region.
    header: *mut SizeClassHeader,
    /// Base of this class's `VarSlotMeta` array.
    meta: *mut VarSlotMeta,
    /// Base of this class's contiguous slot data area.
    data: *mut u8,
    /// Number of slots in the class (copied from the config).
    slot_count: u32,
    /// Data bytes per slot (copied from the config).
    slot_size: u32,
}

// SAFETY: `ClassView` is just raw pointers into shared memory; all
// cross-thread mutation of the pointed-to state goes through the atomics in
// `SizeClassHeader`/`VarSlotMeta`, so moving or sharing the view itself is
// sound.
unsafe impl Send for ClassView {}
unsafe impl Sync for ClassView {}
113
/// Lock-free slot allocator operating on a shared memory `Region`.
///
/// All mutation of shared state happens through atomics; the struct itself
/// only caches per-class pointers computed at `init`/`attach` time.
///
/// r[impl shm.varslot]
pub struct VarSlotPool {
    /// One view per size class, in config order.
    classes: Vec<ClassView>,
}

// SAFETY: see `ClassView` — the pool holds no non-atomic shared state of its
// own; `classes` is immutable after construction.
unsafe impl Send for VarSlotPool {}
unsafe impl Sync for VarSlotPool {}
123
impl VarSlotPool {
    /// Compute the byte offsets of each class's metadata and data arrays,
    /// plus the total size required.
    ///
    /// Both `init` and `attach` use this to reconstruct the layout from the
    /// same `configs` slice — it must therefore stay fully deterministic.
    pub fn layout(configs: &[SizeClassConfig]) -> PoolLayout {
        // Header table sits first; round up so the class arrays that follow
        // stay 64-byte (cache-line) aligned.
        let headers_size = align_up(configs.len() * size_of::<SizeClassHeader>(), 64);
        let mut offset = headers_size;
        let mut class_offsets = Vec::with_capacity(configs.len());

        for cfg in configs {
            // Per class: meta array first, then the data area, each padded to
            // a 64-byte boundary.
            let meta_offset = offset;
            offset += align_up(size_of::<VarSlotMeta>() * cfg.slot_count as usize, 64);
            let data_offset = offset;
            offset += align_up(cfg.slot_size as usize * cfg.slot_count as usize, 64);
            class_offsets.push(ClassOffsets {
                meta_offset,
                data_offset,
            });
        }

        PoolLayout {
            total_size: offset,
            class_offsets,
        }
    }

    /// Total bytes this pool needs in the region.
    pub fn required_size(configs: &[SizeClassConfig]) -> usize {
        Self::layout(configs).total_size
    }

    /// Discover size-class configs from the in-segment class headers.
    ///
    /// Validates bounds and rejects zero-valued configs; used by peers that
    /// attach without out-of-band knowledge of the class table.
    ///
    /// # Safety
    ///
    /// `region` must point to a valid mapped segment region and `base_offset`
    /// must be the var-slot pool base from the segment header.
    pub unsafe fn discover_configs(
        region: Region,
        base_offset: usize,
        num_classes: u32,
    ) -> Result<Vec<SizeClassConfig>, &'static str> {
        if num_classes == 0 {
            return Err("segment missing var-slot classes");
        }

        // Reject a header table whose end overflows or runs past the region.
        let headers_size = num_classes as usize * size_of::<SizeClassHeader>();
        if base_offset
            .checked_add(headers_size)
            .is_none_or(|end| end > region.len())
        {
            return Err("var-slot class header table out of bounds");
        }

        let mut configs = Vec::with_capacity(num_classes as usize);
        for class_idx in 0..num_classes as usize {
            let header_off = base_offset + class_idx * size_of::<SizeClassHeader>();
            // SAFETY: `header_off + size_of::<SizeClassHeader>()` is within
            // the region by the bounds check above; validity of the mapping
            // itself is the caller's contract.
            let header = unsafe { region.get::<SizeClassHeader>(header_off) };
            if header.slot_size == 0 || header.slot_count == 0 {
                return Err("invalid var-slot class config in segment");
            }
            configs.push(SizeClassConfig {
                slot_size: header.slot_size,
                slot_count: header.slot_count,
            });
        }

        Ok(configs)
    }

    /// Initialize a new pool in `region` at `base_offset`.
    ///
    /// Writes all headers and builds the initial free lists. The region bytes
    /// must already be zeroed (segment creation zeros the mmap).
    ///
    /// # Panics
    ///
    /// Panics if `base_offset` is not 64-byte aligned or the region is too
    /// small for the computed layout.
    ///
    /// # Safety
    ///
    /// `region` must be exclusively owned (no concurrent readers/writers).
    /// `base_offset` must be 64-byte aligned.
    ///
    /// r[impl shm.varslot.slot-meta]
    pub unsafe fn init(region: Region, base_offset: usize, configs: &[SizeClassConfig]) -> Self {
        assert!(
            base_offset.is_multiple_of(64),
            "base_offset must be 64-byte aligned"
        );

        let layout = Self::layout(configs);
        assert!(
            base_offset + layout.total_size <= region.len(),
            "region too small for VarSlotPool"
        );

        let mut classes = Vec::with_capacity(configs.len());

        for (i, (cfg, offsets)) in configs.iter().zip(layout.class_offsets.iter()).enumerate() {
            // Write SizeClassHeader
            let hdr_off = base_offset + i * size_of::<SizeClassHeader>();
            // SAFETY: `hdr_off` lies inside the range asserted above; the
            // caller guarantees no concurrent access during init.
            let header: *mut SizeClassHeader =
                unsafe { region.get_mut::<SizeClassHeader>(hdr_off) };

            // SAFETY: exclusive ownership (caller contract) makes these plain,
            // non-atomic stores sound — nobody can observe the pool yet.
            unsafe {
                (*header).slot_size = cfg.slot_size;
                (*header).slot_count = cfg.slot_count;
                // Free list starts at slot 0
                (*header).free_head = AtomicU64::new(if cfg.slot_count > 0 {
                    pack(0, 0)
                } else {
                    pack(EMPTY, 0)
                });
                (*header)._pad = [0u8; 48];
            }

            // Init VarSlotMeta array — build a linked free list: 0 → 1 → … → n-1 → EMPTY
            let meta_ptr = region.offset(base_offset + offsets.meta_offset) as *mut VarSlotMeta;
            let data_ptr = region.offset(base_offset + offsets.data_offset);

            for slot in 0..cfg.slot_count {
                // SAFETY: `slot < cfg.slot_count`, so the element lies inside
                // the meta array, which the size assertion above placed fully
                // within the region.
                let m = unsafe { &mut *meta_ptr.add(slot as usize) };
                m.generation = AtomicU32::new(0);
                m.state = AtomicU32::new(SlotState::Free as u32);
                m.owner_peer = AtomicU32::new(0);
                m.next_free = AtomicU32::new(if slot + 1 < cfg.slot_count {
                    slot + 1
                } else {
                    EMPTY
                });
            }

            classes.push(ClassView {
                header,
                meta: meta_ptr,
                data: data_ptr,
                slot_count: cfg.slot_count,
                slot_size: cfg.slot_size,
            });
        }

        Self { classes }
    }

    /// Attach to an existing, already-initialized pool.
    ///
    /// Recomputes the same layout as `init` and caches the pointers; writes
    /// nothing to shared memory.
    ///
    /// # Safety
    ///
    /// The pool at `base_offset` must have been initialized with the same `configs`.
    pub unsafe fn attach(region: Region, base_offset: usize, configs: &[SizeClassConfig]) -> Self {
        assert!(
            base_offset.is_multiple_of(64),
            "base_offset must be 64-byte aligned"
        );

        let layout = Self::layout(configs);
        assert!(
            base_offset + layout.total_size <= region.len(),
            "region too small for VarSlotPool"
        );

        let mut classes = Vec::with_capacity(configs.len());

        for (i, (cfg, offsets)) in configs.iter().zip(layout.class_offsets.iter()).enumerate() {
            let hdr_off = base_offset + i * size_of::<SizeClassHeader>();
            // SAFETY: offsets are within the range asserted above; the caller
            // guarantees the pool there was initialized with these configs.
            let header: *mut SizeClassHeader =
                unsafe { region.get_mut::<SizeClassHeader>(hdr_off) };
            let meta_ptr = region.offset(base_offset + offsets.meta_offset) as *mut VarSlotMeta;
            let data_ptr = region.offset(base_offset + offsets.data_offset);

            classes.push(ClassView {
                header,
                meta: meta_ptr,
                data: data_ptr,
                slot_count: cfg.slot_count,
                slot_size: cfg.slot_size,
            });
        }

        Self { classes }
    }

    /// Allocate a slot for a payload of `size` bytes.
    ///
    /// Finds the smallest size class where `slot_size >= size` and pops from
    /// its free list. If that class is exhausted, tries the next larger one.
    /// Returns `None` if all suitable classes are exhausted (backpressure).
    ///
    /// r[impl shm.varslot.selection]
    /// r[impl shm.varslot.allocate]
    pub fn allocate(&self, size: u32, owner_peer: u8) -> Option<SlotRef> {
        // Find the first class whose slot_size >= size.
        // Assumes `classes` is sorted by ascending slot_size — init/attach
        // preserve config order, so the configs must be sorted by the caller.
        let start = self.classes.iter().position(|c| c.slot_size >= size)?;

        for (class_idx, view) in self.classes[start..].iter().enumerate() {
            let class_idx = (start + class_idx) as u8;
            if let Some(slot_ref) = self.try_alloc_from(class_idx, view, owner_peer) {
                return Some(slot_ref);
            }
        }
        None
    }

    /// Pop one slot from `view`'s Treiber free list, or `None` if exhausted.
    fn try_alloc_from(&self, class_idx: u8, view: &ClassView, owner_peer: u8) -> Option<SlotRef> {
        // SAFETY: `view.header` was derived from a live mapping in
        // init/attach and stays valid for the pool's lifetime.
        let header = unsafe { &*view.header };
        loop {
            let head = header.free_head.load(Ordering::Acquire);
            let (slot_idx, aba_gen) = unpack(head);

            if slot_idx == EMPTY {
                return None; // class exhausted
            }

            // SAFETY: indices published on the free list are always
            // `< slot_count` (established by init and free).
            // NOTE(review): that invariant is trusted across processes — a
            // corrupted segment could publish an out-of-range index; confirm
            // peers are trusted or add a bounds check here.
            let meta = unsafe { &*view.meta.add(slot_idx as usize) };
            let next = meta.next_free.load(Ordering::Acquire);

            // CAS the free head from (slot_idx, aba_gen) to (next, aba_gen+1).
            // Bumping aba_gen on every successful pop (and push, in `free`)
            // is what makes the stale `next` of a classic ABA race fail the CAS.
            let new_head = pack(next, aba_gen.wrapping_add(1));
            if header
                .free_head
                .compare_exchange(head, new_head, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                // Won the slot — update metadata.
                // `fetch_add` returns the previous generation; the returned
                // handle carries the new one so `free` can reject stale refs.
                let new_gen = meta
                    .generation
                    .fetch_add(1, Ordering::AcqRel)
                    .wrapping_add(1);
                meta.state
                    .store(SlotState::Allocated as u32, Ordering::Release);
                meta.owner_peer.store(owner_peer as u32, Ordering::Release);

                return Some(SlotRef {
                    class_idx,
                    extent_idx: 0,
                    slot_idx,
                    generation: new_gen,
                });
            }
            // CAS failed — another thread popped or pushed; retry
        }
    }

    /// Free a previously allocated slot.
    ///
    /// Verifies the generation matches to detect double-frees, then pushes
    /// the slot back onto the class's Treiber free list.
    ///
    /// # Panics
    ///
    /// Panics if `slot_ref.class_idx` is out of range for this pool.
    /// NOTE(review): a forged `slot_idx` would index the meta array out of
    /// bounds below — assumes refs originate from `allocate` on this pool.
    ///
    /// r[impl shm.varslot.free]
    pub fn free(&self, slot_ref: SlotRef) -> Result<(), DoubleFreeError> {
        let view = &self.classes[slot_ref.class_idx as usize];
        // SAFETY: `slot_idx` came from `allocate` on this pool (caller
        // contract, see NOTE above), so it is within the class's meta array.
        let meta = unsafe { &*view.meta.add(slot_ref.slot_idx as usize) };

        // Detect double-free: slot must still be Allocated and generation must match.
        // State check catches immediate double-free; generation check catches stale handles
        // after the slot has been recycled by another allocator.
        if meta.state.load(Ordering::Acquire) != SlotState::Allocated as u32
            || meta.generation.load(Ordering::Acquire) != slot_ref.generation
        {
            return Err(DoubleFreeError { slot: slot_ref });
        }

        meta.state.store(SlotState::Free as u32, Ordering::Release);
        meta.owner_peer.store(0, Ordering::Release);

        // Treiber push: link the slot in front of the current head, then CAS
        // the head (with a fresh ABA generation) to point at it.
        // SAFETY: same pointer-validity argument as in `try_alloc_from`.
        let header = unsafe { &*view.header };
        loop {
            let head = header.free_head.load(Ordering::Acquire);
            let (head_idx, aba_gen) = unpack(head);

            // Safe to (re)write on every retry: this slot is not reachable
            // from the list until the CAS below succeeds.
            meta.next_free.store(head_idx, Ordering::Release);

            let new_head = pack(slot_ref.slot_idx, aba_gen.wrapping_add(1));
            if header
                .free_head
                .compare_exchange(head, new_head, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                return Ok(());
            }
            // CAS failed — retry
        }
    }

    /// Return a mutable slice into the slot's data area.
    ///
    /// The returned lifetime `'a` is unconstrained — it is the caller's job
    /// to keep the backing mapping alive for as long as the slice is used.
    ///
    /// # Safety
    ///
    /// The slot must be currently allocated and the caller must ensure
    /// no concurrent access to the same slot's data.
    pub unsafe fn slot_data_mut<'a>(&self, slot_ref: &SlotRef) -> &'a mut [u8] {
        let view = &self.classes[slot_ref.class_idx as usize];
        let offset = slot_ref.slot_idx as usize * view.slot_size as usize;
        // SAFETY: slot_idx < slot_count (caller contract), so the
        // [offset, offset + slot_size) window is inside the class data area;
        // exclusivity is the caller's contract.
        unsafe { core::slice::from_raw_parts_mut(view.data.add(offset), view.slot_size as usize) }
    }

    /// Return an immutable slice into the slot's data area.
    ///
    /// # Safety
    ///
    /// The slot must be currently allocated and contain readable payload bytes.
    pub unsafe fn slot_data<'a>(&self, slot_ref: &SlotRef) -> &'a [u8] {
        let view = &self.classes[slot_ref.class_idx as usize];
        let offset = slot_ref.slot_idx as usize * view.slot_size as usize;
        // SAFETY: same bounds argument as `slot_data_mut`; read-only access.
        unsafe { core::slice::from_raw_parts(view.data.add(offset), view.slot_size as usize) }
    }

    /// Return the number of size classes.
    pub fn class_count(&self) -> usize {
        self.classes.len()
    }

    /// Return the slot size for a given class.
    ///
    /// # Panics
    ///
    /// Panics if `class_idx >= class_count()`.
    pub fn slot_size(&self, class_idx: usize) -> u32 {
        self.classes[class_idx].slot_size
    }

    /// Crash recovery: reclaim all slots owned by `peer_id` back to their
    /// respective free lists.
    ///
    /// Must be called only by the host after confirming the peer is dead.
    /// A slot whose generation moves between the load and the `free` call
    /// below was concurrently recycled; the failed free is deliberately
    /// ignored (`let _`) because the slot is already back in circulation.
    ///
    /// r[impl shm.varslot.crash-recovery]
    pub fn reclaim_peer_slots(&self, peer_id: u8) {
        for (class_idx, view) in self.classes.iter().enumerate() {
            for slot_idx in 0..view.slot_count {
                // SAFETY: `slot_idx < view.slot_count` bounds the meta access.
                let meta = unsafe { &*view.meta.add(slot_idx as usize) };
                let owner = meta.owner_peer.load(Ordering::Acquire);
                if owner != peer_id as u32 {
                    continue;
                }
                let state = meta.state.load(Ordering::Acquire);
                if state != SlotState::Allocated as u32 {
                    continue;
                }

                // Reconstruct the ref the dead peer would have held and free it.
                let slot_gen = meta.generation.load(Ordering::Acquire);
                let _ = self.free(SlotRef {
                    class_idx: class_idx as u8,
                    extent_idx: 0,
                    slot_idx,
                    generation: slot_gen,
                });
            }
        }
    }
}
470
471// ── layout helper ─────────────────────────────────────────────────────────────
472
/// Byte offsets of a single class's metadata and data arrays.
///
/// Both offsets are relative to the pool base, not the region start.
pub struct ClassOffsets {
    /// Start of the class's `VarSlotMeta` array.
    pub meta_offset: usize,
    /// Start of the class's slot data area.
    pub data_offset: usize,
}
478
/// Result of [`VarSlotPool::layout`].
///
/// Deterministic for a given `configs` slice, which is what lets host and
/// guest reconstruct identical layouts independently.
pub struct PoolLayout {
    /// Total bytes needed in the region.
    pub total_size: usize,
    /// Per-class offsets (relative to pool base).
    pub class_offsets: Vec<ClassOffsets>,
}
486
487// ── tests ─────────────────────────────────────────────────────────────────────
488
#[cfg(all(test, not(loom)))]
mod tests {
    use super::*;
    use crate::HeapRegion;

    /// Three classes: small (1 KiB × 8), medium (16 KiB × 4), large (256 KiB × 2).
    const CLASSES: &[SizeClassConfig] = &[
        SizeClassConfig {
            slot_size: 1024,
            slot_count: 8,
        },
        SizeClassConfig {
            slot_size: 16384,
            slot_count: 4,
        },
        SizeClassConfig {
            slot_size: 262144,
            slot_count: 2,
        },
    ];

    /// Build a freshly initialized pool over a zeroed heap region. The region
    /// is returned alongside the pool to keep the mapping alive.
    fn make_pool() -> (HeapRegion, VarSlotPool) {
        let size = VarSlotPool::required_size(CLASSES);
        let region = HeapRegion::new_zeroed(size);
        let pool = unsafe { VarSlotPool::init(region.region(), 0, CLASSES) };
        (region, pool)
    }

    #[test]
    fn alloc_and_free_basic() {
        let (_region, pool) = make_pool();

        let slot = pool.allocate(512, 1).expect("should allocate from class 0");
        assert_eq!(slot.class_idx, 0);
        assert_eq!(slot.slot_idx, 0);
        assert_eq!(slot.generation, 1);

        pool.free(slot).expect("free should succeed");
    }

    #[test]
    fn alloc_fills_smallest_fitting_class() {
        let (_region, pool) = make_pool();

        // 2000 bytes doesn't fit in class 0 (1024), goes to class 1 (16384)
        let slot = pool
            .allocate(2000, 0)
            .expect("should allocate from class 1");
        assert_eq!(slot.class_idx, 1);
    }

    #[test]
    fn alloc_exhausts_class_falls_through() {
        let (_region, pool) = make_pool();

        // Exhaust all 8 slots in class 0
        let mut slots = Vec::new();
        for _ in 0..8 {
            slots.push(pool.allocate(1, 0).expect("should allocate"));
        }
        assert!(slots.iter().all(|s| s.class_idx == 0));

        // Class 0 exhausted; should fall through to class 1
        let overflow = pool.allocate(1, 0).expect("should fall through to class 1");
        assert_eq!(overflow.class_idx, 1);

        for s in slots {
            pool.free(s).unwrap();
        }
        pool.free(overflow).unwrap();
    }

    #[test]
    fn all_classes_exhausted_returns_none() {
        let (_region, pool) = make_pool();

        // Drain everything: 8 + 4 + 2 = 14 slots
        let mut slots = Vec::new();
        while let Some(s) = pool.allocate(1, 0) {
            slots.push(s);
        }
        assert_eq!(slots.len(), 14);
        assert!(pool.allocate(1, 0).is_none());

        for s in slots {
            pool.free(s).unwrap();
        }
    }

    #[test]
    fn free_recycles_slot() {
        let (_region, pool) = make_pool();

        let s1 = pool.allocate(1, 0).unwrap();
        pool.free(s1).unwrap();

        let s2 = pool.allocate(1, 0).unwrap();
        // Same physical slot (LIFO Treiber stack), but generation bumped
        assert_eq!(s2.slot_idx, s1.slot_idx);
        assert_eq!(s2.generation, s1.generation + 1);
        pool.free(s2).unwrap();
    }

    #[test]
    fn double_free_detected() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(1, 0).unwrap();
        pool.free(s).unwrap();

        // Second free with same generation → error
        assert!(pool.free(s).is_err());
    }

    #[test]
    fn slot_data_write_read() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(100, 0).unwrap();
        unsafe {
            let data = pool.slot_data_mut(&s);
            data[..5].copy_from_slice(b"hello");
        }
        // Read back through the immutable accessor so `slot_data` gets
        // coverage too (previously both halves used `slot_data_mut`).
        unsafe {
            let data = pool.slot_data(&s);
            assert_eq!(&data[..5], b"hello");
        }
        pool.free(s).unwrap();
    }

    #[test]
    fn size_too_large_returns_none() {
        let (_region, pool) = make_pool();

        // Largest class is 262144; asking for more → None
        assert!(pool.allocate(300_000, 0).is_none());
    }

    #[test]
    fn reclaim_peer_slots() {
        let (_region, pool) = make_pool();

        // Allocate some slots as peer 7
        let _s1 = pool.allocate(1, 7).unwrap();
        let _s2 = pool.allocate(1, 7).unwrap();
        let s3 = pool.allocate(1, 2).unwrap(); // different peer

        pool.reclaim_peer_slots(7);

        // s3 (peer 2) still allocated; 7 free slots remain in class 0.
        // Collect all free slots without re-freeing during the loop.
        let mut freed = Vec::new();
        while let Some(s) = pool.allocate(1, 0) {
            freed.push(s);
        }
        // class 0: 8 - 1 (s3) = 7 free; class 1: 4 free; class 2: 2 free → 13 total
        assert_eq!(freed.len(), 13);

        for s in freed {
            pool.free(s).unwrap();
        }
        pool.free(s3).unwrap();
    }

    #[test]
    fn layout_is_deterministic() {
        let l1 = VarSlotPool::layout(CLASSES);
        let l2 = VarSlotPool::layout(CLASSES);
        assert_eq!(l1.total_size, l2.total_size);
        for (a, b) in l1.class_offsets.iter().zip(l2.class_offsets.iter()) {
            assert_eq!(a.meta_offset, b.meta_offset);
            assert_eq!(a.data_offset, b.data_offset);
        }
    }

    #[test]
    fn owner_peer_tracked() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(1, 42).unwrap();
        // White-box: peek at the slot's metadata to verify owner tracking.
        let view = &pool.classes[s.class_idx as usize];
        let meta = unsafe { &*view.meta.add(s.slot_idx as usize) };
        assert_eq!(meta.owner_peer.load(Ordering::Acquire), 42);

        pool.free(s).unwrap();
        assert_eq!(meta.owner_peer.load(Ordering::Acquire), 0);
    }
}
676
677// ── loom tests ────────────────────────────────────────────────────────────────
678
#[cfg(loom)]
#[allow(dead_code)]
mod loom_tests {
    use super::*;
    use crate::HeapRegion;
    use loom::sync::Arc;

    // Tiny pool: 1 class, 2 slots of 64 bytes. Enough to exercise races without
    // blowing up loom's state-space budget.
    const LOOM_CLASSES: &[SizeClassConfig] = &[SizeClassConfig {
        slot_size: 64,
        slot_count: 2,
    }];

    /// Build the tiny pool; the region must outlive the pool, so it is
    /// returned alongside the `Arc`.
    fn loom_pool() -> (HeapRegion, Arc<VarSlotPool>) {
        let size = VarSlotPool::required_size(LOOM_CLASSES);
        let region = HeapRegion::new_zeroed(size);
        let pool = unsafe { VarSlotPool::init(region.region(), 0, LOOM_CLASSES) };
        (region, Arc::new(pool))
    }

    /// Two threads concurrently allocate from a 2-slot pool.
    /// They must get distinct slots (no aliasing).
    #[test]
    fn concurrent_alloc_no_aliasing() {
        loom::model(|| {
            let (_region, pool) = loom_pool();
            let pool1 = pool.clone();
            let pool2 = pool.clone();

            let t1 = loom::thread::spawn(move || pool1.allocate(1, 1));
            let t2 = loom::thread::spawn(move || pool2.allocate(1, 2));

            let s1 = t1.join().unwrap().expect("thread 1 must get a slot");
            let s2 = t2.join().unwrap().expect("thread 2 must get a slot");

            // Both threads must have won different physical slots.
            assert_ne!(s1.slot_idx, s2.slot_idx, "threads must not alias slots");

            pool.free(s1).unwrap();
            pool.free(s2).unwrap();
        });
    }

    /// One thread allocates a slot, then a second thread frees it.
    /// The generation must be consistent across the handoff.
    #[test]
    fn alloc_then_free_cross_thread() {
        loom::model(|| {
            let (_region, pool) = loom_pool();

            let slot = pool.allocate(1, 0).expect("must allocate");

            let pool2 = pool.clone();
            let t = loom::thread::spawn(move || pool2.free(slot));

            t.join().unwrap().expect("cross-thread free must succeed");
        });
    }

    /// Concurrent alloc and free on a 2-slot pool: one thread allocates then
    /// immediately frees in a loop; the other just allocates. Exercises the
    /// Treiber stack CAS paths under all interleavings.
    #[test]
    fn concurrent_alloc_and_free() {
        loom::model(|| {
            let (_region, pool) = loom_pool();
            let pool_alloc = pool.clone();
            let pool_free = pool.clone();

            // Pre-allocate one slot so the free thread always has something.
            let initial = pool.allocate(1, 0).expect("initial alloc");

            let t_free = loom::thread::spawn(move || {
                pool_free.free(initial).expect("free must succeed");
            });

            let t_alloc = loom::thread::spawn(move || pool_alloc.allocate(1, 0));

            t_free.join().unwrap();
            let maybe_slot = t_alloc.join().unwrap();

            // One of the two slots was free in every interleaving, so the
            // alloc normally succeeds; the test only asserts cleanup of
            // whatever it actually returned, not success itself.
            if let Some(s) = maybe_slot {
                pool.free(s).unwrap();
            }
        });
    }
}
769}