use core::mem::size_of;

use crate::sync::{AtomicU32, AtomicU64, Ordering};
use crate::{Region, SlotState, VarSlotMeta};

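/// Rounds `n` up to the next multiple of `align`; `align` must be a power of two.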
const fn align_up(n: usize, align: usize) -> usize {
    (n + align - 1) & !(align - 1)
}

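/// Sentinel slot index marking the end of a per-class free list.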
const EMPTY: u32 = u32::MAX;

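/// Packs a free-list head: slot index in the low 32 bits, ABA generation tag
/// in the high 32 bits.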
#[inline]
fn pack(slot_idx: u32, aba_gen: u32) -> u64 {
    ((aba_gen as u64) << 32) | (slot_idx as u64)
}

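/// Splits a packed free-list head back into `(slot_idx, aba_gen)`.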
#[inline]
fn unpack(v: u64) -> (u32, u32) {
    (v as u32, (v >> 32) as u32)
}

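/// Configuration for one size class: a fixed per-slot size and the number of
/// slots in the class.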
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SizeClassConfig {
    /// Size of each slot in bytes.
    pub slot_size: u32,
    /// Number of slots in this class.
    pub slot_count: u32,
}

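/// Handle to an allocated slot. The `generation` tag is checked on free to
/// detect stale or duplicate handles.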
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct SlotRef {
    /// Index of the size class the slot belongs to.
    pub class_idx: u8,
    /// Extent index (currently always 0).
    pub extent_idx: u8,
    /// Index of the slot within its class.
    pub slot_idx: u32,
    /// Generation of the slot at allocation time.
    pub generation: u32,
}

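/// Error returned by [`VarSlotPool::free`] when the slot is not currently
/// allocated or its generation does not match the handle.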
#[derive(Debug)]
pub struct DoubleFreeError {
    pub slot: SlotRef,
}

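/// Shared, cache-line-aligned header for one size class, stored in the mapped
/// region.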
#[repr(C, align(64))]
pub struct SizeClassHeader {
    pub slot_size: u32,
    pub slot_count: u32,
    /// Packed free-list head: `(aba_gen << 32) | slot_idx`.
    pub free_head: AtomicU64,
    _pad: [u8; 48],
}

// Loom's atomic types have a different size, so only check the native layout.
#[cfg(not(loom))]
const _: () = assert!(size_of::<SizeClassHeader>() == 64);

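/// Per-class view resolved against the mapped region: raw pointers to the
/// shared header, the slot metadata array, and the slot data area.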
struct ClassView {
    header: *mut SizeClassHeader,
    meta: *mut VarSlotMeta,
    data: *mut u8,
    slot_count: u32,
    slot_size: u32,
}

// SAFETY: the raw pointers target shared-memory locations that are only
// accessed through atomics or the caller-checked `unsafe` accessors below.
unsafe impl Send for ClassView {}
unsafe impl Sync for ClassView {}

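/// A lock-free slot allocator over a shared memory [`Region`], with multiple
/// fixed-size classes. Each class keeps a Treiber-stack free list whose packed
/// head carries an ABA generation tag.
///
/// A minimal single-process sketch (mirroring the tests below, which use the
/// heap-backed `HeapRegion`):
///
/// ```ignore
/// let configs = [SizeClassConfig { slot_size: 1024, slot_count: 8 }];
/// let region = HeapRegion::new_zeroed(VarSlotPool::required_size(&configs));
/// let pool = unsafe { VarSlotPool::init(region.region(), 0, &configs) };
/// let slot = pool.allocate(100, 1).expect("slot available");
/// pool.free(slot).unwrap();
/// ```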
pub struct VarSlotPool {
    classes: Vec<ClassView>,
}

// SAFETY: `VarSlotPool` only holds `ClassView`s, whose shared state is
// accessed atomically; see the `Send`/`Sync` rationale on `ClassView`.
unsafe impl Send for VarSlotPool {}
unsafe impl Sync for VarSlotPool {}

impl VarSlotPool {
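    /// Computes the pool layout for `configs`: the header table first, then
    /// each class's metadata and data arrays, all padded to 64-byte boundaries.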
    pub fn layout(configs: &[SizeClassConfig]) -> PoolLayout {
        let headers_size = align_up(configs.len() * size_of::<SizeClassHeader>(), 64);
        let mut offset = headers_size;
        let mut class_offsets = Vec::with_capacity(configs.len());

        for cfg in configs {
            let meta_offset = offset;
            offset += align_up(size_of::<VarSlotMeta>() * cfg.slot_count as usize, 64);
            let data_offset = offset;
            offset += align_up(cfg.slot_size as usize * cfg.slot_count as usize, 64);
            class_offsets.push(ClassOffsets {
                meta_offset,
                data_offset,
            });
        }

        PoolLayout {
            total_size: offset,
            class_offsets,
        }
    }

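    /// Total number of bytes required to hold a pool with the given configs.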
    pub fn required_size(configs: &[SizeClassConfig]) -> usize {
        Self::layout(configs).total_size
    }

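    /// Reads the size-class configs back out of an already-initialized
    /// segment's header table, validating bounds and rejecting zeroed entries.
    ///
    /// # Safety
    ///
    /// `region` must contain a pool header table at `base_offset` that was
    /// written by [`VarSlotPool::init`].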
    pub unsafe fn discover_configs(
        region: Region,
        base_offset: usize,
        num_classes: u32,
    ) -> Result<Vec<SizeClassConfig>, &'static str> {
        if num_classes == 0 {
            return Err("segment missing var-slot classes");
        }

        let headers_size = num_classes as usize * size_of::<SizeClassHeader>();
        if base_offset
            .checked_add(headers_size)
            .is_none_or(|end| end > region.len())
        {
            return Err("var-slot class header table out of bounds");
        }

        let mut configs = Vec::with_capacity(num_classes as usize);
        for class_idx in 0..num_classes as usize {
            let header_off = base_offset + class_idx * size_of::<SizeClassHeader>();
            let header = unsafe { region.get::<SizeClassHeader>(header_off) };
            if header.slot_size == 0 || header.slot_count == 0 {
                return Err("invalid var-slot class config in segment");
            }
            configs.push(SizeClassConfig {
                slot_size: header.slot_size,
                slot_count: header.slot_count,
            });
        }

        Ok(configs)
    }

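    /// Initializes a fresh pool in `region` at `base_offset`: writes every
    /// class header and links all slots of each class into its free list.
    ///
    /// # Safety
    ///
    /// The caller must have exclusive access to the region during
    /// initialization, and `region` must be valid for writes for the full
    /// layout computed from `configs`.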
    pub unsafe fn init(region: Region, base_offset: usize, configs: &[SizeClassConfig]) -> Self {
        assert!(
            base_offset.is_multiple_of(64),
            "base_offset must be 64-byte aligned"
        );

        let layout = Self::layout(configs);
        assert!(
            base_offset + layout.total_size <= region.len(),
            "region too small for VarSlotPool"
        );

        let mut classes = Vec::with_capacity(configs.len());

        for (i, (cfg, offsets)) in configs.iter().zip(layout.class_offsets.iter()).enumerate() {
            let hdr_off = base_offset + i * size_of::<SizeClassHeader>();
            let header: *mut SizeClassHeader =
                unsafe { region.get_mut::<SizeClassHeader>(hdr_off) };

            unsafe {
                (*header).slot_size = cfg.slot_size;
                (*header).slot_count = cfg.slot_count;
                (*header).free_head = AtomicU64::new(if cfg.slot_count > 0 {
                    pack(0, 0)
                } else {
                    pack(EMPTY, 0)
                });
                (*header)._pad = [0u8; 48];
            }

            let meta_ptr = region.offset(base_offset + offsets.meta_offset) as *mut VarSlotMeta;
            let data_ptr = region.offset(base_offset + offsets.data_offset);

            // Chain every slot into the free list: slot N points at slot N + 1,
            // and the last slot terminates with EMPTY.
            for slot in 0..cfg.slot_count {
                let m = unsafe { &mut *meta_ptr.add(slot as usize) };
                m.generation = AtomicU32::new(0);
                m.state = AtomicU32::new(SlotState::Free as u32);
                m.owner_peer = AtomicU32::new(0);
                m.next_free = AtomicU32::new(if slot + 1 < cfg.slot_count {
                    slot + 1
                } else {
                    EMPTY
                });
            }

            classes.push(ClassView {
                header,
                meta: meta_ptr,
                data: data_ptr,
                slot_count: cfg.slot_count,
                slot_size: cfg.slot_size,
            });
        }

        Self { classes }
    }

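    /// Attaches to a pool that was previously initialized in `region`, without
    /// resetting any headers, metadata, or free lists.
    ///
    /// # Safety
    ///
    /// The pool at `base_offset` must have been initialized with the same
    /// `configs` (e.g. as recovered by [`VarSlotPool::discover_configs`]).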
    pub unsafe fn attach(region: Region, base_offset: usize, configs: &[SizeClassConfig]) -> Self {
        assert!(
            base_offset.is_multiple_of(64),
            "base_offset must be 64-byte aligned"
        );

        let layout = Self::layout(configs);
        assert!(
            base_offset + layout.total_size <= region.len(),
            "region too small for VarSlotPool"
        );

        let mut classes = Vec::with_capacity(configs.len());

        for (i, (cfg, offsets)) in configs.iter().zip(layout.class_offsets.iter()).enumerate() {
            let hdr_off = base_offset + i * size_of::<SizeClassHeader>();
            let header: *mut SizeClassHeader =
                unsafe { region.get_mut::<SizeClassHeader>(hdr_off) };
            let meta_ptr = region.offset(base_offset + offsets.meta_offset) as *mut VarSlotMeta;
            let data_ptr = region.offset(base_offset + offsets.data_offset);

            classes.push(ClassView {
                header,
                meta: meta_ptr,
                data: data_ptr,
                slot_count: cfg.slot_count,
                slot_size: cfg.slot_size,
            });
        }

        Self { classes }
    }

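    /// Allocates a slot of at least `size` bytes for `owner_peer`, trying the
    /// smallest fitting class first and falling through to larger classes when
    /// one is exhausted. Returns `None` if every fitting class is full.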
    pub fn allocate(&self, size: u32, owner_peer: u8) -> Option<SlotRef> {
        let start = self.classes.iter().position(|c| c.slot_size >= size)?;

        for (class_idx, view) in self.classes[start..].iter().enumerate() {
            let class_idx = (start + class_idx) as u8;
            if let Some(slot_ref) = self.try_alloc_from(class_idx, view, owner_peer) {
                return Some(slot_ref);
            }
        }
        None
    }

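    /// Lock-free pop from the class free list. Every successful CAS bumps the
    /// ABA generation tag in the packed head, so a head that was popped and
    /// pushed back between our load and CAS cannot be mistaken for unchanged.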
    fn try_alloc_from(&self, class_idx: u8, view: &ClassView, owner_peer: u8) -> Option<SlotRef> {
        let header = unsafe { &*view.header };
        loop {
            let head = header.free_head.load(Ordering::Acquire);
            let (slot_idx, aba_gen) = unpack(head);

            if slot_idx == EMPTY {
                return None;
            }

            let meta = unsafe { &*view.meta.add(slot_idx as usize) };
            let next = meta.next_free.load(Ordering::Acquire);

            let new_head = pack(next, aba_gen.wrapping_add(1));
            if header
                .free_head
                .compare_exchange(head, new_head, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                // The slot is ours: bump its generation and mark it allocated.
                let new_gen = meta
                    .generation
                    .fetch_add(1, Ordering::AcqRel)
                    .wrapping_add(1);
                meta.state
                    .store(SlotState::Allocated as u32, Ordering::Release);
                meta.owner_peer.store(owner_peer as u32, Ordering::Release);

                return Some(SlotRef {
                    class_idx,
                    extent_idx: 0,
                    slot_idx,
                    generation: new_gen,
                });
            }
        }
    }

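    /// Returns a slot to its class free list. Fails with [`DoubleFreeError`]
    /// if the slot is not marked allocated or its generation tag is stale;
    /// note this check is best-effort and does not serialize racing frees of
    /// the same handle.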
    pub fn free(&self, slot_ref: SlotRef) -> Result<(), DoubleFreeError> {
        let view = &self.classes[slot_ref.class_idx as usize];
        let meta = unsafe { &*view.meta.add(slot_ref.slot_idx as usize) };

        if meta.state.load(Ordering::Acquire) != SlotState::Allocated as u32
            || meta.generation.load(Ordering::Acquire) != slot_ref.generation
        {
            return Err(DoubleFreeError { slot: slot_ref });
        }

        meta.state.store(SlotState::Free as u32, Ordering::Release);
        meta.owner_peer.store(0, Ordering::Release);

        // Lock-free push: point the slot at the current head, then CAS the
        // head to this slot with a bumped ABA generation.
        let header = unsafe { &*view.header };
        loop {
            let head = header.free_head.load(Ordering::Acquire);
            let (head_idx, aba_gen) = unpack(head);

            meta.next_free.store(head_idx, Ordering::Release);

            let new_head = pack(slot_ref.slot_idx, aba_gen.wrapping_add(1));
            if header
                .free_head
                .compare_exchange(head, new_head, Ordering::AcqRel, Ordering::Acquire)
                .is_ok()
            {
                return Ok(());
            }
        }
    }

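    /// # Safety
    ///
    /// `slot_ref` must refer to a live allocation from this pool, and the
    /// caller must ensure the returned slice (whose lifetime is unconstrained)
    /// is not aliased by other accesses and does not outlive the backing
    /// region.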
    pub unsafe fn slot_data_mut<'a>(&self, slot_ref: &SlotRef) -> &'a mut [u8] {
        let view = &self.classes[slot_ref.class_idx as usize];
        let offset = slot_ref.slot_idx as usize * view.slot_size as usize;
        unsafe { core::slice::from_raw_parts_mut(view.data.add(offset), view.slot_size as usize) }
    }

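    /// # Safety
    ///
    /// Same contract as [`VarSlotPool::slot_data_mut`], except the returned
    /// slice is shared and must not be aliased by mutable accesses.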
    pub unsafe fn slot_data<'a>(&self, slot_ref: &SlotRef) -> &'a [u8] {
        let view = &self.classes[slot_ref.class_idx as usize];
        let offset = slot_ref.slot_idx as usize * view.slot_size as usize;
        unsafe { core::slice::from_raw_parts(view.data.add(offset), view.slot_size as usize) }
    }

    /// Number of configured size classes.
    pub fn class_count(&self) -> usize {
        self.classes.len()
    }

    /// Slot size in bytes of the class at `class_idx`.
    pub fn slot_size(&self, class_idx: usize) -> u32 {
        self.classes[class_idx].slot_size
    }

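    /// Frees every slot still owned by `peer_id` across all classes, e.g.
    /// after that peer has crashed or detached. Slots that lose a race with a
    /// concurrent free are skipped.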
    pub fn reclaim_peer_slots(&self, peer_id: u8) {
        for (class_idx, view) in self.classes.iter().enumerate() {
            for slot_idx in 0..view.slot_count {
                let meta = unsafe { &*view.meta.add(slot_idx as usize) };
                let owner = meta.owner_peer.load(Ordering::Acquire);
                if owner != peer_id as u32 {
                    continue;
                }
                let state = meta.state.load(Ordering::Acquire);
                if state != SlotState::Allocated as u32 {
                    continue;
                }

                let slot_gen = meta.generation.load(Ordering::Acquire);
                // `free` re-validates state and generation, so a slot that was
                // concurrently freed or reallocated is left alone.
                let _ = self.free(SlotRef {
                    class_idx: class_idx as u8,
                    extent_idx: 0,
                    slot_idx,
                    generation: slot_gen,
                });
            }
        }
    }
}

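/// Byte offsets of one class's metadata and data arrays, relative to the pool
/// base offset.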
pub struct ClassOffsets {
    pub meta_offset: usize,
    pub data_offset: usize,
}

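/// Full layout of a pool within a region, as computed by [`VarSlotPool::layout`].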
pub struct PoolLayout {
    pub total_size: usize,
    pub class_offsets: Vec<ClassOffsets>,
}

#[cfg(all(test, not(loom)))]
mod tests {
    use super::*;
    use crate::HeapRegion;

    const CLASSES: &[SizeClassConfig] = &[
        SizeClassConfig {
            slot_size: 1024,
            slot_count: 8,
        },
        SizeClassConfig {
            slot_size: 16384,
            slot_count: 4,
        },
        SizeClassConfig {
            slot_size: 262144,
            slot_count: 2,
        },
    ];

    fn make_pool() -> (HeapRegion, VarSlotPool) {
        let size = VarSlotPool::required_size(CLASSES);
        let region = HeapRegion::new_zeroed(size);
        let pool = unsafe { VarSlotPool::init(region.region(), 0, CLASSES) };
        (region, pool)
    }

    #[test]
    fn alloc_and_free_basic() {
        let (_region, pool) = make_pool();

        let slot = pool.allocate(512, 1).expect("should allocate from class 0");
        assert_eq!(slot.class_idx, 0);
        assert_eq!(slot.slot_idx, 0);
        assert_eq!(slot.generation, 1);

        pool.free(slot).expect("free should succeed");
    }

    #[test]
    fn alloc_fills_smallest_fitting_class() {
        let (_region, pool) = make_pool();

        let slot = pool
            .allocate(2000, 0)
            .expect("should allocate from class 1");
        assert_eq!(slot.class_idx, 1);
    }

    #[test]
    fn alloc_exhausts_class_falls_through() {
        let (_region, pool) = make_pool();

        let mut slots = Vec::new();
        for _ in 0..8 {
            slots.push(pool.allocate(1, 0).expect("should allocate"));
        }
        assert!(slots.iter().all(|s| s.class_idx == 0));

        let overflow = pool.allocate(1, 0).expect("should fall through to class 1");
        assert_eq!(overflow.class_idx, 1);

        for s in slots {
            pool.free(s).unwrap();
        }
        pool.free(overflow).unwrap();
    }

    #[test]
    fn all_classes_exhausted_returns_none() {
        let (_region, pool) = make_pool();

        let mut slots = Vec::new();
        while let Some(s) = pool.allocate(1, 0) {
            slots.push(s);
        }
        assert_eq!(slots.len(), 14);
        assert!(pool.allocate(1, 0).is_none());

        for s in slots {
            pool.free(s).unwrap();
        }
    }

    #[test]
    fn free_recycles_slot() {
        let (_region, pool) = make_pool();

        let s1 = pool.allocate(1, 0).unwrap();
        pool.free(s1).unwrap();

        let s2 = pool.allocate(1, 0).unwrap();
        assert_eq!(s2.slot_idx, s1.slot_idx);
        assert_eq!(s2.generation, s1.generation + 1);
        pool.free(s2).unwrap();
    }

    #[test]
    fn double_free_detected() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(1, 0).unwrap();
        pool.free(s).unwrap();

        assert!(pool.free(s).is_err());
    }

    #[test]
    fn slot_data_write_read() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(100, 0).unwrap();
        unsafe {
            let data = pool.slot_data_mut(&s);
            data[..5].copy_from_slice(b"hello");
        }
        unsafe {
            let data = pool.slot_data_mut(&s);
            assert_eq!(&data[..5], b"hello");
        }
        pool.free(s).unwrap();
    }

    #[test]
    fn size_too_large_returns_none() {
        let (_region, pool) = make_pool();

        assert!(pool.allocate(300_000, 0).is_none());
    }

    #[test]
    fn reclaim_peer_slots() {
        let (_region, pool) = make_pool();

        let _s1 = pool.allocate(1, 7).unwrap();
        let _s2 = pool.allocate(1, 7).unwrap();
        let s3 = pool.allocate(1, 2).unwrap();

        pool.reclaim_peer_slots(7);

        // Peer 7's two slots are back on the free list; only s3 (peer 2)
        // remains allocated, so 13 of the 14 slots can be taken.
        let mut freed = Vec::new();
        while let Some(s) = pool.allocate(1, 0) {
            freed.push(s);
        }
        assert_eq!(freed.len(), 13);

        for s in freed {
            pool.free(s).unwrap();
        }
        pool.free(s3).unwrap();
    }

    #[test]
    fn layout_is_deterministic() {
        let l1 = VarSlotPool::layout(CLASSES);
        let l2 = VarSlotPool::layout(CLASSES);
        assert_eq!(l1.total_size, l2.total_size);
        for (a, b) in l1.class_offsets.iter().zip(l2.class_offsets.iter()) {
            assert_eq!(a.meta_offset, b.meta_offset);
            assert_eq!(a.data_offset, b.data_offset);
        }
    }

    #[test]
    fn owner_peer_tracked() {
        let (_region, pool) = make_pool();

        let s = pool.allocate(1, 42).unwrap();
        let view = &pool.classes[s.class_idx as usize];
        let meta = unsafe { &*view.meta.add(s.slot_idx as usize) };
        assert_eq!(meta.owner_peer.load(Ordering::Acquire), 42);

        pool.free(s).unwrap();
        assert_eq!(meta.owner_peer.load(Ordering::Acquire), 0);
    }
}

#[cfg(loom)]
#[allow(dead_code)]
mod loom_tests {
    use super::*;
    use crate::HeapRegion;
    use loom::sync::Arc;

    const LOOM_CLASSES: &[SizeClassConfig] = &[SizeClassConfig {
        slot_size: 64,
        slot_count: 2,
    }];

    fn loom_pool() -> (HeapRegion, Arc<VarSlotPool>) {
        let size = VarSlotPool::required_size(LOOM_CLASSES);
        let region = HeapRegion::new_zeroed(size);
        let pool = unsafe { VarSlotPool::init(region.region(), 0, LOOM_CLASSES) };
        (region, Arc::new(pool))
    }

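    // Two threads racing to allocate must never receive the same slot.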
    #[test]
    fn concurrent_alloc_no_aliasing() {
        loom::model(|| {
            let (_region, pool) = loom_pool();
            let pool1 = pool.clone();
            let pool2 = pool.clone();

            let t1 = loom::thread::spawn(move || pool1.allocate(1, 1));
            let t2 = loom::thread::spawn(move || pool2.allocate(1, 2));

            let s1 = t1.join().unwrap().expect("thread 1 must get a slot");
            let s2 = t2.join().unwrap().expect("thread 2 must get a slot");

            assert_ne!(s1.slot_idx, s2.slot_idx, "threads must not alias slots");

            pool.free(s1).unwrap();
            pool.free(s2).unwrap();
        });
    }

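    // A slot allocated on one thread can be freed from another.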
    #[test]
    fn alloc_then_free_cross_thread() {
        loom::model(|| {
            let (_region, pool) = loom_pool();

            let slot = pool.allocate(1, 0).expect("must allocate");

            let pool2 = pool.clone();
            let t = loom::thread::spawn(move || pool2.free(slot));

            t.join().unwrap().expect("cross-thread free must succeed");
        });
    }

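    // An allocation racing a free on the same free list must stay consistent:
    // the pop and push CAS loops may interleave in any order.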
    #[test]
    fn concurrent_alloc_and_free() {
        loom::model(|| {
            let (_region, pool) = loom_pool();
            let pool_alloc = pool.clone();
            let pool_free = pool.clone();

            let initial = pool.allocate(1, 0).expect("initial alloc");

            let t_free = loom::thread::spawn(move || {
                pool_free.free(initial).expect("free must succeed");
            });

            let t_alloc = loom::thread::spawn(move || pool_alloc.allocate(1, 0));

            t_free.join().unwrap();
            let maybe_slot = t_alloc.join().unwrap();

            // Return whatever the racing allocation produced, so every slot
            // ends up back on the free list.
            if let Some(s) = maybe_slot {
                pool.free(s).unwrap();
            }
        });
    }
}