1use std::{
29 cell::UnsafeCell, fmt::Debug, mem::MaybeUninit, ops::Deref, ptr::NonNull,
30 sync::atomic::AtomicUsize,
31};
/// Types usable as offsets/lengths inside `ArcSlice`, `ArcSingle` and
/// `Allocator`.
///
/// Implementors must convert to and from `usize`; the conversions are assumed
/// to succeed for in-range values (failures panic via `expect` at call sites).
pub trait IndexType:
    Copy
    + TryFrom<usize, Error: std::fmt::Debug>
    + TryInto<usize, Error: std::fmt::Debug>
    + PartialEq
    + Eq
    + Debug
{
}
44impl IndexType for u16 {}
45impl IndexType for u32 {}
46impl IndexType for u64 {}
47impl IndexType for u128 {}
48impl IndexType for usize {}
49
/// Metadata stored inline at the start of every chunk allocation.
#[repr(C)]
struct Header {
    /// Number of live `ChunkRef` handles sharing this chunk.
    ref_count: AtomicUsize,
    /// Bump index: count of initialized slots in `data`, and the next free slot.
    alloc_index: AtomicUsize,
    /// Total number of `MaybeUninit<T>` slots that follow the header.
    capacity: usize,
}
59
/// A chunk: a `Header` followed inline by an unsized array of element slots.
///
/// `#[repr(C)]` pins the header at offset 0 so a `*mut Buffer<T>` can be
/// fabricated from a raw allocation plus slice-length metadata
/// (see `ChunkRef::new`).
#[repr(C)]
struct Buffer<T> {
    header: Header,
    /// Slot storage. Wrapped in `UnsafeCell` because readers hold `&Buffer`
    /// while the owning `Allocator` writes fresh slots through a raw pointer.
    data: UnsafeCell<[MaybeUninit<T>]>,
}
69
70impl<T> Buffer<T> {
71 fn layout_for_capacity(capacity: usize) -> std::alloc::Layout {
72 let header_layout = std::alloc::Layout::new::<Header>();
74 let array_layout = std::alloc::Layout::array::<MaybeUninit<T>>(capacity)
75 .expect("Failed to create array layout for buffer data");
76 let (layout, _offset) = header_layout
77 .extend(array_layout)
78 .expect("Failed to extend header layout with data layout");
79 layout
80 }
81
82 fn left_space(&self) -> usize {
83 self.header.capacity
84 - self
85 .header
86 .alloc_index
87 .load(std::sync::atomic::Ordering::Acquire)
88 }
89}
90
91impl<T> Drop for Buffer<T> {
92 fn drop(&mut self) {
93 let len = self
95 .header
96 .alloc_index
97 .load(std::sync::atomic::Ordering::Acquire);
98 unsafe {
100 let data = &mut *self.data.get();
101 data.iter_mut().take(len).for_each(|item| {
102 item.assume_init_drop();
103 });
104 }
105 }
106}
107
/// Reference-counted pointer to a `Buffer<T>` chunk; may be null (no chunk).
///
/// `Option<NonNull<_>>` uses the pointer niche, so this stays pointer-sized.
struct ChunkRef<T> {
    inner: Option<NonNull<Buffer<T>>>,
}
114
115impl<T> Clone for ChunkRef<T> {
116 fn clone(&self) -> Self {
117 if self.inner.is_none() {
118 return Self { inner: None };
119 }
120 let buffer = unsafe { self.inner.as_ref().unwrap().as_ref() };
122 buffer
123 .header
124 .ref_count
125 .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
126 Self { inner: self.inner }
127 }
128}
129
impl<T> Drop for ChunkRef<T> {
    fn drop(&mut self) {
        // Null handles own nothing.
        if self.inner.is_none() {
            return;
        }
        let inner_ref = self.inner.as_ref().unwrap();
        let buffer = unsafe { inner_ref.as_ref() };
        // AcqRel: the Release half publishes this handle's writes to whichever
        // thread performs the final decrement; the Acquire half makes all
        // prior handles' writes visible before the buffer is destroyed.
        if buffer
            .header
            .ref_count
            .fetch_sub(1, std::sync::atomic::Ordering::AcqRel)
            == 1
        {
            unsafe {
                // Last reference: run Buffer::drop (drops the initialized
                // slots), then free the raw allocation using the same layout
                // it was allocated with in ChunkRef::new.
                let layout = Buffer::<T>::layout_for_capacity(buffer.header.capacity);
                std::ptr::drop_in_place(inner_ref.as_ptr());
                std::alloc::dealloc(inner_ref.as_ptr() as *mut u8, layout);
            }
        }
    }
}
153
154impl<T> ChunkRef<T> {
155 fn new(capacity: usize) -> Self {
156 let layout = Buffer::<T>::layout_for_capacity(capacity);
158
159 let ptr = unsafe { std::alloc::alloc(layout) };
160 if ptr.is_null() {
161 std::alloc::handle_alloc_error(layout);
162 }
163
164 let ptr = std::ptr::slice_from_raw_parts_mut(ptr as *mut (), capacity) as *mut Buffer<T>;
166
167 unsafe {
168 let header = &mut (*ptr).header;
170 header
171 .ref_count
172 .store(1, std::sync::atomic::Ordering::Release);
173 header
174 .alloc_index
175 .store(0, std::sync::atomic::Ordering::Release);
176 header.capacity = capacity;
177 }
178
179 Self {
180 inner: Some(NonNull::new(ptr).expect("Failed to create NonNull pointer")),
181 }
182 }
183
184 unsafe fn buffer(&self) -> &Buffer<T> {
185 unsafe {
186 self.inner
187 .as_ref()
188 .expect("Dereferenced null chunk")
189 .as_ref()
190 }
191 }
192
193 fn is_null(&self) -> bool {
194 self.inner.is_none()
195 }
196
197 fn null() -> Self {
198 Self { inner: None }
199 }
200}
201
/// A shared, immutable view of `len` consecutive elements inside a chunk.
///
/// Cloning is cheap (an atomic refcount bump); the chunk is freed when the
/// last handle referencing it is dropped.
pub struct ArcSlice<T, L: IndexType = u32> {
    /// Keep-alive handle to the owning chunk; null for the empty slice.
    chunk: ChunkRef<T>,
    /// Offset of the first element within the chunk's slot array.
    index: L,
    /// Number of elements in the view.
    len: L,
}
214
// SAFETY: an ArcSlice only hands out shared `&T` and its refcount is atomic,
// so it behaves like `Arc<[T]>` across threads; hence `T: Send + Sync`.
unsafe impl<T, L: IndexType> Send for ArcSlice<T, L> where T: Send + Sync {}
unsafe impl<T, L: IndexType> Sync for ArcSlice<T, L> where T: Send + Sync {}
217
218impl<T, L: IndexType> Clone for ArcSlice<T, L> {
219 fn clone(&self) -> Self {
220 Self {
221 chunk: self.chunk.clone(),
222 index: self.index,
223 len: self.len,
224 }
225 }
226}
227
228impl<T, L: IndexType> ArcSlice<T, L> {
229 pub fn get(&self) -> &[T] {
233 if self.chunk.is_null() {
234 return &[];
236 }
237 unsafe {
238 let buffer = self.chunk.buffer();
239 let data = &*buffer.data.get();
240 let start = self
241 .index
242 .try_into()
243 .expect("Index exceeds index type capacity");
244 let len = self
245 .len
246 .try_into()
247 .expect("Length exceeds index type capacity");
248 let slice = &data[start..start + len];
249 std::mem::transmute::<&[MaybeUninit<T>], &[T]>(slice)
251 }
252 }
253
254 pub fn empty() -> Self {
256 Self {
257 chunk: ChunkRef::null(),
258 index: 0
259 .try_into()
260 .expect("Index 0 should be valid for any index type"),
261 len: 0
262 .try_into()
263 .expect("Length 0 should be valid for any index type"),
264 }
265 }
266}
267
// Deref to the element slice so an `ArcSlice` can be used wherever `&[T]` is.
impl<T, L: IndexType> Deref for ArcSlice<T, L> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        self.get()
    }
}
275
/// A shared, immutable handle to exactly one element inside a chunk.
///
/// Constructed only from length-1 `ArcSlice`s, so `chunk` is expected to be
/// non-null (zero-length slices are the only null-chunk handles).
pub struct ArcSingle<T, L: IndexType = u32> {
    /// Keep-alive handle to the owning chunk.
    chunk: ChunkRef<T>,
    /// Slot offset of the element within the chunk.
    index: L,
}
280
// SAFETY: same argument as for ArcSlice — only shared `&T` access plus an
// atomic refcount, so thread-safety reduces to `T: Send + Sync`.
unsafe impl<T, L: IndexType> Send for ArcSingle<T, L> where T: Send + Sync {}
unsafe impl<T, L: IndexType> Sync for ArcSingle<T, L> where T: Send + Sync {}
283
284impl<T, L: IndexType> Clone for ArcSingle<T, L> {
285 fn clone(&self) -> Self {
286 Self {
287 chunk: self.chunk.clone(),
288 index: self.index,
289 }
290 }
291}
292
293impl<T, L: IndexType> ArcSingle<T, L> {
294 pub fn get(&self) -> &T {
298 unsafe {
299 let buffer = self.chunk.buffer();
300 let data = &*buffer.data.get();
301 let start = self
302 .index
303 .try_into()
304 .expect("Index exceeds index type capacity");
305 let slice = &data[start];
306 std::mem::transmute::<&MaybeUninit<T>, &T>(slice)
308 }
309 }
310
311 pub fn from_slice(handle: ArcSlice<T, L>) -> Self {
316 assert_eq!(
317 handle.len,
318 1.try_into().expect("Length exceeds index type capacity"),
319 "ArcSingle can only be created from a slice of length 1"
320 );
321 Self {
322 chunk: handle.chunk.clone(),
323 index: handle.index,
324 }
325 }
326}
327
// Deref to the element so an `ArcSingle` can be used wherever `&T` is.
impl<T, L: IndexType> Deref for ArcSingle<T, L> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.get()
    }
}
335
336impl<T, L: IndexType> TryFrom<ArcSlice<T, L>> for ArcSingle<T, L> {
337 type Error = &'static str;
338
339 fn try_from(value: ArcSlice<T, L>) -> Result<Self, Self::Error> {
340 if value.len != 1.try_into().expect("Length exceeds index type capacity") {
341 return Err("ArcSingle can only be created from a slice of length 1");
342 }
343 Ok(Self {
344 chunk: value.chunk,
345 index: value.index,
346 })
347 }
348}
349
350impl<T, L: IndexType> From<ArcSingle<T, L>> for ArcSlice<T, L> {
351 fn from(value: ArcSingle<T, L>) -> Self {
352 Self {
353 chunk: value.chunk,
354 index: value.index,
355 len: 1.try_into().expect("Length exceeds index type capacity"),
356 }
357 }
358}
359
/// Bump allocator handing out `ArcSlice`/`ArcSingle` handles.
///
/// Allocations are carved out of a current chunk of `N` slots; when a request
/// does not fit, a fresh chunk is started, and oversized requests get a
/// dedicated chunk. Old chunks stay alive as long as any handle references
/// them (refcounted via `ChunkRef`).
pub struct Allocator<T, L: IndexType = u32, const N: usize = 256> {
    /// The chunk new allocations are currently bumped into.
    chunk: ChunkRef<T>,
    /// Ties the index type parameter `L` to the allocator without storing one.
    phantom: std::marker::PhantomData<L>,
}
369
370impl<T, L: IndexType, const N: usize> std::fmt::Debug for Allocator<T, L, N> {
371 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
372 write!(
373 f,
374 "Allocator<T={}, L={}, N={}>",
375 std::any::type_name::<T>(),
376 std::any::type_name::<L>(),
377 N
378 )
379 }
380}
381
impl<T, L: IndexType, const N: usize> Default for Allocator<T, L, N> {
    /// Equivalent to [`Allocator::new`].
    fn default() -> Self {
        Self::new()
    }
}
387
388impl<T, L: IndexType, const N: usize> Allocator<T, L, N> {
389 pub fn new() -> Self {
391 Self {
392 chunk: ChunkRef::new(N),
393 phantom: std::marker::PhantomData,
394 }
395 }
396
397 pub fn alloc<F>(&mut self, len: L, mut init: F) -> ArcSlice<T, L>
401 where
402 F: FnMut(usize) -> T,
403 {
404 if len == 0.try_into().expect("Length exceeds index type capacity") {
405 return ArcSlice {
407 chunk: ChunkRef::null(),
408 index: 0
409 .try_into()
410 .expect("Index 0 should be valid for any index type"),
411 len: 0
412 .try_into()
413 .expect("Length 0 should be valid for any index type"),
414 };
415 }
416 let len: usize = len.try_into().expect("Length exceeds index type capacity");
417 let left_space = unsafe { self.chunk.buffer().left_space() };
418 if N < len {
419 let chunk: ChunkRef<T> = ChunkRef::new(len);
421 unsafe {
422 let buffer = chunk.buffer(); let data = &mut *buffer.data.get();
425 for (i, item) in data.iter_mut().take(len).enumerate() {
426 item.as_mut_ptr().write(init(i));
427 }
428 buffer
430 .header
431 .alloc_index
432 .store(len, std::sync::atomic::Ordering::Release);
433 }
434 return ArcSlice {
435 chunk,
436 index: 0
437 .try_into()
438 .expect("Index 0 should be valid for any index type"),
439 len: len.try_into().expect("Length exceeds index type capacity"),
440 };
441 }
442 if left_space < len {
443 let chunk: ChunkRef<T> = ChunkRef::new(N);
445 unsafe {
446 let buffer = chunk.buffer(); let data = &mut *buffer.data.get();
449 for (i, item) in data.iter_mut().take(len).enumerate() {
450 item.as_mut_ptr().write(init(i));
451 }
452 buffer
454 .header
455 .alloc_index
456 .store(len, std::sync::atomic::Ordering::Release);
457 }
458 if left_space < N - len {
459 self.chunk = chunk.clone();
461 }
462 return ArcSlice {
463 chunk,
464 index: 0
465 .try_into()
466 .expect("Index 0 should be valid for any index type"),
467 len: len.try_into().expect("Length exceeds index type capacity"),
468 };
469 }
470 unsafe {
472 let buffer = self.chunk.buffer();
473
474 let index = buffer
476 .header
477 .alloc_index
478 .load(std::sync::atomic::Ordering::Relaxed);
479
480 let base_ptr = buffer.data.get() as *mut MaybeUninit<T>;
483
484 for i in 0..len {
485 let val = init(i);
487 base_ptr.add(index + i).write(MaybeUninit::new(val));
489 }
490 buffer
492 .header
493 .alloc_index
494 .store(index + len, std::sync::atomic::Ordering::Release);
495 ArcSlice {
496 chunk: self.chunk.clone(),
497 index: index.try_into().expect("Index cap"),
498 len: len.try_into().expect("Length cap"),
499 }
500 }
501 }
502
503 pub fn try_alloc<F, E>(&mut self, len: L, mut init: F) -> Result<ArcSlice<T, L>, E>
508 where
509 F: FnMut(usize) -> Result<T, E>,
510 {
511 if len == 0.try_into().expect("Length exceeds index type capacity") {
512 return Ok(ArcSlice {
514 chunk: ChunkRef::null(),
515 index: 0
516 .try_into()
517 .expect("Index 0 should be valid for any index type"),
518 len: 0
519 .try_into()
520 .expect("Length 0 should be valid for any index type"),
521 });
522 }
523 let len: usize = len.try_into().expect("Length exceeds index type capacity");
524 let left_space = unsafe { self.chunk.buffer().left_space() };
525 if N < len {
526 let chunk: ChunkRef<T> = ChunkRef::new(len);
528 unsafe {
529 let buffer = chunk.buffer(); let data = &mut *buffer.data.get();
532 for (i, item) in data.iter_mut().take(len).enumerate() {
533 match init(i) {
534 Ok(val) => item.as_mut_ptr().write(val),
535 Err(e) => {
536 for item in data.iter_mut().take(i) {
538 item.assume_init_drop();
539 }
540 return Err(e);
541 }
542 }
543 }
544 buffer
546 .header
547 .alloc_index
548 .store(len, std::sync::atomic::Ordering::Release);
549 }
550 return Ok(ArcSlice {
551 chunk,
552 index: 0
553 .try_into()
554 .expect("Index 0 should be valid for any index type"),
555 len: len.try_into().expect("Length exceeds index type capacity"),
556 });
557 }
558 if left_space < len {
559 let chunk: ChunkRef<T> = ChunkRef::new(N);
561 unsafe {
562 let buffer = chunk.buffer(); let data = &mut *buffer.data.get();
565 for (i, item) in data.iter_mut().take(len).enumerate() {
566 match init(i) {
567 Ok(val) => item.as_mut_ptr().write(val),
568 Err(e) => {
569 for item in data.iter_mut().take(i) {
571 item.assume_init_drop();
572 }
573 return Err(e);
574 }
575 }
576 }
577 buffer
579 .header
580 .alloc_index
581 .store(len, std::sync::atomic::Ordering::Release);
582 }
583 if left_space < N - len {
584 self.chunk = chunk.clone();
586 }
587 return Ok(ArcSlice {
588 chunk,
589 index: 0
590 .try_into()
591 .expect("Index 0 should be valid for any index type"),
592 len: len.try_into().expect("Length exceeds index type capacity"),
593 });
594 }
595 unsafe {
597 let buffer = self.chunk.buffer();
598
599 let index = buffer
601 .header
602 .alloc_index
603 .load(std::sync::atomic::Ordering::Relaxed);
604
605 let base_ptr = buffer.data.get() as *mut MaybeUninit<T>;
608
609 for i in 0..len {
610 match init(i) {
612 Ok(val) => base_ptr.add(index + i).write(MaybeUninit::new(val)),
614 Err(e) => {
615 for j in 0..i {
617 std::ptr::drop_in_place(base_ptr.add(index + j) as *mut T);
618 }
619 return Err(e);
620 }
621 }
622 }
623 buffer
625 .header
626 .alloc_index
627 .store(index + len, std::sync::atomic::Ordering::Release);
628 Ok(ArcSlice {
629 chunk: self.chunk.clone(),
630 index: index.try_into().expect("Index cap"),
631 len: len.try_into().expect("Length cap"),
632 })
633 }
634 }
635
636 pub fn alloc_single<F>(&mut self, init: F) -> ArcSingle<T, L>
638 where
639 F: FnOnce() -> T,
640 {
641 let mut init = Some(init);
642 let slice = self.alloc(
643 1.try_into().expect("Length exceeds index type capacity"),
644 |_| init.take().expect("init called more than once")(),
645 );
646 ArcSingle::from_slice(slice)
647 }
648
649 pub fn try_alloc_single<F, E>(&mut self, init: F) -> Result<ArcSingle<T, L>, E>
651 where
652 F: FnOnce() -> Result<T, E>,
653 {
654 let mut init = Some(init);
655 self.try_alloc(
656 1.try_into().expect("Length exceeds index type capacity"),
657 |_| init.take().expect("init called more than once")(),
658 )
659 .map(ArcSingle::from_slice)
660 }
661
662 pub fn alloc_value(&mut self, value: T) -> ArcSingle<T, L> {
664 self.alloc_single(|| value)
665 }
666
667 pub fn alloc_empty() -> ArcSlice<T, L> {
669 ArcSlice::empty()
670 }
671}
672
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Basic alloc + clone: handles remain valid and independent, and clones
    // observe the same contents as the original handle.
    #[test]
    fn test_allocator() {
        let mut allocator: Allocator<u32, u32, 16> = Allocator::new();
        let handle1 = allocator.alloc(10, |i| i as u32);
        let handle2 = allocator.alloc(20, |i| (i + 10) as u32);
        let handle3 = handle1.clone();
        assert_eq!(handle1.get(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
        assert_eq!(
            handle2.get(),
            &[
                10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29
            ]
        );
        assert_eq!(handle3.get(), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
    }

    // Test helper: counts drops so tests can assert exactly-once destruction.
    #[derive(Debug)]
    struct DropCounter<'a> {
        #[allow(dead_code)]
        value: usize,
        drops: &'a AtomicUsize,
    }

    impl<'a> Drop for DropCounter<'a> {
        fn drop(&mut self) {
            self.drops.fetch_add(1, Ordering::SeqCst);
        }
    }

    // Dropping the allocator and all handles must drop each element once.
    #[test]
    fn test_drop_runs_exactly_once_per_item() {
        static DROPS: AtomicUsize = AtomicUsize::new(0);

        {
            let mut allocator: Allocator<DropCounter, u32, 8> = Allocator::new();
            let handle = allocator.alloc(8, |i| DropCounter {
                value: i,
                drops: &DROPS,
            });
            assert_eq!(handle.get().len(), 8);
            let _handle2 = handle.clone();
            // Clones share storage: nothing may be dropped while handles live.
            assert_eq!(DROPS.load(Ordering::SeqCst), 0);
        }

        // All handles and the allocator are gone: 8 elements, 8 drops.
        assert_eq!(DROPS.load(Ordering::SeqCst), 8);
    }

    // Mixes in-chunk allocations with an oversized (dedicated-chunk) one and
    // checks the total drop count across all chunks.
    #[test]
    fn test_multiple_chunks_and_large_allocs() {
        static DROPS: AtomicUsize = AtomicUsize::new(0);

        {
            let mut allocator: Allocator<DropCounter, u32, 4> = Allocator::new();
            let h1 = allocator.alloc(3, |i| DropCounter {
                value: i,
                drops: &DROPS,
            });
            let h2 = allocator.alloc(2, |i| DropCounter {
                value: i + 3,
                drops: &DROPS,
            });
            let h3 = allocator.alloc(10, |i| DropCounter {
                value: i + 5,
                drops: &DROPS,
            });

            assert_eq!(h1.get().len(), 3);
            assert_eq!(h2.get().len(), 2);
            assert_eq!(h3.get().len(), 10);
            let _ = (h1, h2, h3);
        }

        assert_eq!(DROPS.load(Ordering::SeqCst), 15);
    }

    // Zero-sized element types and zero-length allocations must both work.
    #[test]
    fn test_zst_allocations() {
        let mut allocator: Allocator<(), u32, 16> = Allocator::new();
        let h1 = allocator.alloc(0, |_| ());
        let h2 = allocator.alloc(8, |_| ());
        assert_eq!(h1.get().len(), 0);
        assert_eq!(h2.get().len(), 8);
    }

    // A handle into an old chunk must keep that chunk alive after the
    // allocator rotates to a new chunk.
    #[test]
    fn test_handle_survives_chunk_rotation() {
        let mut allocator: Allocator<u32, u32, 4> = Allocator::new();
        let h1 = allocator.alloc(4, |i| i as u32);
        let _h2 = allocator.alloc(3, |i| (i + 10) as u32);
        let h3 = allocator.alloc(4, |i| (i + 20) as u32);

        assert_eq!(h1.get(), &[0, 1, 2, 3]);
        assert_eq!(h3.get(), &[20, 21, 22, 23]);
    }

    // Many 1-element allocations spanning several chunk rotations.
    #[test]
    fn test_many_small_allocations() {
        let mut allocator: Allocator<u32, u32, 8> = Allocator::new();
        let mut handles = Vec::new();
        for i in 0..32 {
            let h = allocator.alloc(1, |_| i as u32);
            handles.push(h);
        }
        for (i, h) in handles.iter().enumerate() {
            assert_eq!(h.get(), &[(i as u32)]);
        }
    }

    // Extra scenarios run only under `cargo miri test` to exercise the
    // unsafe code under the interpreter's aliasing/UB checks.
    #[cfg(miri)]
    mod miri_tests {
        use super::*;
        use std::cell::Cell;

        #[test]
        fn miri_stress_realloc_and_drop() {
            let mut allocator: Allocator<u64, u32, 8> = Allocator::new();
            let h1 = allocator.alloc(8, |i| i as u64);
            let h2 = allocator.alloc(1, |i| (i + 100) as u64);
            let h3 = allocator.alloc(16, |i| (i + 200) as u64);
            assert_eq!(h1.get()[0], 0);
            assert_eq!(h2.get()[0], 100);
            assert_eq!(h3.get()[0], 200);
            let _ = (h1, h2, h3);
        }

        // Flips a flag on drop so use-after-free ordering can be observed.
        #[derive(Debug)]
        struct Poison<'a> {
            alive: &'a Cell<bool>,
        }

        impl<'a> Drop for Poison<'a> {
            fn drop(&mut self) {
                self.alive.set(false);
            }
        }

        #[test]
        fn miri_use_after_free_guard() {
            let flag = Cell::new(true);
            {
                let mut allocator: Allocator<Poison, u32, 4> = Allocator::new();
                let handle = allocator.alloc(4, |_| Poison { alive: &flag });
                assert!(flag.get());
                drop(handle);
                // The allocator still holds the chunk: elements not dropped yet.
                assert!(flag.get());
            }
            // Allocator gone too: the elements must have been dropped.
            assert!(!flag.get());
        }

        #[test]
        fn miri_many_small_allocations() {
            let mut allocator: Allocator<u64, u32, 4> = Allocator::new();
            let mut handles = Vec::new();
            for i in 0..64u64 {
                let h = allocator.alloc(1, |_| i);
                handles.push(h);
            }
            for (i, h) in handles.iter().enumerate() {
                assert_eq!(h.get(), &[(i as u64)]);
            }
        }

        #[test]
        fn miri_clone_stress() {
            let mut allocator: Allocator<u32, u32, 8> = Allocator::new();
            let h = allocator.alloc(8, |i| i as u32);
            let mut clones = Vec::new();
            for _ in 0..32 {
                clones.push(h.clone());
            }
            for c in clones.iter() {
                assert_eq!(c.get()[0], 0);
            }
            drop(h);
            // Remaining clones must still keep the chunk alive and readable.
            for c in clones.iter() {
                assert_eq!(c.get()[7], 7);
            }
        }
    }
}