use std::cell::UnsafeCell;
use std::mem::{ManuallyDrop, MaybeUninit};
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};

const MAX_CAPACITY: usize = isize::MAX as usize;

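// Each cell is tracked by two flag bits packed into shared `AtomicUsize`
// words: an acquire flag, set when a write begins, and a release flag, set
// when it completes. A cell counts as initialized only when both are set.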
const IS_INIT_BITMASK_LEN: usize = 2;
const SET_ACQUIRE_FLAG: usize = 1 << 1;
const SET_RELEASE_FLAG: usize = 1 << 0;
const IS_INIT_BITMASK: usize = SET_ACQUIRE_FLAG | SET_RELEASE_FLAG;

const BITS_PER_BYTE: usize = 8;

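/// Maps a cell index to the location of its flag bits, returning the index
/// of the `AtomicUsize` that holds them and the bit offset within it. On a
/// 64-bit target, for example, cell 40 maps to bits 16..18 of atomic 1.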
#[inline(always)]
fn atomic_addr(index: usize) -> (usize, usize) {
    let index = IS_INIT_BITMASK_LEN * index;
    (index / bits_per_atomic(), index % bits_per_atomic())
}

#[inline(always)]
fn atomic_size() -> usize {
    std::mem::size_of::<usize>()
}

#[inline(always)]
fn bits_per_atomic() -> usize {
    atomic_size() * BITS_PER_BYTE
}

enum Index {
    /// Two initialization flag bits per cell, packed into boxed atomics.
    Indices(Box<[AtomicUsize]>),
    /// A single count of stored values, used when `T` is zero-sized.
    Zst(AtomicUsize),
}

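/// A fixed-capacity array of write-once cells that can be shared across
/// threads: each cell is set at most once and may be read concurrently
/// after the write completes.
///
/// A minimal usage sketch:
///
/// ```ignore
/// let array = AtomicOnceCellArray::with_capacity(4);
/// array.set(2, 99);
/// assert_eq!(array.get(2), &99);
/// ```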
pub struct AtomicOnceCellArray<T> {
    data: Box<[MaybeUninit<UnsafeCell<T>>]>,
    indices: Index,
}

impl<T> AtomicOnceCellArray<T> {
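    /// Creates an array of `capacity` uninitialized cells.
    ///
    /// Panics if `capacity` exceeds `isize::MAX`.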
    pub fn with_capacity(capacity: usize) -> Self {
        if capacity > MAX_CAPACITY {
            panic!("capacity may not exceed {}", MAX_CAPACITY);
        }

        let mut data = Vec::with_capacity(capacity);
        unsafe {
            // Safety: `MaybeUninit` places no validity requirements on its
            // contents, so the length can be set without initializing them.
            data.set_len(capacity);
        };

        // Zero-sized types need no per-cell flags (nor does a zero-capacity
        // array); an empty Vec here selects the `Index::Zst` counter below.
        let indices = if std::mem::size_of::<T>() > 0 && capacity > 0 {
            let (max_atomic_index, _atomic_offset) = atomic_addr(capacity);
            let mut indices = Vec::with_capacity(max_atomic_index + 1);
            for _ in 0..=max_atomic_index {
                indices.push(AtomicUsize::new(0));
            }
            indices
        } else {
            Vec::with_capacity(0)
        };

        Self {
            data: data.into_boxed_slice(),
            indices: if indices.capacity() > 0 {
                Index::Indices(indices.into_boxed_slice())
            } else {
                Index::Zst(AtomicUsize::new(0))
            },
        }
    }

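    // `start_set` and `end_set` bracket a write: `start_set` atomically sets
    // the acquire flag, panicking if either flag was already set, and
    // `end_set` sets the release flag once the value is in place, which is
    // the state `get` checks for.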
    #[inline(always)]
    fn start_set(
        &self,
        indices: &[AtomicUsize],
        index: usize,
    ) -> (usize, usize) {
        let addr = atomic_addr(index);
        let set_acquire_flag = SET_ACQUIRE_FLAG << addr.1;
        match indices[addr.0].fetch_update(Ordering::Acquire, Ordering::Relaxed, |atomic_val| {
            Some(atomic_val | set_acquire_flag)
        }) {
            Ok(atomic_val) => {
                if atomic_val & (IS_INIT_BITMASK << addr.1) > 0 {
                    panic!("index {} cannot be set more than once", index);
                }
            }
            _ => unreachable!(),
        };

        addr
    }

    #[inline(always)]
    fn end_set(
        &self,
        indices: &[AtomicUsize],
        addr: (usize, usize),
    ) {
        let set_release_flag = SET_RELEASE_FLAG << addr.1;
        match indices[addr.0].fetch_update(Ordering::Release, Ordering::Relaxed, |atomic_val| {
            Some(atomic_val | set_release_flag)
        }) {
            Ok(_) => {}
            _ => unreachable!(),
        };
    }

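    /// Stores `val` at `index`, which must not have been set before.
    ///
    /// Panics if `index` is out of range or the cell was already set. For
    /// zero-sized `T` there is no per-cell state; each call is counted
    /// against the total capacity instead.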
    pub fn set(
        &self,
        index: usize,
        val: T,
    ) {
        if index >= self.capacity() {
            panic!("index {} must be < capacity {}", index, self.capacity());
        }

        if std::mem::size_of::<T>() == 0 {
            // A zero-sized value occupies no storage: forget it and count it,
            // so that `drop` can later run the matching number of drops. A
            // single atomic update enforces the capacity limit even when two
            // threads race on the last slot.
            let num_initialized = self.zst();
            let capacity = self.capacity();
            match num_initialized.fetch_update(Ordering::Release, Ordering::Acquire, |n| {
                if n < capacity {
                    Some(n + 1)
                } else {
                    None
                }
            }) {
                Ok(_) => {
                    let _ = ManuallyDrop::new(val);
                }
                Err(_) => {
                    panic!("capacity overflow");
                }
            }

            return;
        }

        let indices = self.indices();
        let addr = self.start_set(indices, index);

        {
            let maybe_uninit = self.ptr_to_maybe_uninit(index);
            unsafe {
                let ptr = AtomicOnceCellArray::maybe_uninit_as_ptr(maybe_uninit);
                AtomicOnceCellArray::unsafe_cell_raw_get(ptr).write(val);
            }
        }

        self.end_set(indices, addr);
    }

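    /// Returns a reference to the value at `index`.
    ///
    /// Panics if `index` is out of range or the cell is not initialized.
    /// For zero-sized `T`, initialization is tracked only in aggregate, so
    /// any in-range index is readable once one `set` has completed.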
    pub fn get(
        &self,
        index: usize,
    ) -> &T {
        if index >= self.capacity() {
            panic!("index {} must be < capacity {}", index, self.capacity());
        }

        if std::mem::size_of::<T>() == 0 {
            // Zero-sized values are tracked only in aggregate: any index
            // counts as initialized once at least one `set` has completed.
            let num_initialized = self.zst().load(Ordering::Acquire);
            if num_initialized == 0 {
                panic!("index {} is not initialized", index);
            }
        } else {
            let indices = self.indices();

            let (atomic_index, atomic_offset) = atomic_addr(index);
            let atomic_val = indices[atomic_index].load(Ordering::Acquire);

            // Both flags must be set, i.e. the write must have completed.
            let is_init_bitmask = IS_INIT_BITMASK << atomic_offset;
            if atomic_val & is_init_bitmask != is_init_bitmask {
                panic!("index {} is not initialized", index);
            }
        }

        let maybe_uninit = self.ptr_to_maybe_uninit(index);
        let assume_init = unsafe {
            let maybe_uninit_ref = maybe_uninit.as_ref().unwrap();

            AtomicOnceCellArray::maybe_uninit_assume_init_ref(maybe_uninit_ref)
        };

        unsafe { &*assume_init.get() }
    }

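    /// Returns the entire backing storage as a slice of `T`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that every cell has been initialized and that
    /// no `set` is still in flight; otherwise uninitialized memory is read.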
    pub unsafe fn get_all_unchecked(&self) -> &[T] {
        std::mem::transmute::<&[MaybeUninit<UnsafeCell<T>>], &[T]>(&self.data[..])
    }

    pub fn capacity(&self) -> usize {
        self.data.len()
    }

    /// The initialization counter; only meaningful when `T` is zero-sized.
    #[inline(always)]
    fn zst(&self) -> &AtomicUsize {
        if std::mem::size_of::<T>() != 0 {
            unreachable!();
        }

        match &self.indices {
            Index::Indices(_) => {
                unreachable!()
            }
            Index::Zst(num_initialized) => num_initialized,
        }
    }

    /// The per-cell flag words; only meaningful when `T` has a nonzero size.
    #[inline(always)]
    fn indices(&self) -> &[AtomicUsize] {
        if std::mem::size_of::<T>() == 0 {
            unreachable!();
        }

        match &self.indices {
            Index::Indices(indices) => indices,
            Index::Zst(_) => {
                unreachable!()
            }
        }
    }

    #[inline(always)]
    fn ptr_to_maybe_uninit(
        &self,
        index: usize,
    ) -> *const MaybeUninit<UnsafeCell<T>> {
        unsafe {
            // Safety: `index` has been bounds-checked by the caller.
            self.data.as_ptr().add(index)
        }
    }

    #[inline(always)]
    fn ptr_to_maybe_uninit_mut(
        &mut self,
        index: usize,
    ) -> *mut MaybeUninit<UnsafeCell<T>> {
        unsafe {
            // Safety: `index` has been bounds-checked by the caller.
            self.data.as_mut_ptr().add(index)
        }
    }

    // The helpers below are plain pointer casts standing in for the
    // corresponding `MaybeUninit` and `UnsafeCell` accessors.

    #[inline(always)]
    unsafe fn maybe_uninit_as_ptr(
        maybe_uninit: *const MaybeUninit<UnsafeCell<T>>
    ) -> *const UnsafeCell<T> {
        maybe_uninit as *const _ as *const UnsafeCell<T>
    }

    #[inline(always)]
    unsafe fn maybe_uninit_as_mut_ptr(
        maybe_uninit: *mut MaybeUninit<UnsafeCell<T>>
    ) -> *mut UnsafeCell<T> {
        maybe_uninit as *mut _ as *mut UnsafeCell<T>
    }

    #[inline(always)]
    unsafe fn unsafe_cell_raw_get(cell: *const UnsafeCell<T>) -> *mut T {
        cell as *const T as *mut T
    }

    #[inline(always)]
    unsafe fn maybe_uninit_assume_init_ref(
        maybe_uninit: &MaybeUninit<UnsafeCell<T>>
    ) -> &UnsafeCell<T> {
        &*maybe_uninit.as_ptr()
    }
}

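// Drop only touches cells whose writes completed. Relaxed loads are enough
// here: `drop` takes `&mut self`, so the thread dropping the array must
// already have synchronized with every writer to obtain exclusive access.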
impl<T> Drop for AtomicOnceCellArray<T> {
    fn drop(&mut self) {
        if std::mem::size_of::<T>() == 0 {
            // Run one drop per counted zero-sized value; for a ZST every
            // element lives at the same zero-sized location.
            let num_initialized = self.zst().load(Ordering::Relaxed);
            for _ in 0..num_initialized {
                let maybe_uninit = self.ptr_to_maybe_uninit_mut(0);
                unsafe {
                    ptr::drop_in_place(AtomicOnceCellArray::maybe_uninit_as_mut_ptr(maybe_uninit))
                }
            }
            return;
        }

        for index in 0..self.capacity() {
            let is_initialized = {
                let indices = self.indices();

                let (atomic_index, atomic_offset) = atomic_addr(index);
                let atomic_val = indices[atomic_index].load(Ordering::Relaxed);

                let is_init_bitmask = IS_INIT_BITMASK << atomic_offset;

                atomic_val & is_init_bitmask == is_init_bitmask
            };

            // Cells whose write never started (or never completed) are left
            // untouched; only fully initialized cells are dropped.
            if is_initialized {
                let maybe_uninit = self.ptr_to_maybe_uninit_mut(index);
                unsafe {
                    ptr::drop_in_place(AtomicOnceCellArray::maybe_uninit_as_mut_ptr(maybe_uninit))
                }
            }
        }
    }
}

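// Safety: `set` moves values of `T` in from other threads (hence `Send`) and
// `get` hands out `&T` across threads (hence `Sync`); publication of each
// cell is ordered by the acquire/release flag protocol above.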
unsafe impl<T: Send + Sync> Sync for AtomicOnceCellArray<T> {}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::mpsc::{Receiver, Sender};
    use std::sync::{mpsc, Arc};
    use std::{panic, thread};

    struct DroppableElement {
        id: usize,
        sender: Sender<usize>,
    }

    impl DroppableElement {
        pub fn new(
            id: usize,
            sender: &Sender<usize>,
        ) -> Self {
            Self {
                id,
                sender: sender.clone(),
            }
        }
    }

    impl Drop for DroppableElement {
        fn drop(&mut self) {
            let _ = self.sender.send(self.id);
        }
    }

    fn default_drop() -> (AtomicOnceCellArray<DroppableElement>, Receiver<usize>) {
        let array = AtomicOnceCellArray::with_capacity(10);

        let receiver = {
            let (sender, receiver) = mpsc::channel();
            array.set(3, DroppableElement::new(3, &sender));
            array.set(6, DroppableElement::new(6, &sender));
            receiver
        };

        (array, receiver)
    }

    #[test]
    fn test_drop() {
        let (array, receiver) = default_drop();

        assert_eq!(receiver.try_recv().ok(), None);

        std::mem::drop(array);

        let indices = receiver.iter().collect::<Vec<_>>();
        assert_eq!(indices.len(), 2);
        assert_eq!(indices[0], 3);
        assert_eq!(indices[1], 6);
    }

    #[test]
    fn test_drop_panic() {
        let (array, receiver) = default_drop();

        assert_eq!(receiver.try_recv().ok(), None);

        let result = thread::spawn(move || {
            array.get(4);
        })
        .join();

        assert!(result.is_err());

        let indices = receiver.iter().collect::<Vec<_>>();
        assert_eq!(indices.len(), 2);
        assert_eq!(indices[0], 3);
        assert_eq!(indices[1], 6);
    }

    #[test]
    fn test_drop_thread() {
        let (array, receiver) = default_drop();

        assert_eq!(receiver.try_recv().ok(), None);

        let result = thread::spawn(move || {
            assert_eq!(array.get(6).id, 6);
        })
        .join();

        assert!(result.is_ok());

        let indices = receiver.iter().collect::<Vec<_>>();
        assert_eq!(indices.len(), 2);
        assert_eq!(indices[0], 3);
        assert_eq!(indices[1], 6);
    }

    struct PanicOnDropElement {
        _id: u32,
    }

    impl Drop for PanicOnDropElement {
        fn drop(&mut self) {
            panic!("element dropped");
        }
    }

    fn default_panic_on_drop() -> AtomicOnceCellArray<PanicOnDropElement> {
        AtomicOnceCellArray::with_capacity(10)
    }

    #[test]
    fn test_drop_no_panic() {
        let array = default_panic_on_drop();
        std::mem::drop(array);
    }

    fn default_unallocated_bool() -> AtomicOnceCellArray<bool> {
        AtomicOnceCellArray::with_capacity(0)
    }

    #[test]
    fn test_unallocated() {
        let array = default_unallocated_bool();
        assert_eq!(array.capacity(), 0);
    }

    #[test]
    #[should_panic(expected = "index 0 must be < capacity 0")]
    fn test_set_0_unallocated() {
        let array = default_unallocated_bool();
        array.set(0, true);
    }

    #[test]
    #[should_panic(expected = "index 0 must be < capacity 0")]
    fn test_get_0_unallocated() {
        let array = default_unallocated_bool();
        array.get(0);
    }

    fn default_i32() -> AtomicOnceCellArray<i32> {
        AtomicOnceCellArray::with_capacity(10)
    }

    #[test]
    fn test_set_0() {
        let array = default_i32();
        array.set(0, 7);
        assert_eq!(array.get(0), &7);
    }

    #[test]
    #[should_panic(expected = "index 0 cannot be set more than once")]
    fn test_set_0_twice() {
        let array = default_i32();
        array.set(0, 12);
        assert_eq!(array.get(0), &12);
        array.set(0, -2);
    }

    #[test]
    #[should_panic(expected = "index 0 is not initialized")]
    fn test_get_0_uninitialized() {
        let array = default_i32();
        array.get(0);
    }

    #[test]
    fn test_set_3() {
        let array = default_i32();
        assert_eq!(array.capacity(), 10);
        array.set(3, 8658);
        assert_eq!(array.get(3), &8658);
        assert_eq!(array.capacity(), 10);
    }

    #[test]
    #[should_panic(expected = "index 3 cannot be set more than once")]
    fn test_set_3_twice() {
        let array = default_i32();
        array.set(3, 12);
        assert_eq!(array.get(3), &12);
        array.set(3, -2);
    }

    #[test]
    #[should_panic(expected = "index 3 is not initialized")]
    fn test_get_3_uninitialized() {
        let array = default_i32();
        array.get(3);
    }

    #[test]
    fn test_set_capacity() {
        let array = default_i32();
        array.set(array.capacity() - 1, 4663);
        assert_eq!(array.get(array.capacity() - 1), &4663);
    }

    #[test]
    #[should_panic(expected = "index 9 cannot be set more than once")]
    fn test_set_capacity_twice() {
        let array = default_i32();
        array.set(array.capacity() - 1, -25);
        assert_eq!(array.get(array.capacity() - 1), &-25);
        array.set(array.capacity() - 1, 745);
    }

    #[test]
    #[should_panic(expected = "index 9 is not initialized")]
    fn test_get_capacity_uninitialized() {
        let array = default_i32();
        array.get(array.capacity() - 1);
    }

    #[test]
    #[should_panic(expected = "index 11 must be < capacity 10")]
    fn test_get_index_out_of_range() {
        let array = default_i32();
        array.get(array.capacity() + 1);
    }

    #[test]
    #[should_panic(expected = "index 11 must be < capacity 10")]
    fn test_set_index_out_of_range() {
        let array = default_i32();
        array.set(array.capacity() + 1, 0);
    }

    #[test]
    fn test_set_parallel() {
        let array = Arc::new(default_i32());

        let mut join_handles = Vec::with_capacity(array.capacity());
        for index in 0..array.capacity() {
            let array = array.clone();
            join_handles.push(thread::spawn(move || {
                array.set(index, index as i32 * 2);
            }));
        }

        for join_handle in join_handles {
            join_handle.join().unwrap()
        }

        for index in 0..array.capacity() {
            assert_eq!(array.get(index), &(index as i32 * 2));
        }
    }

    #[test]
    #[should_panic]
    fn test_set_parallel_panic() {
        let array = Arc::new(AtomicOnceCellArray::with_capacity(100));

        let mut join_handles = Vec::with_capacity(array.capacity());
        for index in 0..array.capacity() {
            let array = array.clone();
            join_handles.push(thread::spawn(move || {
                array.set(index, index as i32 * 2);
            }));
        }

        // The handles are deliberately not joined: the reads below race the
        // writer threads, so some read is expected to panic on an index that
        // is not yet initialized.
        for index in 0..array.capacity() {
            assert_eq!(array.get(index), &(index as i32 * 2));
        }
    }

    struct ZeroSizedType {}

    fn default_zst() -> AtomicOnceCellArray<ZeroSizedType> {
        AtomicOnceCellArray::with_capacity(10)
    }

    #[test]
    fn test_zst_capacity() {
        let array = default_zst();
        assert_eq!(array.capacity(), 10);
    }

    #[test]
    fn test_zst_set_7() {
        let array = default_zst();
        array.set(7, ZeroSizedType {});
        array.get(7);
    }

    #[test]
    fn test_zst_set_1_2_3() {
        let array = default_zst();
        array.set(1, ZeroSizedType {});
        array.set(2, ZeroSizedType {});
        array.set(3, ZeroSizedType {});
    }

    #[test]
    fn test_zst_set_1_2_get_3() {
        let array = default_zst();
        array.set(1, ZeroSizedType {});
        array.get(1);
        array.set(2, ZeroSizedType {});
        array.get(2);

        // ZST initialization is tracked in aggregate, so reading an index
        // that was never individually set still succeeds.
        array.get(3);
    }

    #[test]
    fn test_zst_set_7_twice() {
        let array = default_zst();
        array.set(7, ZeroSizedType {});
        array.get(7);

        // ZSTs have no per-cell flags, so a repeated set on the same index
        // succeeds as long as the total count stays within the capacity.
        array.set(7, ZeroSizedType {});
    }

    #[test]
    #[should_panic(expected = "index 7 is not initialized")]
    fn test_zst_get_7_uninitialized() {
        let array = default_zst();

        array.get(7);
    }

    mod zst_lifetime {
        struct PrivateInnerZst {}

        pub struct CannotConstructZstLifetime<'a, T> {
            _guard: PrivateInnerZst,
            _phantom: std::marker::PhantomData<&'a T>,
        }
    }

    #[test]
    #[should_panic(expected = "index 0 is not initialized")]
    fn test_zst_get_0_uninitialized_lifetime<'a>() {
        use zst_lifetime::CannotConstructZstLifetime;

        let array = AtomicOnceCellArray::with_capacity(1);

        let _val: &CannotConstructZstLifetime<'a, u32> = array.get(0);
    }

    mod zst_private {
        struct PrivateInnerZst {}

        pub struct CannotConstructZstInner(PrivateInnerZst);
    }

    #[test]
    #[should_panic(expected = "index 0 is not initialized")]
    fn test_zst_get_0_uninitialized_private_type() {
        use zst_private::CannotConstructZstInner;

        let array = AtomicOnceCellArray::with_capacity(1);

        let _val: &CannotConstructZstInner = array.get(0);
    }

    enum Void {}

    #[test]
    #[should_panic(expected = "index 0 is not initialized")]
    fn test_zst_get_0_uninitialized_void() {
        let array = AtomicOnceCellArray::with_capacity(1);

        let _val: &Void = array.get(0);
    }

    #[test]
    #[should_panic(expected = "index 11 must be < capacity 10")]
    fn test_zst_get_index_out_of_range() {
        let array = default_zst();
        array.get(array.capacity() + 1);
    }

    #[test]
    #[should_panic(expected = "index 11 must be < capacity 10")]
    fn test_zst_set_index_out_of_range() {
        let array = default_zst();
        array.set(array.capacity() + 1, ZeroSizedType {});
    }

    #[test]
    fn test_zst_set_parallel() {
        let array = Arc::new(default_zst());

        let mut join_handles = Vec::with_capacity(array.capacity());
        for index in 0..array.capacity() {
            let array = array.clone();
            join_handles.push(thread::spawn(move || {
                array.set(index, ZeroSizedType {});
            }));
        }

        for join_handle in join_handles {
            join_handle.join().unwrap()
        }

        for index in 0..array.capacity() {
            array.get(index);
        }
    }

    #[test]
    #[should_panic(expected = "capacity overflow")]
    fn test_zst_overflow() {
        let array = AtomicOnceCellArray::with_capacity(4);
        // Five sets against a capacity of four: the final one must panic.
        for _ in 0..=array.capacity() {
            array.set(2, ZeroSizedType {});
        }
    }

    #[test]
    #[should_panic(expected = "capacity may not exceed")]
    fn test_zst_invalid_capacity() {
        let array = AtomicOnceCellArray::with_capacity(MAX_CAPACITY + 1);
        array.set(0, ZeroSizedType {});
    }

    #[test]
    fn test_zst_observable_drop() {
        mod zst_drop {
            use std::sync::atomic::{AtomicU32, Ordering};

            static ATOMIC_COUNTER: AtomicU32 = AtomicU32::new(0);

            struct PrivateInnerZst {}

            pub struct ObservableZstDrop(PrivateInnerZst);

            impl ObservableZstDrop {
                pub fn new() -> Self {
                    assert_eq!(std::mem::size_of::<Self>(), 0);
                    ATOMIC_COUNTER.fetch_add(1, Ordering::Relaxed);
                    ObservableZstDrop(PrivateInnerZst {})
                }
            }

            impl Drop for ObservableZstDrop {
                fn drop(&mut self) {
                    ATOMIC_COUNTER.fetch_sub(1, Ordering::Relaxed);
                }
            }

            pub fn get_counter() -> u32 {
                ATOMIC_COUNTER.load(Ordering::Relaxed)
            }
        }

        use zst_drop::{get_counter, ObservableZstDrop};

        assert_eq!(get_counter(), 0);
        let array = AtomicOnceCellArray::with_capacity(5);
        for index in 0..5 {
            array.set(index, ObservableZstDrop::new());
        }
        assert_eq!(get_counter(), 5);

        std::mem::drop(array);
        assert_eq!(get_counter(), 0);
    }
}