use core::ptr;
use std::cell::UnsafeCell;
use std::mem::{ManuallyDrop, MaybeUninit};
use std::sync::atomic::{AtomicUsize, Ordering};
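// Each element owns two adjacent flag bits inside a shared `AtomicUsize`:
// `SET_ACQUIRE_FLAG` marks that a writer has claimed the slot, and
// `SET_RELEASE_FLAG` marks that the write has completed. A reader treats a
// slot as initialized only when both bits are set.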
const MAX_CAPACITY: usize = isize::MAX as usize;
const IS_INIT_BITMASK_LEN: usize = 2;
const SET_ACQUIRE_FLAG: usize = 1 << 1;
const SET_RELEASE_FLAG: usize = 1 << 0;
const IS_INIT_BITMASK: usize = SET_ACQUIRE_FLAG | SET_RELEASE_FLAG;
const BITS_PER_BYTE: usize = 8;
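/// Maps an element index to the location of its two flag bits: returns
/// `(index into the atomics slice, bit offset within that atomic)`.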
#[inline(always)]
fn atomic_addr(index: usize) -> (usize, usize) {
let index = IS_INIT_BITMASK_LEN * index;
(index / bits_per_atomic(), index % bits_per_atomic())
}
#[inline(always)]
fn atomic_size() -> usize {
std::mem::size_of::<usize>()
}
#[inline(always)]
fn bits_per_atomic() -> usize {
atomic_size() * BITS_PER_BYTE
}
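/// Flag storage: a slice of atomics with two bits per element for sized
/// types, or a single counter of stored values for zero-sized types.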
enum Index {
Indices(Box<[AtomicUsize]>),
Zst(AtomicUsize),
}
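/// A fixed-capacity array whose elements can each be written exactly once
/// and then read concurrently without locks.
///
/// Two flag bits per element track whether a write has started and whether
/// it has completed; `get` only hands out a reference once both are set.
///
/// # Examples
///
/// A minimal usage sketch (the `use` path for this type is assumed and
/// should be adjusted to wherever it lives in the crate):
///
/// ```ignore
/// use std::sync::Arc;
/// use std::thread;
///
/// let array = Arc::new(AtomicOnceCellArray::with_capacity(4));
/// let writer = {
///     let array = Arc::clone(&array);
///     thread::spawn(move || array.set(2, 42))
/// };
/// writer.join().unwrap();
/// assert_eq!(array.get(2), &42);
/// ```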
pub struct AtomicOnceCellArray<T> {
data: Box<[MaybeUninit<UnsafeCell<T>>]>,
indices: Index,
}
impl<T> AtomicOnceCellArray<T> {
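/// Creates an array with space for exactly `capacity` elements.
///
/// # Panics
///
/// Panics if `capacity` exceeds `isize::MAX`.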
pub fn with_capacity(capacity: usize) -> Self {
if capacity > MAX_CAPACITY {
panic!("capacity may not exceed {}", MAX_CAPACITY);
}
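// Allocate storage without initializing it: `MaybeUninit<UnsafeCell<T>>`
// needs no initialization, so `set_len` over the raw buffer is sound.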
let mut data = Vec::with_capacity(capacity);
unsafe {
data.set_len(capacity);
};
let indices = if std::mem::size_of::<T>() > 0 && capacity > 0 {
// Two flag bits per element: the atomic holding the last element's
// bits bounds how many atomics are needed.
let (max_atomic_index, _atomic_offset) = atomic_addr(capacity - 1);
let mut indices = Vec::with_capacity(max_atomic_index + 1);
for _ in 0..=max_atomic_index {
indices.push(AtomicUsize::new(0));
}
indices
} else {
Vec::with_capacity(0)
};
Self {
data: data.into_boxed_slice(),
indices: if !indices.is_empty() {
Index::Indices(indices.into_boxed_slice())
} else {
Index::Zst(AtomicUsize::new(0))
},
}
}
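/// Claims `index` for writing by setting its acquire flag.
///
/// Panics if the slot was already claimed, which guarantees at most one
/// writer ever reaches the actual write.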
#[inline(always)]
fn start_set(
&self,
indices: &[AtomicUsize],
index: usize,
) -> (usize, usize) {
let addr = atomic_addr(index);
let set_acquire_flag = SET_ACQUIRE_FLAG << addr.1;
// `fetch_or` atomically claims the slot; the previous value tells this
// thread whether another writer got there first.
let prev = indices[addr.0].fetch_or(set_acquire_flag, Ordering::Acquire);
if prev & (IS_INIT_BITMASK << addr.1) > 0 {
panic!("index {} cannot be set more than once", index);
}
addr
}
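/// Marks the write at `addr` as complete so readers may observe the value.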
#[inline(always)]
fn end_set(
&self,
indices: &[AtomicUsize],
addr: (usize, usize),
) {
let set_release_flag = SET_RELEASE_FLAG << addr.1;
// Publish the completed write: `Release` orders the element write
// before any reader's `Acquire` load of this flag.
indices[addr.0].fetch_or(set_release_flag, Ordering::Release);
}
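/// Stores `val` at `index`.
///
/// # Panics
///
/// Panics if `index` is out of range or has already been set (for sized
/// `T`), or if more than `capacity` values have been stored (for
/// zero-sized `T`).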
pub fn set(
&self,
index: usize,
val: T,
) {
if index >= self.capacity() {
panic!("index {} must be < capacity {}", index, self.capacity());
}
if std::mem::size_of::<T>() == 0 {
// ZSTs carry no data, so only a count of stored values is kept. A
// single atomic check-and-increment avoids the race between a
// separate load and fetch_add.
let num_initialized = self.zst();
let claimed = num_initialized.fetch_update(
Ordering::Release,
Ordering::Acquire,
|n| if n < self.capacity() { Some(n + 1) } else { None },
);
if claimed.is_ok() {
// Forget the value; `Drop` replays the drops via the counter.
let _ = ManuallyDrop::new(val);
} else {
panic!("capacity overflow");
}
return;
}
let indices = self.indices();
let addr = self.start_set(indices, index);
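// The slot is claimed exclusively; perform the write, then publish it
// with the release flag so readers see a fully written value.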
{
let maybe_uninit = self.ptr_to_maybe_uninit(index);
unsafe {
let ptr = AtomicOnceCellArray::maybe_uninit_as_ptr(maybe_uninit);
AtomicOnceCellArray::unsafe_cell_raw_get(ptr).write(val);
}
}
self.end_set(indices, addr);
}
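/// Returns a shared reference to the value at `index`.
///
/// # Panics
///
/// Panics if `index` is out of range or not yet initialized. For
/// zero-sized `T`, any in-range index reads as initialized once at least
/// one value has been stored.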
pub fn get(
&self,
index: usize,
) -> &T {
if index >= self.capacity() {
panic!("index {} must be < capacity {}", index, self.capacity());
}
if std::mem::size_of::<T>() == 0 {
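// For ZSTs only a global count is tracked, so an index counts as
// initialized once at least one value has been stored anywhere.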
let num_initialized = self.zst().load(Ordering::Acquire);
if num_initialized == 0 {
panic!("index {} is not initialized", index);
}
} else {
let indices = self.indices();
let (atomic_index, atomic_offset) = atomic_addr(index);
let atomic_val = indices[atomic_index].load(Ordering::Acquire);
let is_init_bitmask = IS_INIT_BITMASK << atomic_offset;
if atomic_val & is_init_bitmask != is_init_bitmask {
panic!("index {} is not initialized", index);
}
}
let maybe_uninit = self.ptr_to_maybe_uninit(index);
unsafe {
// Both flags were observed with `Acquire`, so the write to this
// slot happens-before this read and no further writes can occur.
let maybe_uninit_ref = maybe_uninit.as_ref().unwrap();
let assume_init = AtomicOnceCellArray::maybe_uninit_assume_init_ref(maybe_uninit_ref);
&*assume_init.get()
}
}
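/// Returns the whole array as an initialized slice.
///
/// # Safety
///
/// Every element must have been initialized via `set`, and no `set` may
/// run concurrently with the returned borrow.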
pub unsafe fn get_all_unchecked(&self) -> &[T] {
std::mem::transmute::<&[MaybeUninit<UnsafeCell<T>>], &[T]>(&self.data[..])
}
pub fn capacity(&self) -> usize {
self.data.len()
}
#[inline(always)]
fn zst(&self) -> &AtomicUsize {
if std::mem::size_of::<T>() != 0 {
unreachable!();
}
match &self.indices {
Index::Indices(_) => {
unreachable!()
}
Index::Zst(num_initialized) => num_initialized,
}
}
#[inline(always)]
fn indices(&self) -> &Box<[AtomicUsize]> {
if std::mem::size_of::<T>() == 0 {
unreachable!();
}
match &self.indices {
Index::Indices(indices) => indices,
Index::Zst(_) => {
unreachable!()
}
}
}
#[inline(always)]
fn ptr_to_maybe_uninit(
&self,
index: usize,
) -> *const MaybeUninit<UnsafeCell<T>> {
unsafe {
// Callers guarantee `index < capacity <= isize::MAX`, so `add` is in bounds.
self.data.as_ptr().add(index)
}
}
#[inline(always)]
fn ptr_to_maybe_uninit_mut(
&mut self,
index: usize,
) -> *mut MaybeUninit<UnsafeCell<T>> {
unsafe {
// Callers guarantee `index < capacity <= isize::MAX`, so `add` is in bounds.
self.data.as_mut_ptr().add(index)
}
}
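// The helpers below mirror `MaybeUninit::as_ptr`/`as_mut_ptr`,
// `UnsafeCell::raw_get`, and `MaybeUninit::assume_init_ref` as plain
// pointer casts, avoiding intermediate references to possibly
// uninitialized data.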
#[inline(always)]
unsafe fn maybe_uninit_as_ptr(
maybe_uninit: *const MaybeUninit<UnsafeCell<T>>
) -> *const UnsafeCell<T> {
maybe_uninit as *const _ as *const UnsafeCell<T>
}
#[inline(always)]
unsafe fn maybe_uninit_as_mut_ptr(
maybe_uninit: *mut MaybeUninit<UnsafeCell<T>>
) -> *mut UnsafeCell<T> {
maybe_uninit as *mut _ as *mut UnsafeCell<T>
}
#[inline(always)]
unsafe fn unsafe_cell_raw_get(cell: *const UnsafeCell<T>) -> *mut T {
cell as *const T as *mut T
}
#[inline(always)]
unsafe fn maybe_uninit_assume_init_ref(
maybe_uninit: &MaybeUninit<UnsafeCell<T>>
) -> &UnsafeCell<T> {
&*maybe_uninit.as_ptr()
}
}
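// Drop only the elements that were actually initialized: sized types consult
// the per-slot flags, ZSTs replay one drop per counted store.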
impl<T> Drop for AtomicOnceCellArray<T> {
fn drop(&mut self) {
if std::mem::size_of::<T>() == 0 {
let num_initialized = self.zst().load(Ordering::Relaxed);
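// All ZST elements share one address, so repeatedly dropping in place
// at index 0 runs `T::drop` once per stored value.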
for _ in 0..num_initialized {
let maybe_uninit = self.ptr_to_maybe_uninit_mut(0);
unsafe {
ptr::drop_in_place(AtomicOnceCellArray::maybe_uninit_as_mut_ptr(maybe_uninit))
}
}
return;
}
for index in 0..self.capacity() {
let is_initialized = {
let indices = self.indices();
let (atomic_index, atomic_offset) = atomic_addr(index);
let atomic_val = indices[atomic_index].load(Ordering::Relaxed);
let is_init_bitmask = IS_INIT_BITMASK << atomic_offset;
atomic_val & is_init_bitmask == is_init_bitmask
};
if is_initialized {
let maybe_uninit = self.ptr_to_maybe_uninit_mut(index);
unsafe {
ptr::drop_in_place(AtomicOnceCellArray::maybe_uninit_as_mut_ptr(maybe_uninit))
}
}
}
}
}
// Safety: element access is synchronized by the per-slot flags (or the ZST
// counter). `T: Send + Sync` is required because `set` moves values in from
// other threads and `get` hands out shared references across threads.
unsafe impl<T: Send + Sync> Sync for AtomicOnceCellArray<T> {}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::mpsc::{Receiver, Sender};
use std::sync::{mpsc, Arc};
use std::{panic, thread};
struct DroppableElement {
id: usize,
sender: Sender<usize>,
}
impl DroppableElement {
pub fn new(
id: usize,
sender: &Sender<usize>,
) -> Self {
Self {
id,
sender: sender.clone(),
}
}
}
impl Drop for DroppableElement {
fn drop(&mut self) {
let _ = self.sender.send(self.id);
}
}
fn default_drop() -> (AtomicOnceCellArray<DroppableElement>, Receiver<usize>) {
let array = AtomicOnceCellArray::with_capacity(10);
let receiver = {
let (sender, receiver) = mpsc::channel();
array.set(3, DroppableElement::new(3, &sender));
array.set(6, DroppableElement::new(6, &sender));
receiver
};
(array, receiver)
}
#[test]
fn test_drop() {
let (array, receiver) = default_drop();
assert_eq!(receiver.try_recv().ok(), None);
std::mem::drop(array);
let indices = receiver.iter().collect::<Vec<_>>();
assert_eq!(indices.len(), 2);
assert_eq!(indices[0], 3);
assert_eq!(indices[1], 6);
}
#[test]
fn test_drop_panic() {
let (array, receiver) = default_drop();
assert_eq!(receiver.try_recv().ok(), None);
let result = thread::spawn(move || {
array.get(4); })
.join();
assert!(result.is_err());
let indices = receiver.iter().collect::<Vec<_>>();
assert_eq!(indices.len(), 2);
assert_eq!(indices[0], 3);
assert_eq!(indices[1], 6);
}
#[test]
fn test_drop_thread() {
let (array, receiver) = default_drop();
assert_eq!(receiver.try_recv().ok(), None);
let result = thread::spawn(move || {
assert_eq!(array.get(6).id, 6);
})
.join();
assert!(result.is_ok());
let indices = receiver.iter().collect::<Vec<_>>();
assert_eq!(indices.len(), 2);
assert_eq!(indices[0], 3);
assert_eq!(indices[1], 6);
}
struct PanicOnDropElement {
_id: u32,
}
impl Drop for PanicOnDropElement {
fn drop(&mut self) {
panic!("element dropped");
}
}
fn default_panic_on_drop() -> AtomicOnceCellArray<PanicOnDropElement> {
AtomicOnceCellArray::with_capacity(10)
}
#[test]
fn test_drop_no_panic() {
let array = default_panic_on_drop();
std::mem::drop(array);
}
fn default_unallocated_bool() -> AtomicOnceCellArray<bool> {
AtomicOnceCellArray::with_capacity(0)
}
#[test]
fn test_unallocated() {
let array = default_unallocated_bool();
assert_eq!(array.capacity(), 0);
}
#[test]
#[should_panic(expected = "index 0 must be < capacity 0")]
fn test_set_0_unallocated() {
let array = default_unallocated_bool();
array.set(0, true);
}
#[test]
#[should_panic(expected = "index 0 must be < capacity 0")]
fn test_get_0_unallocated() {
let array = default_unallocated_bool();
array.get(0);
}
fn default_i32() -> AtomicOnceCellArray<i32> {
AtomicOnceCellArray::with_capacity(10)
}
#[test]
fn test_set_0() {
let array = default_i32();
array.set(0, 7);
assert_eq!(array.get(0), &7);
}
#[test]
#[should_panic(expected = "index 0 cannot be set more than once")]
fn test_set_0_twice() {
let array = default_i32();
array.set(0, 12);
assert_eq!(array.get(0), &12);
array.set(0, -2);
}
#[test]
#[should_panic(expected = "index 0 is not initialized")]
fn test_get_0_uninitialized() {
let array = default_i32();
array.get(0);
}
#[test]
fn test_set_3() {
let array = default_i32();
assert_eq!(array.capacity(), 10);
array.set(3, 8658);
assert_eq!(array.get(3), &8658);
assert_eq!(array.capacity(), 10);
}
#[test]
#[should_panic(expected = "index 3 cannot be set more than once")]
fn test_set_3_twice() {
let array = default_i32();
array.set(3, 12);
assert_eq!(array.get(3), &12);
array.set(3, -2);
}
#[test]
#[should_panic(expected = "index 3 is not initialized")]
fn test_get_3_uninitialized() {
let array = default_i32();
array.get(3);
}
#[test]
fn test_set_capacity() {
let array = default_i32();
array.set(array.capacity() - 1, 4663);
assert_eq!(array.get(array.capacity() - 1), &4663);
}
#[test]
#[should_panic(expected = "index 9 cannot be set more than once")]
fn test_set_capacity_twice() {
let array = default_i32();
array.set(array.capacity() - 1, -25);
assert_eq!(array.get(array.capacity() - 1), &-25);
array.set(array.capacity() - 1, 745);
}
#[test]
#[should_panic(expected = "index 9 is not initialized")]
fn test_get_capacity_uninitialized() {
let array = default_i32();
array.get(array.capacity() - 1);
}
#[test]
#[should_panic(expected = "index 11 must be < capacity 10")]
fn test_get_index_out_of_range() {
let array = default_i32();
array.get(array.capacity() + 1);
}
#[test]
#[should_panic(expected = "index 11 must be < capacity 10")]
fn test_set_index_out_of_range() {
let array = default_i32();
array.set(array.capacity() + 1, 0);
}
#[test]
fn test_set_parallel() {
let array = Arc::new(default_i32());
let mut join_handles = Vec::with_capacity(array.capacity());
for index in 0..array.capacity() {
let array = array.clone();
join_handles.push(thread::spawn(move || {
array.set(index, index as i32 * 2);
}));
}
for join_handle in join_handles {
join_handle.join().unwrap()
}
for index in 0..array.capacity() {
assert_eq!(array.get(index), &(index as i32 * 2));
}
}
#[test]
#[should_panic]
fn test_set_parallel_panic() {
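// The writer threads are deliberately not joined before reading, so at
// least one `get` is expected to observe an uninitialized slot and panic.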
let array = Arc::new(AtomicOnceCellArray::with_capacity(100));
let mut join_handles = Vec::with_capacity(array.capacity());
for index in 0..array.capacity() {
let array = array.clone();
join_handles.push(thread::spawn(move || {
array.set(index, index as i32 * 2);
}));
}
for index in 0..array.capacity() {
assert_eq!(array.get(index), &(index as i32 * 2));
}
}
struct ZeroSizedType {}
fn default_zst() -> AtomicOnceCellArray<ZeroSizedType> {
AtomicOnceCellArray::with_capacity(10)
}
#[test]
fn test_zst_capacity() {
let array = default_zst();
assert_eq!(array.capacity(), 10);
}
#[test]
fn test_zst_set_7() {
let array = default_zst();
array.set(7, ZeroSizedType {});
array.get(7);
}
#[test]
fn test_zst_set_1_2_3() {
let array = default_zst();
array.set(1, ZeroSizedType {});
array.set(2, ZeroSizedType {});
array.set(3, ZeroSizedType {});
}
#[test]
fn test_zst_set_1_2_get_3() {
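// ZST quirk: once any value is stored, every in-range index reads as
// initialized, so `get(3)` succeeds without a matching `set(3)`.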
let array = default_zst();
array.set(1, ZeroSizedType {});
array.get(1);
array.set(2, ZeroSizedType {});
array.get(2);
array.get(3);
}
#[test]
fn test_zst_set_7_twice() {
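// ZST quirk: only a global count is tracked, so re-setting the same
// index succeeds until `capacity` total stores have occurred.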
let array = default_zst();
array.set(7, ZeroSizedType {});
array.get(7);
array.set(7, ZeroSizedType {});
}
#[test]
#[should_panic(expected = "index 7 is not initialized")]
fn test_zst_get_7_uninitialized() {
let array = default_zst();
array.get(7);
}
mod zst_lifetime {
struct PrivateInnerZst {}
pub struct CannotConstructZstLifetime<'a, T> {
_guard: PrivateInnerZst,
_phantom: std::marker::PhantomData<&'a T>,
}
}
#[test]
#[should_panic(expected = "index 0 is not initialized")]
fn test_zst_get_0_uninitialized_lifetime() {
use zst_lifetime::CannotConstructZstLifetime;
let array = AtomicOnceCellArray::with_capacity(1);
let _val: &CannotConstructZstLifetime<'static, u32> = array.get(0);
}
mod zst_private {
struct PrivateInnerZst {}
pub struct CannotConstructZstInner(PrivateInnerZst);
}
#[test]
#[should_panic(expected = "index 0 is not initialized")]
fn test_zst_get_0_uninitialized_private_type() {
use zst_private::CannotConstructZstInner;
let array = AtomicOnceCellArray::with_capacity(1);
let _val: &CannotConstructZstInner = array.get(0);
}
enum Void {}
#[test]
#[should_panic(expected = "index 0 is not initialized")]
fn test_zst_get_0_uninitialized_void() {
let array = AtomicOnceCellArray::with_capacity(1);
let _val: &Void = array.get(0);
}
#[test]
#[should_panic(expected = "index 11 must be < capacity 10")]
fn test_zst_get_index_out_of_range() {
let array = default_zst();
array.get(array.capacity() + 1);
}
#[test]
#[should_panic(expected = "index 11 must be < capacity 10")]
fn test_zst_set_index_out_of_range() {
let array = default_zst();
array.set(array.capacity() + 1, ZeroSizedType {});
}
#[test]
fn test_zst_set_parallel() {
let array = Arc::new(default_zst());
let mut join_handles = Vec::with_capacity(array.capacity());
for index in 0..array.capacity() {
let array = array.clone();
join_handles.push(thread::spawn(move || {
array.set(index, ZeroSizedType {});
}));
}
for join_handle in join_handles {
join_handle.join().unwrap()
}
for index in 0..array.capacity() {
array.get(index);
}
}
#[test]
#[should_panic(expected = "capacity overflow")]
fn test_zst_overflow() {
let array = AtomicOnceCellArray::with_capacity(4);
for _ in 0..=array.capacity() {
array.set(2, ZeroSizedType {});
}
}
#[test]
#[should_panic(expected = "capacity may not exceed")]
fn test_zst_invalid_capacity() {
let array = AtomicOnceCellArray::with_capacity(MAX_CAPACITY + 1);
array.set(0, ZeroSizedType {});
}
#[test]
fn test_zst_observable_drop() {
mod zst_drop {
use std::sync::atomic::{AtomicU32, Ordering};
static ATOMIC_COUNTER: AtomicU32 = AtomicU32::new(0);
struct PrivateInnerZst {}
pub struct ObservableZstDrop(PrivateInnerZst);
impl ObservableZstDrop {
pub fn new() -> Self {
assert_eq!(std::mem::size_of::<Self>(), 0);
ATOMIC_COUNTER.fetch_add(1, Ordering::Relaxed);
ObservableZstDrop(PrivateInnerZst {})
}
}
impl Drop for ObservableZstDrop {
fn drop(&mut self) {
ATOMIC_COUNTER.fetch_sub(1, Ordering::Relaxed);
}
}
pub fn get_counter() -> u32 {
ATOMIC_COUNTER.load(Ordering::Relaxed)
}
}
use zst_drop::{get_counter, ObservableZstDrop};
assert_eq!(get_counter(), 0);
let array = AtomicOnceCellArray::with_capacity(5);
for index in 0..5 {
array.set(index, ObservableZstDrop::new());
}
assert_eq!(get_counter(), 5);
std::mem::drop(array);
assert_eq!(get_counter(), 0);
}
}