use crate::pod::Pod;
use core::cell::UnsafeCell;
use core::mem::MaybeUninit;
use core::sync::atomic::{fence, AtomicU64, Ordering};
#[cfg(feature = "atomic-slots")]
#[inline(always)]
const fn stripe_count<T>() -> usize {
    // Number of 8-byte words needed to cover one `T` (rounded up; 0 for ZSTs).
    let bytes = core::mem::size_of::<T>();
    bytes.div_ceil(8)
}
/// One cache-line-sized seqlock cell: a version stamp plus an (initially
/// uninitialized) payload guarded by that stamp.
///
/// Stamp encoding used by the accessors: `seq * 2 + 1` (odd) while a write is
/// in flight, `seq * 2 + 2` (even) once the value for `seq` is published.
#[repr(C, align(64))]
pub(crate) struct Slot<T> {
    // Seqlock version stamp; odd means a writer is mid-publish.
    stamp: AtomicU64,
    // Payload storage; only accessed under the stamp protocol above.
    value: UnsafeCell<MaybeUninit<T>>,
}

// SAFETY: the payload moves across threads through this cell and all access
// to `value` is mediated by the stamp protocol, so `T: Send` is the required
// bound for both marker traits.
unsafe impl<T: Send> Send for Slot<T> {}
unsafe impl<T: Send> Sync for Slot<T> {}

// Compile-time guard so the cache-line alignment is never silently dropped.
const _: () = assert!(core::mem::align_of::<Slot<u64>>() == 64);

impl<T> Slot<T> {
    /// A fresh slot: stamp 0 (nothing ever published), payload uninitialized.
    pub(crate) fn new() -> Self {
        Self {
            stamp: AtomicU64::new(0),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }

    /// Current raw stamp (Acquire, so observing a "done" stamp also makes the
    /// payload bytes published before it visible).
    #[inline]
    pub(crate) fn stamp_load(&self) -> u64 {
        self.stamp.load(Ordering::Acquire)
    }
}
#[cfg(not(feature = "atomic-slots"))]
impl<T: Pod> Slot<T> {
    /// Publishes `value` for sequence number `seq`.
    ///
    /// Seqlock write: the stamp goes odd (`seq*2+1`) while the payload is
    /// being replaced, then even (`seq*2+2`) once the value is fully stored.
    #[inline]
    pub(crate) fn write(&self, seq: u64, value: T) {
        let writing = seq * 2 + 1;
        let done = seq * 2 + 2;
        self.stamp.store(writing, Ordering::Relaxed);
        // Release fence: a reader that observes the payload changing must
        // also observe the odd `writing` stamp stored above.
        fence(Ordering::Release);
        unsafe { core::ptr::write_volatile(self.value.get() as *mut T, value) };
        self.stamp.store(done, Ordering::Release);
    }

    /// Like [`Slot::write`], but lets `f` build the value in place first.
    #[inline]
    pub(crate) fn write_with(&self, seq: u64, f: impl FnOnce(&mut MaybeUninit<T>)) {
        let mut tmp = MaybeUninit::<T>::uninit();
        f(&mut tmp);
        // SAFETY: `f` is required to fully initialize `tmp` — the same
        // contract the previous inline implementation relied on.
        let value = unsafe { tmp.assume_init() };
        // Delegate so both cfg variants share one publish path (mirrors the
        // `atomic-slots` implementation and removes the duplicated stamping).
        self.write(seq, value);
    }

    /// Attempts to read the value published for `seq`.
    ///
    /// Returns `Ok(Some(v))` on success, `Ok(None)` when a write is in
    /// progress or the slot was overwritten mid-read (caller retries/skips),
    /// and `Err(observed_stamp)` when the slot holds a different even stamp.
    #[inline]
    pub(crate) fn try_read(&self, seq: u64) -> Result<Option<T>, u64> {
        let expected = seq * 2 + 2;
        let s1 = self.stamp.load(Ordering::Acquire);
        if s1 == expected {
            let value = unsafe { core::ptr::read_volatile((*self.value.get()).as_ptr()) };
            // BUGFIX: an Acquire *fence* is required between the payload read
            // and the stamp re-check. An Acquire load only prevents later
            // accesses from moving above it; it does not stop the payload
            // read above from sinking below the re-check, which would let a
            // torn read pass validation. The `atomic-slots` variant already
            // fences at this point; this makes the two implementations agree.
            fence(Ordering::Acquire);
            let s2 = self.stamp.load(Ordering::Relaxed);
            if s1 == s2 {
                return Ok(Some(value));
            }
            return Ok(None);
        }
        if s1 & 1 != 0 {
            // Odd stamp: writer mid-publish.
            return Ok(None);
        }
        Err(s1)
    }
}
#[cfg(feature = "atomic-slots")]
impl<T: Pod> Slot<T> {
    /// Publishes `value` for sequence number `seq`.
    ///
    /// Seqlock write: the stamp goes odd (`seq*2+1`) while the payload is
    /// being replaced, then even (`seq*2+2`) once fully stored. The payload
    /// is copied as 8-byte stripes through `AtomicU64` rather than one
    /// volatile write.
    #[inline]
    pub(crate) fn write(&self, seq: u64, value: T) {
        let writing = seq * 2 + 1;
        let done = seq * 2 + 2;
        self.stamp.store(writing, Ordering::Relaxed);
        // Release fence: a thread that observes any stripe store below also
        // observes the odd `writing` stamp stored above.
        fence(Ordering::Release);
        let n = stripe_count::<T>();
        let src = &value as *const T as *const u8;
        let dst = self.value.get() as *mut u64;
        for i in 0..n {
            // A partial tail stripe is zero-padded by `read_stripe` so the
            // full 8-byte store below never reads past `value`.
            let chunk = unsafe { read_stripe(src, i, core::mem::size_of::<T>()) };
            // `value` sits at an 8-byte-aligned offset (repr(C): it follows
            // the 8-byte `stamp`; the struct is align(64)), so `dst.add(i)`
            // meets `AtomicU64::from_ptr`'s alignment requirement. A partial
            // tail stripe stores up to 7 bytes past `T`, which lands in the
            // align(64) slot's trailing padding, still inside the Slot.
            unsafe { AtomicU64::from_ptr(dst.add(i)) }.store(chunk, Ordering::Relaxed);
        }
        self.stamp.store(done, Ordering::Release);
    }

    /// Like [`Slot::write`], but lets `f` build the value in place first.
    /// `f` must fully initialize its argument; `assume_init` relies on that
    /// (and on `T: Pod` — presumably any bit pattern is valid; the contract
    /// lives in `crate::pod`, not visible here).
    #[inline]
    pub(crate) fn write_with(&self, seq: u64, f: impl FnOnce(&mut MaybeUninit<T>)) {
        let mut tmp = MaybeUninit::<T>::uninit();
        f(&mut tmp);
        let value = unsafe { tmp.assume_init() };
        self.write(seq, value);
    }

    /// Attempts to read the value published for `seq`.
    ///
    /// Returns `Ok(Some(v))` on success, `Ok(None)` when a write is in
    /// progress or the slot was overwritten mid-read (caller retries/skips),
    /// and `Err(observed_stamp)` when the slot holds a different even stamp.
    #[inline]
    pub(crate) fn try_read(&self, seq: u64) -> Result<Option<T>, u64> {
        let expected = seq * 2 + 2;
        let s1 = self.stamp.load(Ordering::Acquire);
        if s1 == expected {
            let n = stripe_count::<T>();
            let src = self.value.get() as *mut u64;
            let mut buf = MaybeUninit::<T>::uninit();
            let dst = buf.as_mut_ptr() as *mut u8;
            for i in 0..n {
                let chunk = unsafe { AtomicU64::from_ptr(src.add(i)) }.load(Ordering::Relaxed);
                // A partial tail stripe copies only the bytes belonging to `T`.
                unsafe { write_stripe(dst, i, chunk, core::mem::size_of::<T>()) };
            }
            // Acquire fence: the stripe loads above cannot be reordered past
            // the stamp re-check below, so a torn read is reliably detected.
            fence(Ordering::Acquire);
            let s2 = self.stamp.load(Ordering::Relaxed);
            if s1 == s2 {
                // Stamp unchanged across the copy: `buf` is a consistent
                // snapshot of a fully published value.
                return Ok(Some(unsafe { buf.assume_init() }));
            }
            return Ok(None);
        }
        if s1 & 1 != 0 {
            // Odd stamp: writer mid-publish.
            return Ok(None);
        }
        Err(s1)
    }
}
/// Reads stripe `i` (8 native-endian bytes) of a `size`-byte object at `src`.
/// For a final partial stripe, the bytes past the end of the object come back
/// as zero.
///
/// # Safety
/// `src` must be valid for reads of `size` bytes and `i * 8 < size` must hold
/// (callers iterate `i` over `stripe_count`).
#[cfg(feature = "atomic-slots")]
#[inline(always)]
unsafe fn read_stripe(src: *const u8, i: usize, size: usize) -> u64 {
    let offset = i * 8;
    let remaining = size - offset;
    if remaining >= 8 {
        // Full stripe: one (possibly unaligned) 8-byte load.
        (src.add(offset) as *const u64).read_unaligned()
    } else {
        // Tail stripe: copy the last `remaining` bytes, leave the rest zero.
        let mut tail = [0u8; 8];
        core::ptr::copy_nonoverlapping(src.add(offset), tail.as_mut_ptr(), remaining);
        u64::from_ne_bytes(tail)
    }
}
/// Writes stripe `i` (8 native-endian bytes) of a `size`-byte object at `dst`.
/// For a final partial stripe only the bytes belonging to the object are
/// written; `chunk`'s padding bytes are discarded.
///
/// # Safety
/// `dst` must be valid for writes of `size` bytes and `i * 8 < size` must
/// hold (callers iterate `i` over `stripe_count`).
#[cfg(feature = "atomic-slots")]
#[inline(always)]
unsafe fn write_stripe(dst: *mut u8, i: usize, chunk: u64, size: usize) {
    let offset = i * 8;
    let remaining = size - offset;
    if remaining >= 8 {
        // Full stripe: one (possibly unaligned) 8-byte store.
        (dst.add(offset) as *mut u64).write_unaligned(chunk);
    } else {
        // Tail stripe: emit only the first `remaining` bytes of `chunk`.
        let bytes = chunk.to_ne_bytes();
        core::ptr::copy_nonoverlapping(bytes.as_ptr(), dst.add(offset), remaining);
    }
}