use core::{cell::UnsafeCell, mem, sync::atomic::Ordering};
use super::{SeqLock, SeqLockWriteGuard};
use crate::utils::CachePadded;
use super::{AtomicChunk, Chunk};
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static SeqLock {
    // Global striped pool of seqlocks: each fallback atomic hashes the address
    // of its storage into this table, so unrelated atomics rarely contend on
    // the same lock. The pool size is a prime number to spread
    // regularly-strided addresses across distinct entries.
    const N: usize = 67;
    // This const exists only as an initializer for the non-Copy array below;
    // the interior-mutability lint is a false positive for that use.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: CachePadded<SeqLock> = CachePadded::new(SeqLock::new());
    // Written out element-by-element because `[INIT; N]` array-repeat syntax
    // requires `Copy` on the compilers this crate supports.
    static POOL: [CachePadded<SeqLock>; N] = [
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT, INIT,
        INIT, INIT, INIT, INIT, INIT, INIT, INIT,
    ];
    // CachePadded derefs to the inner SeqLock.
    &POOL[addr % N]
}
// Generates a lock-based fallback atomic integer type for widths that lack
// native atomic instructions on the target. All mutating operations take the
// global seqlock associated with the value's address (see `lock`); `load`
// first attempts a lock-free optimistic read validated against the seqlock's
// version stamp.
macro_rules! atomic {
// `uint` arm: operations shared by signed and unsigned integer types.
(uint, $atomic_type:ident, $int_type:ident, $align:expr) => {
#[repr(C, align($align))]
pub(crate) struct $atomic_type {
// The stored value. UnsafeCell is required because writers mutate it
// through a shared reference while holding the seqlock.
v: UnsafeCell<$int_type>,
}
impl $atomic_type {
// Number of native-atomic-sized chunks making up one value.
const LEN: usize = mem::size_of::<$int_type>() / mem::size_of::<Chunk>();
// Views the value as an array of atomic chunks so it can be read and
// written piecewise without data races.
//
// SAFETY: the caller must ensure that all concurrent mutation of the
// value also goes through these atomic chunks (i.e. through `write`).
#[inline]
unsafe fn chunks(&self) -> &[AtomicChunk; Self::LEN] {
// The cast below is only meaningful if the value is strictly larger
// than a chunk and an exact multiple of the chunk size.
static_assert!($atomic_type::LEN > 1);
static_assert!(mem::size_of::<$int_type>() % mem::size_of::<Chunk>() == 0);
// SAFETY: per the asserts above, `$int_type` has the same size as
// `[AtomicChunk; Self::LEN]` (and AtomicChunk is the atomic
// counterpart of Chunk), so the pointer cast is in-bounds and
// well-aligned given `repr(align($align))` on the struct.
unsafe { &*(self.v.get() as *const $int_type as *const [AtomicChunk; Self::LEN]) }
}
// Reads the value chunk-by-chunk WITHOUT holding the lock. The result
// may be torn (assembled from two different writes); the caller must
// validate the seqlock stamp before trusting it (see `load`).
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
fn optimistic_read(&self) -> $int_type {
let mut dst: [Chunk; Self::LEN] = [0; Self::LEN];
for i in 0..Self::LEN {
// SAFETY: concurrent writers store through the same atomic chunks
// (see `write`), so these relaxed loads are race-free, though the
// assembled value may be torn.
unsafe {
dst[i] = self.chunks()[i].load(Ordering::Relaxed);
}
}
// SAFETY: sizes match (checked in `chunks`) and every bit pattern is
// a valid integer value.
unsafe { mem::transmute::<[Chunk; Self::LEN], $int_type>(dst) }
}
// Plain (non-atomic) read of the whole value; sound because holding the
// write guard excludes all writers, and lock-free readers discard their
// result unless the stamp validates.
#[inline]
fn read(&self, _guard: &SeqLockWriteGuard<'static>) -> $int_type {
// SAFETY: `_guard` proves we hold the seqlock for this address.
unsafe { self.v.get().read() }
}
// Stores the value chunk-by-chunk through the atomic view so concurrent
// optimistic readers never race with us. The guard serializes writers.
#[inline]
fn write(&self, val: $int_type, _guard: &SeqLockWriteGuard<'static>) {
// SAFETY: integer -> chunk-array transmute; sizes match (see `chunks`).
let val = unsafe { mem::transmute::<$int_type, [Chunk; Self::LEN]>(val) };
for i in 0..Self::LEN {
// SAFETY: the guard excludes other writers; readers use atomic
// loads on the same chunks.
unsafe {
self.chunks()[i].store(val[i], Ordering::Relaxed);
}
}
}
}
// SAFETY: all mutation of `v` is serialized by the global seqlock, and
// lock-free readers validate against the lock's version stamp.
unsafe impl Sync for $atomic_type {}
// NOTE: the `portable_atomic_cmpxchg16b_dynamic` gates below compile most of
// the API out when the dynamic cmpxchg16b path provides it; everything is
// kept under `test` so this implementation is always exercised.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
no_fetch_ops_impl!($atomic_type, $int_type);
impl $atomic_type {
/// Creates a new atomic integer.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) const fn new(v: $int_type) -> Self {
Self { v: UnsafeCell::new(v) }
}
// This fallback is lock-based, so it is never lock-free.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn is_lock_free() -> bool {
Self::is_always_lock_free()
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) const fn is_always_lock_free() -> bool {
false
}
// `&mut self` proves unique access, so no locking is needed here.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn get_mut(&mut self) -> &mut $int_type {
// SAFETY: exclusive borrow of self guarantees no concurrent access.
unsafe { &mut *self.v.get() }
}
// Consumes self, so no locking is needed here either.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn into_inner(self) -> $int_type {
self.v.into_inner()
}
// Load: first try an optimistic (lock-free) read validated against the
// seqlock stamp; fall back to taking the write lock if a writer raced us.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn load(&self, order: Ordering) -> $int_type {
crate::utils::assert_load_ordering(order);
let lock = lock(self.v.get() as usize);
if let Some(stamp) = lock.optimistic_read() {
let val = self.optimistic_read();
// Only trust the (possibly torn) value if no writer intervened.
if lock.validate_read(stamp) {
return val;
}
}
// Slow path: serialize with writers.
let guard = lock.write();
let val = self.read(&guard);
// Nothing was written, so abort the write guard (name suggests this
// releases the lock without publishing a new version — see SeqLock).
guard.abort();
val
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn store(&self, val: $int_type, order: Ordering) {
crate::utils::assert_store_ordering(order);
// Dropping the guard after `write` releases the lock normally.
let guard = lock(self.v.get() as usize).write();
self.write(val, &guard)
}
// Note: the lock makes every RMW sequentially consistent regardless of
// the requested ordering, so `_order` is ignored below.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn swap(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(val, &guard);
result
}
// NOTE(review): unlike its siblings, this is not cfg-gated — presumably
// because the cmpxchg16b dynamic-detection path also calls it; confirm.
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn compare_exchange(
&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering,
) -> Result<$int_type, $int_type> {
crate::utils::assert_compare_exchange_ordering(success, failure);
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
if result == current {
self.write(new, &guard);
Ok(result)
} else {
// No write happened, so release without publishing a new version.
guard.abort();
Err(result)
}
}
// Under a lock, a "weak" CAS cannot fail spuriously, so this is just
// the strong version.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
pub(crate) fn compare_exchange_weak(
&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering,
) -> Result<$int_type, $int_type> {
self.compare_exchange(current, new, success, failure)
}
// All fetch_* ops below follow the same shape: lock, read old value,
// write new value, return old value.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_add(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(result.wrapping_add(val), &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_sub(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(result.wrapping_sub(val), &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_and(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(result & val, &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_nand(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
// NAND = NOT(AND).
self.write(!(result & val), &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_or(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(result | val, &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_xor(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(result ^ val, &guard);
result
}
// max/min use the type's own comparison, so the `int` instantiation is
// signed and the `uint` instantiation unsigned, as expected.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_max(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(core::cmp::max(result, val), &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_min(&self, val: $int_type, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(core::cmp::min(result, val), &guard);
result
}
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_not(&self, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
self.write(!result, &guard);
result
}
// Convenience wrapper that discards the previous value.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn not(&self, order: Ordering) {
self.fetch_not(order);
}
}
};
// `int` arm: everything from the `uint` arm plus signed-only negation.
(int, $atomic_type:ident, $int_type:ident, $align:expr) => {
atomic!(uint, $atomic_type, $int_type, $align);
impl $atomic_type {
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn fetch_neg(&self, _order: Ordering) -> $int_type {
let guard = lock(self.v.get() as usize).write();
let result = self.read(&guard);
// Wrapping negation: MIN negates to itself without panicking.
self.write(result.wrapping_neg(), &guard);
result
}
// Convenience wrapper that discards the previous value.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
#[inline]
pub(crate) fn neg(&self, order: Ordering) {
self.fetch_neg(order);
}
}
};
}
// 64-bit fallbacks are only needed on 16/32-bit targets that lack native
// 64-bit atomics; they are also compiled under `test` so the implementation
// is exercised everywhere. The two cfg_attr branches handle old compilers
// without `cfg(target_has_atomic)` (detected via the build script) versus
// modern ones.
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(any(test, portable_atomic_no_atomic_64)))]
#[cfg_attr(
not(portable_atomic_no_cfg_target_has_atomic),
cfg(any(test, not(target_has_atomic = "64")))
)]
atomic!(int, AtomicI64, i64, 8);
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(any(test, portable_atomic_no_atomic_64)))]
#[cfg_attr(
not(portable_atomic_no_cfg_target_has_atomic),
cfg(any(test, not(target_has_atomic = "64")))
)]
atomic!(uint, AtomicU64, u64, 8);
// 128-bit fallbacks. AtomicI128 is compiled out when the cmpxchg16b dynamic
// path provides it.
#[cfg(any(test, not(portable_atomic_cmpxchg16b_dynamic)))]
atomic!(int, AtomicI128, i128, 16);
// NOTE(review): AtomicU128 is generated unconditionally — presumably the
// cmpxchg16b dynamic-detection path itself falls back to this type; confirm
// against the x86_64 implementation.
atomic!(uint, AtomicU128, u128, 16);
#[cfg(test)]
mod tests {
use super::*;
// Exercise each generated fallback type through the shared atomic-integer
// test-suite macro. The 64-bit types only exist on 16/32-bit targets
// (matching the cfg on their `atomic!` invocations above).
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
test_atomic_int!(i64);
#[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))]
test_atomic_int!(u64);
// The 128-bit fallbacks are generated on all targets (under `test`).
test_atomic_int!(i128);
test_atomic_int!(u128);
}