use std::sync::atomic::Ordering;
/// A unifying trait for integer types that support atomic load and store.
///
/// [`V`](AtomicInteger::V) is the primitive value type held by the atomic
/// (e.g. `u32` for `AtomicU32`).
///
/// # Safety
///
/// Implementers must guarantee that `load` and `store` are truly atomic on
/// the wrapped value; unsafe code is permitted to rely on this contract
/// (which is why the trait itself is `unsafe`).
pub unsafe trait AtomicInteger: Sync + Send {
    /// The primitive value type wrapped by the atomic.
    type V;

    /// Creates a new atomic holding the initial value `v`.
    fn new(v: Self::V) -> Self;

    /// Atomically loads the current value using the given memory `order`.
    fn load(&self, order: Ordering) -> Self::V;

    /// Atomically stores `val` using the given memory `order`.
    fn store(&self, val: Self::V, order: Ordering);
}
/// Generates an `AtomicInteger` implementation for a std atomic type.
///
/// `$atomic` names the atomic wrapper (e.g. `std::sync::atomic::AtomicU32`)
/// and `$value` the primitive it stores (e.g. `u32`). Every trait method
/// simply forwards to the inherent method of the same name.
macro_rules! impl_atomic_integer_ops {
    ($atomic:path, $value:ty) => {
        // SAFETY: the std atomic types provide genuinely atomic
        // load/store operations, which is all the trait requires.
        unsafe impl AtomicInteger for $atomic {
            type V = $value;

            fn new(v: Self::V) -> Self {
                Self::new(v)
            }

            fn load(&self, order: Ordering) -> Self::V {
                self.load(order)
            }

            fn store(&self, val: Self::V, order: Ordering) {
                self.store(val, order)
            }
        }
    };
}
// Implement `AtomicInteger` for every std atomic integer type whose
// inherent `new`/`load`/`store` signatures match the trait methods.
impl_atomic_integer_ops!(std::sync::atomic::AtomicI8, i8);
impl_atomic_integer_ops!(std::sync::atomic::AtomicI16, i16);
impl_atomic_integer_ops!(std::sync::atomic::AtomicI32, i32);
impl_atomic_integer_ops!(std::sync::atomic::AtomicU8, u8);
impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16);
impl_atomic_integer_ops!(std::sync::atomic::AtomicU32, u32);
impl_atomic_integer_ops!(std::sync::atomic::AtomicIsize, isize);
impl_atomic_integer_ops!(std::sync::atomic::AtomicUsize, usize);

// 64-bit atomics exist only on targets with native 64-bit atomic support.
// `target_has_atomic = "64"` expresses that directly and, unlike the
// previous hand-maintained `target_arch` allowlist (x86_64, aarch64,
// powerpc64, s390x, riscv64), also covers e.g. loongarch64, mips64 and
// sparc64 without further maintenance.
#[cfg(target_has_atomic = "64")]
impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64);
#[cfg(target_has_atomic = "64")]
impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64);
#[cfg(test)]
mod tests {
    use super::*;
    use std::fmt::Debug;
    use std::sync::atomic::*;

    /// Exercises the `AtomicInteger` contract for one atomic type:
    /// a freshly constructed atomic reports its initial value, and a
    /// stored value is observed by a subsequent load.
    fn check_atomic_integer_ops<A: AtomicInteger>()
    where
        A::V: Copy + Debug + From<u8> + PartialEq,
    {
        let v = A::V::from(0);
        let a = A::new(v);
        assert_eq!(a.load(Ordering::Relaxed), v);

        let v2 = A::V::from(100);
        a.store(v2, Ordering::Relaxed);
        assert_eq!(a.load(Ordering::Relaxed), v2);
    }

    #[test]
    fn test_atomic_integer_ops() {
        // Cover every implemented type whose value type satisfies the
        // `From<u8>` bound; `AtomicI8` is skipped because `i8: From<u8>`
        // does not hold (100u8 is representable, but the impl is absent).
        check_atomic_integer_ops::<AtomicI16>();
        check_atomic_integer_ops::<AtomicI32>();
        check_atomic_integer_ops::<AtomicU8>();
        check_atomic_integer_ops::<AtomicU16>();
        check_atomic_integer_ops::<AtomicU32>();
        check_atomic_integer_ops::<AtomicIsize>();
        check_atomic_integer_ops::<AtomicUsize>();

        // Same gate as the 64-bit impls, so this test compiles exactly
        // where those impls exist.
        #[cfg(any(
            target_arch = "x86_64",
            target_arch = "aarch64",
            target_arch = "powerpc64",
            target_arch = "s390x",
            target_arch = "riscv64"
        ))]
        {
            check_atomic_integer_ops::<AtomicI64>();
            check_atomic_integer_ops::<AtomicU64>();
        }
    }
}