// Shared utilities: cache-line padding to avoid false sharing, exponential
// backoff for contended spin loops, and a sequentially consistent fence.
use core::ops;
/// Pads and aligns a value to the length of a cache line, so that accesses
/// to different `CachePadded` values never contend on the same cache line
/// (avoiding false sharing).
//
// The alignments below are the assumed cache-line sizes per architecture:
// 128 bytes on x86_64/aarch64/arm64ec/powerpc64 (the line is 64 bytes, but
// modern Intel CPUs prefetch lines in pairs, and some Arm and POWER cores
// use 128-byte lines), 32 bytes on arm/mips/sparc/hexagon, 16 on m68k,
// 256 on s390x, and 64 bytes (the most common line size) everywhere else.
#[cfg_attr(
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "arm64ec",
        target_arch = "powerpc64",
    ),
    repr(align(128))
)]
#[cfg_attr(
    any(
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "sparc",
        target_arch = "hexagon",
    ),
    repr(align(32))
)]
#[cfg_attr(target_arch = "m68k", repr(align(16)))]
#[cfg_attr(target_arch = "s390x", repr(align(256)))]
#[cfg_attr(
    not(any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "arm64ec",
        target_arch = "powerpc64",
        target_arch = "arm",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "sparc",
        target_arch = "hexagon",
        target_arch = "m68k",
        target_arch = "s390x",
    )),
    repr(align(64))
)]
pub(crate) struct CachePadded<T> {
    value: T,
}

impl<T> CachePadded<T> {
    /// Wraps `value` in cache-line padding.
    #[inline]
    pub(crate) const fn new(value: T) -> Self {
        Self { value }
    }
}

impl<T> ops::Deref for CachePadded<T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
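
// A minimal usage sketch (test-only, not part of the original module):
// adjacent `CachePadded` values start on separate cache-line boundaries,
// so two threads writing to them would not falsely share a line.
#[cfg(test)]
mod cache_padded_example {
    use super::CachePadded;

    #[test]
    fn adjacent_values_are_a_cache_line_apart() {
        let pair = [CachePadded::new(0u64), CachePadded::new(0u64)];
        let first = &*pair[0] as *const u64 as usize;
        let second = &*pair[1] as *const u64 as usize;
        // `repr(align(N))` also rounds the size up to `N`, so consecutive
        // array elements are at least one assumed cache line apart.
        assert!(second - first >= core::mem::align_of::<CachePadded<u64>>());
    }
}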

/// Exponential backoff for contended spin loops: each `snooze` spins for
/// `2^step` iterations, doubling the count per call, until the spin limit
/// is reached and it yields the thread instead (where `std` is available).
pub(crate) struct Backoff {
    step: u32,
}

// After this many snoozes, stop doubling the spin count and start yielding
// to the OS scheduler (or keep spinning at the cap without `std`).
const SPIN_LIMIT: u32 = 4;

impl Backoff {
    #[inline]
    pub(crate) const fn new() -> Self {
        Self { step: 0 }
    }

    #[inline]
    pub(crate) fn snooze(&mut self) {
        if self.step <= SPIN_LIMIT {
            // Busy-wait for 2^step iterations, doubling on each call.
            for _ in 0..1 << self.step {
                core::hint::spin_loop();
            }
            self.step += 1;
        } else {
            // Past the spin limit: without `std` all we can do is keep
            // spinning at the cap; with `std`, yield to the scheduler.
            #[cfg(not(feature = "std"))]
            for _ in 0..1 << self.step {
                core::hint::spin_loop();
            }
            #[cfg(feature = "std")]
            std::thread::yield_now();
        }
    }
}
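
// A minimal usage sketch (test-only, not part of the original module): a
// compare-and-swap retry loop that backs off between failed attempts
// instead of hammering the cache line at full speed.
#[cfg(test)]
mod backoff_example {
    use super::Backoff;
    use core::sync::atomic::{AtomicU32, Ordering};

    // Hypothetical helper for illustration: atomically increment `n` with
    // a CAS loop, snoozing whenever the exchange loses a race.
    fn fetch_increment(n: &AtomicU32) -> u32 {
        let mut backoff = Backoff::new();
        loop {
            let current = n.load(Ordering::Relaxed);
            match n.compare_exchange_weak(
                current,
                current + 1,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(prev) => return prev,
                Err(_) => backoff.snooze(),
            }
        }
    }

    #[test]
    fn backoff_cas_loop() {
        let n = AtomicU32::new(0);
        assert_eq!(fetch_increment(&n), 0);
        assert_eq!(n.load(Ordering::Relaxed), 1);
    }
}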

/// Issues a sequentially consistent fence.
///
/// On x86/x86_64, when inline assembly is available and we are not running
/// under Miri or a sanitizer, this dispatches to an architecture-specific
/// implementation; everywhere else it is a plain `fence(SeqCst)`.
//
// `cfg_sel!` is the crate-internal select macro: the first `#[cfg]` arm
// whose predicate holds is compiled, and `#[cfg(else)]` is its fallback.
#[inline]
pub(crate) fn sc_fence() {
    cfg_sel!({
        #[cfg(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        ))]
        {
            crate::imp::x86::sc_fence();
        }
        #[cfg(else)]
        {
            core::sync::atomic::fence(core::sync::atomic::Ordering::SeqCst);
        }
    });
}
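
// A minimal usage sketch (test-only, illustrative): one arm of the classic
// Dekker store-load pattern, where a thread publishes its flag and must
// not have the following load reordered before the store; `sc_fence`
// supplies the required sequentially consistent ordering.
#[cfg(test)]
mod sc_fence_example {
    use core::sync::atomic::{AtomicBool, Ordering};

    #[test]
    fn store_fence_load() {
        static MINE: AtomicBool = AtomicBool::new(false);
        static THEIRS: AtomicBool = AtomicBool::new(false);
        // Publish our intent, then fence before inspecting the other flag.
        MINE.store(true, Ordering::Relaxed);
        super::sc_fence();
        // Single-threaded here, so the other flag is still unset; under
        // contention the fence is what makes both threads' checks sound.
        assert!(!THEIRS.load(Ordering::Relaxed));
    }
}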