include!("macros.rs");
#[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
#[path = "../fallback/outline_atomics.rs"]
mod fallback;
#[cfg(not(portable_atomic_no_outline_atomics))]
#[cfg(not(target_env = "sgx"))]
#[cfg_attr(
not(target_feature = "sse"),
cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))
)]
#[cfg(any(
test,
not(all(
any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
target_feature = "avx",
)),
))]
#[path = "../detect/x86_64.rs"]
mod detect;
#[cfg(not(portable_atomic_no_asm))]
use core::arch::asm;
use core::sync::atomic::Ordering;
use crate::utils::{Pair, U128};
/// Debug-asserts that the cmpxchg16b instruction is available at runtime.
///
/// Expands to nothing when cmpxchg16b is statically enabled via target
/// features, since availability is then guaranteed at compile time.
macro_rules! debug_assert_cmpxchg16b {
    () => {
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            debug_assert!(detect::detect().cmpxchg16b());
        }
    };
}
// Only defined when the vmovdqa-based outline-atomics paths below are
// compiled in (SSE statically available, outline atomics not ruled out).
#[cfg(target_feature = "sse")]
#[cfg(not(all(
    not(target_feature = "avx"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
/// Debug-asserts that both cmpxchg16b and AVX are available at runtime.
macro_rules! debug_assert_cmpxchg16b_avx {
    () => {{
        debug_assert_cmpxchg16b!();
        // When AVX is statically enabled, no runtime check is needed.
        #[cfg(not(target_feature = "avx"))]
        {
            debug_assert!(detect::detect().avx());
        }
    }};
}
// Only defined when the vmovdqa-based outline-atomics paths are compiled in.
#[cfg(target_feature = "sse")]
#[cfg(not(all(
    not(target_feature = "avx"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
#[cfg(target_pointer_width = "32")]
/// Asm register modifier for pointer operands: with 32-bit pointers
/// (x32/ILP32 ABI), `:e` selects the 32-bit form of the register.
macro_rules! ptr_modifier {
    () => {
        ":e"
    };
}
// Only defined when the vmovdqa-based outline-atomics paths are compiled in.
#[cfg(target_feature = "sse")]
#[cfg(not(all(
    not(target_feature = "avx"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
#[cfg(target_pointer_width = "64")]
/// Asm register modifier for pointer operands: with 64-bit pointers the
/// full 64-bit register name is used, so no modifier is needed.
macro_rules! ptr_modifier {
    () => {
        ""
    };
}
/// Performs a single `lock cmpxchg16b`: if `*dst == old`, stores `new` and
/// returns `(old, true)`; otherwise returns the current value and `false`.
///
/// `lock`-prefixed instructions are full memory barriers, so this operation
/// is always SeqCst regardless of any requested ordering.
///
/// # Safety
///
/// - `dst` must be valid for reads and writes, and 16-byte aligned.
/// - The CPU must support cmpxchg16b (statically enabled, or checked by the
///   caller via runtime detection).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn cmpxchg16b(dst: *mut u128, old: u128, new: u128) -> (u128, bool) {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let r: u8;
        // cmpxchg16b compares *dst against rdx:rax and, on success, stores
        // rcx:rbx; split the 128-bit values into lo/hi halves accordingly.
        let old = U128 { whole: old };
        let new = U128 { whole: new };
        let (prev_lo, prev_hi);
        macro_rules! cmpxchg16b {
            ($dst:tt) => {
                // rbx cannot be used as an inline-asm operand (reserved by
                // LLVM), so `new.lo` travels in r8 and is exchanged into rbx
                // around the instruction, restoring rbx afterwards.
                // `setne cl` records failure (ZF clear => r = 1).
                asm!(
                    "xchg r8, rbx", concat!("lock cmpxchg16b xmmword ptr [", $dst, "]"),
                    "setne cl",
                    "mov rbx, r8", inout("r8") new.pair.lo => _,
                    in("rcx") new.pair.hi,
                    inout("rax") old.pair.lo => prev_lo,
                    inout("rdx") old.pair.hi => prev_hi,
                    in($dst) dst,
                    lateout("cl") r,
                    // Not `preserves_flags`: cmpxchg16b writes ZF.
                    options(nostack),
                )
            };
        }
        // The pointer register differs per target: 32-bit pointer widths
        // (x32 ABI) need the 32-bit register name, and Windows uses a
        // different scratch register — presumably to suit its calling
        // convention; NOTE(review): confirm the exact rationale.
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("esi");
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rsi");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("r11d");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("r11");
        // Help the optimizer: `r` comes from setne, so it is 0 or 1.
        crate::utils::assert_unchecked(r == 0 || r == 1);
        (U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole, r == 0)
    }
}
// Compiled only when the vmovdqa outline-atomics path is possible.
#[cfg(target_feature = "sse")]
#[cfg(not(all(
    not(target_feature = "avx"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
/// 128-bit atomic load via an aligned `vmovdqa`.
///
/// Relies on the vendor guarantee that 16-byte aligned 128-bit SSE/AVX
/// loads are atomic on AVX-capable CPUs — hence the runtime AVX check in
/// the debug assertion and in `load_store_detect!`.
///
/// # Safety
///
/// - `src` must be valid for reads and 16-byte aligned.
/// - The CPU must support AVX (and cmpxchg16b, per this module's contract).
#[target_feature(enable = "avx")]
#[inline]
unsafe fn _atomic_load_vmovdqa(src: *mut u128) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_cmpxchg16b_avx!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let out: core::arch::x86_64::__m128i;
        asm!(
            concat!("vmovdqa {out}, xmmword ptr [{src", ptr_modifier!(), "}]"),
            src = in(reg) src,
            out = out(xmm_reg) out,
            options(nostack, preserves_flags),
        );
        // Reinterpret the 128-bit vector register as u128.
        core::mem::transmute(out)
    }
}
// Compiled only when the vmovdqa outline-atomics path is possible.
#[cfg(target_feature = "sse")]
#[cfg(not(all(
    not(target_feature = "avx"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
/// 128-bit atomic store via an aligned `vmovdqa`.
///
/// A plain store suffices for Relaxed/Release (x86 stores do not need extra
/// fencing for release semantics); SeqCst additionally needs a StoreLoad
/// barrier, provided here by an `xchg` on a stack dummy (implicitly locked,
/// acting as a full fence).
///
/// # Safety
///
/// - `dst` must be valid for writes and 16-byte aligned.
/// - The CPU must support AVX.
/// - `order` must be Relaxed, Release, or SeqCst.
#[target_feature(enable = "avx")]
#[inline]
unsafe fn _atomic_store_vmovdqa(dst: *mut u128, val: u128, order: Ordering) {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b_avx!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let val: core::arch::x86_64::__m128i = core::mem::transmute(val);
        match order {
            Ordering::Relaxed | Ordering::Release => {
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    options(nostack, preserves_flags),
                );
            }
            Ordering::SeqCst => {
                // Dummy stack slot for the fencing xchg below.
                let p = core::cell::UnsafeCell::new(core::mem::MaybeUninit::<u64>::uninit());
                asm!(
                    concat!("vmovdqa xmmword ptr [{dst", ptr_modifier!(), "}], {val}"),
                    // xchg with memory is implicitly locked: full barrier.
                    concat!("xchg qword ptr [{p", ptr_modifier!(), "}], {tmp}"),
                    dst = in(reg) dst,
                    val = in(xmm_reg) val,
                    p = in(reg) p.get(),
                    tmp = out(reg) _,
                    options(nostack, preserves_flags),
                );
            }
            // Acquire/AcqRel are invalid orderings for a store.
            _ => unreachable!(),
        }
    }
}
// Compiled only when runtime dispatch for load/store is actually needed
// (i.e. neither the static-AVX nor the static-cmpxchg16b-only fast paths
// fully decide the implementation at compile time).
#[cfg(not(all(
    any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
    target_feature = "avx",
)))]
#[cfg(not(all(
    any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
    any(portable_atomic_no_outline_atomics, target_env = "sgx", not(target_feature = "sse")),
)))]
/// Expands to an expression selecting the best load/store implementation
/// for the current CPU: vmovdqa when AVX is available, else cmpxchg16b,
/// else the (non-lock-free) fallback. Used inside `ifunc!` so detection
/// runs once and the chosen function pointer is cached.
macro_rules! load_store_detect {
    (
        vmovdqa = $vmovdqa:ident
        cmpxchg16b = $cmpxchg16b:ident
        fallback = $fallback:ident
    ) => {{
        let cpuid = detect::detect();
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        {
            // cmpxchg16b support is unknown at compile time: check it first.
            if cpuid.cmpxchg16b() {
                // Statically-known AVX implies vmovdqa is usable.
                #[cfg(target_feature = "avx")]
                {
                    $vmovdqa
                }
                #[cfg(not(target_feature = "avx"))]
                {
                    // The vmovdqa path is only compiled when SSE is enabled.
                    #[cfg(target_feature = "sse")]
                    {
                        if cpuid.avx() { $vmovdqa } else { $cmpxchg16b }
                    }
                    #[cfg(not(target_feature = "sse"))]
                    {
                        $cmpxchg16b
                    }
                }
            } else {
                // No cmpxchg16b: fall back to the locking implementation.
                fallback::$fallback
            }
        }
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        {
            // cmpxchg16b is guaranteed; only AVX needs runtime detection.
            if cpuid.avx() { $vmovdqa } else { $cmpxchg16b }
        }
    }};
}
/// 128-bit atomic load entry point.
///
/// Dispatch, decided as much as possible at compile time:
/// - cmpxchg16b + AVX statically enabled: always use vmovdqa.
/// - cmpxchg16b statically enabled but outline atomics unavailable:
///   always use cmpxchg16b.
/// - otherwise: runtime detection via `ifunc!` (cached function pointer).
///
/// The ordering argument is ignored: all paths are at least SeqCst-capable
/// loads on x86.
///
/// # Safety
///
/// `src` must be valid for reads and writes (the cmpxchg16b path writes —
/// see `_atomic_load_cmpxchg16b`) and 16-byte aligned.
#[inline]
unsafe fn atomic_load(src: *mut u128, _order: Ordering) -> u128 {
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        target_feature = "avx",
    ))]
    // SAFETY: the caller must uphold the safety contract; AVX is statically enabled.
    unsafe {
        _atomic_load_vmovdqa(src)
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        target_feature = "avx",
    )))]
    {
        #[cfg(all(
            any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
            any(
                portable_atomic_no_outline_atomics,
                target_env = "sgx",
                not(target_feature = "sse")
            ),
        ))]
        // SAFETY: the caller must uphold the safety contract; cmpxchg16b is
        // statically enabled.
        unsafe {
            _atomic_load_cmpxchg16b(src)
        }
        #[cfg(not(all(
            any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
            any(
                portable_atomic_no_outline_atomics,
                target_env = "sgx",
                not(target_feature = "sse")
            ),
        )))]
        // SAFETY: the caller must uphold the safety contract; the selected
        // function is only used when the CPU supports it.
        unsafe {
            ifunc!(unsafe fn(src: *mut u128) -> u128 {
                load_store_detect! {
                    vmovdqa = _atomic_load_vmovdqa
                    cmpxchg16b = _atomic_load_cmpxchg16b
                    fallback = atomic_load_seqcst
                }
            })
        }
    }
}
/// 128-bit atomic load implemented as `lock cmpxchg16b` with zero for both
/// the expected and new values.
///
/// If `*src == 0` the CAS "succeeds" and rewrites 0 (no visible change);
/// otherwise it fails — either way rdx:rax receives the current value.
/// Note this means the load requires *write* access to `src`.
///
/// # Safety
///
/// - `src` must be valid for reads and writes, and 16-byte aligned.
/// - The CPU must support cmpxchg16b.
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn _atomic_load_cmpxchg16b(src: *mut u128) -> u128 {
    debug_assert!(src as usize % 16 == 0);
    debug_assert_cmpxchg16b!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let (out_lo, out_hi);
        macro_rules! cmpxchg16b {
            ($dst:tt, $save:tt) => {
                // rbx is LLVM-reserved: save it in $save, zero it for the
                // "new" value, and restore it after the instruction.
                asm!(
                    concat!("mov ", $save, ", rbx"), "xor rbx, rbx", concat!("lock cmpxchg16b xmmword ptr [", $dst, "]"),
                    concat!("mov rbx, ", $save), out($save) _,
                    in("rcx") 0_u64,
                    inout("rax") 0_u64 => out_lo,
                    inout("rdx") 0_u64 => out_hi,
                    in($dst) src,
                    // Not `preserves_flags`: cmpxchg16b writes ZF.
                    options(nostack),
                )
            };
        }
        // Register choices vary per pointer width and OS; see `cmpxchg16b`
        // above for the rationale.
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi", "rsi");
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi", "rsi");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("r9d", "r8");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("r9", "r8");
        U128 { pair: Pair { lo: out_lo, hi: out_hi } }.whole
    }
}
/// 128-bit atomic store entry point.
///
/// Mirrors `atomic_load`'s dispatch: static AVX => vmovdqa, static
/// cmpxchg16b without outline atomics => cmpxchg16b, otherwise runtime
/// detection via `ifunc!`. Unlike loads, the ordering matters here (SeqCst
/// stores need a trailing full barrier on the vmovdqa path), so separate
/// seqcst/non-seqcst aliases are dispatched.
///
/// # Safety
///
/// - `dst` must be valid for reads and writes, and 16-byte aligned.
/// - `order` must be Relaxed, Release, or SeqCst.
#[inline]
unsafe fn atomic_store(dst: *mut u128, val: u128, order: Ordering) {
    #[cfg(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        target_feature = "avx",
    ))]
    // SAFETY: the caller must uphold the safety contract; AVX is statically enabled.
    unsafe {
        _atomic_store_vmovdqa(dst, val, order);
    }
    #[cfg(not(all(
        any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
        target_feature = "avx",
    )))]
    {
        #[cfg(all(
            any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
            any(
                portable_atomic_no_outline_atomics,
                target_env = "sgx",
                not(target_feature = "sse")
            ),
        ))]
        // SAFETY: the caller must uphold the safety contract; cmpxchg16b is
        // statically enabled. The lock-prefixed store is always SeqCst, so
        // the requested (weaker or equal) ordering can be ignored.
        unsafe {
            let _ = order;
            _atomic_store_cmpxchg16b(dst, val);
        }
        #[cfg(not(all(
            any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"),
            any(
                portable_atomic_no_outline_atomics,
                target_env = "sgx",
                not(target_feature = "sse")
            ),
        )))]
        // SAFETY: the caller must uphold the safety contract; the selected
        // function is only used when the CPU supports it.
        unsafe {
            // Bind the two orderings the vmovdqa store distinguishes into
            // unary aliases, so both ifunc slots share one signature.
            // Relaxed is handled by the Release alias (stronger, still correct).
            #[cfg(target_feature = "sse")]
            fn_alias! {
                #[target_feature(enable = "avx")]
                unsafe fn(dst: *mut u128, val: u128);
                _atomic_store_vmovdqa_non_seqcst = _atomic_store_vmovdqa(Ordering::Release);
                _atomic_store_vmovdqa_seqcst = _atomic_store_vmovdqa(Ordering::SeqCst);
            }
            match order {
                Ordering::Relaxed | Ordering::Release => {
                    ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                        load_store_detect! {
                            vmovdqa = _atomic_store_vmovdqa_non_seqcst
                            cmpxchg16b = _atomic_store_cmpxchg16b
                            fallback = atomic_store_non_seqcst
                        }
                    });
                }
                Ordering::SeqCst => {
                    ifunc!(unsafe fn(dst: *mut u128, val: u128) {
                        load_store_detect! {
                            vmovdqa = _atomic_store_vmovdqa_seqcst
                            cmpxchg16b = _atomic_store_cmpxchg16b
                            fallback = atomic_store_seqcst
                        }
                    });
                }
                // Acquire/AcqRel are invalid orderings for a store.
                _ => unreachable!(),
            }
        }
    }
}
/// 128-bit atomic store implemented via `lock cmpxchg16b`.
///
/// The previous value returned by the swap is discarded; since every
/// `lock`-prefixed instruction is a full barrier, a SeqCst swap doubles as
/// a SeqCst store.
///
/// # Safety
///
/// The caller must uphold the safety contract of `atomic_swap_cmpxchg16b`
/// (valid, 16-byte aligned `dst`, and cmpxchg16b support).
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn _atomic_store_cmpxchg16b(dst: *mut u128, val: u128) {
    // SAFETY: the caller must uphold the safety contract.
    let _prev = unsafe { atomic_swap_cmpxchg16b(dst, val, Ordering::SeqCst) };
}
/// 128-bit compare-and-exchange: on success returns `Ok(old value)`, on
/// failure `Err(current value)`.
///
/// The success/failure orderings are ignored because `lock cmpxchg16b` is
/// always a full barrier (SeqCst), which satisfies any requested ordering.
///
/// # Safety
///
/// `dst` must be valid for reads and writes, and 16-byte aligned.
#[inline]
unsafe fn atomic_compare_exchange(
    dst: *mut u128,
    old: u128,
    new: u128,
    _success: Ordering,
    _failure: Ordering,
) -> Result<u128, u128> {
    // Statically guaranteed cmpxchg16b: call the instruction directly.
    #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
    // SAFETY: the caller must uphold the safety contract.
    let (prev, ok) = unsafe { cmpxchg16b(dst, old, new) };
    // Otherwise: runtime detection, cached by `ifunc!`, with a locking
    // fallback for CPUs without cmpxchg16b.
    #[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
    // SAFETY: the caller must uphold the safety contract.
    let (prev, ok) = unsafe {
        ifunc!(unsafe fn(dst: *mut u128, old: u128, new: u128) -> (u128, bool) {
            if detect::detect().cmpxchg16b() {
                cmpxchg16b
            } else {
                fallback::atomic_compare_exchange_seqcst
            }
        })
    };
    if ok { Ok(prev) } else { Err(prev) }
}
// x86 CAS (lock cmpxchg16b) does not fail spuriously, so the strong CAS
// also serves as the weak variant.
use self::atomic_compare_exchange as atomic_compare_exchange_weak;
/// 128-bit atomic swap: stores `val` and returns the previous value, via a
/// `lock cmpxchg16b` retry loop. Always SeqCst (lock prefix), so the
/// ordering argument is ignored.
///
/// # Safety
///
/// - `dst` must be valid for reads and writes, and 16-byte aligned.
/// - The CPU must support cmpxchg16b.
#[cfg_attr(
    not(portable_atomic_no_cmpxchg16b_target_feature),
    target_feature(enable = "cmpxchg16b")
)]
#[inline]
unsafe fn atomic_swap_cmpxchg16b(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
    debug_assert!(dst as usize % 16 == 0);
    debug_assert_cmpxchg16b!();
    // SAFETY: the caller must uphold the safety contract.
    unsafe {
        let val = U128 { whole: val };
        let (mut prev_lo, mut prev_hi);
        macro_rules! cmpxchg16b {
            ($dst:tt, $save:tt) => {
                // `xchg $save, rbx` both saves rbx (LLVM-reserved) and loads
                // val.lo into rbx in one instruction. The two plain qword
                // loads seed rdx:rax with a guess at the current value; they
                // may tear, but then cmpxchg16b simply fails, reloads
                // rdx:rax atomically, and the loop (`2:` / `jne 2b`) retries
                // until the exchange succeeds.
                asm!(
                    concat!("xchg ", $save, ", rbx"), concat!("mov rax, qword ptr [", $dst, "]"),
                    concat!("mov rdx, qword ptr [", $dst, " + 8]"),
                    "2:",
                    concat!("lock cmpxchg16b xmmword ptr [", $dst, "]"),
                    "jne 2b",
                    concat!("mov rbx, ", $save), inout($save) val.pair.lo => _,
                    in("rcx") val.pair.hi,
                    out("rax") prev_lo,
                    out("rdx") prev_hi,
                    in($dst) dst,
                    // Not `preserves_flags`: cmpxchg16b writes ZF.
                    options(nostack),
                )
            };
        }
        // Register choices vary per pointer width and OS; see `cmpxchg16b`
        // above for the rationale.
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("edi", "rsi");
        #[cfg(not(windows))]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("rdi", "rsi");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "32")]
        cmpxchg16b!("r9d", "r8");
        #[cfg(windows)]
        #[cfg(target_pointer_width = "64")]
        cmpxchg16b!("r9", "r8");
        U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
    }
}
/// Defines a 128-bit binary read-modify-write operation (takes `dst` and a
/// `val` operand) as a `lock cmpxchg16b` retry loop.
///
/// The `$op` asm fragment runs once per loop iteration and must compute the
/// new value into rcx:rbx from the previous value in rdx:rax and the
/// operand in r9:r8. The resulting function is always SeqCst.
macro_rules! atomic_rmw_cas_3 {
    ($name:ident, $($op:tt)*) => {
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, val: u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();
            // SAFETY: the caller must uphold the safety contract.
            unsafe {
                let val = U128 { whole: val };
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($dst:tt, $save:tt) => {
                        // Save rbx (LLVM-reserved) in $save; seed rdx:rax
                        // with plain loads (a torn read only causes one
                        // extra failed CAS); loop until cmpxchg16b succeeds.
                        asm!(
                            concat!("mov ", $save, ", rbx"), concat!("mov rax, qword ptr [", $dst, "]"),
                            concat!("mov rdx, qword ptr [", $dst, " + 8]"),
                            "2:",
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $dst, "]"),
                            "jne 2b",
                            concat!("mov rbx, ", $save), out($save) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($dst) dst,
                            in("r8") val.pair.lo,
                            in("r9") val.pair.hi,
                            // Not `preserves_flags`: the ops and cmpxchg16b
                            // write flags.
                            options(nostack),
                        )
                    };
                }
                // Register choices vary per pointer width and OS; see
                // `cmpxchg16b` above for the rationale.
                #[cfg(not(windows))]
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi", "r10");
                #[cfg(not(windows))]
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi", "r10");
                #[cfg(windows)]
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("r10d", "r11");
                #[cfg(windows)]
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("r10", "r11");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}
/// Defines a 128-bit unary read-modify-write operation (takes only `dst`)
/// as a `lock cmpxchg16b` retry loop.
///
/// The `$op` asm fragment must compute the new value into rcx:rbx from the
/// previous value in rdx:rax. The resulting function is always SeqCst.
macro_rules! atomic_rmw_cas_2 {
    ($name:ident, $($op:tt)*) => {
        #[cfg_attr(
            not(portable_atomic_no_cmpxchg16b_target_feature),
            target_feature(enable = "cmpxchg16b")
        )]
        #[inline]
        unsafe fn $name(dst: *mut u128, _order: Ordering) -> u128 {
            debug_assert!(dst as usize % 16 == 0);
            debug_assert_cmpxchg16b!();
            // SAFETY: the caller must uphold the safety contract.
            unsafe {
                let (mut prev_lo, mut prev_hi);
                macro_rules! cmpxchg16b {
                    ($dst:tt, $save:tt) => {
                        // Save rbx (LLVM-reserved) in $save; seed rdx:rax
                        // with plain loads (a torn read only causes one
                        // extra failed CAS); loop until cmpxchg16b succeeds.
                        asm!(
                            concat!("mov ", $save, ", rbx"), concat!("mov rax, qword ptr [", $dst, "]"),
                            concat!("mov rdx, qword ptr [", $dst, " + 8]"),
                            "2:",
                            $($op)*
                            concat!("lock cmpxchg16b xmmword ptr [", $dst, "]"),
                            "jne 2b",
                            concat!("mov rbx, ", $save), out($save) _,
                            out("rcx") _,
                            out("rax") prev_lo,
                            out("rdx") prev_hi,
                            in($dst) dst,
                            // Not `preserves_flags`: the ops and cmpxchg16b
                            // write flags.
                            options(nostack),
                        )
                    };
                }
                // Register choices vary per pointer width and OS; see
                // `cmpxchg16b` above for the rationale.
                #[cfg(not(windows))]
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("edi", "rsi");
                #[cfg(not(windows))]
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("rdi", "rsi");
                #[cfg(windows)]
                #[cfg(target_pointer_width = "32")]
                cmpxchg16b!("r9d", "r8");
                #[cfg(windows)]
                #[cfg(target_pointer_width = "64")]
                cmpxchg16b!("r9", "r8");
                U128 { pair: Pair { lo: prev_lo, hi: prev_hi } }.whole
            }
        }
    };
}
// The RMW bodies below follow the atomic_rmw_cas_3/_2 register contract:
// previous value in rdx:rax, operand (for the _3 variants) in r9:r8,
// new value produced in rcx:rbx.

// 128-bit add: low halves with `add`, high halves with `adc` to propagate
// the carry.
atomic_rmw_cas_3! {
    atomic_add_cmpxchg16b,
    "mov rbx, rax",
    "add rbx, r8",
    "mov rcx, rdx",
    "adc rcx, r9",
}
// 128-bit sub: low halves with `sub`, high halves with `sbb` to propagate
// the borrow.
atomic_rmw_cas_3! {
    atomic_sub_cmpxchg16b,
    "mov rbx, rax",
    "sub rbx, r8",
    "mov rcx, rdx",
    "sbb rcx, r9",
}
// Bitwise AND, applied independently to each half.
atomic_rmw_cas_3! {
    atomic_and_cmpxchg16b,
    "mov rbx, rax",
    "and rbx, r8",
    "mov rcx, rdx",
    "and rcx, r9",
}
// NAND: AND each half, then complement.
atomic_rmw_cas_3! {
    atomic_nand_cmpxchg16b,
    "mov rbx, rax",
    "and rbx, r8",
    "not rbx",
    "mov rcx, rdx",
    "and rcx, r9",
    "not rcx",
}
// Bitwise OR, applied independently to each half.
atomic_rmw_cas_3! {
    atomic_or_cmpxchg16b,
    "mov rbx, rax",
    "or rbx, r8",
    "mov rcx, rdx",
    "or rcx, r9",
}
// Bitwise XOR, applied independently to each half.
atomic_rmw_cas_3! {
    atomic_xor_cmpxchg16b,
    "mov rbx, rax",
    "xor rbx, r8",
    "mov rcx, rdx",
    "xor rcx, r9",
}
// Bitwise NOT of both halves.
atomic_rmw_cas_2! {
    atomic_not_cmpxchg16b,
    "mov rbx, rax",
    "not rbx",
    "mov rcx, rdx",
    "not rcx",
}
// 128-bit two's-complement negate: neg the low half, then 0 - hi - borrow.
atomic_rmw_cas_2! {
    atomic_neg_cmpxchg16b,
    "mov rbx, rax",
    "neg rbx",
    "mov rcx, 0",
    "sbb rcx, rdx",
}
// The min/max variants all share one shape: `cmp`+`sbb` performs a 128-bit
// compare of val (r9:r8) against prev (rdx:rax), setting the flags as for
// `val - prev` (rcx is clobbered as scratch, then reloaded), and cmovCC
// picks prev when the condition holds, val otherwise.
//
// Signed max: take prev when val < prev (cmovl).
atomic_rmw_cas_3! {
    atomic_max_cmpxchg16b,
    "cmp r8, rax",
    "mov rcx, r9",
    "sbb rcx, rdx",
    "mov rcx, r9",
    "cmovl rcx, rdx",
    "mov rbx, r8",
    "cmovl rbx, rax",
}
// Unsigned max: take prev when val < prev (cmovb).
atomic_rmw_cas_3! {
    atomic_umax_cmpxchg16b,
    "cmp r8, rax",
    "mov rcx, r9",
    "sbb rcx, rdx",
    "mov rcx, r9",
    "cmovb rcx, rdx",
    "mov rbx, r8",
    "cmovb rbx, rax",
}
// Signed min: take prev when val >= prev (cmovge).
atomic_rmw_cas_3! {
    atomic_min_cmpxchg16b,
    "cmp r8, rax",
    "mov rcx, r9",
    "sbb rcx, rdx",
    "mov rcx, r9",
    "cmovge rcx, rdx",
    "mov rbx, r8",
    "cmovge rbx, rax",
}
// Unsigned min: take prev when val >= prev (cmovae).
atomic_rmw_cas_3! {
    atomic_umin_cmpxchg16b,
    "cmp r8, rax",
    "mov rcx, r9",
    "sbb rcx, rdx",
    "mov rcx, r9",
    "cmovae rcx, rdx",
    "mov rbx, r8",
    "cmovae rbx, rax",
}
/// Wires a public RMW entry point to its cmpxchg16b implementation.
///
/// When cmpxchg16b is statically enabled the entry point is a plain alias.
/// Otherwise it becomes a function that, via `ifunc!` (detection cached on
/// first call), dispatches to a SeqCst-bound alias of the cmpxchg16b
/// implementation or to the locking fallback.
macro_rules! select_atomic_rmw {
    (
        unsafe fn $name:ident($($arg:tt)*) $(-> $ret_ty:ty)?;
        cmpxchg16b = $cmpxchg16b_fn:ident;
        fallback = $seqcst_fallback_fn:ident;
    ) => {
        // Static cmpxchg16b: the implementation is used directly.
        #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
        use self::$cmpxchg16b_fn as $name;
        #[cfg(not(any(
            target_feature = "cmpxchg16b",
            portable_atomic_target_feature = "cmpxchg16b",
        )))]
        #[inline]
        unsafe fn $name($($arg)*, _order: Ordering) $(-> $ret_ty)? {
            // Pin the ordering to SeqCst so the ifunc slot has a uniform
            // signature without an Ordering parameter.
            fn_alias! {
                #[cfg_attr(
                    not(portable_atomic_no_cmpxchg16b_target_feature),
                    target_feature(enable = "cmpxchg16b")
                )]
                unsafe fn($($arg)*) $(-> $ret_ty)?;
                cmpxchg16b_seqcst_fn = $cmpxchg16b_fn(Ordering::SeqCst);
            }
            // SAFETY: the caller must uphold the safety contract; the
            // cmpxchg16b path is only chosen when the CPU supports it.
            unsafe {
                ifunc!(unsafe fn($($arg)*) $(-> $ret_ty)? {
                    if detect::detect().cmpxchg16b() {
                        cmpxchg16b_seqcst_fn
                    } else {
                        fallback::$seqcst_fallback_fn
                    }
                })
            }
        }
    };
}
// Instantiate the public RMW entry points, pairing each cmpxchg16b
// implementation defined above with its locking fallback.
select_atomic_rmw! {
    unsafe fn atomic_swap(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_swap_cmpxchg16b;
    fallback = atomic_swap_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_add(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_add_cmpxchg16b;
    fallback = atomic_add_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_sub(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_sub_cmpxchg16b;
    fallback = atomic_sub_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_and(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_and_cmpxchg16b;
    fallback = atomic_and_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_nand(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_nand_cmpxchg16b;
    fallback = atomic_nand_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_or(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_or_cmpxchg16b;
    fallback = atomic_or_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_xor(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_xor_cmpxchg16b;
    fallback = atomic_xor_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_max(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_max_cmpxchg16b;
    fallback = atomic_max_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_umax(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umax_cmpxchg16b;
    fallback = atomic_umax_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_min(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_min_cmpxchg16b;
    fallback = atomic_min_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_umin(dst: *mut u128, val: u128) -> u128;
    cmpxchg16b = atomic_umin_cmpxchg16b;
    fallback = atomic_umin_seqcst;
}
// The unary ops take no `val` operand.
select_atomic_rmw! {
    unsafe fn atomic_not(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_not_cmpxchg16b;
    fallback = atomic_not_seqcst;
}
select_atomic_rmw! {
    unsafe fn atomic_neg(dst: *mut u128) -> u128;
    cmpxchg16b = atomic_neg_cmpxchg16b;
    fallback = atomic_neg_seqcst;
}
/// Returns whether 128-bit atomic operations are lock-free on the current
/// CPU.
///
/// A compile-time `true` when cmpxchg16b is statically enabled; otherwise
/// answered by runtime CPU feature detection.
#[inline]
fn is_lock_free() -> bool {
    #[cfg(not(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b")))]
    {
        // Availability is unknown at compile time: ask the CPU.
        detect::detect().cmpxchg16b()
    }
    #[cfg(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"))]
    {
        // The target guarantees cmpxchg16b, so this is always lock-free.
        true
    }
}
// Lock-freedom is a compile-time guarantee only when cmpxchg16b is
// statically enabled; otherwise it depends on the CPU (see `is_lock_free`).
const IS_ALWAYS_LOCK_FREE: bool =
    cfg!(any(target_feature = "cmpxchg16b", portable_atomic_target_feature = "cmpxchg16b"));
// Instantiate the public 128-bit atomic types from the shared macro,
// wiring in the signed/unsigned min/max variants.
atomic128!(AtomicI128, i128, atomic_max, atomic_min);
atomic128!(AtomicU128, u128, atomic_umax, atomic_umin);
#[cfg(test)]
mod tests {
    use super::*;
    // Shared test macros (defined elsewhere in the crate) that exercise the
    // 128-bit atomic types generated above.
    test_atomic_int!(i128);
    test_atomic_int!(u128);
    stress_test!(u128);
}