#[allow(unused_imports)]
pub(crate) use self::generated::RegSize;
#[macro_use]
#[path = "gen/utils.rs"]
mod generated;
use core::{
mem::{self, ManuallyDrop, MaybeUninit},
sync::atomic::Ordering,
};
/// Compile-time assertion usable in contexts that are generic over const or
/// type parameters (delegates to `const_eval!` with return type `()`).
macro_rules! static_assert {
    // Generic over const parameters: `static_assert!(const N: usize => N > 0)`.
    ($(const $consts:ident: $ty:ty),+ => $($tt:tt)*) => {{
        const_eval!($(const $consts: $ty),+ => () { assert!($($tt)*) })
    }};
    // Generic over `?Sized` type parameters (with optional extra bound).
    ($($ty_params:ident $(: ?Sized $(+ $bounds:path)?)?),+ => $($tt:tt)*) => {{
        const_eval!($($ty_params $(: ?Sized $(+ $bounds)?)?),+ => () { assert!($($tt)*) })
    }};
    // Generic over sized type parameters (with optional bound).
    ($($ty_params:ident $(: $bounds:path)?),+ => $($tt:tt)*) => {{
        const_eval!($($ty_params $(: $bounds)?),+ => () { assert!($($tt)*) })
    }};
}
/// Evaluates `$tt` as a constant of type `$ret` in a context that may refer
/// to the listed const/type parameters, by hoisting the expression into an
/// associated `const` of a throwaway generic struct (so it is evaluated at
/// compile time per instantiation, not at runtime).
macro_rules! const_eval {
    // Generic over const parameters.
    ($(const $const_params:ident: $ty:ty),+ => $ret:ty { $($tt:tt)* }) => {{
        struct _Tmp<$(const $const_params: $ty,)*>;
        impl<$(const $const_params: $ty,)*> _Tmp<$($const_params,)*> {
            const _VAL: $ret = { $($tt)* };
        }
        _Tmp::<$($const_params,)*>::_VAL
    }};
    // Generic over `?Sized` type parameters (PhantomData carries the params).
    ($($ty_params:ident $(: ?Sized $(+ $bounds:path)?)?),+ => $ret:ty { $($tt:tt)* }) => {{
        struct _Tmp<$($ty_params $(: ?Sized $(+ $bounds)?)?),+>(
            $(::core::marker::PhantomData<$ty_params>),+
        );
        impl<$($ty_params $(: ?Sized $(+ $bounds)?)?),+> _Tmp<$($ty_params,)*> {
            const _VAL: $ret = { $($tt)* };
        }
        _Tmp::<$($ty_params,)*>::_VAL
    }};
    // Generic over sized type parameters.
    ($($ty_params:ident $(: $bounds:path)?),+ => $ret:ty { $($tt:tt)* }) => {{
        struct _Tmp<$($ty_params $(: $bounds)?),+>($(::core::marker::PhantomData<$ty_params>),+);
        impl<$($ty_params $(: $bounds)?),+> _Tmp<$($ty_params,)*> {
            const _VAL: $ret = { $($tt)* };
        }
        _Tmp::<$($ty_params,)*>::_VAL
    }};
    // No parameters: a plain block-local constant.
    (=> $ret:ty { $($tt:tt)* }) => {{
        const _VAL: $ret = { $($tt)* };
        _VAL
    }};
}
/// Declares a function as `const fn` when the given cfg is enabled, and as a
/// plain (non-const) `fn` otherwise; attributes and visibility are applied to
/// both emitted copies.
macro_rules! const_fn {
    (
        const_if: #[cfg($($cfg:tt)+)];
        $(#[$($attr:tt)*])*
        $vis:vis const $($rest:tt)*
    ) => {
        // cfg enabled: keep the `const` qualifier.
        #[cfg($($cfg)+)]
        $(#[$($attr)*])*
        $vis const $($rest)*
        // cfg disabled: same item without `const`.
        #[cfg(not($($cfg)+))]
        $(#[$($attr)*])*
        $vis $($rest)*
    };
}
/// cfg-based if / else-if / else selection: expands the body of the first
/// branch whose `#[cfg(...)]` predicate holds; `#[cfg(else)]` marks the
/// unconditional fallback. Implemented by recursion, guarding the remaining
/// branches with `not($cfg)`.
#[allow(unused_macros)]
macro_rules! cfg_sel {
    // Base case: the `else` branch expands unconditionally.
    ({#[cfg(else)] { $($output:tt)* }}) => {
        $($output)*
    };
    // Recursive case: emit this branch under `$cfg`, and recurse on the rest
    // under `not($cfg)`.
    ({
        #[cfg($cfg:meta)]
        { $($output:tt)* }
        $($( $rest:tt )+)?
    }) => {
        #[cfg($cfg)]
        cfg_sel! {{#[cfg(else)] { $($output)* }}}
        $(
            #[cfg(not($cfg))]
            cfg_sel! {{ $($rest)+ }}
        )?
    };
}
/// Function-pointer caching for runtime dispatch (ifunc-style).
///
/// `FUNC` initially points at `init`; the first call evaluates `$init_body`
/// to choose an implementation, caches it in `FUNC` with a `Relaxed` store
/// (racing callers may each run `init` once), and calls it. Subsequent calls
/// load the cached pointer and jump straight to the chosen implementation.
///
/// Note: the expansion calls `transmute` and the resulting function pointer
/// without its own `unsafe` block, so the macro must be invoked inside one.
#[allow(unused_macros)]
#[cfg(not(atomic_maybe_uninit_no_outline_atomics))]
#[cfg(all(target_arch = "x86_64", not(target_env = "sgx")))]
macro_rules! ifunc {
    (unsafe fn($($arg_pat:ident: $arg_ty:ty),* $(,)?) $(-> $ret_ty:ty)? { $($init_body:tt)* }) => {{
        type FnTy = unsafe fn($($arg_ty),*) $(-> $ret_ty)?;
        static FUNC: core::sync::atomic::AtomicPtr<()>
            = core::sync::atomic::AtomicPtr::new(init as *mut ());
        // Cold path taken only until the cache is populated.
        #[cold]
        unsafe fn init($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            let func: FnTy = { $($init_body)* };
            FUNC.store(func as *mut (), core::sync::atomic::Ordering::Relaxed);
            // SAFETY: the selected function's contract is upheld by the
            // caller of the expanded macro (see macro docs).
            unsafe { func($($arg_pat),*) }
        }
        let func = {
            core::mem::transmute::<*mut (), FnTy>(FUNC.load(core::sync::atomic::Ordering::Relaxed))
        };
        func($($arg_pat),*)
    }};
}
/// Defines one or more thin aliases of an unsafe function: each `$new`
/// forwards its declared arguments to `$from`, appending the fixed trailing
/// arguments `$last_args`. Recurses so several aliases can share a single
/// signature header.
#[allow(unused_macros)]
#[cfg(not(atomic_maybe_uninit_no_outline_atomics))]
#[cfg(all(target_arch = "x86_64", not(target_env = "sgx")))]
macro_rules! fn_alias {
    (
        $(#[$($fn_attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
        $(#[$($alias_attr:tt)*])*
        $new:ident = $from:ident($($last_args:tt)*);
        $($rest:tt)*
    ) => {
        $(#[$($fn_attr)*])*
        $(#[$($alias_attr)*])*
        $vis unsafe fn $new($($arg_pat: $arg_ty),*) $(-> $ret_ty)? {
            // SAFETY: same contract as `$from`; upheld by our caller.
            unsafe { $from($($arg_pat,)* $($last_args)*) }
        }
        // Process the remaining alias definitions.
        fn_alias! {
            $(#[$($fn_attr)*])*
            $vis unsafe fn($($arg_pat: $arg_ty),*) $(-> $ret_ty)?;
            $($rest)*
        }
    };
    // Termination: no alias definitions left.
    (
        $(#[$($attr:tt)*])*
        $vis:vis unsafe fn($($arg_pat:ident: $arg_ty:ty),*) $(-> $ret_ty:ty)?;
    ) => {}
}
/// Reinterprets the leading `size_of::<Dst>()` bytes of `src` as a `Dst`
/// (a by-value `transmute_copy` that works in `const fn`).
///
/// # Safety
/// The first `size_of::<Dst>()` bytes of `src` must be a valid `Dst`.
/// `Dst` being no larger than `Src` is enforced at compile time.
#[inline]
#[must_use]
pub(crate) const unsafe fn transmute_copy_by_val<Src, Dst>(src: Src) -> Dst {
    // `repr(C)` puts both fields at offset 0, so reading `dst` reinterprets
    // the leading bytes of `src`.
    #[repr(C)]
    union Reinterpret<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }
    // Compile-time guard: an oversized `Dst` would read past `src`'s bytes.
    static_assert!(Src, Dst => mem::size_of::<Src>() >= mem::size_of::<Dst>());
    // SAFETY: the caller guarantees the leading bytes of `src` form a valid
    // `Dst`; the assertion above rules out an out-of-bounds read.
    let bits = unsafe { Reinterpret::<Src, Dst> { src: ManuallyDrop::new(src) }.dst };
    ManuallyDrop::into_inner(bits)
}
/// Hints to the compiler that `cond` is always true.
///
/// # Safety
/// `cond` must be true: in release builds a false `cond` is immediate
/// undefined behavior (debug builds panic via `unreachable_unchecked`).
#[allow(dead_code)]
#[inline(always)]
#[cfg_attr(debug_assertions, track_caller)]
pub(crate) const unsafe fn assert_unchecked(cond: bool) {
    match cond {
        true => {}
        // SAFETY: the caller guarantees `cond` holds, so this arm can never
        // be taken under the function's contract.
        false => unsafe { unreachable_unchecked() },
    }
}
/// `core::hint::unreachable_unchecked`, except that it panics instead of
/// causing undefined behavior when debug assertions are enabled.
///
/// # Safety
/// This must never actually be reached; in release builds doing so is
/// undefined behavior.
#[inline]
#[cfg_attr(debug_assertions, track_caller)]
pub(crate) const unsafe fn unreachable_unchecked() -> ! {
    // Debug builds: fail loudly so violated invariants are caught in tests.
    #[cfg(debug_assertions)]
    unreachable!();
    // Release builds: the real unreachable hint; reaching it is UB.
    #[cfg(not(debug_assertions))]
    // SAFETY: the caller guarantees this point is unreachable.
    unsafe {
        core::hint::unreachable_unchecked()
    }
}
/// Validates that `order` is a legal memory ordering for an atomic load,
/// panicking on `Release` and `AcqRel`.
#[inline]
#[cfg_attr(debug_assertions, track_caller)]
pub(crate) fn assert_load_ordering(order: Ordering) {
    match order {
        // Orderings with a release component are meaningless for loads.
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release load"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        // `Ordering` is non-exhaustive; reject unknown future variants.
        _ => unreachable!(),
    }
}
/// Validates that `order` is a legal memory ordering for an atomic store,
/// panicking on `Acquire` and `AcqRel`.
#[inline]
#[cfg_attr(debug_assertions, track_caller)]
pub(crate) fn assert_store_ordering(order: Ordering) {
    match order {
        // Orderings with an acquire component are meaningless for stores.
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release store"),
        Ordering::Relaxed | Ordering::Release | Ordering::SeqCst => {}
        // `Ordering` is non-exhaustive; reject unknown future variants.
        _ => unreachable!(),
    }
}
/// Validates the (success, failure) ordering pair for a compare-exchange:
/// any ordering is allowed for success, while the failure ordering must not
/// contain a release component (`Release`/`AcqRel` panic).
#[inline]
#[cfg_attr(debug_assertions, track_caller)]
pub(crate) fn assert_compare_exchange_ordering(success: Ordering, failure: Ordering) {
    // Every current ordering is a valid success ordering; only unknown
    // future variants of the non-exhaustive enum are rejected.
    match success {
        Ordering::Relaxed
        | Ordering::Acquire
        | Ordering::Release
        | Ordering::AcqRel
        | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
    match failure {
        // A failed compare-exchange performs no store, so release semantics
        // are meaningless for the failure ordering.
        Ordering::Release => panic!("there is no such thing as a release failure ordering"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire-release failure ordering"),
        Ordering::Relaxed | Ordering::Acquire | Ordering::SeqCst => {}
        _ => unreachable!(),
    }
}
/// Returns a success ordering at least as strong as both `success` and
/// `failure`, for backends whose compare-exchange takes a single ordering.
#[allow(dead_code)]
#[inline]
pub(crate) fn upgrade_success_ordering(success: Ordering, failure: Ordering) -> Ordering {
    // A SeqCst failure ordering forces SeqCst overall.
    if failure == Ordering::SeqCst {
        return Ordering::SeqCst;
    }
    // An Acquire failure ordering adds an acquire component to the success
    // ordering where it lacks one.
    if failure == Ordering::Acquire {
        return match success {
            Ordering::Relaxed => Ordering::Acquire,
            Ordering::Release => Ordering::AcqRel,
            other => other,
        };
    }
    // Relaxed failure: the success ordering already dominates.
    success
}
/// Debug-asserts the alignment precondition of the atomic operations: the
/// address in `$ptr` must be a multiple of `mem::size_of::<$ty>()`.
#[allow(unused_macros)]
macro_rules! debug_assert_atomic_unsafe_precondition {
    ($ptr:ident, $ty:ident) => {{
        // Bring the `addr()` polyfill into scope on toolchains without the
        // strict-provenance pointer methods.
        #[cfg(atomic_maybe_uninit_no_strict_provenance)]
        #[allow(unused_imports)]
        use crate::utils::ptr::{ConstPtrExt as _, MutPtrExt as _};
        #[allow(clippy::arithmetic_side_effects)]
        {
            // `size_of::<$ty>() - 1` is computed at compile time via
            // `const_eval!`; low address bits must all be zero.
            debug_assert!($ptr.addr() & const_eval!(=> usize { mem::size_of::<$ty>() - 1 }) == 0);
        }
    }};
}
/// Expands to `""` when the first argument is the empty string literal, and
/// to the remaining tokens otherwise.
#[allow(unused_macros)]
macro_rules! if_any {
    ("", $($tt:tt)*) => { "" };
    ($cond:tt, $($tt:tt)*) => {
        $($tt)*
    };
}
/// Implements `AtomicLoad` and `AtomicStore` for `$ty` by delegating to the
/// existing impls for `$base`, transmuting the values between the two types.
/// Identical size and alignment are asserted at compile time.
#[allow(unused_macros)]
macro_rules! delegate_load_store {
    ($ty:ident, $base:ident) => {
        // Layout must match exactly for the pointer casts and transmutes
        // below to be sound.
        const _: () = {
            assert!(mem::size_of::<$ty>() == mem::size_of::<$base>());
            assert!(mem::align_of::<$ty>() == mem::align_of::<$base>());
        };
        impl AtomicLoad for $ty {
            #[inline]
            unsafe fn atomic_load(
                src: *const MaybeUninit<Self>,
                order: Ordering,
            ) -> MaybeUninit<Self> {
                // SAFETY: $ty and $base have identical layout (asserted
                // above); remaining preconditions are the caller's.
                unsafe {
                    mem::transmute::<MaybeUninit<$base>, MaybeUninit<Self>>(
                        <$base as AtomicLoad>::atomic_load(src.cast::<MaybeUninit<$base>>(), order),
                    )
                }
            }
        }
        impl AtomicStore for $ty {
            #[inline]
            unsafe fn atomic_store(
                dst: *mut MaybeUninit<Self>,
                val: MaybeUninit<Self>,
                order: Ordering,
            ) {
                // SAFETY: same layout argument as `atomic_load` above.
                unsafe {
                    <$base as AtomicStore>::atomic_store(
                        dst.cast::<MaybeUninit<$base>>(),
                        mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(val),
                        order,
                    );
                }
            }
        }
    };
}
/// Implements `AtomicSwap` for `$ty` by delegating to the impl for `$base`,
/// transmuting the input and output values. Identical size and alignment are
/// asserted at compile time.
#[allow(unused_macros)]
macro_rules! delegate_swap {
    ($ty:ident, $base:ident) => {
        // Layout must match exactly for the pointer cast and transmutes
        // below to be sound.
        const _: () = {
            assert!(mem::size_of::<$ty>() == mem::size_of::<$base>());
            assert!(mem::align_of::<$ty>() == mem::align_of::<$base>());
        };
        impl AtomicSwap for $ty {
            #[inline]
            unsafe fn atomic_swap(
                dst: *mut MaybeUninit<Self>,
                val: MaybeUninit<Self>,
                order: Ordering,
            ) -> MaybeUninit<Self> {
                // SAFETY: $ty and $base have identical layout (asserted
                // above); remaining preconditions are the caller's.
                unsafe {
                    mem::transmute::<MaybeUninit<$base>, MaybeUninit<Self>>(
                        <$base as AtomicSwap>::atomic_swap(
                            dst.cast::<MaybeUninit<$base>>(),
                            mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(val),
                            order,
                        ),
                    )
                }
            }
        }
    };
}
/// Implements `AtomicCompareExchange` (both strong and weak) for `$ty` by
/// delegating to the impl for `$base`, transmuting the values. Identical
/// size and alignment are asserted at compile time.
#[allow(unused_macros)]
macro_rules! delegate_cas {
    ($ty:ident, $base:ident) => {
        // Layout must match exactly for the pointer casts and transmutes
        // below to be sound.
        const _: () = {
            assert!(mem::size_of::<$ty>() == mem::size_of::<$base>());
            assert!(mem::align_of::<$ty>() == mem::align_of::<$base>());
        };
        impl AtomicCompareExchange for $ty {
            #[inline]
            unsafe fn atomic_compare_exchange(
                dst: *mut MaybeUninit<Self>,
                current: MaybeUninit<Self>,
                new: MaybeUninit<Self>,
                success: Ordering,
                failure: Ordering,
            ) -> (MaybeUninit<Self>, bool) {
                // SAFETY: $ty and $base have identical layout (asserted
                // above); remaining preconditions are the caller's.
                unsafe {
                    let (out, ok) = <$base as AtomicCompareExchange>::atomic_compare_exchange(
                        dst.cast::<MaybeUninit<$base>>(),
                        mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(current),
                        mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(new),
                        success,
                        failure,
                    );
                    (mem::transmute::<MaybeUninit<$base>, MaybeUninit<Self>>(out), ok)
                }
            }
            #[inline]
            unsafe fn atomic_compare_exchange_weak(
                dst: *mut MaybeUninit<Self>,
                current: MaybeUninit<Self>,
                new: MaybeUninit<Self>,
                success: Ordering,
                failure: Ordering,
            ) -> (MaybeUninit<Self>, bool) {
                // SAFETY: same layout argument as the strong variant above.
                unsafe {
                    let (out, ok) = <$base as AtomicCompareExchange>::atomic_compare_exchange_weak(
                        dst.cast::<MaybeUninit<$base>>(),
                        mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(current),
                        mem::transmute::<MaybeUninit<Self>, MaybeUninit<$base>>(new),
                        success,
                        failure,
                    );
                    (mem::transmute::<MaybeUninit<$base>, MaybeUninit<Self>>(out), ok)
                }
            }
        }
    };
}
/// Implements all atomic traits (load/store, swap, compare-exchange) for
/// `$ty` by delegating to the impls for `$base`.
#[allow(unused_macros)]
macro_rules! delegate_all {
    ($ty:ident, $base:ident) => {
        delegate_load_store!($ty, $base);
        delegate_swap!($ty, $base);
        delegate_cas!($ty, $base);
    };
}
/// Invokes a `delegate_*` macro to implement the atomic traits for the
/// signed integer of the same width as the given unsigned base type.
#[allow(unused_macros)]
macro_rules! delegate_signed {
    ($delegate:ident, u8) => {
        $delegate!(i8, u8);
    };
    ($delegate:ident, u16) => {
        $delegate!(i16, u16);
    };
    ($delegate:ident, u32) => {
        $delegate!(i32, u32);
    };
    ($delegate:ident, u64) => {
        $delegate!(i64, u64);
    };
    ($delegate:ident, u128) => {
        $delegate!(i128, u128);
    };
}
/// Invokes a `delegate_*` macro to implement the atomic traits for `isize`
/// and `usize` via the unsigned integer matching the target's pointer width.
#[allow(unused_macros)]
macro_rules! delegate_size {
    ($delegate:ident) => {
        #[cfg(target_pointer_width = "16")]
        $delegate!(isize, u16);
        #[cfg(target_pointer_width = "16")]
        $delegate!(usize, u16);
        #[cfg(target_pointer_width = "32")]
        $delegate!(isize, u32);
        #[cfg(target_pointer_width = "32")]
        $delegate!(usize, u32);
        #[cfg(target_pointer_width = "64")]
        $delegate!(isize, u64);
        #[cfg(target_pointer_width = "64")]
        $delegate!(usize, u64);
        #[cfg(target_pointer_width = "128")]
        $delegate!(isize, u128);
        #[cfg(target_pointer_width = "128")]
        $delegate!(usize, u128);
    };
}
/// A value `v` widened with `N` padding elements of the same type, laid out
/// (`repr(C)`) so that `v` occupies the least-significant position of the
/// widened value: padding precedes `v` on big-endian targets and follows it
/// on little-endian ones.
#[cfg(not(target_pointer_width = "16"))]
#[allow(dead_code)]
#[repr(C)]
struct Extended<T: Copy, const N: usize> {
    #[cfg(target_endian = "big")]
    pad: [MaybeUninit<T>; N],
    v: MaybeUninit<T>,
    #[cfg(target_endian = "little")]
    pad: [MaybeUninit<T>; N],
}
/// Helpers to widen sub-word values (`u8`, `u16`) to `u32`, plus identity
/// equivalents for `u32`/`u64` so callers can use the same function names
/// regardless of operand width.
#[cfg(not(target_pointer_width = "16"))]
#[allow(dead_code)]
pub(crate) mod extend32 {
    macro_rules! extend {
        // Widening case: `$ty` is extended to `$out` via the `Extended` layout.
        ($($ty:ident),* => $out:ident) => {$(
            pub(crate) mod $ty {
                use core::mem::{self, MaybeUninit};
                use super::super::Extended;
                // Number of `$ty`-sized padding elements needed to fill `$out`.
                const LEN: usize
                    = (mem::size_of::<$out>() - mem::size_of::<$ty>()) / mem::size_of::<$ty>();
                // Sanity check: zero-extending all-ones must equal the
                // unsigned `as` extension of `!0`.
                #[allow(clippy::cast_sign_loss)]
                const _: () = assert!(unsafe {
                    zero(MaybeUninit::new(!0)).assume_init() == !(0 as $ty) as $out
                });
                /// Zero-extends `v` to `$out` (padding filled with zeros).
                #[inline(always)]
                pub(crate) const fn zero(v: MaybeUninit<$ty>) -> MaybeUninit<$out> {
                    const PAD: [MaybeUninit<$ty>; LEN] = [MaybeUninit::new(0); LEN];
                    // SAFETY: `Extended<$ty, LEN>` is `repr(C)` and
                    // `(LEN + 1) * size_of::<$ty>() == size_of::<$out>()`.
                    unsafe { mem::transmute(Extended::<$ty, LEN> { v, pad: PAD }) }
                }
                /// Widens `v` to `$out`, leaving the padding uninitialized
                /// (x86/x86_64 only).
                #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
                #[inline(always)]
                pub(crate) const fn uninit(v: MaybeUninit<$ty>) -> MaybeUninit<$out> {
                    const PAD: [MaybeUninit<$ty>; LEN] = [MaybeUninit::uninit(); LEN];
                    // SAFETY: same layout argument as `zero` above.
                    unsafe { mem::transmute(Extended::<$ty, LEN> { v, pad: PAD }) }
                }
                /// Truncates a widened value back to `$ty`, taking the
                /// least-significant position (see `Extended`'s layout).
                #[inline(always)]
                pub(crate) const fn extract(v: MaybeUninit<$out>) -> MaybeUninit<$ty> {
                    // SAFETY: same layout argument as `zero` above.
                    unsafe { mem::transmute::<MaybeUninit<$out>, Extended::<$ty, LEN>>(v).v }
                }
                /// No-op; provided so widened and native widths share an API.
                #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
                #[inline(always)]
                pub(crate) const fn identity(v: MaybeUninit<$ty>) -> MaybeUninit<$ty> {
                    v
                }
            }
        )*};
        // Already at least word-sized: every operation is the identity.
        ($($ty:ident),*) => {$(
            pub(crate) mod $ty {
                use core::mem::MaybeUninit;
                #[inline(always)]
                pub(crate) const fn identity(v: MaybeUninit<$ty>) -> MaybeUninit<$ty> {
                    v
                }
                #[allow(unused_imports)]
                pub(crate) use self::identity as zero;
                #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
                #[allow(unused_imports)]
                pub(crate) use self::{identity as uninit, identity as extract};
            }
        )*};
    }
    extend!(u8, u16 => u32);
    extend!(u32, u64);
}
/// Zero-extension of a 32-bit pointer's address to `u64` (32-bit targets
/// only), filling the upper half with a null pointer.
#[cfg(target_pointer_width = "32")]
#[allow(dead_code)]
pub(crate) mod zero_extend64 {
    use core::{
        mem::{self, MaybeUninit},
        ptr,
    };
    use super::Extended;
    // Sanity check: an all-ones address must extend to `!0_u32 as u64`.
    const _: () = assert!(unsafe {
        ptr(super::ptr::without_provenance_mut(!0)).assume_init() == !0_u32 as u64
    });
    /// Zero-extends pointer `v` to a 64-bit value.
    #[inline]
    pub(crate) const fn ptr(v: *mut ()) -> MaybeUninit<u64> {
        const PAD: [MaybeUninit<*mut ()>; 1] = [MaybeUninit::new(ptr::null_mut()); 1];
        // SAFETY: `Extended<*mut (), 1>` is `repr(C)` and 8 bytes on a
        // 32-bit target, matching `MaybeUninit<u64>`.
        unsafe { mem::transmute(Extended::<*mut (), 1> { v: MaybeUninit::new(v), pad: PAD }) }
    }
}
/// The two native-word halves of a double-width value (used through the
/// `pair!` unions below).
///
/// Field order follows the target's byte order so `lo`/`hi` line up with the
/// low/high halves of the whole value; aarch64/arm/arm64ec always use the
/// lo-first layout even when big-endian — NOTE(review): presumably matching
/// those targets' register-pair conventions; confirm against the arch
/// modules.
#[allow(dead_code)]
#[derive(Clone, Copy)]
#[repr(C)]
pub(crate) struct Pair<T: Copy> {
    #[cfg(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    ))]
    pub(crate) lo: MaybeUninit<T>,
    pub(crate) hi: MaybeUninit<T>,
    #[cfg(not(any(
        target_endian = "little",
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "arm64ec",
    )))]
    pub(crate) lo: MaybeUninit<T>,
}
/// Defines a union `$name` that views a `$whole`-sized value either as a
/// whole or as a `Pair` of `$half` halves; asserts at compile time that
/// `$whole` is exactly twice the size of `$half`.
macro_rules! pair {
    ($name:ident, $whole:ident, $half:ident) => {
        const _: () = assert!(mem::size_of::<$whole>() == mem::size_of::<$half>() * 2);
        #[doc = stringify!($whole)]
        #[allow(dead_code)]
        #[derive(Clone, Copy)]
        #[repr(C)]
        pub(crate) union $name {
            pub(crate) whole: MaybeUninit<$whole>,
            pub(crate) pair: Pair<$half>,
        }
    };
}
pair!(MaybeUninit128, u128, u64);
pair!(MaybeUninit64, u64, u32);
// 16-bit pair, only defined for AVR.
#[cfg(target_arch = "avr")]
pair!(MaybeUninit16, u16, u8);
// Smallest word the sub-word atomic emulation operates on (the cfg excludes
// 16-bit targets from all of this machinery).
#[cfg(not(target_pointer_width = "16"))]
type MinWord = u32;
// Integer type of the shift/mask values returned by
// `create_sub_word_mask_values`: u32 on s390x, `RegSize` (from the generated
// module) elsewhere.
#[cfg(not(target_pointer_width = "16"))]
#[cfg(target_arch = "s390x")]
type RetInt = u32;
#[cfg(not(target_pointer_width = "16"))]
#[cfg(not(target_arch = "s390x"))]
type RetInt = RegSize;
/// Computes the values needed to emulate a sub-word (8-/16-bit) atomic as an
/// operation on the containing aligned `MinWord` (32-bit) word: the aligned
/// word pointer, the bit shift of `T` within that word, and the mask
/// selecting `T`'s bits.
#[cfg(not(target_pointer_width = "16"))]
#[allow(dead_code)]
#[inline]
pub(crate) fn create_sub_word_mask_values<T>(ptr: *mut T) -> (*mut MinWord, RetInt, RetInt) {
    #[cfg(atomic_maybe_uninit_no_strict_provenance)]
    use self::ptr::MutPtrExt as _;
    // On these architectures the shift amount is masked anyway, so the raw
    // address can be used and the value mask left unshifted.
    const SHIFT_MASK: bool = !cfg!(any(
        target_arch = "bpf",
        target_arch = "loongarch32",
        target_arch = "loongarch64",
        target_arch = "mips",
        target_arch = "mips32r6",
        target_arch = "mips64",
        target_arch = "mips64r6",
        target_arch = "riscv32",
        target_arch = "riscv64",
        target_arch = "s390x",
        target_arch = "sparc",
        target_arch = "sparc64",
        target_arch = "xtensa",
    ));
    const PTR_MASK: usize = mem::size_of::<MinWord>() - 1;
    const PTR_INV_MASK: usize = !PTR_MASK;
    let addr = ptr.addr();
    // Pointer to the aligned word containing `ptr`'s bytes.
    let word_ptr = ptr.with_addr(addr & PTR_INV_MASK).cast::<MinWord>();
    // Byte offset of `T` within the word (left unmasked where the hardware
    // masks shift amounts itself).
    let byte_off = if SHIFT_MASK { addr & PTR_MASK } else { addr };
    // Bit offset of `T` within the word. Big-endian targets count from the
    // most significant end (s390x is treated like little-endian here).
    #[allow(clippy::arithmetic_side_effects)]
    let shift = if cfg!(any(target_endian = "little", target_arch = "s390x")) {
        byte_off << 3
    } else {
        (byte_off ^ const_eval!(T => usize { mem::size_of::<MinWord>() - mem::size_of::<T>() })) << 3
    };
    // Mask covering exactly the bits of `T`, positioned at `shift` when the
    // target does not mask it in hardware.
    #[allow(clippy::arithmetic_side_effects)]
    let mut value_mask = const_eval!(T => RetInt { (1 << (mem::size_of::<T>() << 3)) - 1 });
    if SHIFT_MASK {
        value_mask <<= shift;
    }
    #[cfg_attr(
        any(target_arch = "s390x", target_pointer_width = "32"),
        allow(clippy::cast_possible_truncation)
    )]
    {
        (word_ptr, shift as RetInt, value_mask)
    }
}
/// Strict-provenance pointer utilities: re-exports of the standard library
/// functions when available, or polyfills otherwise.
#[allow(dead_code)]
pub(crate) mod ptr {
    cfg_sel!({
        // Toolchain has the strict-provenance API: use it directly.
        #[cfg(not(atomic_maybe_uninit_no_strict_provenance))]
        {
            #[allow(unused_imports)]
            pub(crate) use core::ptr::{with_exposed_provenance, without_provenance_mut};
        }
        // Polyfills for toolchains without the strict-provenance API.
        #[cfg(else)]
        {
            // Creates a pointer from a bare address. Under Miri a transmute
            // is used instead of an `as` cast — NOTE(review): presumably to
            // interact correctly with Miri's provenance tracking; confirm.
            #[inline(always)]
            #[must_use]
            pub(crate) const fn without_provenance_mut<T>(addr: usize) -> *mut T {
                #[cfg(miri)]
                unsafe {
                    core::mem::transmute(addr)
                }
                #[cfg(not(miri))]
                {
                    addr as *mut T
                }
            }
            // Creates a pointer from an address via a plain cast.
            #[inline(always)]
            #[must_use]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn with_exposed_provenance<T>(addr: usize) -> *const T {
                addr as *const T
            }
            // `addr()` polyfill for `*const T`.
            pub(crate) trait ConstPtrExt<T: ?Sized>: Copy {
                #[must_use]
                fn addr(self) -> usize;
            }
            // `addr()` / `with_addr()` polyfills for `*mut T`.
            pub(crate) trait MutPtrExt<T: ?Sized>: Copy {
                #[must_use]
                fn addr(self) -> usize;
                #[must_use]
                fn with_addr(self, addr: usize) -> Self
                where
                    T: Sized;
            }
            impl<T: ?Sized> ConstPtrExt<T> for *const T {
                #[inline(always)]
                #[must_use]
                fn addr(self) -> usize {
                    // Under Miri, transmute rather than cast — NOTE(review):
                    // presumably so the address is read without exposing
                    // provenance; confirm against Miri's docs.
                    #[cfg(miri)]
                    unsafe {
                        core::mem::transmute(self.cast::<()>())
                    }
                    #[cfg(not(miri))]
                    {
                        self.cast::<()>() as usize
                    }
                }
            }
            impl<T: ?Sized> MutPtrExt<T> for *mut T {
                #[inline(always)]
                #[must_use]
                fn addr(self) -> usize {
                    // Same transmute-under-Miri note as `ConstPtrExt::addr`.
                    #[cfg(miri)]
                    unsafe {
                        core::mem::transmute(self.cast::<()>())
                    }
                    #[cfg(not(miri))]
                    {
                        self.cast::<()>() as usize
                    }
                }
                #[inline]
                #[must_use]
                fn with_addr(self, addr: usize) -> Self
                where
                    T: Sized,
                {
                    // Reach the target address by offsetting from the
                    // original pointer, which keeps its provenance.
                    let self_addr = self.addr() as isize;
                    let dest_addr = addr as isize;
                    let offset = dest_addr.wrapping_sub(self_addr);
                    self.cast::<u8>().wrapping_offset(offset).cast::<T>()
                }
            }
        }
    });
}