use core::cell::UnsafeCell;
use core::sync::atomic::{
AtomicU64,
Ordering::{self, *},
};
/// A 64-bit floating point value that can be safely shared between threads.
///
/// All atomic operations are performed through an `AtomicU64` view of the
/// same memory (see `as_atomic_bits`), so this type must have exactly the
/// size of `AtomicU64` and at least its alignment. On 32-bit x86 an `f64`
/// is only 4-byte aligned, hence the explicit `align(8)` there; on every
/// other target the layout is transparent over the inner cell.
#[cfg_attr(target_arch = "x86", repr(C, align(8)))]
#[cfg_attr(not(target_arch = "x86"), repr(transparent))]
pub struct AtomicF64(UnsafeCell<f64>);
// SAFETY: every mutation of the inner `UnsafeCell` goes through atomic
// operations on the `AtomicU64` view returned by `as_atomic_bits` (or
// through `&mut self` / by-value methods, which imply exclusive access),
// so the value may be moved to and shared across threads.
unsafe impl Send for AtomicF64 {}
unsafe impl Sync for AtomicF64 {}
// Compile-time layout checks backing the pointer cast in `as_atomic_bits`:
// the two array types below only unify when `AtomicF64` has exactly the
// size of `AtomicU64`...
const _: [(); core::mem::size_of::<AtomicU64>()] = [(); core::mem::size_of::<AtomicF64>()];
// ...and at least its alignment (the boolean cast yields 1 only when the
// condition holds, otherwise the `[(); 1]` annotation fails to match).
const _: [(); 1] =
    [(); (core::mem::align_of::<AtomicF64>() >= core::mem::align_of::<AtomicU64>()) as usize];
impl AtomicF64 {
    /// Creates a new `AtomicF64` initialized to `float`.
    #[inline]
    pub const fn new(float: f64) -> Self {
        Self(UnsafeCell::new(float))
    }

    /// Returns a mutable reference to the underlying float.
    ///
    /// No atomics are needed: `&mut self` guarantees exclusive access, so
    /// no concurrent operation can observe the cell while the reference
    /// is live.
    #[inline]
    pub fn get_mut(&mut self) -> &mut f64 {
        unsafe { &mut *self.0.get() }
    }

    /// Consumes the atomic and returns the contained float.
    #[inline]
    pub fn into_inner(self) -> f64 {
        self.0.into_inner()
    }

    /// Atomically loads the current value with the given memory `ordering`.
    #[inline]
    pub fn load(&self, ordering: Ordering) -> f64 {
        f64::from_bits(self.as_atomic_bits().load(ordering))
    }

    /// Atomically stores `value` with the given memory `ordering`.
    #[inline]
    pub fn store(&self, value: f64, ordering: Ordering) {
        self.as_atomic_bits().store(value.to_bits(), ordering);
    }

    /// Atomically replaces the value with `new_value`, returning the
    /// previous value.
    #[inline]
    pub fn swap(&self, new_value: f64, ordering: Ordering) -> f64 {
        f64::from_bits(self.as_atomic_bits().swap(new_value.to_bits(), ordering))
    }

    /// Bitwise compare-and-swap mirroring the deprecated
    /// `AtomicU64::compare_and_swap`; returns the previous value.
    ///
    /// NOTE: comparison is on bit patterns, not float equality — `-0.0`
    /// does not match `+0.0`, and NaNs match only with an identical payload.
    #[inline]
    #[allow(deprecated)]
    pub fn compare_and_swap(&self, current: f64, new: f64, order: Ordering) -> f64 {
        f64::from_bits(self.as_atomic_bits().compare_and_swap(
            current.to_bits(),
            new.to_bits(),
            order,
        ))
    }

    /// Bitwise compare-exchange (same bit-pattern caveat as
    /// `compare_and_swap`). Returns `Ok(previous)` on success and
    /// `Err(actual)` on failure.
    #[inline]
    pub fn compare_exchange(
        &self,
        current: f64,
        new: f64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f64, f64> {
        convert_result(self.as_atomic_bits().compare_exchange(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Like `compare_exchange`, but may fail spuriously even when the
    /// comparison succeeds (maps to `AtomicU64::compare_exchange_weak`),
    /// which can be more efficient on some platforms when used in a loop.
    #[inline]
    pub fn compare_exchange_weak(
        &self,
        current: f64,
        new: f64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f64, f64> {
        convert_result(self.as_atomic_bits().compare_exchange_weak(
            current.to_bits(),
            new.to_bits(),
            success,
            failure,
        ))
    }

    /// Fetches the value and applies `update` to it in a CAS retry loop,
    /// as `AtomicU64::fetch_update` does; `update` returning `None` aborts
    /// with `Err(previous)`. The closure operates on the decoded `f64`.
    #[inline]
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut update: F,
    ) -> Result<f64, f64>
    where
        F: FnMut(f64) -> Option<f64>,
    {
        let res = self
            .as_atomic_bits()
            .fetch_update(set_order, fetch_order, |prev| {
                update(f64::from_bits(prev)).map(f64::to_bits)
            });
        convert_result(res)
    }

    /// Internal helper: applies an infallible `update` via `fetch_update`
    /// and returns the previous value. `super::fail_order_for` (defined in
    /// the parent module) derives a legal failure ordering from `order`,
    /// and the `unwrap` cannot panic because the closure never returns
    /// `None`.
    #[inline]
    fn update_with<F>(&self, order: Ordering, mut update: F) -> f64
    where
        F: FnMut(f64) -> f64,
    {
        self.fetch_update(order, super::fail_order_for(order), |f| Some(update(f)))
            .unwrap()
    }

    /// Atomically adds `val`, returning the previous value (CAS loop).
    #[inline]
    pub fn fetch_add(&self, val: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f + val)
    }

    /// Atomically subtracts `val`, returning the previous value (CAS loop).
    #[inline]
    pub fn fetch_sub(&self, val: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f - val)
    }

    /// Atomically replaces the value with its absolute value by clearing
    /// the IEEE-754 sign bit with a single `fetch_and`; returns the
    /// previous value. No CAS loop is needed.
    #[inline]
    pub fn fetch_abs(&self, order: Ordering) -> f64 {
        f64::from_bits(
            self.as_atomic_bits()
                .fetch_and(0x7fff_ffff_ffff_ffff, order),
        )
    }

    /// Atomically negates the value by flipping the IEEE-754 sign bit with
    /// a single `fetch_xor`; returns the previous value.
    #[inline]
    pub fn fetch_neg(&self, order: Ordering) -> f64 {
        f64::from_bits(
            self.as_atomic_bits()
                .fetch_xor(0x8000_0000_0000_0000, order),
        )
    }

    /// Atomically stores the minimum of the current value and `value`
    /// (per `f64::min` semantics), returning the previous value (CAS loop).
    #[inline]
    pub fn fetch_min(&self, value: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f.min(value))
    }

    /// Atomically stores the maximum of the current value and `value`
    /// (per `f64::max` semantics), returning the previous value (CAS loop).
    #[inline]
    pub fn fetch_max(&self, value: f64, order: Ordering) -> f64 {
        self.update_with(order, |f| f.max(value))
    }

    /// Reinterprets `self` as an `AtomicU64` over the same memory.
    pub fn as_atomic_bits(&self) -> &AtomicU64 {
        // SAFETY: the module-level const assertions guarantee `AtomicF64`
        // has exactly `AtomicU64`'s size and at least its alignment, and
        // the `UnsafeCell` interior makes shared mutation through the
        // returned reference permissible.
        unsafe { &*(&self.0 as *const _ as *const AtomicU64) }
    }
}
impl Default for AtomicF64 {
#[inline]
fn default() -> Self {
Self::from(0.0)
}
}
impl core::fmt::Debug for AtomicF64 {
    /// Formats the current value (loaded with `SeqCst`) exactly like a
    /// plain `f64`.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        let current = self.load(SeqCst);
        core::fmt::Debug::fmt(&current, f)
    }
}
impl From<f64> for AtomicF64 {
#[inline]
fn from(f: f64) -> Self {
Self::new(f)
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for AtomicF64 {
    /// Serializes the current value (loaded with `SeqCst`) as a plain `f64`.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        let value = self.load(Ordering::SeqCst);
        serializer.serialize_f64(value)
    }
}
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for AtomicF64 {
    /// Deserializes a plain `f64` and wraps it in a fresh atomic.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let value = f64::deserialize(deserializer)?;
        Ok(AtomicF64::new(value))
    }
}
/// Decodes both sides of a bitwise compare-exchange result into `f64`s.
#[inline(always)]
fn convert_result(r: Result<u64, u64>) -> Result<f64, f64> {
    match r {
        Ok(bits) => Ok(f64::from_bits(bits)),
        Err(bits) => Err(f64::from_bits(bits)),
    }
}
impl PartialEq for AtomicF64 {
    /// Compares the two current values with `f64` equality semantics
    /// (`NaN != NaN`, `0.0 == -0.0`). Both loads use `Relaxed` ordering.
    #[inline]
    fn eq(&self, o: &AtomicF64) -> bool {
        let lhs = self.load(Relaxed);
        let rhs = o.load(Relaxed);
        lhs == rhs
    }
}