1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
use std::sync::atomic::AtomicU64;
pub use std::sync::atomic::Ordering;
/// An `f64` that can be shared between threads, stored as its IEEE-754
/// bit pattern inside an [`AtomicU64`].
///
/// Comparisons in [`compare_exchange_weak`](AtomicF64::compare_exchange_weak)
/// (and therefore in the `fetch_add`/`fetch_sub` CAS loops) operate on the
/// *bit patterns*, not on float equality: `0.0` and `-0.0` compare unequal,
/// and a NaN matches only the identical NaN bit pattern.
#[derive(Debug)]
pub struct AtomicF64(AtomicU64);

impl AtomicF64 {
    /// Creates a new atomic float initialized to `x`.
    pub fn new(x: f64) -> Self {
        AtomicF64(AtomicU64::new(x.to_bits()))
    }

    /// Loads the current value with the given memory ordering.
    pub fn load(&self, order: Ordering) -> f64 {
        f64::from_bits(self.0.load(order))
    }

    /// Stores `val` with the given memory ordering.
    pub fn store(&self, val: f64, order: Ordering) {
        self.0.store(val.to_bits(), order);
    }

    /// Consumes the atomic and returns the contained value.
    /// Safe without synchronization: ownership guarantees exclusive access.
    pub fn into_inner(self) -> f64 {
        f64::from_bits(self.0.into_inner())
    }

    /// Atomically adds `val` to the current value, returning the previous
    /// value. Implemented as a CAS retry loop since hardware provides no
    /// atomic float addition. All orderings are `Relaxed`: the update is
    /// atomic but establishes no happens-before with other memory.
    pub fn fetch_add(&self, val: f64) -> f64 {
        let mut old = self.load(Ordering::Relaxed);
        loop {
            let new = old + val;
            // BUGFIX: failure ordering was `SeqCst`, which is stronger than
            // the `Relaxed` success ordering — a runtime panic before Rust
            // 1.64 and inconsistent after. Both sides are now `Relaxed`.
            match self.compare_exchange_weak(old, new, Ordering::Relaxed, Ordering::Relaxed) {
                Ok(_) => return old,
                // Lost the race (or spurious weak-CAS failure): retry with
                // the freshly observed value.
                Err(x) => old = x,
            }
        }
    }

    /// Atomically subtracts `val` from the current value, returning the
    /// previous value. Same CAS-loop strategy and ordering caveats as
    /// [`fetch_add`](AtomicF64::fetch_add).
    pub fn fetch_sub(&self, val: f64) -> f64 {
        let mut old = self.load(Ordering::Relaxed);
        loop {
            let new = old - val;
            // BUGFIX: failure ordering was `SeqCst` (see `fetch_add`).
            match self.compare_exchange_weak(old, new, Ordering::Relaxed, Ordering::Relaxed) {
                Ok(_) => return old,
                Err(x) => old = x,
            }
        }
    }

    /// Stores `new` if the current value's bit pattern equals `current`'s.
    ///
    /// Like [`AtomicU64::compare_exchange_weak`], this may fail spuriously
    /// even when the comparison succeeds, so callers should loop. Returns
    /// `Ok(previous)` on success and `Err(actual)` on failure.
    pub fn compare_exchange_weak(
        &self,
        current: f64,
        new: f64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<f64, f64> {
        // Compare/exchange on the raw bits; see the type-level note about
        // NaN and signed-zero semantics.
        let c = current.to_bits();
        let n = new.to_bits();
        match self.0.compare_exchange_weak(c, n, success, failure) {
            Ok(x) => Ok(f64::from_bits(x)),
            Err(x) => Err(f64::from_bits(x)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{AtomicF64, Ordering};

    /// A freshly constructed atomic should read back its initial value.
    #[test]
    fn new_load() {
        let a = AtomicF64::new(3.0);
        assert_eq!(a.load(Ordering::Relaxed), 3.0);
    }
}