// kayrx/timer/atomic.rs
1//! Implementation of an atomic u64 cell. On 64 bit platforms, this is a
2//! re-export of `AtomicU64`. On 32 bit platforms, this is implemented using a
3//! `Mutex`.
4
5pub(crate) use self::imp::AtomicU64;
6
// `AtomicU64` is only available when the target supports 64-bit atomics
// (`target_has_atomic` >= 64). Until the `cfg_target_has_atomic` feature
// stabilizes and `#[cfg(target_has_atomic = "64")]` can be written directly,
// the set of targets lacking 64-bit atomics is approximated by this
// hand-maintained architecture list.
#[cfg(not(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc")))]
mod imp {
    // Native path: the standard library type is used as-is.
    pub(crate) use std::sync::atomic::AtomicU64;
}
14
#[cfg(any(target_arch = "arm", target_arch = "mips", target_arch = "powerpc"))]
mod imp {
    use std::sync::atomic::Ordering;
    use std::sync::Mutex;

    /// Mutex-backed stand-in for `std::sync::atomic::AtomicU64`, used on
    /// targets without native 64-bit atomics.
    ///
    /// Every operation takes the lock, so the `Ordering` arguments are
    /// accepted only for signature compatibility and otherwise ignored —
    /// the mutex already serializes all accesses.
    #[derive(Debug)]
    pub(crate) struct AtomicU64 {
        inner: Mutex<u64>,
    }

    impl AtomicU64 {
        /// Creates a cell initialized to `val`.
        pub(crate) fn new(val: u64) -> AtomicU64 {
            AtomicU64 { inner: Mutex::new(val) }
        }

        /// Returns the current value.
        pub(crate) fn load(&self, _: Ordering) -> u64 {
            let guard = self.inner.lock().unwrap();
            *guard
        }

        /// Overwrites the current value with `val`.
        pub(crate) fn store(&self, val: u64, _: Ordering) {
            let mut guard = self.inner.lock().unwrap();
            *guard = val;
        }

        /// Bitwise-ORs `val` into the cell and returns the previous value.
        pub(crate) fn fetch_or(&self, val: u64, _: Ordering) -> u64 {
            let mut guard = self.inner.lock().unwrap();
            let before = *guard;
            *guard |= val;
            before
        }

        /// Stores `new` only if the current value equals `old`.
        ///
        /// Returns the value held before the call in either case, mirroring
        /// the classic `compare_and_swap` contract: a return value equal to
        /// `old` means the swap happened.
        pub(crate) fn compare_and_swap(&self, old: u64, new: u64, _: Ordering) -> u64 {
            let mut guard = self.inner.lock().unwrap();
            let before = *guard;
            if before == old {
                *guard = new;
            }
            before
        }
    }
}
59}