mod bool;
mod int;
mod ptr;
pub use self::bool::AtomicBool;
pub use int::*;
pub use ptr::AtomicPtr;
pub use std::sync::atomic::Ordering;
use crate::runtime::execution::ExecutionState;
use crate::runtime::task::clock::VectorClock;
use crate::runtime::thread;
use crate::silence_warnings;
use crate::sync::{ResourceSignature, ResourceType};
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
/// One-shot flag so the weak-ordering warning is printed at most once per process.
static PRINTED_ORDERING_WARNING: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false);

/// Emit a one-time warning when `order` is anything other than `SeqCst`,
/// since Shuttle models every atomic ordering as if it were `SeqCst`.
#[inline]
fn maybe_warn_about_ordering(order: Ordering) {
    use owo_colors::OwoColorize;

    if order == Ordering::SeqCst {
        return;
    }
    // Claim the right to print; any later (or racing) caller bails out here.
    if PRINTED_ORDERING_WARNING
        .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
        .is_err()
    {
        return;
    }
    // The warning can be silenced globally or via the execution's config.
    if silence_warnings() || ExecutionState::with(|state| state.config.silence_warnings) {
        return;
    }
    eprintln!(
        "{}: Shuttle only correctly models SeqCst atomics and treats all other Orderings \
as if they were SeqCst. Bugs caused by weaker orderings like {:?} may be missed. \
See https://docs.rs/shuttle/*/shuttle/sync/atomic/index.html#warning-about-relaxed-behaviors \
for details or to disable this warning.",
        "WARNING".yellow(),
        order
    );
}
/// Models `std::sync::atomic::fence`.
///
/// Panics on `Ordering::Relaxed` (which is invalid for fences, matching std's
/// behavior); otherwise only emits the weak-ordering warning, since under
/// Shuttle's SeqCst-only model a fence has no additional effect.
pub fn fence(order: Ordering) {
    assert!(
        order != Ordering::Relaxed,
        "there is no such thing as a relaxed fence"
    );
    maybe_warn_about_ordering(order);
}
pub use std::sync::atomic::compiler_fence;
/// Shared state backing Shuttle's atomic types.
///
/// Interior mutability goes through `RefCell` rather than a real atomic
/// because Shuttle serializes all accesses, so no true data race can occur.
#[derive(Debug)]
struct Atomic<T> {
    // Current value of the atomic.
    inner: RefCell<T>,
    // Vector clock tracking causality between accesses; lazily created on
    // first use (see `init_clock`).
    clock: RefCell<Option<VectorClock>>,
    // Stable identity for this resource; only read in tests right now.
    #[allow(unused)]
    signature: ResourceSignature,
}
// SAFETY: `Atomic` holds `RefCell`s, which are not `Sync`, but Shuttle's
// scheduler serializes every operation on the atomic (each method runs while
// its thread exclusively holds the execution), so overlapping borrows cannot
// occur — NOTE(review): this relies on that scheduling invariant; confirm it
// holds for all entry points.
unsafe impl<T: Sync> Sync for Atomic<T> {}
// `RefCell` makes the type not `RefUnwindSafe` by default; assert it here so
// these atomics remain usable across unwind boundaries, like std's atomics.
impl<T: RefUnwindSafe> RefUnwindSafe for Atomic<T> {}
impl<T> Atomic<T> {
    /// Creates a new `Atomic` holding `v`, with no vector clock yet (it is
    /// lazily initialized on first use) and a fresh resource signature.
    ///
    /// `#[track_caller]` is presumably so `ResourceSignature::new_const` can
    /// record the caller's source location — confirm against its definition.
    #[track_caller]
    const fn new(v: T) -> Self {
        Self {
            inner: RefCell::new(v),
            clock: RefCell::new(None),
            signature: ResourceSignature::new_const(ResourceType::Atomic),
        }
    }
}
impl<T: Copy + Eq> Atomic<T> {
    /// Exclusive access to the value; first acquires the atomic's clock into
    /// the current thread's clock.
    fn get_mut(&mut self) -> &mut T {
        self.exhale_clock();
        self.inner.get_mut()
    }

    /// Consumes the atomic and returns its value, acquiring its clock first.
    fn into_inner(self) -> T {
        self.exhale_clock();
        self.inner.into_inner()
    }

    /// Models an atomic load: scheduling point, then acquire the atomic's
    /// clock, then read.
    fn load(&self, order: Ordering) -> T {
        maybe_warn_about_ordering(order);
        thread::switch();
        self.exhale_clock();
        *self.inner.borrow()
    }

    /// Models an atomic store: scheduling point, then release the current
    /// thread's clock into the atomic, then write.
    fn store(&self, val: T, order: Ordering) {
        maybe_warn_about_ordering(order);
        thread::switch();
        self.inhale_clock();
        *self.inner.borrow_mut() = val;
    }

    /// Models an atomic swap. A swap is both a read (acquire the atomic's
    /// clock) and a write (release our clock into it).
    fn swap(&self, mut val: T, order: Ordering) -> T {
        maybe_warn_about_ordering(order);
        thread::switch();
        self.exhale_clock();
        self.inhale_clock();
        std::mem::swap(&mut *self.inner.borrow_mut(), &mut val);
        val
    }

    /// Models `fetch_update`: reads the current value, applies `f`, and writes
    /// the result back if `f` returned `Some`. Returns `Ok(previous)` on
    /// update and `Err(previous)` otherwise.
    fn fetch_update<F>(&self, set_order: Ordering, fetch_order: Ordering, mut f: F) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        maybe_warn_about_ordering(set_order);
        maybe_warn_about_ordering(fetch_order);
        thread::switch();
        // The read side always acquires the atomic's clock...
        self.exhale_clock();
        let previous = *self.inner.borrow();
        match f(previous) {
            Some(updated) => {
                *self.inner.borrow_mut() = updated;
                // ...but only a successful update is a write, so only then do
                // we release our clock into the atomic.
                self.inhale_clock();
                Ok(previous)
            }
            None => Err(previous),
        }
    }

    /// Reads the value with no scheduling point and no clock bookkeeping.
    ///
    /// # Safety
    ///
    /// Bypasses Shuttle's instrumentation entirely; callers must ensure that
    /// skipping the scheduling point and causality tracking is acceptable —
    /// NOTE(review): intended contract not visible here, confirm at call sites.
    unsafe fn raw_load(&self) -> T {
        *self.inner.borrow()
    }

    /// Lazily creates this atomic's vector clock on first use.
    fn init_clock(&self) {
        self.clock.borrow_mut().get_or_insert(VectorClock::new());
    }

    /// Write side: fold the current thread's (incremented) clock into the
    /// atomic's clock.
    fn inhale_clock(&self) {
        self.init_clock();
        ExecutionState::with(|state| {
            let thread_clock = state.increment_clock();
            self.clock
                .borrow_mut()
                .as_mut()
                .unwrap()
                .update(thread_clock);
        });
    }

    /// Read side: fold the atomic's clock into the current thread's clock.
    fn exhale_clock(&self) {
        self.init_clock();
        ExecutionState::with(|state| {
            let own_clock = self.clock.borrow();
            state.update_clock(own_clock.as_ref().unwrap());
        });
    }

    /// Test-only accessor for this atomic's resource signature.
    #[cfg(test)]
    fn signature(&self) -> ResourceSignature {
        self.signature.clone()
    }
}
#[cfg(test)]
mod tests {
    use crate::sync::atomic::*;
    use std::collections::HashSet;

    /// Each atomic constructed at a distinct source location should receive a
    /// distinct resource signature.
    #[test]
    fn unique_resource_signatures() {
        let atomic_i8 = AtomicI8::new(0);
        let atomic_i16 = AtomicI16::new(0);
        let atomic_i32 = AtomicI32::new(0);
        let atomic_i64 = AtomicI64::new(0);
        let atomic_isize = AtomicIsize::new(0);
        let atomic_u8 = AtomicU8::new(0);
        let atomic_u16 = AtomicU16::new(0);
        let atomic_u32 = AtomicU32::new(0);
        let atomic_u64 = AtomicU64::new(0);
        let atomic_usize = AtomicUsize::new(0);
        let atomic_bool = AtomicBool::new(false);
        let atomic_ptr = AtomicPtr::new(std::ptr::null_mut::<i32>());

        let mut signatures = HashSet::new();
        signatures.insert(atomic_i8.signature());
        signatures.insert(atomic_i16.signature());
        signatures.insert(atomic_i32.signature());
        signatures.insert(atomic_i64.signature());
        signatures.insert(atomic_isize.signature());
        signatures.insert(atomic_u8.signature());
        signatures.insert(atomic_u16.signature());
        signatures.insert(atomic_u32.signature());
        signatures.insert(atomic_u64.signature());
        signatures.insert(atomic_usize.signature());
        signatures.insert(atomic_bool.signature());
        signatures.insert(atomic_ptr.signature());
        assert_eq!(signatures.len(), 12);
    }

    /// Signatures must be stable across Shuttle iterations: creating the same
    /// two atomics in every iteration should yield exactly two distinct
    /// (value, signature) pairs overall.
    #[test]
    fn atomic_signatures_consistent_across_shuttle_iterations() {
        use std::sync::{Arc, Mutex};

        let observed = Arc::new(Mutex::new(HashSet::new()));
        let recorder = observed.clone();
        crate::check_random(
            move || {
                let flag_false = AtomicBool::new(false);
                let flag_true = AtomicBool::new(true);
                recorder
                    .lock()
                    .unwrap()
                    .insert((flag_false.load(Ordering::SeqCst), flag_false.signature()));
                recorder
                    .lock()
                    .unwrap()
                    .insert((flag_true.load(Ordering::SeqCst), flag_true.signature()));
            },
            10,
        );
        assert_eq!(observed.lock().unwrap().len(), 2);
    }
}