#![warn(
anonymous_parameters,
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
nonstandard_style,
rust_2018_idioms,
single_use_lifetimes,
trivial_casts,
trivial_numeric_casts,
unreachable_pub,
unused_extern_crates,
unused_qualifications,
variant_size_differences
)]
pub mod noinline;
pub mod race_cell;
use std::sync::{
atomic::{AtomicBool, Ordering},
Barrier,
};
/// Runs two closures concurrently, starting them as simultaneously as possible.
///
/// A spawned scoped thread executes `f1` while the calling thread executes
/// `f2`; both wait on a shared [`Barrier`] first so neither closure begins
/// before the other thread is ready. `noinline::call_once` keeps the calls
/// from being inlined away. Returns once both closures have completed.
pub fn concurrent_test_2(f1: impl FnOnce() + Send, f2: impl FnOnce() + Send) {
    // Two participants: the spawned thread and the calling thread.
    let start_line = Barrier::new(2);
    std::thread::scope(|scope| {
        scope.spawn(|| {
            start_line.wait();
            noinline::call_once(f1);
        });
        start_line.wait();
        noinline::call_once(f2);
    })
}
/// Runs three closures concurrently, starting them as simultaneously as possible.
///
/// Two spawned scoped threads execute `f1` and `f2` while the calling thread
/// executes `f3`; all three rendezvous on a shared [`Barrier`] before invoking
/// their closure. `noinline::call_once` keeps the calls from being inlined
/// away. Returns once all three closures have completed.
pub fn concurrent_test_3(
    f1: impl FnOnce() + Send,
    f2: impl FnOnce() + Send,
    f3: impl FnOnce() + Send,
) {
    // Three participants: two spawned threads plus the calling thread.
    let start_line = Barrier::new(3);
    std::thread::scope(|scope| {
        scope.spawn(|| {
            start_line.wait();
            noinline::call_once(f1);
        });
        scope.spawn(|| {
            start_line.wait();
            noinline::call_once(f2);
        });
        start_line.wait();
        noinline::call_once(f3);
    })
}
/// Executes `benchmark` while `antagonist` runs in a loop on another thread.
///
/// A scoped thread repeatedly invokes `antagonist` until the benchmark
/// finishes; a [`Barrier`] guarantees the contention loop is already running
/// when `benchmark` starts. The antagonist's results are discarded.
///
/// Returns whatever `benchmark` produced.
pub fn run_under_contention<AntagonistResult, BenchmarkResult>(
    mut antagonist: impl FnMut() -> AntagonistResult + Send,
    mut benchmark: impl FnMut() -> BenchmarkResult,
) -> BenchmarkResult {
    let ready = Barrier::new(2);
    let keep_running = AtomicBool::new(true);
    std::thread::scope(|scope| {
        scope.spawn(|| {
            ready.wait();
            // Hammer away until the main thread lowers the flag.
            loop {
                if !keep_running.load(Ordering::Relaxed) {
                    break;
                }
                antagonist();
            }
        });
        ready.wait();
        let output = benchmark();
        keep_running.store(false, Ordering::Relaxed);
        output
    })
}
#[cfg(test)]
mod tests {
    use std::{
        sync::atomic::{AtomicUsize, Ordering},
        time::Duration,
    };

    // Races fetch_add against swap: every value returned by either operation
    // must be explainable by some interleaving of the two threads' writes.
    #[test]
    fn swap_and_fetch_add() {
        const ATOMIC_OPS_COUNT: usize = 100_000_000;
        let counter = AtomicUsize::new(0);
        let mut previous_write = 0;
        super::concurrent_test_2(
            || {
                for _ in 1..=ATOMIC_OPS_COUNT {
                    // Either the swapping thread just reset the counter to
                    // zero, or it still holds the value this thread last wrote.
                    let seen = counter.fetch_add(1, Ordering::Relaxed);
                    assert!((seen == 0) || (seen == previous_write));
                    previous_write = seen + 1;
                }
            },
            || {
                for _ in 1..=ATOMIC_OPS_COUNT {
                    // The counter can never exceed the total increment count.
                    let seen = counter.swap(0, Ordering::Relaxed);
                    assert!(seen <= ATOMIC_OPS_COUNT);
                }
            },
        );
    }

    // Races fetch_and / fetch_or / fetch_xor over disjoint bit masks: every
    // observed value must have each mask either fully set or fully clear, and
    // no bits outside the low 16 may ever appear.
    #[test]
    fn fetch_and_or_xor() {
        const ATOMIC_OPS_COUNT: usize = 30_000_000;
        const AND_MASK: usize = 0b0000_0000_0000_0000;
        const XOR_MASK: usize = 0b0000_1111_0000_1111;
        const OR_MASK: usize = 0b1111_0000_1111_0000;
        let bits = AtomicUsize::new(0);
        super::concurrent_test_3(
            || {
                for _ in 1..=ATOMIC_OPS_COUNT {
                    let observed = bits.fetch_and(AND_MASK, Ordering::Relaxed);
                    assert_eq!(observed & 0b1111_1111_1111_1111, observed);
                    assert!((observed & XOR_MASK == XOR_MASK) || (observed & XOR_MASK == 0));
                    assert!((observed & OR_MASK == OR_MASK) || (observed & OR_MASK == 0));
                }
            },
            || {
                for _ in 1..=ATOMIC_OPS_COUNT {
                    let observed = bits.fetch_or(OR_MASK, Ordering::Relaxed);
                    assert_eq!(observed & 0b1111_1111_1111_1111, observed);
                    assert!((observed & XOR_MASK == XOR_MASK) || (observed & XOR_MASK == 0));
                    assert!((observed & OR_MASK == OR_MASK) || (observed & OR_MASK == 0));
                }
            },
            || {
                for _ in 1..=ATOMIC_OPS_COUNT {
                    let observed = bits.fetch_xor(XOR_MASK, Ordering::Relaxed);
                    assert_eq!(observed & 0b1111_1111_1111_1111, observed);
                    assert!((observed & XOR_MASK == XOR_MASK) || (observed & XOR_MASK == 0));
                    assert!((observed & OR_MASK == OR_MASK) || (observed & OR_MASK == 0));
                }
            },
        );
    }

    // Demonstrates run_under_contention: while the benchmark sleeps for
    // 100 ms, the antagonist is expected to rack up well over 100_000
    // increments on any reasonable machine.
    #[test]
    fn antagonist_showcase() {
        let ops = AtomicUsize::new(0);
        super::run_under_contention(
            || ops.fetch_add(1, Ordering::Relaxed),
            || std::thread::sleep(Duration::from_millis(100)),
        );
        assert!(ops.load(Ordering::Relaxed) > 100000);
    }
}