#![cfg_attr(feature = "nightly", feature(test))]
#[cfg(all(test, feature = "nightly"))]
extern crate test;
#[cfg(feature = "fnv")]
extern crate fnv;
#[cfg(not(feature = "fnv"))]
use std::collections;
use std::hash::Hash;
use std::cmp;
use std::rc::Rc;
use std::cell::Cell;
use std::sync::Arc;
use std::sync::atomic::Ordering;
use std::borrow::Borrow;
#[cfg(feature = "nightly")]
use std::sync::atomic::AtomicU64;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::AtomicUsize;
// Counter storage: a true 64-bit atomic on nightly, otherwise fall back to
// `AtomicUsize` (NOTE: usize may be 32 bits on some targets, narrowing the
// effective range of `AtomicIncr` there).
#[cfg(feature = "nightly")]
type Atomic = AtomicU64;
#[cfg(not(feature = "nightly"))]
type Atomic = AtomicUsize;
// Map storage: optionally swap std's SipHash map for the FNV-hashed map.
#[cfg(feature = "fnv")]
type HashMap<K, V> = fnv::FnvHashMap<K, V>;
#[cfg(not(feature = "fnv"))]
type HashMap<K, V> = collections::HashMap<K, V>;
/// Single-owner monotonic high-water mark; `is_new` requires `&mut self`.
#[derive(Default, Clone, PartialEq, PartialOrd, Eq, Debug)]
pub struct Incr(u64);
/// One `u64` high-water mark per key.
#[derive(Default, Clone, PartialEq, Debug)]
pub struct Map<K: Eq + Hash>(HashMap<K, u64>);
/// Shareable (single-threaded) counter via `Rc<Cell<_>>`; `is_new` takes `&self`.
#[derive(Default, Clone, PartialEq, PartialOrd, Eq, Debug)]
pub struct RcIncr(Rc<Cell<u64>>);
/// Thread-safe shareable counter backed by an `Arc`'d atomic.
#[derive(Default, Clone, Debug)]
pub struct AtomicIncr(Arc<Atomic>);
/// Per-key thread-safe counters.
#[derive(Default, Clone, Debug)]
pub struct AtomicMap<K: Eq + Hash>(HashMap<K, AtomicIncr>);
impl Incr {
    /// Stores `val` if it is strictly greater than the current value.
    /// Returns `true` exactly when the counter advanced.
    pub fn is_new(&mut self, val: u64) -> bool {
        let advanced = val > self.0;
        if advanced {
            self.0 = val;
        }
        advanced
    }

    /// Current high-water mark.
    pub fn get(&self) -> u64 {
        self.0
    }
}
impl<K> Map<K>
where
    K: Eq + Hash,
{
    /// Stores `val` under `k` when it is strictly greater than the recorded
    /// value (a missing key starts at 0 and is inserted). Returns whether
    /// the recorded value advanced.
    pub fn is_new(&mut self, k: K, val: u64) -> bool {
        let slot = self.0.entry(k).or_insert(0);
        let advanced = val > *slot;
        if advanced {
            *slot = val;
        }
        advanced
    }

    /// Recorded value for `key`, or 0 when the key is absent.
    pub fn get<Q>(&self, key: &Q) -> u64
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        match self.0.get(key) {
            Some(&v) => v,
            None => 0,
        }
    }

    /// Whether `key` has ever been seen (including via a rejected `is_new`).
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.0.contains_key(key)
    }

    /// Number of tracked keys.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// True when no keys are tracked.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
impl RcIncr {
    /// Stores `val` in the shared cell when it is strictly greater than the
    /// current value; returns whether the counter advanced.
    pub fn is_new(&self, val: u64) -> bool {
        let advanced = val > self.0.get();
        if advanced {
            self.0.set(val);
        }
        advanced
    }

    /// Current value of the shared counter.
    pub fn get(&self) -> u64 {
        self.0.get()
    }
}
impl AtomicIncr {
    /// Attempts to store `val` as the new maximum; returns `true` iff this
    /// call advanced the counter. Races with other writers are resolved by
    /// a compare-and-swap retry loop.
    pub fn is_new(&self, val: u64) -> bool {
        // Without nightly the backing store is `AtomicUsize`.
        #[cfg(not(feature = "nightly"))]
        let val = val as usize;
        let mut observed = self.0.load(Ordering::Acquire);
        while val > observed {
            match self.0.compare_exchange(observed, val, Ordering::AcqRel, Ordering::Acquire) {
                // We won the race: `val` is now the stored maximum.
                Ok(_) => return true,
                // Another writer got there first; retry against its value.
                Err(current) => observed = current,
            }
        }
        false
    }

    /// Snapshot of the stored value.
    pub fn get(&self) -> u64 {
        self.0.load(Ordering::Acquire) as u64
    }

    /// Consumes the wrapper, exposing the shared atomic directly.
    pub fn into_inner(self) -> Arc<Atomic> {
        self.0
    }
}
impl<K> AtomicMap<K>
where
    K: Eq + Hash,
{
    /// Checks `val` against the counter stored under `key`. Unknown keys are
    /// never "new"; use `is_new_or_insert` to create entries.
    pub fn is_new<Q>(&self, key: &Q, val: u64) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        match self.0.get(key) {
            Some(incr) => incr.is_new(val),
            None => false,
        }
    }

    /// Like `is_new`, but first creates a zero-initialized counter for a
    /// missing key.
    pub fn is_new_or_insert(&mut self, key: K, val: u64) -> bool {
        self.0.entry(key).or_default().is_new(val)
    }

    /// Alias for `is_new_or_insert`.
    pub fn insert(&mut self, key: K, val: u64) -> bool {
        self.is_new_or_insert(key, val)
    }

    /// Current value under `key`, or 0 when absent.
    pub fn get<Q>(&self, key: &Q) -> u64
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        match self.0.get(key) {
            Some(incr) => incr.get(),
            None => 0,
        }
    }

    /// Whether a counter exists for `key`.
    pub fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: ?Sized + Hash + Eq,
    {
        self.0.contains_key(key)
    }

    /// Number of tracked keys.
    pub fn len(&self) -> usize {
        self.0.len()
    }

    /// True when no keys are tracked.
    pub fn is_empty(&self) -> bool {
        self.0.is_empty()
    }
}
// Comparisons operate on snapshots of the atomic (`get()`), so two counters
// compare by their observed values, not by pointer identity.
impl PartialEq for AtomicIncr {
    fn eq(&self, other: &Self) -> bool {
        self.get() == other.get()
    }
}

impl Eq for AtomicIncr {}

// `u64` ordering is total, so provide `Ord` (previously missing despite
// `Eq` + a total `PartialOrd`), enabling `sort`, `max`, `BTreeMap` keys, etc.
impl Ord for AtomicIncr {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.get().cmp(&other.get())
    }
}

impl PartialOrd for AtomicIncr {
    // Canonical form: delegate to the total order.
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl From<u64> for Incr {
    /// Seeds a single-owner counter at `val`.
    fn from(val: u64) -> Self {
        Incr(val)
    }
}

impl From<u64> for RcIncr {
    /// Seeds a shareable single-threaded counter at `val`.
    fn from(val: u64) -> Self {
        let cell = Cell::new(val);
        RcIncr(Rc::new(cell))
    }
}

impl From<u64> for AtomicIncr {
    /// Seeds a thread-safe counter at `val`.
    fn from(val: u64) -> Self {
        let atom = Atomic::new(val);
        AtomicIncr(Arc::new(atom))
    }
}
#[allow(unused)]
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::atomic::AtomicBool;
    use std::thread;
    #[cfg(feature = "nightly")]
    use test::Bencher;

    /// `insert`/`get` must accept borrowed key forms: `&str` for `String`
    /// keys, plain references for `Copy` keys.
    #[test]
    fn atomic_map_key_ergonomics() {
        let mut last: AtomicMap<String> = Default::default();
        let a = String::from("a");
        last.insert(a.clone(), 10);
        assert_eq!(last.get(&a), 10);
        let mut last: AtomicMap<&'static str> = Default::default();
        last.insert("a", 11);
        assert_eq!(last.get("a"), 11);
        let mut last: AtomicMap<u64> = Default::default();
        last.insert(1, 12);
        assert_eq!(last.get(&1), 12);
    }

    // Every strictly increasing value must register as new.
    macro_rules! stairway_to_heaven {
        ($f:ident, $t:ident) => {
            #[test]
            fn $f() {
                let mut last: $t = Default::default();
                for i in 1..1_000_000u64 {
                    assert!(last.is_new(i), "i = {}", i);
                }
            }
        }
    }

    stairway_to_heaven!(all_true_to_one_million_incr, Incr);
    stairway_to_heaven!(all_true_to_one_million_rc_incr, RcIncr);
    stairway_to_heaven!(all_true_to_one_million_atomic_incr, AtomicIncr);

    // A value that never rises must never register as new.
    macro_rules! purgatory {
        ($f:ident, $t:ident) => {
            #[test]
            fn $f() {
                let mut last: $t = Default::default();
                for _ in 1..1_000_000u64 {
                    assert!(!last.is_new(0), "i = {}", 0);
                }
            }
        }
    }

    purgatory!(never_true_one_million_times_incr, Incr);
    purgatory!(never_true_one_million_times_rc_incr, RcIncr);
    purgatory!(never_true_one_million_times_atomic_incr, AtomicIncr);

    // Bench: cost of `is_new` when every call advances the counter.
    macro_rules! stairway_to_heaven_bench {
        ($f:ident, $t:ident) => {
            #[cfg(feature = "nightly")]
            #[bench]
            fn $f(b: &mut Bencher) {
                let mut last: $t = Default::default();
                let mut i = 1;
                b.iter(|| {
                    i += 1;
                    last.is_new(i - 1)
                })
            }
        }
    }

    stairway_to_heaven_bench!(always_increasing_bench_incr, Incr);
    stairway_to_heaven_bench!(always_increasing_bench_rc_incr, RcIncr);
    stairway_to_heaven_bench!(always_increasing_bench_atomic_incr, AtomicIncr);

    // Bench: cost of `is_new` when no call ever advances the counter.
    macro_rules! purgatory_bench {
        ($f:ident, $t:ident) => {
            #[cfg(feature = "nightly")]
            #[bench]
            fn $f(b: &mut Bencher) {
                let mut last: $t = Default::default();
                b.iter(|| {
                    last.is_new(0)
                })
            }
        }
    }

    purgatory_bench!(never_incr_bench_incr, Incr);
    purgatory_bench!(never_incr_bench_rc_incr, RcIncr);
    purgatory_bench!(never_incr_bench_atomic_incr, AtomicIncr);

    // Bench: 24 threads hammer the shared atomic (yielding each iteration)
    // while the bench thread races them through `is_new`.
    #[cfg(feature = "nightly")]
    #[bench]
    fn atomic_incr_nightmare_scenario_except_threads_yield_each_iteration(b: &mut Bencher) {
        let n = 24;
        let stop = Arc::new(AtomicBool::new(false));
        let last: AtomicIncr = Default::default();
        let mut threads = Vec::new();
        for _ in 0..n {
            let val = last.clone().into_inner();
            let stop = Arc::clone(&stop);
            threads.push(thread::spawn(move || {
                loop {
                    val.fetch_add(1, Ordering::Relaxed);
                    thread::yield_now();
                    if stop.load(Ordering::Relaxed) { break }
                }
            }));
        }
        let mut i = 1;
        b.iter(|| {
            let is_new = last.is_new(i);
            i = match is_new {
                true => i + 1,
                false => i.max(last.get()),
            };
            is_new
        });
        stop.store(true, Ordering::SeqCst);
        // Fix: the workers were previously leaked; join them so they cannot
        // keep contending on the CPU while later benchmarks run.
        for t in threads {
            t.join().unwrap();
        }
    }

    // Same as above but the contending threads never yield.
    #[cfg(feature = "nightly")]
    #[bench]
    fn im_your_worst_nightmare(b: &mut Bencher) {
        let n = 24;
        let stop = Arc::new(AtomicBool::new(false));
        let last: AtomicIncr = Default::default();
        let mut threads = Vec::new();
        for _ in 0..n {
            let val = last.clone().into_inner();
            let stop = Arc::clone(&stop);
            threads.push(thread::spawn(move || {
                loop {
                    val.fetch_add(1, Ordering::Relaxed);
                    if stop.load(Ordering::Relaxed) { break }
                }
            }));
        }
        let mut i = 1;
        b.iter(|| {
            let is_new = last.is_new(i);
            i = match is_new {
                true => i + 1,
                false => i.max(last.get()),
            };
            is_new
        });
        stop.store(true, Ordering::SeqCst);
        // Fix: join the workers instead of leaking them (see bench above).
        for t in threads {
            t.join().unwrap();
        }
    }

    /// A default-constructed counter starts at 0.
    #[test]
    fn check_atomic_incr_default_initial_value() {
        let last = AtomicIncr::default();
        assert_eq!(last.get(), 0);
    }
}