//! A "half lock": a reader-writer scheme in which readers never block and a
//! writer publishes a new value by swapping a pointer, then waits for every
//! reader of the previous generation to drain before freeing the old value.
//! Readers register themselves in one of two generation-indexed counters;
//! the write barrier flips the generation and spins until each counter has
//! been observed at zero.

// `use std::isize` is kept so `isize::MAX` below also resolves on older
// compilers that predate the associated constants on primitive types.
use std::isize;
use std::marker::PhantomData;
use std::ops::Deref;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use std::sync::{Mutex, MutexGuard, PoisonError};
use std::thread;

use libc;

/// While waiting for readers to drain, yield the thread once in this many
/// spin iterations.
const YIELD_EVERY: usize = 16;
/// Cap on the number of concurrently held read guards in one generation;
/// exceeding it aborts the process long before the counter could overflow.
const MAX_GUARDS: usize = (isize::MAX) as usize;
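
/// A RAII read guard. While it is alive, the value it dereferences to is
/// guaranteed not to be freed; dropping it decrements the reader counter of
/// the generation slot it was registered in.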
pub(crate) struct ReadGuard<'a, T: 'a> {
    data: &'a T,
    lock: &'a AtomicUsize,
}

impl<'a, T> Deref for ReadGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.data
    }
}

impl<'a, T> Drop for ReadGuard<'a, T> {
    fn drop(&mut self) {
        // Deregister from the generation slot we were counted in.
        self.lock.fetch_sub(1, Ordering::SeqCst);
    }
}
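
/// A write guard, held while a writer is allowed to replace the stored
/// value. The `MutexGuard` inside serializes writers against each other;
/// `data` tracks the currently published value so the guard can be
/// dereferenced even after a `store`.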
pub(crate) struct WriteGuard<'a, T: 'a> {
    _guard: MutexGuard<'a, ()>,
    lock: &'a HalfLock<T>,
    data: &'a T,
}

impl<'a, T> WriteGuard<'a, T> {
    /// Publishes a new value and frees the previous one once no reader can
    /// still be looking at it.
    pub(crate) fn store(&mut self, val: T) {
        // Move the new value to the heap and publish it atomically.
        let new = Box::into_raw(Box::new(val));
        self.data = unsafe { &*new };
        let old = self.lock.data.swap(new, Ordering::SeqCst);
        // Wait until every reader that might have seen the old pointer is
        // gone; only then is it safe to free the old value.
        self.lock.write_barrier();
        drop(unsafe { Box::from_raw(old) });
    }
}

impl<'a, T> Deref for WriteGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        self.data
    }
}
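
/// The lock itself. `generation` selects which of the two counters in
/// `lock` newly arriving readers register in; a writer flips the generation
/// and waits until both counters have been observed at zero, which proves
/// no reader still references the previously published value.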
pub(crate) struct HalfLock<T> {
    /// Marks ownership of the `T` behind the raw pointer (drop check).
    _t: PhantomData<T>,
    /// The currently published value.
    data: AtomicPtr<T>,
    /// Selects the reader counter (`generation % 2`) new readers use.
    generation: AtomicUsize,
    /// Reader counts for the two generations.
    lock: [AtomicUsize; 2],
    /// Serializes writers against each other.
    write_mutex: Mutex<()>,
}

impl<T> HalfLock<T> {
    pub(crate) fn new(data: T) -> Self {
        let ptr = Box::into_raw(Box::new(data));
        Self {
            _t: PhantomData,
            data: AtomicPtr::new(ptr),
            generation: AtomicUsize::new(0),
            lock: [AtomicUsize::new(0), AtomicUsize::new(0)],
            write_mutex: Mutex::new(()),
        }
    }

    pub(crate) fn read(&self) -> ReadGuard<T> {
        // Register in the reader counter of the current generation.
        let gen = self.generation.load(Ordering::SeqCst);
        let lock = &self.lock[gen % 2];
        let guard_cnt = lock.fetch_add(1, Ordering::SeqCst);
        if guard_cnt > MAX_GUARDS {
            // An unrealistic number of concurrent readers; abort (rather
            // than panic) before the counter can overflow.
            unsafe { libc::abort() };
        }
        // Only once we are registered is it safe to load and use the data
        // pointer; the write barrier will wait for us before freeing it.
        let data = self.data.load(Ordering::SeqCst);
        let data = unsafe { &*data };
        ReadGuard { data, lock }
    }

    /// Latches, for each generation slot, whether it has been observed at
    /// zero at least once.
    fn update_seen(&self, seen_zero: &mut [bool; 2]) {
        for (seen, slot) in seen_zero.iter_mut().zip(&self.lock) {
            *seen = *seen || slot.load(Ordering::SeqCst) == 0;
        }
    }

    /// Waits until every reader that might still see the old value is gone.
    fn write_barrier(&self) {
        // Check once before flipping the generation; the older slot is
        // usually empty already.
        let mut seen_zero = [false; 2];
        self.update_seen(&mut seen_zero);
        // Flip the generation so new readers register in the other slot and
        // the current one only drains.
        self.generation.fetch_add(1, Ordering::SeqCst);
        // Spin, with occasional yields as a poor man's backoff, until both
        // slots have been seen empty. Under Miri, just re-check in a loop.
        let mut iter = 0usize;
        while !seen_zero.iter().all(|s| *s) {
            iter = iter.wrapping_add(1);
            if cfg!(not(miri)) {
                if iter % YIELD_EVERY == 0 {
                    thread::yield_now();
                } else {
                    // Deprecated in favour of `hint::spin_loop`, but kept
                    // for compatibility with older compilers.
                    #[allow(deprecated)]
                    atomic::spin_loop_hint();
                }
            }
            self.update_seen(&mut seen_zero);
        }
    }

    pub(crate) fn write(&self) -> WriteGuard<T> {
        // Writers exclude each other. A poisoned mutex is deliberately
        // recovered: it protects no state of its own, only the right to
        // write.
        let guard = self
            .write_mutex
            .lock()
            .unwrap_or_else(PoisonError::into_inner);
        let data = self.data.load(Ordering::SeqCst);
        let data = unsafe { &*data };
        WriteGuard {
            data,
            _guard: guard,
            lock: self,
        }
    }
}

impl<T> Drop for HalfLock<T> {
    fn drop(&mut self) {
        // `&mut self` proves no reader or writer is active, so the
        // currently published value can simply be reclaimed.
        unsafe {
            let data = Box::from_raw(self.data.load(Ordering::SeqCst));
            drop(data);
        }
    }
}
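
// A minimal usage sketch (added for illustration; not part of the original
// file). It assumes only the API above. One subtlety worth showing: `store`
// blocks until all read guards taken before the swap are dropped, so a
// thread must never hold a `ReadGuard` across its own call to `store`.
#[cfg(test)]
mod tests {
    use super::HalfLock;

    #[test]
    fn read_write_roundtrip() {
        let lock = HalfLock::new(1usize);
        // The temporary guard is dropped at the end of the statement,
        // releasing its reader slot before the write below.
        assert_eq!(*lock.read(), 1);

        let mut writer = lock.write();
        assert_eq!(*writer, 1); // Still the old value.
        writer.store(2); // Swap, wait for old readers, free the old box.
        assert_eq!(*writer, 2); // The guard now tracks the new value.
        drop(writer);

        assert_eq!(*lock.read(), 2);
    }

    #[test]
    fn concurrent_readers() {
        let lock = HalfLock::new(String::from("hello"));

        // Readers never block each other. Scoped threads (Rust 1.63+) are
        // used here purely for the sketch.
        std::thread::scope(|s| {
            for _ in 0..4 {
                s.spawn(|| assert_eq!(*lock.read(), "hello"));
            }
        });

        lock.write().store(String::from("world"));
        assert_eq!(*lock.read(), "world");
    }
}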