use std::i32;
use std::sync::atomic::Ordering;
use std::fmt::{Debug, Formatter, Result as FmtResult};
use sys::{futex_wait_bitset, futex_wake_bitset};
use integer_atomics::AtomicU32;
use lock_wrappers::raw::RwLock;
#[cfg(feature = "nightly")]
use std::intrinsics::likely;
/// Stable-toolchain stand-in for `std::intrinsics::likely`: it provides no
/// branch-prediction hint and simply passes the condition through unchanged.
#[inline(always)]
#[cfg(not(feature = "nightly"))]
unsafe fn likely(b: bool) -> bool {
    b
}
/// A readers-writer lock whose entire state lives in a single atomic `u32`
/// used both as the counter word and as the futex wait word.
///
/// The word packs an active-reader count, a queued-reader count, a writer
/// count, a write hand-off flag, and overflow guard bits; see the `M_*`,
/// `F_*` and `ONE_*` constants for the exact layout.
pub struct RwFutex2 {
// The packed lock word; mutated only via atomic add/sub/CAS/or and passed
// to `futex_wait_bitset`/`futex_wake_bitset`.
futex: AtomicU32,
}
// Bit layout of the 32-bit futex word (bit 0 = least significant):
//   bits  0..=8   active-reader count        (M_READERS / ONE_READER)
//   bit   9       reader-count overflow guard
//   bits 10..=18  queued-reader count        (M_READERS_QUEUED / ONE_READER_QUEUED)
//   bit  19       queued-count overflow guard
//   bits 20..=28  writer count               (M_WRITERS / ONE_WRITER)
//   bit  29       writer-count overflow guard
//   bit  30       write hand-off flag        (F_WRITE_SHOVE)
//   bit  31       top overflow guard
//
// The four guard bits (31, 29, 19, 9). If any is ever set, a counter field
// has overflowed into its guard and the lock is poisoned (see `safe_add`
// and `die`).
const M_DEATH: u32 = 0b10100000000010000000001000000000;
// Set by a releasing writer to hand the lock directly to one waiting writer
// (cleared by that writer with a CAS in `acquire_write_slow`).
const F_WRITE_SHOVE: u32 = 0b01000000000000000000000000000000;
// Count of writers holding or waiting for the lock.
const M_WRITERS: u32 = 0b00011111111100000000000000000000;
// Count of readers parked because writers are present.
const M_READERS_QUEUED: u32 = 0b00000000000001111111110000000000;
// Count of readers currently holding the lock.
const M_READERS: u32 = 0b00000000000000000000000111111111;
// One unit of each counter field, for atomic add/sub.
const ONE_WRITER: u32 = 0b00000000000100000000000000000000;
const ONE_READER_QUEUED: u32 =0b00000000000000000000010000000000;
const ONE_READER: u32 = 0b00000000000000000000000000000001;
// Futex bitset channels: readers and writers sleep on distinct bits so a
// wake can target one class without disturbing the other.
const ID_READER: i32 = 1;
const ID_WRITER: i32 = 2;
/// Atomically adds `val` to the packed lock word and returns the *new*
/// (post-add) value, panicking via `die` if any overflow guard bit is set
/// either before or after the addition.
#[inline(always)]
fn safe_add(dst: &AtomicU32, val: u32, ordering: Ordering) -> u32 {
    let old = dst.fetch_add(val, ordering);
    // A guard bit in the previous value means the lock was already poisoned.
    if old & M_DEATH != 0 {
        die(dst)
    }
    // Reconstruct the post-add word; a guard bit here means this very add
    // overflowed one of the counter fields.
    let new = old.wrapping_add(val);
    if new & M_DEATH != 0 {
        die(dst)
    }
    new
}
/// Atomically subtracts `val` from the packed lock word and returns the new
/// value, with the same overflow-guard checking as `safe_add`.
#[inline(always)]
fn safe_sub(dst: &AtomicU32, val: u32, ordering: Ordering) -> u32 {
    // Subtraction is a wrapping add of the two's-complement negation; the
    // guard-bit checks in `safe_add` still catch field underflow/overflow.
    let negated = val.wrapping_neg();
    safe_add(dst, negated, ordering)
}
// Poison the lock and panic: a packed counter overflowed into a guard bit.
// The store publishes the fully-poisoned word (all guard bits set) so every
// subsequent `safe_add`/`safe_sub` on this futex also panics instead of
// operating on corrupted counts.
#[cold]
#[inline(never)]
fn die(dst: &AtomicU32) -> ! {
dst.store(M_DEATH, Ordering::SeqCst);
panic!("Spontaneous futex combustion! (overflow)");
}
impl RwFutex2 {
// Slow path of `acquire_read`. The caller has already incremented the
// active-reader count but observed writers in the word; `val` is the
// post-increment word. Returns with the read lock held (this thread
// counted in M_READERS and M_WRITERS == 0).
#[inline(never)]
fn acquire_read_slow(&self, mut val: u32) {
loop {
// No writers left: the active-reader increment (from the caller or
// from the re-add below) stands, so the read lock is held.
if val & M_WRITERS == 0 {
break;
}
// Writers present: atomically convert ourselves from an active
// reader into a queued reader. `safe_add` returns the new word.
val = safe_add(&self.futex, ONE_READER_QUEUED - ONE_READER, Ordering::Acquire);
if val & M_WRITERS == 0 {
// The last writer disappeared between the check and the add;
// skip the wait and re-register as an active reader below.
} else {
// If we were the last active reader, a writer may be parked
// waiting for M_READERS to reach zero — wake one. (The second
// conjunct is already implied by this `else` branch.)
if (val & M_READERS == 0) && (val & M_WRITERS != 0) {
futex_wake_bitset(&self.futex, 1, ID_WRITER);
}
// Park on the reader channel; `val` guards against a missed
// wakeup (presumably FUTEX_WAIT_BITSET semantics: the wait is
// skipped if the word no longer equals `val` — verify in `sys`).
futex_wait_bitset(&self.futex, val, ID_READER);
}
// Move back from queued reader to active reader and re-test.
// `wrapping_sub` because ONE_READER < ONE_READER_QUEUED; the
// wrapped delta subtracts the queued unit via the wrapping add.
val = safe_add(&self.futex, ONE_READER.wrapping_sub(ONE_READER_QUEUED), Ordering::Acquire);
}
}
// Slow path of `acquire_write`. The caller has already incremented the
// writer count but could not take the lock immediately; `val` is the word
// it observed (reloaded here after each wait).
#[inline(never)]
fn acquire_write_slow(&self, mut val: u32) {
// Set once this thread is the designated owner and is only waiting for
// the remaining active readers to drain.
let mut have_lock = false;
loop {
if have_lock {
if val & M_READERS == 0 {
break;
}
} else if val & F_WRITE_SHOVE != 0 {
// A releasing writer handed the lock off: claim it by clearing
// the flag. `compare_and_swap` returns the previous value, so
// equality with `val` means the CAS succeeded and we own the lock.
let newval = self.futex.compare_and_swap(val, val & !F_WRITE_SHOVE, Ordering::Acquire);
if val == newval {
break;
} else {
// Lost the race; retry with the freshly observed word.
val = newval;
continue;
}
} else if val & M_WRITERS == ONE_WRITER {
// We are the only registered writer, so the write lock is ours;
// we may still have to wait out the active readers.
have_lock = true;
if val & M_READERS == 0 {
break;
}
}
// Park on the writer channel; the last departing reader or a
// releasing writer wakes us (see `release_read`/`release_write_slow`).
futex_wait_bitset(&self.futex, val, ID_WRITER);
val = self.futex.load(Ordering::Acquire);
}
}
// Slow path of `release_write`. The caller already decremented the writer
// count; `val` (the post-decrement word) shows either further waiting
// writers or queued readers.
#[inline(never)]
fn release_write_slow(&self, val: u32) {
if val & M_WRITERS != 0 {
// Other writers are waiting: hand the lock straight to one of them
// by publishing the shove flag, then wake a single writer.
self.futex.fetch_or(F_WRITE_SHOVE, Ordering::Release);
futex_wake_bitset(&self.futex, 1, ID_WRITER);
} else {
if val & M_READERS_QUEUED != 0 {
// No writers remain; release every queued reader at once.
futex_wake_bitset(&self.futex, i32::MAX as u32, ID_READER);
}
}
}
}
impl RwLock for RwFutex2 {
// No state needs to be threaded from acquire to the matching release.
type ReadLockState = ();
type WriteLockState = ();
// Optimistically register as an active reader; if no writers are present
// (held or waiting) the lock is acquired with a single atomic add.
#[inline]
fn acquire_read(&self) {
let val = safe_add(&self.futex, ONE_READER, Ordering::Acquire);
if unsafe { likely(val & M_WRITERS == 0) } {
return;
}
self.acquire_read_slow(val)
}
// Register as a writer; the fast path succeeds only when no hand-off is
// pending, we are the sole registered writer, and no readers are active.
#[inline]
fn acquire_write(&self) {
let val = safe_add(&self.futex, ONE_WRITER, Ordering::Acquire);
if unsafe { likely((val & F_WRITE_SHOVE == 0)
&& (val & M_WRITERS == ONE_WRITER)
&& (val & M_READERS == 0)) } {
return;
}
self.acquire_write_slow(val)
}
// Drop our active-reader unit; if we were the last reader and writers are
// waiting, wake one writer (it waits for M_READERS == 0 in
// `acquire_write_slow`).
#[inline]
fn release_read(&self, _: ()) {
let val = safe_sub(&self.futex, ONE_READER, Ordering::Release);
if (val & M_READERS == 0) && (val & M_WRITERS != 0) {
futex_wake_bitset(&self.futex, 1, ID_WRITER);
}
}
// Drop our writer unit; the fast path is done when nobody is waiting
// (no other writers and no queued readers), otherwise hand off / wake in
// the slow path.
#[inline]
fn release_write(&self, _: ()) {
let val = safe_sub(&self.futex, ONE_WRITER, Ordering::Release);
if unsafe { likely((val & M_WRITERS == 0)
&& (val & M_READERS_QUEUED == 0)) } {
return;
}
self.release_write_slow(val)
}
}
impl Default for RwFutex2 {
    /// Returns a fresh, unlocked futex: every counter field and flag zero.
    fn default() -> RwFutex2 {
        let word = AtomicU32::new(0);
        RwFutex2 { futex: word }
    }
}
impl Debug for RwFutex2 {
    /// Prints the address of the lock word plus its current raw hex value.
    /// NOTE(review): the label says "RwFutex" while the type is `RwFutex2` —
    /// kept as-is to preserve output byte-for-byte; confirm if intentional.
    fn fmt(&self, f: &mut Formatter) -> FmtResult {
        let addr = &self.futex as *const _;
        let word = self.futex.load(Ordering::SeqCst);
        write!(f, "RwFutex@{:p} (=0x{:08x})", addr, word)
    }
}