use std::sync::atomic::Ordering;
#[cfg(feature = "nightly")]
use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
#[cfg(feature = "nightly")]
type U8 = u8;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::AtomicUsize as AtomicU8;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
#[cfg(not(feature = "nightly"))]
type U8 = usize;
use deadlock;
use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
use std::time::{Duration, Instant};
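// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.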
pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
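// UnparkToken used to indicate that the mutex is being handed off to the
// target thread directly without unlocking it.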
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
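// LOCKED_BIT is set while the mutex is held; PARKED_BIT is set while at
// least one thread is parked (or about to park) waiting for it.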
const LOCKED_BIT: U8 = 1;
const PARKED_BIT: U8 = 2;
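/// Raw mutex type backed by the parking lot, normally used through the
/// `lock_api::Mutex` wrapper rather than on its own.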
pub struct RawMutex {
state: AtomicU8,
}
unsafe impl RawMutexTrait for RawMutex {
const INIT: RawMutex = RawMutex {
state: ATOMIC_U8_INIT,
};
type GuardMarker = GuardNoSend;
#[inline]
fn lock(&self) {
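        // Fast path: a single CAS from 0 to LOCKED_BIT acquires an
        // uncontended mutex; otherwise fall back to the slow path.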
if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
self.lock_slow(None);
}
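        // Record the acquisition for the deadlock detector (a no-op unless
        // deadlock detection is enabled).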
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
#[inline]
fn try_lock(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
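            // Bail out as soon as the mutex is seen to be locked; otherwise
            // try to set LOCKED_BIT without ever blocking.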
if state & LOCKED_BIT != 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
return true;
}
Err(x) => state = x,
}
}
}
#[inline]
fn unlock(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
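        // Fast path: release an uncontended mutex. The CAS fails if
        // PARKED_BIT is set, meaning a waiting thread must be woken.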
if self
.state
.compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(false);
}
}
unsafe impl RawMutexFair for RawMutex {
#[inline]
fn unlock_fair(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
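        // Same fast path as `unlock`; the slow path hands the mutex off to a
        // parked thread instead of letting it be re-acquired unfairly.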
if self
.state
.compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(true);
}
#[inline]
fn bump(&self) {
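        // Only yield the mutex if another thread is actually parked on it.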
if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
self.bump_slow();
}
}
}
unsafe impl RawMutexTimed for RawMutex {
type Duration = Duration;
type Instant = Instant;
#[inline]
fn try_lock_until(&self, timeout: Instant) -> bool {
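        // Fast path CAS, then fall back to the slow path with a deadline.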
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(Some(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
#[inline]
fn try_lock_for(&self, timeout: Duration) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(Some(Instant::now() + timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
}
impl RawMutex {
#[inline]
pub(crate) fn mark_parked_if_locked(&self) -> bool {
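        // Set PARKED_BIT, but only while the mutex is held; returns false
        // (without modifying the state) if the mutex is unlocked.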
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT == 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
}
}
#[inline]
pub(crate) fn mark_parked(&self) {
self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
}
#[cold]
#[inline(never)]
fn lock_slow(&self, timeout: Option<Instant>) -> bool {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
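            // Grab the lock if it isn't locked, even if there is a queue on it.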
if state & LOCKED_BIT == 0 {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
continue;
}
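            // If there is no queue, try spinning a few times first.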
if state & PARKED_BIT == 0 && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
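            // Set the parked bit before parking.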
if state & PARKED_BIT == 0 {
if let Err(x) = self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
state = x;
continue;
}
}
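            // Park our thread until we are woken up by an unlock.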
unsafe {
let addr = self as *const _ as usize;
let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
let timed_out = |_, was_last_thread| {
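                    // Clear the parked bit if we were the last parked thread.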
if was_last_thread {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
match parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
timeout,
) {
                    // The unparking thread handed the lock directly to us.
                    ParkResult::Unparked(TOKEN_HANDOFF) => return true,
                    // Unparked normally: loop back and try to acquire the lock again.
                    ParkResult::Unparked(_) => (),
                    // The validate callback failed (the state changed): try locking again.
                    ParkResult::Invalid => (),
                    // Timeout expired without acquiring the lock.
                    ParkResult::TimedOut => return false,
}
}
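            // Loop back and try locking again.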
spinwait.reset();
state = self.state.load(Ordering::Relaxed);
}
}
#[cold]
#[inline(never)]
fn unlock_slow(&self, force_fair: bool) {
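        // Unlock directly if there are no parked threads.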
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
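        // Unpark one thread, leaving the parked bit set if there might
        // still be parked threads on this address.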
unsafe {
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
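                // For a fair unlock, keep the mutex locked and hand it off
                // directly to the unparked thread.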
if result.unparked_threads != 0 && (force_fair || result.be_fair) {
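                    // Clear the parked bit if there are no more parked threads.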
if !result.have_more_threads {
self.state.store(LOCKED_BIT, Ordering::Relaxed);
}
return TOKEN_HANDOFF;
}
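                // Otherwise clear the locked bit, and the parked bit as well
                // if there are no more parked threads.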
if result.have_more_threads {
self.state.store(PARKED_BIT, Ordering::Release);
} else {
self.state.store(0, Ordering::Release);
}
TOKEN_NORMAL
};
parking_lot_core::unpark_one(addr, callback);
}
}
#[cold]
#[inline(never)]
fn bump_slow(&self) {
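        // Release the mutex fairly (handing it to a parked thread) and then
        // reacquire it, giving waiting threads a chance to run.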
unsafe { deadlock::release_resource(self as *const _ as usize) };
self.unlock_slow(true);
self.lock();
}
}