#[cfg(doc)]
use crate::locked::Locked;
use {
crate::execution_unit::execution_unit_id,
opera::{PhantomNotSend, PhantomNotSync},
parking_lot::{
RawMutex,
lock_api::{RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed},
},
run_on_drop::on_drop,
static_assertions::assert_not_impl_any,
std::{
cell::Cell,
fmt::{Debug, Formatter},
mem::{self, ManuallyDrop},
ptr,
sync::{
Arc,
atomic::{AtomicUsize, Ordering::Relaxed},
},
time::{Duration, Instant},
},
};
#[cfg(test)]
mod tests;
/// A reentrant lock scoped to an "execution unit" (see `execution_unit_id`):
/// once a unit holds the lock, further `lock`/`try_lock*` calls from the same
/// unit succeed immediately by taking an extra ticket instead of touching the
/// underlying mutex.
///
/// Cloning produces another handle to the *same* lock (state is shared via
/// [`Arc`]); handle equality (`PartialEq`) is identity of that shared state.
#[derive(Clone, Default)]
pub struct Lock {
    // State shared by all clones of this handle.
    shared: Arc<Shared>,
}
/// State shared between all clones of a `Lock`.
struct Shared {
    // The underlying non-reentrant mutex; held exactly while `tickets > 0`.
    raw_mutex: RawMutex,
    // Id of the execution unit currently holding `raw_mutex`; reset to 0 when
    // the last ticket is released (see `force_unlock_slow`). Written only by
    // the holder, read with `Relaxed` on the fast path.
    // NOTE(review): assumes 0 is never a live execution-unit id — confirm
    // against `execution_unit_id`'s implementation.
    execution_unit_id: AtomicUsize,
    // Number of outstanding guards held by the owning execution unit. Only
    // accessed while that unit owns `raw_mutex`, which is what makes the
    // non-`Sync` `Cell` sound despite `Lock: Sync` below.
    tickets: Cell<u64>,
}
/// RAII guard returned by the locking methods on `Lock`; releases one ticket
/// (unfairly) when dropped.
///
/// Deliberately neither `Send` nor `Sync` (see the phantom fields and the
/// `assert_not_impl_any!` check): a guard must stay on the execution unit
/// that acquired it, because unlocking mutates the unsynchronized `tickets`
/// cell.
pub struct Guard<'a> {
    // The lock this guard will release on drop.
    lock: &'a Lock,
    _phantom_not_send: PhantomNotSend,
    _phantom_not_sync: PhantomNotSync,
}
// SAFETY: `Shared` contains a `Cell` (not `Sync`), but `tickets` is only
// accessed by the execution unit that currently holds `raw_mutex`, so sharing
// and sending `Lock` handles across threads is sound.
unsafe impl Send for Lock {}
unsafe impl Sync for Lock {}
// Compile-time check that `Guard` cannot leave its execution unit.
assert_not_impl_any!(Guard<'_>: Sync, Send);
impl Default for Shared {
    /// Creates unlocked shared state: an unheld mutex, no owner (id 0), and
    /// zero outstanding tickets.
    fn default() -> Self {
        let raw_mutex = RawMutex::INIT;
        let execution_unit_id = AtomicUsize::new(0);
        let tickets = Cell::new(0);
        Self {
            raw_mutex,
            execution_unit_id,
            tickets,
        }
    }
}
/// Fast path shared by `lock`/`try_lock*`: if the calling execution unit
/// already owns the lock (its id matches the stored owner id), take another
/// ticket and `return $ret` with `$guard` bound to the new guard — without
/// touching the raw mutex. Falls through to the caller's slow path otherwise.
macro_rules! maybe_lock_fast {
    ($slf:expr, $guard:ident, $ret:expr) => {
        let shared = &*$slf.shared;
        // `Relaxed` suffices: a match is only possible if *this* execution
        // unit performed the matching store, so no cross-thread ordering is
        // required.
        if shared.execution_unit_id.load(Relaxed) == execution_unit_id() {
            // SAFETY: this execution unit owns the mutex, so it has
            // exclusive access to the ticket count.
            let $guard = unsafe { $slf.add_ticket() };
            return $ret;
        }
    };
}
impl Lock {
    /// Releases one ticket without consuming a [`Guard`] (unfair unlock of
    /// the raw mutex if it was the last ticket).
    ///
    /// # Safety
    ///
    /// The current execution unit must hold the lock with at least one
    /// outstanding ticket that will not also be released by a guard drop.
    #[inline]
    pub unsafe fn force_unlock(&self) {
        unsafe {
            self.force_unlock_::<false>();
        }
    }
    /// Like [`Self::force_unlock`], but unlocks the underlying mutex fairly
    /// when the last ticket is released.
    ///
    /// # Safety
    ///
    /// Same contract as [`Self::force_unlock`].
    #[inline]
    pub unsafe fn force_unlock_fair(&self) {
        unsafe {
            self.force_unlock_::<true>();
        }
    }
    /// Decrements the ticket count; fully releases the mutex when the last
    /// ticket is returned.
    #[inline]
    unsafe fn force_unlock_<const FAIR: bool>(&self) {
        let shared = &*self.shared;
        let guards = shared.tickets.get();
        // Unlocking with zero tickets is a caller bug.
        debug_assert!(guards > 0);
        shared.tickets.set(guards - 1);
        if guards == 1 {
            // That was the last ticket — release the mutex itself.
            unsafe {
                self.force_unlock_slow::<FAIR>();
            }
        }
    }
    /// Clears the owner id, then unlocks the underlying mutex.
    #[cold]
    #[inline]
    unsafe fn force_unlock_slow<const FAIR: bool>(&self) {
        // Only the owning execution unit may fully unlock, and only after
        // the last ticket has been returned.
        debug_assert_eq!(
            self.shared.execution_unit_id.load(Relaxed),
            execution_unit_id(),
        );
        debug_assert_eq!(self.shared.tickets.get(), 0);
        // Reset ownership *before* releasing the mutex, so the fast path of
        // other units never matches while the lock is being handed over.
        self.shared.execution_unit_id.store(0, Relaxed);
        unsafe {
            if FAIR {
                self.shared.raw_mutex.unlock_fair();
            } else {
                self.shared.raw_mutex.unlock();
            }
        }
    }
    /// Returns `true` if any execution unit currently holds the lock.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.shared.raw_mutex.is_locked()
    }
    /// Returns `true` if `guard` guards *this* lock (identity of the shared
    /// state, via `PartialEq`).
    #[inline]
    pub fn is_locked_by(&self, guard: &Guard<'_>) -> bool {
        self == guard.lock
    }
    /// Returns `true` if the calling execution unit holds the lock.
    // NOTE(review): named "thread" but keyed on `execution_unit_id` — the two
    // are presumably one-to-one here; confirm.
    #[inline]
    pub fn is_locked_by_current_thread(&self) -> bool {
        let shared = &*self.shared;
        shared.execution_unit_id.load(Relaxed) == execution_unit_id()
    }
    /// Acquires the lock, blocking while another execution unit holds it.
    /// Reentrant: nested calls from the holder return immediately with an
    /// additional ticket.
    #[inline]
    pub fn lock(&self) -> Guard<'_> {
        maybe_lock_fast!(self, guard, guard);
        self.lock_slow()
    }
    /// Slow path: block on the raw mutex, then record ownership.
    // NOTE(review): `#[cold]` together with `#[inline(always)]` is an unusual
    // combination — presumably intended to shape branch layout at the call
    // site while still inlining; confirm the intent.
    #[cold]
    #[inline(always)]
    fn lock_slow(&self) -> Guard<'_> {
        self.shared.raw_mutex.lock();
        // SAFETY: the raw mutex was just acquired by this execution unit.
        unsafe { self.add_ticket_after_lock() }
    }
    /// Records the calling execution unit as owner and takes the first
    /// ticket. Must be called immediately after acquiring `raw_mutex`.
    #[inline]
    unsafe fn add_ticket_after_lock(&self) -> Guard<'_> {
        let shared = &*self.shared;
        shared.execution_unit_id.store(execution_unit_id(), Relaxed);
        unsafe { self.add_ticket() }
    }
    /// Increments the ticket count and produces a guard. Diverges (rather
    /// than panicking) on `u64` ticket overflow, which cannot occur in
    /// practice.
    #[inline]
    unsafe fn add_ticket(&self) -> Guard<'_> {
        let shared = &*self.shared;
        let guards = shared.tickets.get();
        if guards == u64::MAX {
            // Diverge without pulling in panic machinery; 2^64 concurrently
            // nested guards is unreachable.
            #[cold]
            fn never() -> ! {
                #[allow(clippy::empty_loop)]
                loop {}
            }
            never();
        }
        shared.tickets.set(guards + 1);
        unsafe { self.make_guard_unchecked_() }
    }
    /// Creates a guard without locking or taking a ticket.
    ///
    /// # Safety
    ///
    /// Dropping the returned guard releases one ticket, so the caller must
    /// ensure the current execution unit holds the lock with an outstanding
    /// ticket not otherwise accounted for (e.g. one previously leaked with
    /// `mem::forget`).
    #[inline]
    pub unsafe fn make_guard_unchecked(&self) -> Guard<'_> {
        unsafe { self.make_guard_unchecked_() }
    }
    /// Constructs the guard value itself; performs no bookkeeping.
    #[inline]
    unsafe fn make_guard_unchecked_(&self) -> Guard<'_> {
        Guard {
            lock: self,
            _phantom_not_send: Default::default(),
            _phantom_not_sync: Default::default(),
        }
    }
    /// Attempts to acquire the lock without blocking. Always succeeds when
    /// the calling execution unit already holds it.
    #[inline]
    pub fn try_lock(&self) -> Option<Guard<'_>> {
        maybe_lock_fast!(self, guard, Some(guard));
        self.try_lock_slow()
    }
    #[cold]
    #[inline]
    fn try_lock_slow(&self) -> Option<Guard<'_>> {
        self.shared.raw_mutex.try_lock().then(|| {
            // SAFETY: `try_lock` returned true, so we now own the raw mutex.
            unsafe { self.add_ticket_after_lock() }
        })
    }
    /// Attempts to acquire the lock, giving up after `duration`.
    #[inline]
    pub fn try_lock_for(&self, duration: Duration) -> Option<Guard<'_>> {
        maybe_lock_fast!(self, guard, Some(guard));
        self.try_lock_for_slow(duration)
    }
    #[cold]
    #[inline]
    fn try_lock_for_slow(&self, duration: Duration) -> Option<Guard<'_>> {
        self.shared.raw_mutex.try_lock_for(duration).then(|| {
            // SAFETY: `try_lock_for` returned true, so we own the raw mutex.
            unsafe { self.add_ticket_after_lock() }
        })
    }
    /// Attempts to acquire the lock, giving up at `instant`.
    #[inline]
    pub fn try_lock_until(&self, instant: Instant) -> Option<Guard<'_>> {
        maybe_lock_fast!(self, guard, Some(guard));
        self.try_lock_until_slow(instant)
    }
    #[cold]
    #[inline]
    fn try_lock_until_slow(&self, instant: Instant) -> Option<Guard<'_>> {
        self.shared.raw_mutex.try_lock_until(instant).then(|| {
            // SAFETY: `try_lock_until` returned true, so we own the raw
            // mutex.
            unsafe { self.add_ticket_after_lock() }
        })
    }
    /// Stable address of the shared state, used as the lock's identity in
    /// `Debug` output.
    #[inline]
    pub(crate) fn addr(&self) -> *const u8 {
        let addr: *const Shared = &*self.shared;
        addr.cast()
    }
}
impl Debug for Lock {
    /// Formats as `Lock { id: <address>, .. }`, identifying the lock by the
    /// address of its shared state.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("Lock");
        builder.field("id", &self.addr());
        builder.finish_non_exhaustive()
    }
}
impl PartialEq for Lock {
#[inline]
fn eq(&self, other: &Self) -> bool {
ptr::eq::<Shared>(&*self.shared, &*other.shared)
}
}
impl Eq for Lock {}
impl Guard<'_> {
    /// Consumes the guard, releasing its ticket with fair unlocking of the
    /// raw mutex if it is the last one.
    #[inline]
    pub fn unlock_fair(self) {
        // Suppress the normal (unfair) `Drop`; the release happens below
        // instead, exactly once.
        let slf = ManuallyDrop::new(self);
        // SAFETY: this guard's ticket is still outstanding because its drop
        // was suppressed.
        unsafe {
            slf.lock.force_unlock_fair();
        }
    }
    /// Temporarily releases this guard's ticket while `f` runs, re-acquiring
    /// before returning (also on unwind out of `f`).
    // NOTE(review): with other guards outstanding on the same lock, only one
    // ticket is released, so the mutex itself stays held while `f` runs —
    // presumably the intended reentrant semantics; confirm.
    #[inline]
    pub fn unlocked<T>(&mut self, f: impl FnOnce() -> T) -> T {
        self.unlocked_::<_, false>(f)
    }
    /// Like [`Self::unlocked`], but unlocks the raw mutex fairly.
    #[inline]
    pub fn unlocked_fair<T>(&mut self, f: impl FnOnce() -> T) -> T {
        self.unlocked_::<_, true>(f)
    }
    #[inline]
    fn unlocked_<T, const FAIR: bool>(&mut self, f: impl FnOnce() -> T) -> T {
        // SAFETY: `self` is a live guard, so exactly one ticket backs it.
        unsafe {
            self.lock.force_unlock_::<FAIR>();
        }
        // Re-acquire on scope exit, including unwind from `f`. The freshly
        // acquired guard is forgotten because `self` resumes representing
        // the ticket it created.
        let _lock = on_drop(|| {
            let guard = self.lock.lock();
            mem::forget(guard);
        });
        f()
    }
}
impl Drop for Guard<'_> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: each guard corresponds to exactly one outstanding ticket,
        // released (unfairly) here.
        unsafe {
            self.lock.force_unlock_::<false>();
        }
    }
}
impl Debug for Guard<'_> {
    /// Formats as `Guard { lock_id: <address>, .. }`, naming the guarded
    /// lock by the address of its shared state.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("Guard");
        builder.field("lock_id", &self.lock.addr());
        builder.finish_non_exhaustive()
    }
}