use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crate::backoff::BackOff;
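/// A minimal test-and-test-and-set spinlock over a single `AtomicBool`
/// (`false` = unlocked, `true` = locked). Usually used through the `Spin`
/// alias rather than directly.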
pub struct RawSpin {
lock: AtomicBool,
}
impl RawSpin {
#[must_use]
pub const fn new() -> Self {
Self {
lock: AtomicBool::new(false),
}
}
#[must_use]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed)
}
#[must_use]
pub fn try_lock(&self) -> bool {
!self.lock.fetch_or(true, Ordering::Acquire)
}
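    /// Spins until the lock is acquired. After a failed `fetch_or` it only
    /// re-reads the flag (test-and-test-and-set) with `BackOff` between reads,
    /// which avoids bouncing the cache line while the lock is held.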
pub fn lock(&self) {
let mut backoff = BackOff::new();
while self.lock.fetch_or(true, Ordering::Acquire) {
while self.is_locked() {
backoff.wait();
}
}
}
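    /// Releases the lock. The caller must currently hold it.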
pub fn unlock(&self) {
self.lock.store(false, Ordering::Release);
}
}
unsafe impl lock_api::RawMutex for RawSpin {
type GuardMarker = lock_api::GuardSend;
const INIT: Self = Self::new();
#[inline]
fn is_locked(&self) -> bool {
self.is_locked()
}
#[inline]
fn try_lock(&self) -> bool {
self.try_lock()
}
#[inline]
fn lock(&self) {
self.lock()
}
#[inline]
unsafe fn unlock(&self) {
self.unlock()
}
}
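/// A reader-writer spinlock whose entire state lives in one `AtomicUsize`:
/// readers increment the word, a waiting writer sets `EXCLUSIVE_PENDING` to
/// hold off new readers, and the writer swaps in `EXCLUSIVE_LOCKED` once the
/// reader count has drained. Usually used through the `RwSpin` alias.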
pub struct RawRwSpin {
lock: AtomicUsize,
}
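// Layout of the `lock` word: the top bit marks a writer that holds the lock,
// the bit below it marks a writer that is waiting, and everything underneath
// counts shared (reader) locks. `SHARED_LOCK_THRESHOLD` is a debug-only sanity
// bound on that reader count.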
const SHARED_LOCK_THRESHOLD: usize = usize::MAX / 32 + 1;
const EXCLUSIVE_PENDING: usize = usize::MAX / 4 + 1;
const EXCLUSIVE_LOCKED: usize = usize::MAX / 2 + 1;
impl RawRwSpin {
#[must_use]
pub const fn new() -> Self {
RawRwSpin {
lock: AtomicUsize::new(0),
}
}
#[must_use]
pub fn is_locked_exclusive(&self) -> bool {
self.lock.load(Ordering::Relaxed) >= EXCLUSIVE_LOCKED
}
#[must_use]
pub fn is_locked(&self) -> bool {
self.lock.load(Ordering::Relaxed) > 0
}
#[must_use]
pub fn try_lock_shared(&self) -> bool {
let count = self.lock.fetch_add(1, Ordering::Acquire);
#[cfg(debug_assertions)]
locks_count_check(count, || {
self.lock.fetch_sub(1, Ordering::Relaxed);
});
if count < EXCLUSIVE_PENDING {
true
} else {
self.lock.fetch_sub(1, Ordering::Relaxed);
false
}
}
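    /// Acquires a shared lock, polling `cond` while waiting. Returns `true`
    /// once the shared lock is held, or `false` (without the lock) as soon as
    /// `cond` returns `false`. While a writer is waiting but has not yet
    /// locked, readers back off so the writer is not starved.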
#[must_use]
    pub fn lock_shared_while(&self, mut cond: impl FnMut() -> bool) -> bool {
        // Optimistically register as a reader; `count` is the previous value.
        let mut count = self.lock.fetch_add(1, Ordering::Acquire);
        let mut backoff = BackOff::new();
#[cfg(debug_assertions)]
locks_count_check(count, || {
self.lock.fetch_sub(1, Ordering::Relaxed);
});
        // No writer pending or holding the lock: the shared lock is ours.
        if count < EXCLUSIVE_PENDING {
            return true;
        }
        // A writer is pending but has not acquired yet: step out of its way so
        // it is not starved, and only re-register as a reader once it has
        // either taken the lock or given up.
        if count < EXCLUSIVE_LOCKED {
            count = self.lock.fetch_sub(1, Ordering::Relaxed);
            loop {
#[cfg(debug_assertions)]
locks_count_check(count, || {});
if !cond() {
return false;
}
if count >= EXCLUSIVE_LOCKED || count < EXCLUSIVE_PENDING {
count = self.lock.fetch_add(1, Ordering::Acquire);
break;
}
backoff.wait();
count = self.lock.load(Ordering::Relaxed);
}
}
        // At this point we are counted as a reader: spin until no writer
        // holds the lock.
        loop {
#[cfg(debug_assertions)]
locks_count_check(count, || {
self.lock.fetch_sub(1, Ordering::Relaxed);
});
if count < EXCLUSIVE_LOCKED {
return true;
}
if !cond() {
self.lock.fetch_sub(1, Ordering::Relaxed);
return false;
}
backoff.wait();
count = self.lock.load(Ordering::Acquire);
}
}
pub fn lock_shared(&self) {
let acquired = self.lock_shared_while(|| true);
debug_assert!(acquired);
}
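    /// Releases one shared lock. The caller must currently hold a shared lock.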
pub fn unlock_shared(&self) {
self.lock.fetch_sub(1, Ordering::Release);
}
#[must_use]
pub fn try_lock_exclusive(&self) -> bool {
self.lock
.compare_exchange(0, EXCLUSIVE_LOCKED, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
}
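    /// Acquires the exclusive lock, polling `cond` while waiting. Returns
    /// `true` once the lock is held, or `false` (without the lock) as soon as
    /// `cond` returns `false`. A waiting writer announces itself with
    /// `EXCLUSIVE_PENDING` so that new readers stand back while existing ones
    /// drain.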
#[must_use]
    pub fn lock_exclusive_while(&self, mut cond: impl FnMut() -> bool) -> bool {
        let mut backoff = BackOff::new();
        // Set once this writer has announced itself via EXCLUSIVE_PENDING.
        let mut in_queue = false;
        // Fast path: take the lock outright while it is completely free.
let mut r = self.lock.compare_exchange_weak(
0,
EXCLUSIVE_LOCKED,
Ordering::Acquire,
Ordering::Relaxed,
);
loop {
match r {
Ok(_) => {
return true;
}
Err(mut count) => {
if !cond() {
if in_queue {
self.lock.fetch_sub(EXCLUSIVE_PENDING, Ordering::Relaxed);
}
return false;
}
                    // Readers currently hold the lock and no other writer is
                    // queued: announce this writer with EXCLUSIVE_PENDING so
                    // new readers back off while the existing ones drain.
                    if !in_queue && count > 0 && count < EXCLUSIVE_PENDING {
let r = self.lock.compare_exchange_weak(
count,
count + EXCLUSIVE_PENDING,
Ordering::Relaxed,
Ordering::Relaxed,
);
match r {
Ok(_) => {
in_queue = true;
count += EXCLUSIVE_PENDING
}
Err(c) => count = c,
}
}
                    // The lock can only be taken once every reader has
                    // drained: the word must equal exactly the expected value
                    // (only our pending bit, or zero).
                    let expected = if in_queue { EXCLUSIVE_PENDING } else { 0 };
if count != expected {
backoff.wait();
}
r = self.lock.compare_exchange_weak(
expected,
EXCLUSIVE_LOCKED,
Ordering::Acquire,
Ordering::Relaxed,
);
}
}
}
}
pub fn lock_exclusive(&self) {
let acquired = self.lock_exclusive_while(|| true);
debug_assert!(acquired);
}
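    /// Releases the exclusive lock. The caller must currently hold it.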
pub fn unlock_exclusive(&self) {
self.lock.fetch_sub(EXCLUSIVE_LOCKED, Ordering::Release);
}
}
unsafe impl lock_api::RawRwLock for RawRwSpin {
type GuardMarker = lock_api::GuardSend;
const INIT: Self = Self::new();
#[inline]
fn lock_shared(&self) {
self.lock_shared()
}
    #[inline]
    fn try_lock_shared(&self) -> bool {
self.try_lock_shared()
}
#[inline]
unsafe fn unlock_shared(&self) {
self.unlock_shared()
}
#[inline]
fn lock_exclusive(&self) {
self.lock_exclusive()
}
    #[inline]
    fn try_lock_exclusive(&self) -> bool {
self.try_lock_exclusive()
}
#[inline]
unsafe fn unlock_exclusive(&self) {
self.unlock_exclusive()
}
#[inline]
fn is_locked(&self) -> bool {
self.is_locked()
}
#[inline]
fn is_locked_exclusive(&self) -> bool {
self.is_locked_exclusive()
}
}
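/// A spin-based mutex over `T`, driven by `RawSpin` through `lock_api`.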
pub type Spin<T> = lock_api::Mutex<RawSpin, T>;
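/// A spin-based reader-writer lock over `T`, driven by `RawRwSpin` through `lock_api`.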
pub type RwSpin<T> = lock_api::RwLock<RawRwSpin, T>;
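// Debug-only sanity check: panics if the reader count is implausibly large,
// which indicates leaked shared locks. `failure` lets the caller undo its
// bookkeeping before the panic.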
#[cfg(debug_assertions)]
#[inline]
fn locks_count_check(count: usize, failure: impl FnOnce()) {
if count < EXCLUSIVE_PENDING {
if count > SHARED_LOCK_THRESHOLD {
failure();
too_many_shared_locks(count);
}
    } else if count < EXCLUSIVE_LOCKED {
        // A writer is pending: subtract the pending bit to get the reader count.
        if count > EXCLUSIVE_PENDING + SHARED_LOCK_THRESHOLD {
            failure();
            too_many_shared_locks(count - EXCLUSIVE_PENDING);
        }
    } else if count > EXCLUSIVE_LOCKED + SHARED_LOCK_THRESHOLD {
        // A writer holds the lock: subtract the locked bit to get the reader count.
        failure();
        too_many_shared_locks(count - EXCLUSIVE_LOCKED);
    }
}
#[cfg(debug_assertions)]
#[cold]
#[inline(never)]
fn too_many_shared_locks(count: usize) -> ! {
    panic!(
        "There can be at most {SHARED_LOCK_THRESHOLD} shared locks at a time. Current count: {count}. This can only happen if shared locks are leaked instead of released.",
    );
}
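
// A minimal usage sketch: it exercises the public `Spin` and `RwSpin` aliases
// purely through the guards that `lock_api` provides, so nothing beyond the
// items defined above and the standard test harness is assumed.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn spin_mutex_guards_data() {
        let counter = Spin::new(0_u32);
        {
            // The guard releases the lock when it goes out of scope.
            let mut guard = counter.lock();
            *guard += 1;
        }
        assert_eq!(*counter.lock(), 1);
    }

    #[test]
    fn rw_spin_allows_concurrent_readers() {
        let value = RwSpin::new(7_u32);
        let r1 = value.read();
        let r2 = value.read();
        assert_eq!(*r1 + *r2, 14);
        drop((r1, r2));
        *value.write() = 8;
        assert_eq!(*value.read(), 8);
    }
}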