use crate::{
AtomicBool, AtomicOrdering, Debug, Deref, DerefMut, FmtResult, Formatter, UnsafeCell,
any_type_name, spin_loop,
};
#[cfg(feature = "std")]
use crate::{Thread, ThreadExt};
#[doc = crate::_tags!(concurrency)]
#[doc = crate::_doc_location!("work/sync")]
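/// A spinning mutual-exclusion lock with a tiered busy-wait backoff.
///
/// With the `std` feature it spins for the first `SPIN` contended retries,
/// yields to the scheduler until `YIELD` retries, and then sleeps `SLEEP`
/// nanoseconds per retry; without `std` it only spins.
///
/// A minimal usage sketch (the import path is an assumption, adjust to the
/// crate's actual re-export):
/// ```ignore
/// use crate::SpinLock; // assumed re-export path
/// let counter = SpinLock::<u32>::new(0);
/// {
///     let mut guard = counter.lock(); // blocks until acquired
///     *guard += 1;
/// } // guard dropped here, releasing the lock
/// assert_eq!(*counter.lock(), 1);
/// ```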
#[derive(Default)]
pub struct SpinLock<T, const SPIN: usize = 5, const YIELD: usize = 10, const SLEEP: u64 = 100> {
value: UnsafeCell<T>,
lock: AtomicBool,
}
// SAFETY: the inner value is only reachable through the guard, which serializes
// access via the `lock` flag, so any configuration of the lock can be shared and
// sent across threads whenever `T: Send`.
unsafe impl<T: Send, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Send
for SpinLock<T, SPIN, YIELD, SLEEP> {}
unsafe impl<T: Send, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Sync
for SpinLock<T, SPIN, YIELD, SLEEP> {}
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Debug
for SpinLock<T, SPIN, YIELD, SLEEP>
{
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult<()> {
let locked = self.lock.load(AtomicOrdering::Acquire);
f.debug_struct("SpinLock")
.field("type", &any_type_name::<T>())
.field("locked", &locked)
.finish()
}
}
#[rustfmt::skip]
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> SpinLock<T, SPIN, YIELD, SLEEP> {
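/// Creates a new unlocked `SpinLock` wrapping `value`.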
pub const fn new(value: T) -> Self {
SpinLock { value: UnsafeCell::new(value), lock: AtomicBool::new(false) }
}
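/// Acquires the lock, blocking the current thread until it is available.
///
/// While contended (with the `std` feature) it backs off in tiers: a CPU spin
/// hint for the first `SPIN` retries, a scheduler yield until `YIELD` retries,
/// and then a `SLEEP`-nanosecond sleep per retry (skipped when `SLEEP` is 0).
/// Without `std` it spins unconditionally.
///
/// The returned guard releases the lock when dropped.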
pub fn lock(&self) -> SpinLockGuard<'_, T, SPIN, YIELD, SLEEP> {
#[cfg(feature = "std")]
let mut spin = 0usize;
// Retry flipping the flag from `false` to `true`, backing off between attempts.
while self.lock.compare_exchange_weak(false, true,
AtomicOrdering::Acquire, AtomicOrdering::Acquire).is_err() {
#[cfg(feature = "std")]
{
// Tiered backoff: CPU spin hint, then scheduler yield, then a short sleep.
if spin < SPIN { spin_loop(); }
else if spin < YIELD { Thread::yield_now(); }
else if SLEEP > 0 { Thread::sleep_ns(SLEEP); }
spin += 1;
}
#[cfg(not(feature = "std"))]
{ spin_loop(); }
}
SpinLockGuard(self)
}
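/// Attempts to acquire the lock without blocking.
///
/// Returns `Some(guard)` if the lock was free, or `None` if it is already held.
///
/// A minimal sketch (import path assumed):
/// ```ignore
/// use crate::SpinLock; // assumed re-export path
/// let lock = SpinLock::<&str>::new("hello");
/// let guard = lock.lock();
/// assert!(lock.try_lock().is_none()); // already held by `guard`
/// drop(guard);
/// assert!(lock.try_lock().is_some());
/// ```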
pub fn try_lock(&self) -> Option<SpinLockGuard<'_, T, SPIN, YIELD, SLEEP>> {
self.lock.compare_exchange(false, true, AtomicOrdering::Acquire, AtomicOrdering::Acquire)
.is_ok().then(|| SpinLockGuard(self))
}
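/// Returns `true` if the lock is currently held.
///
/// The answer may already be stale by the time it is used; prefer
/// [`try_lock`][Self::try_lock] when the result must be acted upon.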
pub fn is_locked(&self) -> bool { self.lock.load(AtomicOrdering::Acquire) }
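/// Consumes the lock and returns the inner value.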
pub fn into_inner(self) -> T { self.value.into_inner() }
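/// Returns a copy of the inner value if the lock can be acquired right now.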
pub fn try_into_inner(&self) -> Option<T> where T: Copy {
// Copy the value while briefly holding the lock, so it can't race with a writer.
self.try_lock().map(|guard| *guard)
}
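/// Forcibly clears the lock flag (debug builds only).
///
/// # Safety
/// Any outstanding [`SpinLockGuard`] still believes it holds the lock, so calling
/// this while a guard is alive may let two threads access the value at once.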
#[cfg(debug_assertions)]
#[cfg_attr(nightly_doc, doc(cfg(debug_assertions)))]
pub unsafe fn debug_force_unlock(&self) { self.lock.store(false, AtomicOrdering::SeqCst); }
}
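/// A guard granting access to a [`SpinLock`]'s value; it releases the lock on drop.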
pub struct SpinLockGuard<'a, T, const SPIN: usize, const YIELD: usize, const SLEEP: u64>(
&'a SpinLock<T, SPIN, YIELD, SLEEP>,
);
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Debug
for SpinLockGuard<'_, T, SPIN, YIELD, SLEEP>
{
fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult<()> {
f.debug_tuple("SpinLockGuard").field(&any_type_name::<T>()).finish()
}
}
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Drop
for SpinLockGuard<'_, T, SPIN, YIELD, SLEEP>
{
fn drop(&mut self) {
self.0.lock.store(false, AtomicOrdering::Release);
}
}
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> Deref
for SpinLockGuard<'_, T, SPIN, YIELD, SLEEP>
{
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.0.value.get() }
}
}
impl<T, const SPIN: usize, const YIELD: usize, const SLEEP: u64> DerefMut
for SpinLockGuard<'_, T, SPIN, YIELD, SLEEP>
{
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { &mut *self.0.value.get() }
}
}