//! simink_spinlock 0.1.1
//!
//! A spin lock for simink (simink 自旋锁).
#![no_std]
#![feature(negative_impls)]

use core::{
    cell::UnsafeCell,
    fmt,
    ops::{Deref, DerefMut},
    sync::atomic::AtomicBool,
};

use processor::cpu_relax;

/// A mutual-exclusion spin lock protecting a value of type `T`.
///
/// Acquiring the lock busy-waits (spins) instead of blocking, so it is
/// usable in `no_std` contexts without an OS scheduler.
pub struct SpinLock<T: ?Sized> {
    // Lock flag: `false` = unlocked, `true` = held.
    inner: AtomicBool,
    // Protected value; `UnsafeCell` allows mutation through `&self`
    // once the flag has been won.
    data: UnsafeCell<T>,
}

// SAFETY: the atomic flag serializes all access to `data`, so sharing or
// moving the lock across threads is sound whenever the protected value
// itself may be sent between threads (`T: Send`).
unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}

/// RAII guard returned by [`SpinLock::lock`] and [`SpinLock::try_lock`].
///
/// Grants access to the protected value via `Deref`/`DerefMut` and
/// releases the lock when dropped.
pub struct SpinLockGuard<'a, T: ?Sized + 'a> {
    // Back-reference to the owning lock, used both for data access and
    // for clearing the flag on drop.
    lock: &'a SpinLock<T>,
}

// The guard is explicitly `!Send` (nightly `negative_impls` feature), so it
// cannot be moved to another thread — NOTE(review): presumably to guarantee
// release happens on the acquiring CPU; confirm against kernel requirements.
impl<T: ?Sized> !Send for SpinLockGuard<'_, T> {}
// SAFETY: sharing `&SpinLockGuard` only ever exposes `&T`, which is sound
// when `T: Sync`.
unsafe impl<T: ?Sized + Sync> Sync for SpinLockGuard<'_, T> {}

impl<T> SpinLock<T> {
    pub const fn new(t: T) -> SpinLock<T> {
        Self {
            inner: AtomicBool::new(false),
            data: UnsafeCell::new(t),
        }
    }
}

impl<T: ?Sized> SpinLock<T> {
    /// Attempts one weak compare-exchange acquisition.
    ///
    /// May fail spuriously even when the lock is free, which is fine
    /// inside the retry loop of [`SpinLock::lock`].
    #[inline]
    fn try_lock_weak(&self) -> Option<SpinLockGuard<T>> {
        // Acquire on success pairs with the Release store in the guard's
        // `Drop`; the failure ordering can stay Relaxed because no
        // protected data is touched on that path.
        let won = self.inner.compare_exchange_weak(
            false,
            true,
            core::sync::atomic::Ordering::Acquire,
            core::sync::atomic::Ordering::Relaxed,
        );
        match won {
            Ok(_) => Some(SpinLockGuard { lock: self }),
            Err(_) => None,
        }
    }

    /// Acquires the lock, spinning until it becomes available.
    ///
    /// Returns a guard that releases the lock when dropped.
    pub fn lock(&self) -> SpinLockGuard<'_, T> {
        loop {
            match self.try_lock_weak() {
                Some(guard) => return guard,
                None => {
                    // Spin on a plain load instead of hammering the
                    // compare-exchange, to reduce cache-line contention.
                    while self.is_locked() {
                        cpu_relax();
                    }
                }
            }
        }
    }

    /// Attempts to acquire the lock without spinning.
    ///
    /// Returns `None` if the lock is currently held.
    pub fn try_lock(&self) -> Option<SpinLockGuard<T>> {
        // Strong compare-exchange: a single-shot attempt must not fail
        // spuriously.
        self.inner
            .compare_exchange(
                false,
                true,
                core::sync::atomic::Ordering::Acquire,
                core::sync::atomic::Ordering::Relaxed,
            )
            .ok()
            .map(|_| SpinLockGuard { lock: self })
    }

    /// Reports whether the lock is currently held.
    ///
    /// Relaxed load: the answer may already be stale when the caller
    /// acts on it, so treat it as a hint only.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.inner.load(core::sync::atomic::Ordering::Relaxed)
    }

    /// Releases the lock held by `guard`; equivalent to dropping it.
    #[inline]
    pub fn unlock(guard: SpinLockGuard<'_, T>) {
        drop(guard)
    }

    /// Consumes the lock and returns the protected value.
    ///
    /// No locking needed: ownership proves exclusive access.
    #[inline]
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        self.data.into_inner()
    }

    /// Returns a mutable reference to the protected value.
    ///
    /// No locking needed: `&mut self` proves exclusive access.
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.data.get_mut()
    }
}

impl<T> From<T> for SpinLock<T> {
    fn from(value: T) -> Self {
        SpinLock::new(value)
    }
}

impl<T: ?Sized + Default> Default for SpinLock<T> {
    fn default() -> Self {
        SpinLock::new(Default::default())
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinLock<T> {
    /// Non-blocking debug formatting: the protected value is printed only
    /// if the lock can be taken without waiting; otherwise just the
    /// struct name is shown.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut s = f.debug_struct("SpinLock");
        match self.try_lock() {
            Some(guard) => s.field("data", &&*guard).finish_non_exhaustive(),
            None => s.finish_non_exhaustive(),
        }
    }
}

impl<T: ?Sized> Deref for SpinLockGuard<'_, T> {
    type Target = T;

    /// Shared access to the protected value for the guard's lifetime.
    fn deref(&self) -> &T {
        // SAFETY: this guard exists only while the lock is held, so no
        // other guard can hand out a conflicting `&mut T`.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T: ?Sized> DerefMut for SpinLockGuard<'_, T> {
    /// Exclusive access to the protected value for the guard's lifetime.
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: holding the lock plus `&mut self` guarantees this is
        // the only reference into `data`.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T: ?Sized> Drop for SpinLockGuard<'_, T> {
    /// Releases the lock when the guard goes out of scope.
    fn drop(&mut self) {
        // Release pairs with the Acquire compare-exchange in the lock
        // paths, publishing all writes made while the lock was held.
        self.lock
            .inner
            .store(false, core::sync::atomic::Ordering::Release);
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinLockGuard<'_, T> {
    /// Forwards to the `Debug` impl of the protected value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

impl<T: ?Sized + fmt::Display> fmt::Display for SpinLockGuard<'_, T> {
    /// Forwards to the `Display` impl of the protected value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}