#[cfg(feature = "smp")]
use core::sync::atomic::{AtomicBool, Ordering};
use core::{
cell::UnsafeCell,
fmt,
marker::PhantomData,
ops::{Deref, DerefMut},
};
use kernel_guard::BaseGuard;
/// A spin lock parameterized over a guard type `G` that is acquired before
/// spinning and released after unlocking (e.g. an IRQ/preemption guard —
/// see `kernel_guard::BaseGuard`).
///
/// Without the `smp` feature the atomic flag is compiled out entirely:
/// mutual exclusion is then provided solely by `G`'s protection.
pub struct BaseSpinLock<G: BaseGuard, T: ?Sized> {
    _phantom: PhantomData<G>,
    /// Lock state; only needed when other cores can contend (`smp`).
    #[cfg(feature = "smp")]
    lock: AtomicBool,
    data: UnsafeCell<T>,
}
/// RAII guard returned by [`BaseSpinLock::lock`] / [`BaseSpinLock::try_lock`];
/// dereferences to the protected data and releases the lock (and the `G`
/// guard state) when dropped.
pub struct BaseSpinLockGuard<'a, G: BaseGuard, T: ?Sized + 'a> {
    _phantom: &'a PhantomData<G>,
    /// State returned by `G::acquire`, handed back to `G::release` on drop.
    irq_state: G::State,
    /// Raw pointer into the lock's `UnsafeCell`; valid while the guard lives.
    data: *mut T,
    /// Back-reference to the owning lock's flag, cleared on drop.
    #[cfg(feature = "smp")]
    lock: &'a AtomicBool,
}
// SAFETY: same reasoning as `std::sync::Mutex` — the lock hands out access to
// `T` exclusively, so sharing or sending the lock across threads is sound
// whenever `T: Send`. (`T: Sync` is not required for `Sync` for that reason.)
unsafe impl<G: BaseGuard, T: ?Sized + Send> Sync for BaseSpinLock<G, T> {}
unsafe impl<G: BaseGuard, T: ?Sized + Send> Send for BaseSpinLock<G, T> {}
impl<G: BaseGuard, T> BaseSpinLock<G, T> {
    /// Creates a new lock wrapping `data`. Usable in `const`/`static` context.
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        Self {
            _phantom: PhantomData,
            data: UnsafeCell::new(data),
            // Lock starts out released.
            #[cfg(feature = "smp")]
            lock: AtomicBool::new(false),
        }
    }
    /// Consumes the lock and returns the inner data.
    ///
    /// Statically safe: taking `self` by value proves no guards are live.
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // Destructure instead of moving out of a field so this keeps
        // compiling even if the type ever gains a `Drop` impl.
        let BaseSpinLock { data, .. } = self;
        data.into_inner()
    }
}
impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
    /// Acquires the lock, spinning until it becomes available, and returns an
    /// RAII guard. The `G` protection is acquired *before* spinning and held
    /// until the returned guard is dropped.
    #[inline(always)]
    pub fn lock(&self) -> BaseSpinLockGuard<'_, G, T> {
        let irq_state = G::acquire();
        #[cfg(feature = "smp")]
        {
            // Test-and-test-and-set: attempt the CAS, and while it fails spin
            // on a plain relaxed load (`is_locked`) to avoid hammering the
            // cache line with exclusive-mode CAS traffic.
            while self
                .lock
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_err()
            {
                while self.is_locked() {
                    core::hint::spin_loop();
                }
            }
        }
        BaseSpinLockGuard {
            _phantom: &PhantomData,
            irq_state,
            // SAFETY: we now hold the lock, so access to `data` is exclusive
            // for as long as the guard lives.
            data: unsafe { &mut *self.data.get() },
            #[cfg(feature = "smp")]
            lock: &self.lock,
        }
    }
    /// Returns `true` if the lock is currently held.
    ///
    /// Without the `smp` feature there is no lock state to observe, so this
    /// always reports `false`. The relaxed load makes the answer advisory
    /// only — it may be stale by the time the caller acts on it.
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        cfg_if::cfg_if! {
            if #[cfg(feature = "smp")] {
                self.lock.load(Ordering::Relaxed)
            } else {
                false
            }
        }
    }
    /// Attempts to acquire the lock once, without spinning.
    ///
    /// On failure the `G` state acquired up front is released again and
    /// `None` is returned.
    #[inline(always)]
    pub fn try_lock(&self) -> Option<BaseSpinLockGuard<'_, G, T>> {
        let irq_state = G::acquire();
        cfg_if::cfg_if! {
            if #[cfg(feature = "smp")] {
                // Strong CAS here (unlike `lock`): a spurious failure would
                // wrongly report the lock as contended.
                let is_unlocked = self
                    .lock
                    .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
                    .is_ok();
            } else {
                let is_unlocked = true;
            }
        }
        if is_unlocked {
            Some(BaseSpinLockGuard {
                _phantom: &PhantomData,
                irq_state,
                // SAFETY: the successful CAS above gave us exclusive access.
                data: unsafe { &mut *self.data.get() },
                #[cfg(feature = "smp")]
                lock: &self.lock,
            })
        } else {
            // Undo the guard acquisition since no lock guard will drop it.
            G::release(irq_state);
            None
        }
    }
    /// Force-unlocks the lock without consuming a guard.
    ///
    /// # Safety
    ///
    /// The lock must actually be held and no [`BaseSpinLockGuard`] for it may
    /// still be live, otherwise later holders can alias the data. Note this
    /// does NOT release any `G` state acquired by the corresponding lock.
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        #[cfg(feature = "smp")]
        self.lock.store(false, Ordering::Release);
    }
    /// Returns a mutable reference to the inner data without locking.
    ///
    /// Statically safe: `&mut self` proves no other references exist.
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // SAFETY: exclusive borrow of the lock implies exclusive data access.
        unsafe { &mut *self.data.get() }
    }
}
impl<G: BaseGuard, T: Default> Default for BaseSpinLock<G, T> {
    /// Creates an unlocked lock wrapping `T`'s default value.
    #[inline(always)]
    fn default() -> Self {
        Self::new(T::default())
    }
}
impl<G: BaseGuard, T: ?Sized + fmt::Debug> fmt::Debug for BaseSpinLock<G, T> {
    /// Formats the inner value if the lock can be taken right now, otherwise
    /// prints a `<locked>` placeholder — never blocks.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(guard) = self.try_lock() {
            write!(f, "SpinLock {{ data: ")?;
            (*guard).fmt(f)?;
            write!(f, "}}")
        } else {
            write!(f, "SpinLock {{ <locked> }}")
        }
    }
}
impl<G: BaseGuard, T: ?Sized> Deref for BaseSpinLockGuard<'_, G, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        // SAFETY: the guard exists only while the lock is held, so `data`
        // points to valid memory that nothing else can mutate.
        unsafe { &*self.data }
    }
}
impl<G: BaseGuard, T: ?Sized> DerefMut for BaseSpinLockGuard<'_, G, T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: holding the guard grants exclusive access to the data, and
        // `&mut self` ensures this is the only active mutable borrow of it.
        unsafe { &mut *self.data }
    }
}
impl<G: BaseGuard, T: ?Sized + fmt::Debug> fmt::Debug for BaseSpinLockGuard<'_, G, T> {
    /// Delegates formatting to the guarded value's `Debug` impl.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        (**self).fmt(f)
    }
}
impl<G: BaseGuard, T: ?Sized> Drop for BaseSpinLockGuard<'_, G, T> {
    /// Releases the lock, then restores the `G` state.
    ///
    /// Order matters: the spin flag is cleared *before* `G::release`, so the
    /// lock is never still held once `G`'s protection has been dropped.
    #[inline(always)]
    fn drop(&mut self) {
        #[cfg(feature = "smp")]
        self.lock.store(false, Ordering::Release);
        G::release(self.irq_state);
    }
}
#[cfg(test)]
mod tests {
    use std::{
        sync::{
            Arc,
            atomic::{AtomicU32, AtomicUsize, Ordering},
            mpsc::channel,
        },
        thread,
    };

    use super::*;

    /// Declares a test guard type with its own nesting counter.
    ///
    /// Two fixes over the previous `static mut` version:
    /// 1. An `AtomicU32` replaces `static mut`, avoiding the UB hazard of
    ///    taking references to a mutable static (`static_mut_refs`).
    /// 2. Each counter-inspecting test gets a *dedicated* guard type, so the
    ///    test harness running tests on parallel threads cannot make the
    ///    count assertions flaky.
    macro_rules! test_irq_guard {
        ($guard:ident, $cnt:ident) => {
            struct $guard;
            static $cnt: AtomicU32 = AtomicU32::new(0);
            impl BaseGuard for $guard {
                type State = u32;
                fn acquire() -> Self::State {
                    // Return the nesting depth after this acquire.
                    $cnt.fetch_add(1, Ordering::SeqCst) + 1
                }
                fn release(_: Self::State) {
                    $cnt.fetch_sub(1, Ordering::SeqCst);
                }
            }
        };
    }

    test_irq_guard!(TestGuardIrq, IRQ_CNT);
    test_irq_guard!(TestGuardIrq2, IRQ_CNT2);

    type TestSpinIrq<T> = BaseSpinLock<TestGuardIrq, T>;
    type TestSpinIrq2<T> = BaseSpinLock<TestGuardIrq2, T>;
    type SpinMutex<T> = crate::SpinRaw<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = SpinMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    #[cfg(feature = "smp")]
    fn lots_and_lots() {
        static M: SpinMutex<()> = SpinMutex::<_>::new(());
        // Atomic instead of `static mut`: the spinlock already serializes the
        // increments, so `Relaxed` suffices — the atomic only removes the UB.
        static CNT: AtomicU32 = AtomicU32::new(0);
        const J: u32 = 1000;
        const K: u32 = 3;
        fn inc() {
            for _ in 0..J {
                let _g = M.lock();
                CNT.fetch_add(1, Ordering::Relaxed);
            }
        }
        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..K {
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
        }
        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(CNT.load(Ordering::Relaxed), J * K * 2);
        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    #[cfg(feature = "smp")]
    fn try_lock() {
        let mutex = SpinMutex::<_>::new(42);
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));
        // Held by `a`, so a second attempt must fail without blocking.
        let b = mutex.try_lock();
        assert!(b.is_none());
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    #[test]
    fn test_irq_lock_restored() {
        // Guard state must be acquired for exactly the guard's lifetime.
        let m = TestSpinIrq::new(());
        let _a = m.lock();
        assert_eq!(IRQ_CNT.load(Ordering::SeqCst), 1);
        ::core::mem::drop(_a);
        assert_eq!(IRQ_CNT.load(Ordering::SeqCst), 0);
    }

    #[test]
    #[cfg(feature = "smp")]
    fn test_irq_try_lock_failed() {
        // Uses its own guard/counter so it cannot race `test_irq_lock_restored`.
        let m = TestSpinIrq2::new(());
        let _a = m.lock();
        assert_eq!(IRQ_CNT2.load(Ordering::SeqCst), 1);
        // A failed try_lock must release the guard state it acquired.
        let b = m.try_lock();
        assert!(b.is_none());
        assert_eq!(IRQ_CNT2.load(Ordering::SeqCst), 1);
        drop(_a);
    }

    #[test]
    fn test_into_inner() {
        let m = SpinMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            // `into_inner` must move the value out, not drop it.
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested locks and access to underlying data.
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
        t.join().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            // The lock must still be usable from a destructor during unwind.
            struct Unwinder {
                i: Arc<SpinMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        // `BaseSpinLock<G, T: ?Sized>` must work with unsized data.
        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = SpinMutex::<_>::new(());
        // Leak the guard so the lock stays held, then force-unlock it.
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}