// spin/mutex/spin.rs

//! A naïve spinning mutex.
//!
//! Waiting threads hammer an atomic variable until it becomes available. Best-case latency is low, but worst-case
//! latency is theoretically infinite.

use crate::{
    atomic::{AtomicBool, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
///
/// # Example
///
/// ```
/// use spin;
///
/// let lock = spin::mutex::SpinMutex::<_>::new(0);
///
/// // Modify the data
/// *lock.lock() = 2;
///
/// // Read the data
/// let answer = *lock.lock();
/// assert_eq!(answer, 2);
/// ```
///
/// # Thread safety example
///
/// ```
/// use spin;
/// use std::sync::{Arc, Barrier};
///
/// let thread_count = 1000;
/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::<_>::new(0));
///
/// // We use a barrier to ensure the readout happens after all writing
/// let barrier = Arc::new(Barrier::new(thread_count + 1));
///
/// # let mut ts = Vec::new();
/// for _ in 0..thread_count {
///     let my_barrier = barrier.clone();
///     let my_lock = spin_mutex.clone();
/// # let t =
///     std::thread::spawn(move || {
///         let mut guard = my_lock.lock();
///         *guard += 1;
///
///         // Release the lock to prevent a deadlock
///         drop(guard);
///         my_barrier.wait();
///     });
/// # ts.push(t);
/// }
///
/// barrier.wait();
///
/// let answer = { *spin_mutex.lock() };
/// assert_eq!(answer, thread_count);
///
/// # for t in ts {
/// #     t.join().unwrap();
/// # }
/// ```
pub struct SpinMutex<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    pub(crate) lock: AtomicBool,
    data: UnsafeCell<T>,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
    lock: &'a AtomicBool,
    data: *mut T,
}

// Same unsafe impls as `std::sync::Mutex`
unsafe impl<T: ?Sized + Send, R> Sync for SpinMutex<T, R> {}
unsafe impl<T: ?Sized + Send, R> Send for SpinMutex<T, R> {}

unsafe impl<T: ?Sized + Sync> Sync for SpinMutexGuard<'_, T> {}
unsafe impl<T: ?Sized + Send> Send for SpinMutexGuard<'_, T> {}

impl<T, R> SpinMutex<T, R> {
    /// Creates a new [`SpinMutex`] wrapping the supplied data.
    ///
    /// # Example
    ///
    /// ```
    /// use spin::mutex::SpinMutex;
    ///
    /// static MUTEX: SpinMutex<()> = SpinMutex::<_>::new(());
    ///
    /// fn demo() {
    ///     let lock = MUTEX.lock();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline(always)]
    pub const fn new(data: T) -> Self {
        SpinMutex {
            lock: AtomicBool::new(false),
            data: UnsafeCell::new(data),
            phantom: PhantomData,
        }
    }

    /// Consumes this [`SpinMutex`] and unwraps the underlying data.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    /// assert_eq!(42, lock.into_inner());
    /// ```
    #[inline(always)]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let SpinMutex { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly intended for applications that require manual unlocking, but where
    /// storing both the lock and a pointer to the inner data would be inefficient.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.lock());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_unlock();
    /// }
    ///
    /// assert_eq!(*lock.lock(), 58);
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> SpinMutex<T, R> {
    /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
    ///
    /// The returned value may be dereferenced for data access
    /// and the lock will be dropped when the guard falls out of scope.
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
    /// {
    ///     let mut data = lock.lock();
    ///     // The lock is now locked and the data can be accessed
    ///     *data += 1;
    ///     // The lock is implicitly dropped at the end of the scope
    /// }
    /// ```
    #[inline(always)]
    pub fn lock(&self) -> SpinMutexGuard<T> {
        // `try_lock_weak` may fail spuriously even when the lock is free, but its weak
        // compare-exchange tends to be cheaper than `try_lock` when retried in a loop.
        loop {
            if let Some(guard) = self.try_lock_weak() {
                break guard;
            }

            while self.is_locked() {
                R::relax();
            }
        }
    }
}

impl<T: ?Sized, R> SpinMutex<T, R> {
    /// Returns `true` if the lock is currently held.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
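    ///
    /// # Example
    ///
    /// A minimal sketch of the lock flag tracking a guard's lifetime (under contention the
    /// answer may already be stale by the time it is read):
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// assert!(!lock.is_locked());
    ///
    /// let guard = lock.lock();
    /// assert!(lock.is_locked());
    ///
    /// drop(guard);
    /// assert!(!lock.is_locked());
    /// ```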
    #[inline(always)]
    pub fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed)
    }

    /// Force unlock this [`SpinMutex`].
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if the lock is not held by the current
    /// thread. However, this can be useful in some instances for exposing the
    /// lock to FFI that doesn't know how to deal with RAII.
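    ///
    /// # Example
    ///
    /// A minimal sketch of manual unlocking. The guard returned by [`SpinMutex::lock`] is
    /// forgotten first so that its destructor does not release the lock a second time:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(0);
    ///
    /// core::mem::forget(lock.lock());
    /// // The lock is now held, but no guard is tracking it.
    /// unsafe { lock.force_unlock(); }
    ///
    /// assert!(lock.try_lock().is_some());
    /// ```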
    #[inline(always)]
    pub unsafe fn force_unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }

    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
    ///
    /// # Example
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// let maybe_guard = lock.try_lock();
    /// assert!(maybe_guard.is_some());
    ///
    /// // `maybe_guard` is still held, so the second call fails
    /// let maybe_guard2 = lock.try_lock();
    /// assert!(maybe_guard2.is_none());
    /// ```
    #[inline(always)]
    pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
        // The reason for using a strong compare_exchange is explained here:
        // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
        if self
            .lock
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
    ///
    /// Unlike [`SpinMutex::try_lock`], this function is allowed to spuriously fail even when the mutex is unlocked,
    /// which can result in more efficient code on some platforms.
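    ///
    /// # Example
    ///
    /// A sketch of the intended use in a spin loop, where a spurious failure is harmless
    /// because the caller retries anyway:
    ///
    /// ```
    /// let lock = spin::mutex::SpinMutex::<_>::new(42);
    ///
    /// let guard = loop {
    ///     if let Some(guard) = lock.try_lock_weak() {
    ///         break guard;
    ///     }
    ///     core::hint::spin_loop();
    /// };
    /// assert_eq!(*guard, 42);
    /// ```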
    #[inline(always)]
    pub fn try_lock_weak(&self) -> Option<SpinMutexGuard<T>> {
        if self
            .lock
            .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            Some(SpinMutexGuard {
                lock: &self.lock,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
    /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
    /// such, this is a 'zero-cost' operation.
    ///
    /// # Example
    ///
    /// ```
    /// let mut lock = spin::mutex::SpinMutex::<_>::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    #[inline(always)]
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner mutex.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for SpinMutex<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_lock() {
            Some(guard) => write!(f, "Mutex {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, " }}")),
            None => write!(f, "Mutex {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for SpinMutex<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for SpinMutex<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original [`SpinMutex`].
    ///
    /// ```
    /// let mylock = spin::mutex::SpinMutex::<_>::new(0);
    ///
    /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline(always)]
    pub fn leak(this: Self) -> &'a mut T {
        // Use ManuallyDrop to avoid stacked-borrow invalidation
        let mut this = ManuallyDrop::new(this);
        // We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
    type Target = T;
    fn deref(&self) -> &T {
        // We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
    /// The dropping of the MutexGuard will release the lock it was created from.
    fn drop(&mut self) {
        self.lock.store(false, Ordering::Release);
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawMutex for SpinMutex<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    fn lock(&self) {
        // Prevent guard destructor running
        core::mem::forget(Self::lock(self));
    }

    fn try_lock(&self) -> bool {
        // Prevent guard destructor running
        Self::try_lock(self).map(core::mem::forget).is_some()
    }

    unsafe fn unlock(&self) {
        self.force_unlock();
    }

    fn is_locked(&self) -> bool {
        Self::is_locked(self)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type SpinMutex<T> = super::SpinMutex<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let m = SpinMutex::<_>::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        static M: SpinMutex<()> = SpinMutex::<_>::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        let mut ts = Vec::new();
        for _ in 0..K {
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
            let tx2 = tx.clone();
            ts.push(thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            }));
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);

        for t in ts {
            t.join().unwrap();
        }
    }

    #[test]
    fn try_lock() {
        let mutex = SpinMutex::<_>::new(42);

        // First lock succeeds
        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        // Additional lock fails
        let b = mutex.try_lock();
        assert!(b.is_none());

        // After dropping lock, it succeeds again
        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

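    // A small added sketch of `try_lock_weak`: it may fail spuriously, so this test
    // only asserts properties that hold regardless of spurious failures.
    #[test]
    fn try_lock_weak() {
        let mutex = SpinMutex::<_>::new(42);

        // While a guard is held, the weak variant must fail.
        let a = mutex.try_lock().unwrap();
        assert!(mutex.try_lock_weak().is_none());
        drop(a);

        // Once the lock is free again, retrying in a loop must eventually succeed.
        let b = loop {
            if let Some(guard) = mutex.try_lock_weak() {
                break guard;
            }
        };
        assert_eq!(*b, 42);
    }
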
    #[test]
    fn test_into_inner() {
        let m = SpinMutex::<_>::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = SpinMutex::<_>::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = Arc::new(SpinMutex::<_>::new(arc));
        let (tx, rx) = channel();
        let t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
        t.join().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(SpinMutex::<_>::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<SpinMutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &SpinMutex<[i32]> = &SpinMutex::<_>::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutex_force_lock() {
        let lock = SpinMutex::<_>::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
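
    // Added sketch: `is_locked` tracks a guard's lifetime. It is only a heuristic under
    // contention, but is exact in this single-threaded test.
    #[test]
    fn test_is_locked() {
        let lock = SpinMutex::<_>::new(0);
        assert!(!lock.is_locked());

        let guard = lock.lock();
        assert!(lock.is_locked());

        drop(guard);
        assert!(!lock.is_locked());
    }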
}