use core::cell::UnsafeCell;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::AtomicU32;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};

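// Lock state machine, stored in a single AtomicU32 so it can double as the
// futex word:
//   UNLOCKED           -> nobody holds the lock
//   LOCKED_NO_WAITERS  -> held, and no thread is (known to be) sleeping
//   LOCKED_YES_WAITERS -> held, and at least one thread may be sleeping on
//                         the futex, so unlock must issue a wake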
const UNLOCKED: u32 = 0;
const LOCKED_NO_WAITERS: u32 = 1;
const LOCKED_YES_WAITERS: u32 = 2;

pub struct Mutex<T> {
    state: AtomicU32,
    value: UnsafeCell<T>,
}

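// SAFETY: the lock protocol guarantees exclusive access to `value`, so the
// mutex may be shared across threads whenever the value itself may move
// between threads (`T: Send`). `T: Sync` is not required, because the value
// is only ever reachable through the guard.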
unsafe impl<T> Sync for Mutex<T> where T: Send {}

pub struct MutexGuard<'a, T> {
    pub(crate) mutex: &'a Mutex<T>,
}

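// SAFETY: a shared `&MutexGuard` only hands out `&T` (via `Deref`), so
// sharing the guard across threads is sound exactly when `&T` is (`T: Sync`).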
unsafe impl<T> Sync for MutexGuard<'_, T> where T: Sync {}

impl<T> Deref for MutexGuard<'_, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the guard's existence proves this thread holds the lock.
        unsafe { &*self.mutex.value.get() }
    }
}

impl<T> DerefMut for MutexGuard<'_, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: as above, and `&mut self` makes this reference unique.
        unsafe { &mut *self.mutex.value.get() }
    }
}

impl<T> Mutex<T> {
    pub const fn new(value: T) -> Self {
        Self {
            state: AtomicU32::new(UNLOCKED),
            value: UnsafeCell::new(value),
        }
    }

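    /// Acquires the lock, blocking the calling thread until it is available.
    ///
    /// Fast path: a single compare-exchange from UNLOCKED to
    /// LOCKED_NO_WAITERS. Everything else is punted to the cold,
    /// out-of-line `lock_contended`.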
    #[inline]
    pub fn lock(&self) -> MutexGuard<'_, T> {
        if self
            .state
            .compare_exchange(UNLOCKED, LOCKED_NO_WAITERS, Acquire, Relaxed)
            .is_err()
        {
            lock_contended(&self.state);
        }
        MutexGuard { mutex: self }
    }
}

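// Contended slow path: spin briefly in the hope that the holder releases
// soon (cheaper than a syscall), then fall back to sleeping on the futex.
// Once a thread decides to sleep, it flips the state to LOCKED_YES_WAITERS
// so that the eventual unlock knows a wake is required.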
#[cold]
fn lock_contended(state: &AtomicU32) {
    let mut spin_count = 0;
    const MAX_BUSY_LOOP_ITERS: u32 = 100;

    // Spin only while the lock is held with no waiters; if another thread is
    // already sleeping, spinning is unlikely to pay off.
    while state.load(Relaxed) == LOCKED_NO_WAITERS && spin_count < MAX_BUSY_LOOP_ITERS {
        spin_count += 1;
        core::hint::spin_loop();
    }

    // The lock may have been released while we were spinning.
    if state
        .compare_exchange(UNLOCKED, LOCKED_NO_WAITERS, Acquire, Relaxed)
        .is_ok()
    {
        return;
    }

    // Mark the lock as contended and sleep until woken. Note that we acquire
    // the lock in the LOCKED_YES_WAITERS state even if we turn out to be the
    // only waiter; an occasional spurious wake on unlock is the price of not
    // tracking an exact waiter count.
    while state.swap(LOCKED_YES_WAITERS, Acquire) != UNLOCKED {
        crate::futex_wait(state, LOCKED_YES_WAITERS, None);
    }
}

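// A hedged sketch (an assumption, not part of this crate) of what the
// `crate::futex_wait` / `crate::futex_wake` helpers used above might look
// like on Linux with the `libc` crate. `futex_wait` returns immediately
// unless the futex word still equals `expected`; `futex_wake` wakes at most
// one sleeping waiter.
//
// pub fn futex_wait(state: &AtomicU32, expected: u32, timeout: Option<libc::timespec>) {
//     let ts: *const libc::timespec =
//         timeout.as_ref().map_or(core::ptr::null(), |t| t as *const _);
//     unsafe {
//         libc::syscall(
//             libc::SYS_futex,
//             state as *const AtomicU32 as *const u32,
//             libc::FUTEX_WAIT | libc::FUTEX_PRIVATE_FLAG,
//             expected,
//             ts,
//         );
//     }
// }
//
// pub fn futex_wake(state: &AtomicU32) {
//     unsafe {
//         libc::syscall(
//             libc::SYS_futex,
//             state as *const AtomicU32 as *const u32,
//             libc::FUTEX_WAKE | libc::FUTEX_PRIVATE_FLAG,
//             1, // wake at most one waiter
//         );
//     }
// }
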
impl<T> Drop for MutexGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Release the lock; if the old state says a thread may be sleeping,
        // wake one waiter so it can retake the lock.
        if self.mutex.state.swap(UNLOCKED, Release) == LOCKED_YES_WAITERS {
            crate::futex_wake(&self.mutex.state);
        }
    }
}
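
// A minimal usage sketch, not from the original source: it assumes tests are
// built with std available (e.g. `#![cfg_attr(not(test), no_std)]` at the
// crate root) and that `crate::futex_wait` / `crate::futex_wake` are
// implemented elsewhere in the crate.
#[cfg(test)]
mod tests {
    extern crate std;

    use super::Mutex;
    use std::sync::Arc;
    use std::thread;
    use std::vec::Vec;

    #[test]
    fn counts_correctly_under_contention() {
        let m = Arc::new(Mutex::new(0u32));
        // Four threads each perform 1000 locked increments.
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let m = Arc::clone(&m);
                thread::spawn(move || {
                    for _ in 0..1000 {
                        *m.lock() += 1;
                    }
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        // If mutual exclusion held, no increments were lost.
        assert_eq!(*m.lock(), 4000);
    }
}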