usync/mutex.rs
use super::RawRwLock;
use lock_api::RawRwLock as _RawRwLock;
use std::fmt;

/// Raw mutex type implemented with lock-free userspace thread queues.
#[derive(Default)]
#[repr(transparent)]
pub struct RawMutex {
    pub(super) rwlock: RawRwLock,
}

impl fmt::Debug for RawMutex {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad("RawMutex { .. }")
    }
}

unsafe impl lock_api::RawMutex for RawMutex {
    type GuardMarker = crate::GuardMarker;

    const INIT: Self = Self {
        rwlock: RawRwLock::INIT,
    };

    #[inline]
    fn is_locked(&self) -> bool {
        self.rwlock.is_locked_exclusive()
    }

    #[inline]
    fn lock(&self) {
        self.rwlock.lock_exclusive()
    }

    #[inline]
    fn try_lock(&self) -> bool {
        self.rwlock.try_lock_exclusive()
    }

    #[inline]
    unsafe fn unlock(&self) {
        self.rwlock.unlock_exclusive()
    }
}
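
// A minimal sketch of driving `RawMutex` through the `lock_api::RawMutex`
// trait directly, i.e. locking and unlocking without ever creating a guard.
// `_raw_mutex_sketch` is a hypothetical helper added for illustration; it is
// not part of the crate's API.
#[allow(dead_code)]
fn _raw_mutex_sketch() {
    use lock_api::RawMutex as _;

    let raw = RawMutex::INIT;
    raw.lock();
    debug_assert!(raw.is_locked());
    // Safety: this thread acquired the lock above and still holds it.
    unsafe { raw.unlock() };
}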

/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
/// # Fairness
///
/// This lock is a wrapper around [`RwLock`](type.RwLock.html) and has similar
/// fairness guarantees. To reiterate, the mutex is unfair by default.
/// This means that a thread which unlocks the mutex is allowed to re-acquire it
/// even when other threads are waiting for the lock.
///
/// This greatly improves throughput (read "performance") but could potentially
/// starve an unlucky thread when there's constant lock contention. The mutex
/// tries to at least wake up threads in the order that they were queued in an
/// attempt to avoid starvation, but the actual wake-up order is ultimately up
/// to the OS scheduler.
///
/// # Differences from the standard library `Mutex`
///
/// - No poisoning: the lock is released normally on panic.
/// - Only requires 1 word (usize) of space, whereas the standard library boxes
///   the `Mutex` due to platform limitations.
/// - Can be statically constructed.
/// - Does not require any drop glue when dropped.
/// - Inline fast path for the uncontended case.
/// - Efficient handling of micro-contention using adaptive spinning.
/// - Allows raw locking & unlocking without a guard (see the sketch below).
///
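/// The raw unlocking API can, for instance, release a mutex whose guard was
/// deliberately leaked. A minimal sketch using the `is_locked` and
/// `force_unlock` methods provided by the underlying `lock_api` wrapper:
///
/// ```
/// use usync::Mutex;
///
/// let mutex = Mutex::new(1);
/// // Leak the guard: the mutex stays locked, but no guard exists anymore.
/// std::mem::forget(mutex.lock());
/// assert!(mutex.is_locked());
/// // Safety: this thread still logically holds the lock leaked above.
/// unsafe { mutex.force_unlock() };
/// assert!(!mutex.is_locked());
/// ```
///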
/// # Examples
///
/// ```
/// use usync::Mutex;
/// use std::sync::{Arc, mpsc::channel};
/// use std::thread;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment a shared variable (non-atomically), and
/// // let the main thread know once all increments are done.
/// //
/// // Here we're using an Arc to share memory among threads, and the data inside
/// // the Arc is protected with a mutex.
/// let data = Arc::new(Mutex::new(0));
///
/// let (tx, rx) = channel();
/// for _ in 0..N {
///     let (data, tx) = (Arc::clone(&data), tx.clone());
///     thread::spawn(move || {
///         // The shared state can only be accessed once the lock is held.
///         // Our non-atomic increment is safe because we're the only thread
///         // which can access the shared state when the lock is held.
///         let mut data = data.lock();
///         *data += 1;
///         if *data == N {
///             tx.send(()).unwrap();
///         }
///         // the lock is unlocked here when `data` goes out of scope.
///     });
/// }
///
/// rx.recv().unwrap();
/// ```
pub type Mutex<T> = lock_api::Mutex<RawMutex, T>;

/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, RawMutex, T>;
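
// A minimal sketch of accessing the protected data through the guard's
// `Deref`/`DerefMut` implementations; `_guard_deref_sketch` is a hypothetical
// helper added for illustration, not part of the crate's API.
#[allow(dead_code)]
fn _guard_deref_sketch() {
    let mutex = Mutex::new(String::from("usync"));
    let mut guard: MutexGuard<'_, String> = mutex.lock();
    guard.push('!'); // DerefMut: mutate the protected String in place
    assert_eq!(&*guard, "usync!"); // Deref: read through the guard
} // the guard drops here, unlocking the mutex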

/// An RAII mutex guard returned by `MutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedMutexGuard` and `MutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
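
// A minimal sketch of `MutexGuard::map` (a `lock_api` associated function)
// narrowing a guard to one field of the protected data; `_mapped_guard_sketch`
// is a hypothetical helper added for illustration, not part of the crate's API.
#[allow(dead_code)]
fn _mapped_guard_sketch() {
    let mutex = Mutex::new((0u8, String::new()));
    // The mutex stays locked while the mapped guard is alive, but the mapped
    // guard cannot be used to temporarily unlock and re-lock.
    let mut first: MappedMutexGuard<'_, u8> = MutexGuard::map(mutex.lock(), |pair| &mut pair.0);
    *first = 1;
}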

/// Creates a new mutex in an unlocked state ready for use.
///
/// This allows creating a mutex in a constant context on stable Rust.
pub const fn const_mutex<T>(value: T) -> Mutex<T> {
    Mutex::const_new(<RawMutex as lock_api::RawMutex>::INIT, value)
}
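
// A minimal sketch of static initialization via `const_mutex`; the
// `_SKETCH_COUNTER` static is hypothetical, added for illustration only.
#[allow(dead_code)]
static _SKETCH_COUNTER: Mutex<usize> = const_mutex(0);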

#[cfg(test)]
mod tests {
    use crate::{Condvar, Mutex};
    use std::{
        sync::{
            atomic::{AtomicUsize, Ordering},
            mpsc::channel,
            Arc,
        },
        thread,
    };

    struct Packet<T>(Arc<(Mutex<T>, Condvar)>);

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    unsafe impl<T: Send> Send for Packet<T> {}
    unsafe impl<T> Sync for Packet<T> {}

    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    #[test]
    fn lots_and_lots() {
        const J: u32 = 1000;
        const K: u32 = 3;

        let m = Arc::new(Mutex::new(0));

        fn inc(m: &Mutex<u32>) {
            for _ in 0..J {
                *m.lock() += 1;
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            let m2 = m.clone();
            thread::spawn(move || {
                inc(&m2);
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(*m.lock(), J * K * 2);
    }

    #[test]
    fn try_lock() {
        let m = Mutex::new(());
        *m.try_lock().unwrap() = ();
    }

    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = Mutex::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

    #[test]
    fn test_mutex_arc_condvar() {
        let packet = Packet(Arc::new((Mutex::new(false), Condvar::new())));
        let packet2 = Packet(packet.0.clone());
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            // wait until parent gets in
            rx.recv().unwrap();
            let &(ref lock, ref cvar) = &*packet2.0;
            let mut lock = lock.lock();
            *lock = true;
            cvar.notify_one();
        });

        let &(ref lock, ref cvar) = &*packet.0;
        let mut lock = lock.lock();
        tx.send(()).unwrap();
        assert!(!*lock);
        while !*lock {
            cvar.wait(&mut lock);
        }
    }

    #[test]
    fn test_mutex_arc_nested() {
        // Tests nested mutexes and access
        // to underlying data.
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    #[test]
    fn test_mutexguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let mutex = Mutex::new(());
        sync(mutex.lock());
    }

    #[test]
    fn test_mutex_debug() {
        let mutex = Mutex::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
        let _lock = mutex.lock();
        assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
    }
}