1use core::cell::UnsafeCell;
2use core::default::Default;
3use core::fmt;
4use core::marker::Sync;
5use core::ops::{Deref, DerefMut, Drop};
6use core::option::Option::{self, None, Some};
7use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicBool, Ordering, ATOMIC_BOOL_INIT};
8
/// A spin-based mutual-exclusion lock protecting data of type `T`.
///
/// Acquisition busy-waits (spins) instead of blocking, so it is usable in
/// `no_std` contexts (the file imports only from `core`).
pub struct Mutex<T: ?Sized> {
    // `true` while the lock is held; flipped with atomic operations.
    lock: AtomicBool,
    // The protected data; only accessed while `lock` is held.
    data: UnsafeCell<T>,
}
76
/// RAII guard returned by [`Mutex::lock`] / [`Mutex::try_lock`].
///
/// Grants exclusive access to the data for lifetime `'a`; the lock is
/// released when the guard is dropped (see the `Drop` impl below).
#[derive(Debug)]
pub struct MutexGuard<'a, T: ?Sized + 'a> {
    // Borrow of the owning mutex's flag, cleared on drop.
    lock: &'a AtomicBool,
    // Exclusive borrow of the protected data.
    data: &'a mut T,
}
85
// SAFETY: the lock guarantees at most one thread accesses `data` at a time,
// so sharing `&Mutex<T>` across threads is sound whenever `T: Send`.
unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
// SAFETY: moving the mutex moves the contained `T`, which requires `T: Send`.
unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
89
90impl<T> Mutex<T> {
91 pub const fn new(user_data: T) -> Mutex<T> {
107 Mutex {
108 lock: ATOMIC_BOOL_INIT,
109 data: UnsafeCell::new(user_data),
110 }
111 }
112
113 pub fn into_inner(self) -> T {
115 let Mutex { data, .. } = self;
118 data.into_inner()
119 }
120}
121
122impl<T: ?Sized> Mutex<T> {
123 fn obtain_lock(&self) {
124 while self.lock.compare_and_swap(false, true, Ordering::Acquire) != false {
125 while self.lock.load(Ordering::Relaxed) {
127 cpu_relax();
128 }
129 }
130 }
131
132 pub fn lock(&self) -> MutexGuard<T> {
148 self.obtain_lock();
149 MutexGuard {
150 lock: &self.lock,
151 data: unsafe { &mut *self.data.get() },
152 }
153 }
154
155 pub unsafe fn force_unlock(&self) {
163 self.lock.store(false, Ordering::Release);
164 }
165
166 pub fn try_lock(&self) -> Option<MutexGuard<T>> {
169 if self.lock.compare_and_swap(false, true, Ordering::Acquire) == false {
170 Some(MutexGuard {
171 lock: &self.lock,
172 data: unsafe { &mut *self.data.get() },
173 })
174 } else {
175 None
176 }
177 }
178}
179
180impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
181 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
182 match self.try_lock() {
183 Some(guard) => write!(f, "Mutex {{ data: ")
184 .and_then(|()| (&*guard).fmt(f))
185 .and_then(|()| write!(f, "}}")),
186 None => write!(f, "Mutex {{ <locked> }}"),
187 }
188 }
189}
190
191impl<T: ?Sized + Default> Default for Mutex<T> {
192 fn default() -> Mutex<T> {
193 Mutex::new(Default::default())
194 }
195}
196
197impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
198 type Target = T;
199 fn deref<'b>(&'b self) -> &'b T {
200 &*self.data
201 }
202}
203
204impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
205 fn deref_mut<'b>(&'b mut self) -> &'b mut T {
206 &mut *self.data
207 }
208}
209
impl<'a, T: ?Sized> Drop for MutexGuard<'a, T> {
    /// Releases the lock when the guard goes out of scope.
    fn drop(&mut self) {
        // Release ordering publishes all writes made while holding the lock
        // to the next thread that acquires it (which uses Acquire).
        self.lock.store(false, Ordering::Release);
    }
}
216
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    // Non-`Copy` payload used to verify moves out of the mutex.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Basic sanity: the lock can be acquired and released repeatedly.
    #[test]
    fn smoke() {
        let m = Mutex::new(());
        drop(m.lock());
        drop(m.lock());
    }

    // Stress test: 2*K threads each do J locked increments of a shared
    // counter; the exact final count proves mutual exclusion held.
    #[test]
    fn lots_and_lots() {
        static M: Mutex<()> = Mutex::new(());
        static mut CNT: u32 = 0;
        const J: u32 = 1000;
        const K: u32 = 3;

        fn inc() {
            for _ in 0..J {
                unsafe {
                    // Holding the guard makes the unsynchronised `CNT += 1`
                    // data-race free.
                    let _g = M.lock();
                    CNT += 1;
                }
            }
        }

        let (tx, rx) = channel();
        for _ in 0..K {
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
            let tx2 = tx.clone();
            thread::spawn(move || {
                inc();
                tx2.send(()).unwrap();
            });
        }

        drop(tx);
        // Wait for all 2*K worker threads to report completion.
        for _ in 0..2 * K {
            rx.recv().unwrap();
        }
        assert_eq!(unsafe { CNT }, J * K * 2);
    }

    // `try_lock` succeeds when free, fails while the lock is held, and
    // succeeds again once the first guard is dropped.
    #[test]
    fn try_lock() {
        let mutex = Mutex::new(42);

        let a = mutex.try_lock();
        assert_eq!(a.as_ref().map(|r| **r), Some(42));

        let b = mutex.try_lock();
        assert!(b.is_none());

        ::core::mem::drop(a);
        let c = mutex.try_lock();
        assert_eq!(c.as_ref().map(|r| **r), Some(42));
    }

    // `into_inner` moves a non-Copy value out of the mutex intact.
    #[test]
    fn test_into_inner() {
        let m = Mutex::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // `into_inner` must not drop the contained value during extraction;
    // the value is dropped exactly once, by the caller.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = Mutex::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    // A mutex nested inside another mutex works across threads.
    #[test]
    fn test_mutex_arc_nested() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = Arc::new(Mutex::new(arc));
        let (tx, rx) = channel();
        let _t = thread::spawn(move || {
            let lock = arc2.lock();
            let lock2 = lock.lock();
            assert_eq!(*lock2, 1);
            tx.send(()).unwrap();
        });
        rx.recv().unwrap();
    }

    // The lock must be usable from a destructor running during unwind:
    // the panicking thread's guard is released before `Unwinder::drop`.
    #[test]
    fn test_mutex_arc_access_in_unwind() {
        let arc = Arc::new(Mutex::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<Mutex<i32>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.i.lock() += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.lock();
        assert_eq!(*lock, 2);
    }

    // `Mutex<[i32]>` (unsized payload) can be locked and mutated through
    // an unsized coercion from `Mutex<[i32; 3]>`.
    #[test]
    fn test_mutex_unsized() {
        let mutex: &Mutex<[i32]> = &Mutex::new([1, 2, 3]);
        {
            let b = &mut *mutex.lock();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*mutex.lock(), comp);
    }

    // `force_unlock` recovers a lock whose guard was leaked with `forget`.
    #[test]
    fn test_mutex_force_lock() {
        let lock = Mutex::new(());
        ::std::mem::forget(lock.lock());
        unsafe {
            lock.force_unlock();
        }
        assert!(lock.try_lock().is_some());
    }
}