//! `mutex.rs` — demonstration of a deliberately buggy spinlock mutex
//! (unlock uses `Relaxed` instead of `Release`), used as a test case
//! for the `cobb` concurrency-testing harness.

use std::cell::UnsafeCell;
use std::sync::atomic::{AtomicBool, Ordering};

// A mutex that uses Relaxed instead of Release when unlocking (allowing a data race).
// Field 0: lock flag (`true` = held). Field 1: the protected value.
// The missing Release ordering is the *intentional* bug this example demonstrates.
struct BuggyMutex<T>(AtomicBool, UnsafeCell<T>);
6
// SAFETY: BuggyMutex owns its `T`, so sending the mutex across threads
// moves the `T` with it; `T: Send` is the standard requirement here.
unsafe impl<T: Send> Send for BuggyMutex<T> {}
// SAFETY: shared access is supposed to be serialized by the lock flag.
// NOTE(review): because unlock uses `Relaxed` (see `raw_unlock`), this
// claim is in fact unsound — deliberately so; that is the bug under test.
unsafe impl<T: Send> Sync for BuggyMutex<T> {}
impl<T> BuggyMutex<T> {
    /// Creates a new, unlocked mutex protecting `v`.
    pub const fn new(v: T) -> Self {
        Self(AtomicBool::new(false), UnsafeCell::new(v))
    }
    /// Acquires the lock (spinning until it is free) and returns a
    /// guard that releases it on drop.
    pub fn lock(&self) -> Guard<'_, T> {
        self.raw_lock();
        Guard(self)
    }
    fn raw_lock(&self) {
        // Spin until the swap observes `false` (i.e. the lock was free).
        // The `Acquire` here would pair with a `Release` store in
        // `raw_unlock` — but that store is intentionally `Relaxed`,
        // so no happens-before edge is established.
        while self.0.swap(true, Ordering::Acquire) {
            std::thread::yield_now();
        }
    }
    fn raw_unlock(&self) {
        // This is more or less the bug spin::RwLock used to have:
        // a `Relaxed` unlock does not synchronize-with the next
        // `Acquire` in `raw_lock`, so the previous holder's writes to
        // the protected data may not be visible to the next holder.
        self.0.store(false, Ordering::Relaxed);
    }
}
27pub struct Guard<'a, T: 'a>(&'a BuggyMutex<T>);
28impl<T> Drop for Guard<'_, T> {
29    fn drop(&mut self) {
30        self.0.raw_unlock();
31    }
32}
33impl<T> std::ops::Deref for Guard<'_, T> {
34    type Target = T;
35    fn deref(&self) -> &T {
36        unsafe { &*(self.0).1.get() }
37    }
38}
39impl<T> std::ops::DerefMut for Guard<'_, T> {
40    fn deref_mut(&mut self) -> &mut T {
41        unsafe { &mut *(self.0).1.get() }
42    }
43}
44
/// Drives the buggy mutex through cobb's concurrency test harness:
/// 16 threads each add their thread index under the lock, 1000
/// iterations; after each iteration the total must equal
/// `sum(0..16)`. The missing Release ordering on unlock means this
/// assertion can fail — which is exactly what the harness is meant
/// to detect.
fn main() {
    cobb::run_test(cobb::TestCfg::<BuggyMutex<usize>> {
        threads: 16,
        iterations: 1000,
        // One shared mutex for the whole run, starting at 0.
        setup: || BuggyMutex::new(0),
        // Each thread contributes its own index under the lock.
        test: |mutex, tctx| {
            *mutex.lock() += tctx.thread_index();
        },
        // Reset the counter before every iteration.
        before_each: |m| {
            *m.lock() = 0;
        },
        // All 16 contributions must be visible: 0+1+...+15 = 120.
        after_each: |m| {
            assert_eq!((0..16usize).sum::<usize>(), *m.lock());
        },
        ..Default::default()
    });
}