// qadapt_spin/rw_lock.rs

1use core::cell::UnsafeCell;
2use core::default::Default;
3use core::fmt;
4use core::ops::{Deref, DerefMut};
5use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
6
/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// Based on
/// <https://jfdube.wordpress.com/2014/01/03/implementing-a-recursive-read-write-spinlock/>
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized> {
    // Lock state word: reader count in the low bits, with the most
    // significant bit (`USIZE_MSB`) set while a writer holds or is acquiring
    // the lock.
    lock: AtomicUsize,
    // The protected value; `UnsafeCell` provides the interior mutability the
    // RAII guards hand out.
    data: UnsafeCell<T>,
}
49
/// A guard through which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    // Borrow of the owning lock's state word, used to decrement the reader
    // count on drop.
    lock: &'a AtomicUsize,
    // Shared borrow of the protected data, valid for the guard's lifetime.
    data: &'a T,
}
59
/// A guard through which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    // Borrow of the owning lock's state word, used to clear the writer bit
    // on drop.
    lock: &'a AtomicUsize,
    // Exclusive borrow of the protected data, valid for the guard's lifetime.
    data: &'a mut T,
}
68
// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

/// Mask for the most significant bit of `usize`: the "writer held or
/// pending" flag. All remaining bits of the lock word hold the reader count.
const USIZE_MSB: usize = ::core::isize::MIN as usize;
74
75impl<T> RwLock<T> {
76    /// Creates a new spinlock wrapping the supplied data.
77    ///
78    /// May be used statically:
79    ///
80    /// ```
81    /// use spin;
82    ///
83    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
84    ///
85    /// fn demo() {
86    ///     let lock = RW_LOCK.read();
87    ///     // do something with lock
88    ///     drop(lock);
89    /// }
90    /// ```
91    #[inline]
92    pub const fn new(user_data: T) -> RwLock<T> {
93        RwLock {
94            lock: ATOMIC_USIZE_INIT,
95            data: UnsafeCell::new(user_data),
96        }
97    }
98
99    /// Consumes this `RwLock`, returning the underlying data.
100    pub fn into_inner(self) -> T {
101        // We know statically that there are no outstanding references to
102        // `self` so there's no need to lock.
103        let RwLock { data, .. } = self;
104        data.into_inner()
105    }
106}
107
108impl<T: ?Sized> RwLock<T> {
109    /// Locks this rwlock with shared read access, blocking the current thread
110    /// until it can be acquired.
111    ///
112    /// The calling thread will be blocked until there are no more writers which
113    /// hold the lock. There may be other readers currently inside the lock when
114    /// this method returns. This method does not provide any guarantees with
115    /// respect to the ordering of whether contentious readers or writers will
116    /// acquire the lock first.
117    ///
118    /// Returns an RAII guard which will release this thread's shared access
119    /// once it is dropped.
120    ///
121    /// ```
122    /// let mylock = spin::RwLock::new(0);
123    /// {
124    ///     let mut data = mylock.read();
125    ///     // The lock is now locked and the data can be read
126    ///     println!("{}", *data);
127    ///     // The lock is dropped
128    /// }
129    /// ```
130    #[inline]
131    pub fn read<'a>(&'a self) -> RwLockReadGuard<'a, T> {
132        // (funny do-while loop)
133        while {
134            // Old value, with write bit unset
135            let mut old;
136
137            // Wait for for writer to go away before doing expensive atomic ops
138            // (funny do-while loop)
139            while {
140                old = self.lock.load(Ordering::Relaxed);
141                old & USIZE_MSB != 0
142            } {
143                cpu_relax();
144            }
145
146            // unset write bit
147            old &= !USIZE_MSB;
148
149            let new = old + 1;
150            debug_assert!(new != (!USIZE_MSB) & (!0));
151
152            self.lock.compare_and_swap(old, new, Ordering::SeqCst) != old
153        } {
154            cpu_relax();
155        }
156        RwLockReadGuard {
157            lock: &self.lock,
158            data: unsafe { &*self.data.get() },
159        }
160    }
161
162    /// Attempt to acquire this lock with shared read access.
163    ///
164    /// This function will never block and will return immediately if `read`
165    /// would otherwise succeed. Returns `Some` of an RAII guard which will
166    /// release the shared access of this thread when dropped, or `None` if the
167    /// access could not be granted. This method does not provide any
168    /// guarantees with respect to the ordering of whether contentious readers
169    /// or writers will acquire the lock first.
170    ///
171    /// ```
172    /// let mylock = spin::RwLock::new(0);
173    /// {
174    ///     match mylock.try_read() {
175    ///         Some(data) => {
176    ///             // The lock is now locked and the data can be read
177    ///             println!("{}", *data);
178    ///             // The lock is dropped
179    ///         },
180    ///         None => (), // no cigar
181    ///     };
182    /// }
183    /// ```
184    #[inline]
185    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
186        // Old value, with write bit unset
187        let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
188
189        let new = old + 1;
190        debug_assert!(new != (!USIZE_MSB) & (!0));
191        if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
192            Some(RwLockReadGuard {
193                lock: &self.lock,
194                data: unsafe { &*self.data.get() },
195            })
196        } else {
197            None
198        }
199    }
200
201    /// Force decrement the reader count.
202    ///
203    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
204    /// live, or if called more times than `read` has been called, but can be
205    /// useful in FFI contexts where the caller doesn't know how to deal with
206    /// RAII.
207    pub unsafe fn force_read_decrement(&self) {
208        debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
209        self.lock.fetch_sub(1, Ordering::SeqCst);
210    }
211
212    /// Force unlock exclusive write access.
213    ///
214    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
215    /// live, or if called when there are current readers, but can be useful in
216    /// FFI contexts where the caller doesn't know how to deal with RAII.
217    pub unsafe fn force_write_unlock(&self) {
218        debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
219        self.lock.store(0, Ordering::Relaxed);
220    }
221
222    /// Lock this rwlock with exclusive write access, blocking the current
223    /// thread until it can be acquired.
224    ///
225    /// This function will not return while other writers or other readers
226    /// currently have access to the lock.
227    ///
228    /// Returns an RAII guard which will drop the write access of this rwlock
229    /// when dropped.
230    ///
231    /// ```
232    /// let mylock = spin::RwLock::new(0);
233    /// {
234    ///     let mut data = mylock.write();
235    ///     // The lock is now locked and the data can be written
236    ///     *data += 1;
237    ///     // The lock is dropped
238    /// }
239    /// ```
240    #[inline]
241    pub fn write<'a>(&'a self) -> RwLockWriteGuard<'a, T> {
242        loop {
243            // Old value, with write bit unset.
244            let old = (!USIZE_MSB) & self.lock.load(Ordering::Relaxed);
245            // Old value, with write bit set.
246            let new = USIZE_MSB | old;
247            if self.lock.compare_and_swap(old, new, Ordering::SeqCst) == old {
248                // Wait for readers to go away, then lock is ours.
249                while self.lock.load(Ordering::Relaxed) != USIZE_MSB {
250                    cpu_relax();
251                }
252                break;
253            }
254        }
255        RwLockWriteGuard {
256            lock: &self.lock,
257            data: unsafe { &mut *self.data.get() },
258        }
259    }
260
261    /// Attempt to lock this rwlock with exclusive write access.
262    ///
263    /// This function does not ever block, and it will return `None` if a call
264    /// to `write` would otherwise block. If successful, an RAII guard is
265    /// returned.
266    ///
267    /// ```
268    /// let mylock = spin::RwLock::new(0);
269    /// {
270    ///     match mylock.try_write() {
271    ///         Some(mut data) => {
272    ///             // The lock is now locked and the data can be written
273    ///             *data += 1;
274    ///             // The lock is implicitly dropped
275    ///         },
276    ///         None => (), // no cigar
277    ///     };
278    /// }
279    /// ```
280    #[inline]
281    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
282        if self.lock.compare_and_swap(0, USIZE_MSB, Ordering::SeqCst) == 0 {
283            Some(RwLockWriteGuard {
284                lock: &self.lock,
285                data: unsafe { &mut *self.data.get() },
286            })
287        } else {
288            None
289        }
290    }
291}
292
293impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
294    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
295        match self.try_read() {
296            Some(guard) => write!(f, "RwLock {{ data: ")
297                .and_then(|()| (&*guard).fmt(f))
298                .and_then(|()| write!(f, "}}")),
299            None => write!(f, "RwLock {{ <locked> }}"),
300        }
301    }
302}
303
304impl<T: ?Sized + Default> Default for RwLock<T> {
305    fn default() -> RwLock<T> {
306        RwLock::new(Default::default())
307    }
308}
309
impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    /// Grants shared access to the protected data for the guard's lifetime.
    fn deref(&self) -> &T {
        self.data
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    /// Grants shared access to the protected data for the guard's lifetime.
    fn deref(&self) -> &T {
        self.data
    }
}

impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    /// Grants exclusive access to the protected data for the guard's lifetime.
    fn deref_mut(&mut self) -> &mut T {
        self.data
    }
}
331
impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        // At least one reader (this guard) must be counted in the low bits.
        debug_assert!(self.lock.load(Ordering::Relaxed) & (!USIZE_MSB) > 0);
        // Decrement the reader count; `SeqCst` includes release semantics,
        // publishing this reader's accesses before the lock is given up.
        self.lock.fetch_sub(1, Ordering::SeqCst);
    }
}
338
339impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
340    fn drop(&mut self) {
341        debug_assert_eq!(self.lock.load(Ordering::Relaxed), USIZE_MSB);
342        self.lock.store(0, Ordering::Relaxed);
343    }
344}
345
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    // Non-`Copy` payload used to prove `into_inner` moves rather than copies.
    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Basic sanity: read/write locks can be taken and released repeatedly,
    // and two read guards may coexist.
    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    // Readers must never observe the writer's temporary -1 marker, proving
    // write exclusivity across threads.
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    // The lock must remain usable after a panicking thread released it
    // during unwinding (via the Unwinder's Drop impl).
    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    // `RwLock<[i32]>` exercises the `T: ?Sized` support.
    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    // `try_write` must fail while any read guard is outstanding.
    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }

        drop(read_guard);
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    // `into_inner` must not drop the contained value itself; the value is
    // dropped exactly once, by the caller.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    // Each `force_read_decrement` releases exactly one leaked read guard;
    // writing becomes possible only after the count reaches zero.
    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    // `force_write_unlock` releases a leaked write guard.
    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }
}