nospin/
rwlock.rs

1//! A lock that provides data access to either one writer or many readers.
2use core::{
3    cell::UnsafeCell,
4    fmt,
5    mem::{ManuallyDrop, drop, forget},
6    ops::{Deref, DerefMut},
7};
8
/// A plain `usize` exposing the `AtomicUsize` API surface, with no atomicity.
///
/// Stand-in counter that lets the lock algorithm below read identically to an
/// atomic implementation; only sound where no concurrent access occurs.
struct NonAtomicUsize {
    // Interior mutability so the "atomic-like" methods can take `&self`.
    value: UnsafeCell<usize>,
}
12
/// No-op stand-in for `core::sync::atomic::Ordering`.
///
/// Memory ordering is meaningless for a non-atomic counter; keeping the
/// parameter preserves the atomic-style call sites below.
#[non_exhaustive]
#[derive(Clone, Copy)]
struct Ordering;
16
impl Ordering {
    // Constants mirroring the variants of `core::sync::atomic::Ordering`.
    // All carry the same zero-sized value; only the names matter.
    #[allow(non_upper_case_globals)]
    pub const Relaxed: Ordering = Ordering;
    #[allow(non_upper_case_globals)]
    pub const Release: Ordering = Ordering;
    #[allow(non_upper_case_globals)]
    pub const Acquire: Ordering = Ordering;
    #[allow(non_upper_case_globals)]
    pub const AcqRel: Ordering = Ordering;
    // Unused by this file; kept so the type is a drop-in `Ordering` substitute.
    #[allow(dead_code)]
    #[allow(non_upper_case_globals)]
    pub const SeqCst: Ordering = Ordering;
}
30
31impl NonAtomicUsize {
32    pub const fn new(value: usize) -> NonAtomicUsize {
33        Self {
34            value: UnsafeCell::new(value),
35        }
36    }
37
38    pub fn fetch_add(&self, value: usize, _order: Ordering) -> usize {
39        self.update_with(|x| x + value)
40    }
41
42    pub fn fetch_sub(&self, value: usize, _order: Ordering) -> usize {
43        self.update_with(|x| x - value)
44    }
45
46    pub fn fetch_and(&self, value: usize, _order: Ordering) -> usize {
47        self.update_with(|x| x & value)
48    }
49
50    pub fn fetch_or(&self, value: usize, _order: Ordering) -> usize {
51        self.update_with(|x| x | value)
52    }
53
54    #[inline]
55    fn update_with<F>(&self, f: F) -> usize
56    where
57        F: Fn(usize) -> usize,
58    {
59        let value = self.get();
60        self.set(f(value));
61        value
62    }
63
64    #[inline]
65    fn get(&self) -> usize {
66        unsafe { *self.value.get() }
67    }
68
69    fn set(&self, value: usize) {
70        unsafe { *self.value.get() = value }
71    }
72
73    #[inline]
74    pub fn load(&self, _order: Ordering) -> usize {
75        self.get()
76    }
77
78    #[inline]
79    pub fn store(&self, value: usize, _order: Ordering) {
80        self.set(value);
81    }
82
83    pub fn compare_exchange(
84        &self,
85        current: usize,
86        new: usize,
87        _success: Ordering,
88        _failure: Ordering,
89    ) -> Result<usize, usize> {
90        let value = self.get();
91        if value == current {
92            self.set(new);
93            Ok(new)
94        } else {
95            Err(value)
96        }
97    }
98}
99
100/// A lock that provides data access to either one writer or many readers.
101///
102/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but
103/// it is NOT thread-safe and is intended for single-threaded environments.
104///
105/// This type of lock allows a number of readers or at most one writer at any
106/// point in time. The write portion of this lock typically allows modification
107/// of the underlying data (exclusive access) and the read portion of this lock
108/// typically allows for read-only access (shared access).
109///
110/// The type parameter `T` represents the data that this lock protects. It is
111/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
112/// allow concurrent access through readers. The RAII guards returned from the
113/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
115///
116/// An [`RwLockUpgradableGuard`] can be upgraded to a writable guard through the
117/// [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) and
118/// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
119/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
120/// functions.
121///
122/// Based on Facebook's
123/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
124/// This implementation is unfair to writers - if the lock always has readers, then no writers will
125/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
126/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
127/// when there are existing readers. However if the lock is that highly contended and writes are
128/// crucial then this implementation may be a poor choice.
129///
130/// # Examples
131///
132/// ```
133/// use nospin;
134///
135/// let lock = nospin::RwLock::new(5);
136///
137/// // many reader locks can be held at once
138/// {
139///     let r1 = lock.read();
140///     let r2 = lock.read();
141///     assert_eq!(*r1, 5);
142///     assert_eq!(*r2, 5);
143/// } // read locks are dropped at this point
144///
145/// // only one write lock may be held, however
146/// {
147///     let mut w = lock.write();
148///     *w += 1;
149///     assert_eq!(*w, 6);
150/// } // write lock is dropped here
151/// ```
pub struct RwLock<T: ?Sized> {
    // Packed state word; see the `READER`/`UPGRADED`/`WRITER` constants below.
    lock: NonAtomicUsize,
    // The protected value, accessed only through guards (or the documented
    // unsafe escape hatches).
    data: UnsafeCell<T>,
}
156
// Bit layout of the lock word:
//   bit 0 (WRITER):   an exclusive writer holds the lock
//   bit 1 (UPGRADED): an upgradeable reader holds the lock
//   bits 2.. :        plain-reader count (each reader adds `READER`)
const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;
160
/// A guard that provides immutable data access.
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    // Only the state word is needed to unlock; `READER` is subtracted on drop.
    lock: &'a NonAtomicUsize,
    // Pointer into the `RwLock`'s `UnsafeCell` data.
    data: *const T,
}
169
/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    // Whole lock is kept (not just the word) so `downgrade*` can build new guards.
    inner: &'a RwLock<T>,
    // Pointer into the `RwLock`'s `UnsafeCell` data.
    data: *mut T,
}
177
/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized> {
    // Whole lock is kept so `upgrade`/`downgrade` can build new guards.
    inner: &'a RwLock<T>,
    // Pointer into the `RwLock`'s `UnsafeCell` data.
    data: *const T,
}
189
// Same unsafe impls as `std::sync::RwLock`
//
// NOTE(review): the lock word is a plain (non-atomic) `usize`, so actually
// sharing this lock between OS threads would be a data race. These impls
// mirror `std`/`spin` for drop-in compatibility and rely on the crate's
// documented single-threaded usage contract — confirm callers uphold it.
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

unsafe impl<T: ?Sized + Send + Sync> Send for RwLockWriteGuard<'_, T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLockWriteGuard<'_, T> {}

unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}

unsafe impl<T: ?Sized + Send + Sync> Send for RwLockUpgradableGuard<'_, T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLockUpgradableGuard<'_, T> {}
202
impl<T> RwLock<T> {
    /// Creates a new lock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use nospin;
    ///
    /// static RW_LOCK: nospin::RwLock<()> = nospin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(data: T) -> Self {
        RwLock {
            // Zero state word: no readers, no upgradeable reader, no writer.
            lock: NonAtomicUsize::new(0),
            data: UnsafeCell::new(data),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }
    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// While this is safe, writing to the data is undefined behavior unless the current thread has
    /// acquired a write lock, and reading requires either a read or write lock.
    ///
    /// # Example
    /// ```
    /// let lock = nospin::RwLock::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.write());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_write_unlock();
    /// }
    ///
    /// assert_eq!(*lock.read(), 58);
    ///
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}
264
265impl<T: ?Sized> RwLock<T> {
266    /// Locks this rwlock with shared read access, panicking if it can be acquired.
267    ///
268    /// There may be other readers currently inside the lock when this method
269    /// returns. This method does not provide any guarantees with respect to the
270    /// ordering of whether contentious readers or writers will acquire the lock
271    /// first.
272    ///
273    /// Returns an RAII guard which will release this thread's shared access
274    /// once it is dropped.
275    ///
276    /// ```
277    /// let mylock = nospin::RwLock::new(0);
278    /// {
279    ///     let mut data = mylock.read();
280    ///     // The lock is now locked and the data can be read
281    ///     println!("{}", *data);
282    ///     // The lock is dropped
283    /// }
284    /// ```
285    #[inline]
286    pub fn read(&self) -> RwLockReadGuard<T> {
287        self.try_read()
288            .expect("Failed to get read lock, who are you waiting for?")
289    }
290
291    /// Lock this rwlock with exclusive write access, panicking if it can be acquired.
292    ///
293    /// This function will not return while other writers or other readers
294    /// currently have access to the lock.
295    ///
296    /// Returns an RAII guard which will drop the write access of this rwlock
297    /// when dropped.
298    ///
299    /// ```
300    /// let mylock = nospin::RwLock::new(0);
301    /// {
302    ///     let mut data = mylock.write();
303    ///     // The lock is now locked and the data can be written
304    ///     *data += 1;
305    ///     // The lock is dropped
306    /// }
307    /// ```
308    #[inline]
309    pub fn write(&self) -> RwLockWriteGuard<T> {
310        self.try_write()
311            .expect("Failed to get read lock, who are you waiting for?")
312    }
313
314    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
315    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
316    #[inline]
317    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T> {
318        self.try_upgradeable_read()
319            .expect("Failed to get read lock, who are you waiting for?")
320    }
321}
322
323impl<T: ?Sized> RwLock<T> {
324    // Acquire a read lock, returning the new lock value.
325    fn acquire_reader(&self) -> usize {
326        // An arbitrary cap that allows us to catch overflows long before they happen
327        const MAX_READERS: usize = usize::MAX / READER / 2;
328
329        let value = self.lock.fetch_add(READER, Ordering::Acquire);
330
331        if value > MAX_READERS * READER {
332            self.lock.fetch_sub(READER, Ordering::Relaxed);
333            panic!("Too many lock readers, cannot safely proceed");
334        } else {
335            value
336        }
337    }
338
339    /// Attempt to acquire this lock with shared read access.
340    ///
341    /// This function will never block and will return immediately if `read`
342    /// would otherwise succeed. Returns `Some` of an RAII guard which will
343    /// release the shared access of this thread when dropped, or `None` if the
344    /// access could not be granted. This method does not provide any
345    /// guarantees with respect to the ordering of whether contentious readers
346    /// or writers will acquire the lock first.
347    ///
348    /// ```
349    /// let mylock = nospin::RwLock::new(0);
350    /// {
351    ///     match mylock.try_read() {
352    ///         Some(data) => {
353    ///             // The lock is now locked and the data can be read
354    ///             println!("{}", *data);
355    ///             // The lock is dropped
356    ///         },
357    ///         None => (), // no cigar
358    ///     };
359    /// }
360    /// ```
361    #[inline]
362    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
363        let value = self.acquire_reader();
364
365        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
366        // This helps reduce writer starvation.
367        if value & (WRITER | UPGRADED) != 0 {
368            // Lock is taken, undo.
369            self.lock.fetch_sub(READER, Ordering::Release);
370            None
371        } else {
372            Some(RwLockReadGuard {
373                lock: &self.lock,
374                data: unsafe { &*self.data.get() },
375            })
376        }
377    }
378
379    /// Return the number of readers that currently hold the lock (including upgradable readers).
380    ///
381    /// # Safety
382    ///
383    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
384    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
385    pub fn reader_count(&self) -> usize {
386        let state = self.lock.load(Ordering::Relaxed);
387        state / READER + (state & UPGRADED) / UPGRADED
388    }
389
390    /// Return the number of writers that currently hold the lock.
391    ///
392    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
393    ///
394    /// # Safety
395    ///
396    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
397    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
398    pub fn writer_count(&self) -> usize {
399        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
400    }
401
402    /// Force decrement the reader count.
403    ///
404    /// # Safety
405    ///
406    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
407    /// live, or if called more times than `read` has been called, but can be
408    /// useful in FFI contexts where the caller doesn't know how to deal with
409    /// RAII. The underlying atomic operation uses `Ordering::Release`.
410    #[inline]
411    pub unsafe fn force_read_decrement(&self) {
412        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
413        self.lock.fetch_sub(READER, Ordering::Release);
414    }
415
416    /// Force unlock exclusive write access.
417    ///
418    /// # Safety
419    ///
420    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
421    /// live, or if called when there are current readers, but can be useful in
422    /// FFI contexts where the caller doesn't know how to deal with RAII. The
423    /// underlying atomic operation uses `Ordering::Release`.
424    #[inline]
425    pub unsafe fn force_write_unlock(&self) {
426        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
427        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
428    }
429
430    /// Attempt to lock this rwlock with exclusive write access.
431    ///
432    /// This function does not ever block, and it will return `None` if a call
433    /// to `write` would otherwise block. If successful, an RAII guard is
434    /// returned.
435    ///
436    /// ```
437    /// let mylock = nospin::RwLock::new(0);
438    /// {
439    ///     match mylock.try_write() {
440    ///         Some(mut data) => {
441    ///             // The lock is now locked and the data can be written
442    ///             *data += 1;
443    ///             // The lock is implicitly dropped
444    ///         },
445    ///         None => (), // no cigar
446    ///     };
447    /// }
448    /// ```
449    #[inline]
450    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
451        if self
452            .lock
453            .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
454            .is_ok()
455        {
456            Some(RwLockWriteGuard {
457                inner: self,
458                data: unsafe { &mut *self.data.get() },
459            })
460        } else {
461            None
462        }
463    }
464
465    /// Attempt to lock this rwlock with exclusive write access.
466    ///
467    /// Unlike [`RwLock::try_write`], this function is allowed to spuriously fail even when acquiring exclusive write access
468    /// would otherwise succeed, which can result in more efficient code on some platforms.
469    #[inline]
470    pub fn try_write_weak(&self) -> Option<RwLockWriteGuard<T>> {
471        self.try_write()
472    }
473
474    /// Tries to obtain an upgradeable lock guard.
475    #[inline]
476    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T>> {
477        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
478            Some(RwLockUpgradableGuard {
479                inner: self,
480                data: unsafe { &*self.data.get() },
481            })
482        } else {
483            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
484            // When they unlock, they will clear the bit.
485            None
486        }
487    }
488
489    /// Returns a mutable reference to the underlying data.
490    ///
491    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
492    /// take place -- the mutable borrow statically guarantees no locks exist.
493    ///
494    /// # Examples
495    ///
496    /// ```
497    /// let mut lock = nospin::RwLock::new(0);
498    /// *lock.get_mut() = 10;
499    /// assert_eq!(*lock.read(), 10);
500    /// ```
501    pub fn get_mut(&mut self) -> &mut T {
502        // We know statically that there are no other references to `self`, so
503        // there's no need to lock the inner lock.
504        unsafe { &mut *self.data.get() }
505    }
506}
507
508impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
509    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
510        match self.try_read() {
511            Some(guard) => write!(f, "RwLock {{ data: ")
512                .and_then(|()| (*guard).fmt(f))
513                .and_then(|()| write!(f, " }}")),
514            None => write!(f, "RwLock {{ <locked> }}"),
515        }
516    }
517}
518
519impl<T: Default> Default for RwLock<T> {
520    fn default() -> Self {
521        Self::new(Default::default())
522    }
523}
524
impl<T> From<T> for RwLock<T> {
    // Wraps `data` in a fresh, unlocked `RwLock`.
    fn from(data: T) -> Self {
        Self::new(data)
    }
}
530
531impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
532    /// Leak the lock guard, yielding a reference to the underlying data.
533    ///
534    /// Note that this function will permanently lock the original lock for all but reading locks.
535    ///
536    /// ```
537    /// let mylock = nospin::RwLock::new(0);
538    ///
539    /// let data: &i32 = nospin::RwLockReadGuard::leak(mylock.read());
540    ///
541    /// assert_eq!(*data, 0);
542    /// ```
543    #[inline]
544    pub fn leak(this: Self) -> &'rwlock T {
545        let this = ManuallyDrop::new(this);
546        // Safety: We know statically that only we are referencing data
547        unsafe { &*this.data }
548    }
549}
550
551impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
552    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
553        fmt::Debug::fmt(&**self, f)
554    }
555}
556
557impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
558    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
559        fmt::Display::fmt(&**self, f)
560    }
561}
562
563impl<'rwlock, T: ?Sized + fmt::Debug> RwLockUpgradableGuard<'rwlock, T> {
564    /// Upgrades an upgradeable lock guard to a writable lock guard.
565    ///
566    /// ```
567    /// let mylock = nospin::RwLock::new(0);
568    ///
569    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
570    /// let writable = upgradeable.upgrade();
571    /// ```
572    #[inline]
573    pub fn upgrade(self) -> RwLockWriteGuard<'rwlock, T> {
574        self.try_upgrade()
575            .expect("Failed to get read lock, who are you waiting for?")
576    }
577}
578
impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T> {
    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        // Succeeds only when the state is exactly UPGRADED, i.e. every plain
        // reader has released the lock.
        if self
            .inner
            .lock
            .compare_exchange(UPGRADED, WRITER, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            let inner = self.inner;

            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
            forget(self);

            // Upgrade successful
            Ok(RwLockWriteGuard {
                inner,
                data: unsafe { &mut *inner.data.get() },
            })
        } else {
            Err(self)
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// Unlike [`RwLockUpgradableGuard::try_upgrade`], this function is allowed to spuriously fail even when upgrading
    /// would otherwise succeed, which can result in more efficient code on some platforms.
    #[inline]
    pub fn try_upgrade_weak(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        // Single-threaded: no spurious failures occur, so this is the strong version.
        self.try_upgrade()
    }

    #[inline]
    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        let inner = self.inner;

        // Dropping self removes the UPGRADED bit
        drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(0);
    ///
    /// let data: &i32 = nospin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
    ///
    /// assert_eq!(*data, 0);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock T {
        // ManuallyDrop suppresses the destructor, so the UPGRADED bit is
        // never cleared and the lock stays held forever.
        let this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &*this.data }
    }
}
670
671impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockUpgradableGuard<'_, T> {
672    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
673        fmt::Debug::fmt(&**self, f)
674    }
675}
676
677impl<T: ?Sized + fmt::Display> fmt::Display for RwLockUpgradableGuard<'_, T> {
678    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
679        fmt::Display::fmt(&**self, f)
680    }
681}
682
impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        // Dropping self clears the WRITER (and any pending UPGRADED) bit,
        // leaving only the reader slot reserved above.
        let inner = self.inner;

        drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T> {
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED),
            WRITER
        );

        // Reserve the upgradeable slot for ourselves. A plain store is fine:
        // the writer excludes all readers, so no reader bits can be set here.
        self.inner.lock.store(UPGRADED, Ordering::Release);

        let inner = self.inner;

        // Forget self so its destructor does not clear the UPGRADED bit we
        // just stored (the old comment wrongly said "Dropping self").
        forget(self);

        RwLockUpgradableGuard {
            inner,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = nospin::RwLock::new(0);
    ///
    /// let data: &mut i32 = nospin::RwLockWriteGuard::leak(mylock.write());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock mut T {
        // ManuallyDrop suppresses the destructor, so WRITER is never cleared.
        let mut this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}
763
764impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
765    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
766        fmt::Debug::fmt(&**self, f)
767    }
768}
769
770impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
771    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
772        fmt::Display::fmt(&**self, f)
773    }
774}
775
impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
    type Target = T;

    // Shared access is valid for as long as this guard holds its reader slot.
    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}
784
impl<T: ?Sized> Deref for RwLockUpgradableGuard<'_, T> {
    type Target = T;

    // Shared access is valid while UPGRADED is held (no writer can exist).
    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}
793
impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
    type Target = T;

    // The writer has exclusive access, so a shared borrow is trivially fine.
    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}
802
impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
    // Exclusive access is guaranteed while WRITER is held.
    fn deref_mut(&mut self) -> &mut T {
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}
809
impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
    fn drop(&mut self) {
        // At least one READER slot (the bits above WRITER/UPGRADED) must be
        // outstanding — namely ours.
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}
816
impl<T: ?Sized> Drop for RwLockUpgradableGuard<'_, T> {
    fn drop(&mut self) {
        // While this guard lives, UPGRADED must be set and WRITER clear.
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        // Clearing our bit re-admits writers and new readers.
        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}
826
impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
    fn drop(&mut self) {
        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);

        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        // Afterwards the state word is 0 and the lock is free.
        self.inner
            .lock
            .fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}
838
#[cfg(feature = "lock_api")]
unsafe impl lock_api_crate::RawRwLock for RwLock<()> {
    type GuardMarker = lock_api_crate::GuardSend;

    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new(());

    // The raw-lock methods acquire via the RAII API and then `forget` the
    // guard, so lock state is tracked solely by the lock word. The `unlock_*`
    // methods reconstruct a guard around the zero-sized `()` data and drop it.

    #[inline(always)]
    fn lock_exclusive(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.write());
    }

    #[inline(always)]
    fn try_lock_exclusive(&self) -> bool {
        // Prevent guard destructor running
        self.try_write().map(core::mem::forget).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_exclusive(&self) {
        drop(RwLockWriteGuard {
            inner: self,
            data: &mut (),
        });
    }

    #[inline(always)]
    fn lock_shared(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.read());
    }

    #[inline(always)]
    fn try_lock_shared(&self) -> bool {
        // Prevent guard destructor running
        self.try_read().map(core::mem::forget).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_shared(&self) {
        drop(RwLockReadGuard {
            lock: &self.lock,
            data: &(),
        });
    }

    #[inline(always)]
    fn is_locked(&self) -> bool {
        // Any nonzero state word means a reader, upgradeable reader, or writer.
        self.lock.load(Ordering::Relaxed) != 0
    }
}
891
#[cfg(feature = "lock_api")]
unsafe impl lock_api_crate::RawRwLockUpgrade for RwLock<()> {
    // Upgradeable locking follows the same forget/reconstruct pattern as the
    // `RawRwLock` impl above: state lives entirely in the lock word.

    #[inline(always)]
    fn lock_upgradable(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.upgradeable_read());
    }

    #[inline(always)]
    fn try_lock_upgradable(&self) -> bool {
        // Prevent guard destructor running
        self.try_upgradeable_read().map(core::mem::forget).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_upgradable(&self) {
        drop(RwLockUpgradableGuard {
            inner: self,
            data: &(),
        });
    }

    #[inline(always)]
    unsafe fn upgrade(&self) {
        // Rebuild the (previously forgotten) upgradeable guard so the RAII
        // upgrade path can be reused, then forget the resulting write guard.
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
        };
        core::mem::forget(tmp_guard.upgrade());
    }

    #[inline(always)]
    unsafe fn try_upgrade(&self) -> bool {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
        };
        tmp_guard.try_upgrade().map(core::mem::forget).is_ok()
    }
}
932
#[cfg(feature = "lock_api")]
unsafe impl lock_api_crate::RawRwLockDowngrade for RwLock<()> {
    unsafe fn downgrade(&self) {
        // Reconstruct the write guard that `lock_exclusive` leaked, downgrade
        // it to a read guard, and leak that in turn so the shared lock stays
        // held for the caller.
        let write_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
        };
        let read_guard = write_guard.downgrade();
        core::mem::forget(read_guard);
    }
}
943
#[cfg(feature = "lock_api")]
unsafe impl lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<()> {
    unsafe fn downgrade_upgradable(&self) {
        // Rebuild the leaked upgradable guard, downgrade it to a plain read
        // guard, and leak the result so the shared lock stays held.
        let upgradable_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
        };
        let read_guard = upgradable_guard.downgrade();
        core::mem::forget(read_guard);
    }

    unsafe fn downgrade_to_upgradable(&self) {
        // Rebuild the leaked write guard, convert it to an upgradable read
        // guard, and leak the result so the upgradable lock stays held.
        let write_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
        };
        let upgradable_guard = write_guard.downgrade_to_upgradeable();
        core::mem::forget(upgradable_guard);
    }
}
962
#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::mem::forget;
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    type RwLock<T> = super::RwLock<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    // Basic lifecycle: guards can be taken and dropped in any legal order.
    #[test]
    fn smoke() {
        let lock = RwLock::new(());
        drop(lock.read());
        drop(lock.write());
        drop((lock.read(), lock.read()));
        drop(lock.write());
    }

    // A write guard acquired during unwinding must still work and be
    // released, leaving the lock usable afterwards.
    #[test]
    fn test_rw_access_in_unwind() {
        let lock = Arc::new(RwLock::new(1));
        let remote = Arc::clone(&lock);
        let _ = thread::spawn(move || {
            struct Unwinder {
                lock: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    *self.lock.write() += 1;
                }
            }
            let _unwinder = Unwinder { lock: remote };
            panic!();
        })
        .join();
        assert_eq!(*lock.read(), 2);
    }

    // The lock works with unsized contents such as slices.
    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let slice = &mut *rw.write();
            slice[0] = 4;
            slice[2] = 5;
        }
        let expected: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), expected);
    }

    // An outstanding read guard must block try_write.
    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        assert!(
            lock.try_write().is_none(),
            "try_write should not succeed while read_guard is in scope"
        );

        drop(read_guard);
    }

    // A leaked write guard must block try_read.
    #[test]
    fn test_rw_try_read() {
        let lock = RwLock::new(0);
        forget(lock.write());
        assert!(lock.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let lock = RwLock::new(NonCopy(10));
        assert_eq!(lock.into_inner(), NonCopy(10));
    }

    // into_inner must hand ownership to the caller exactly once: the value
    // is dropped when the caller drops it, not before.
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let drops = Arc::new(AtomicUsize::new(0));
        let lock = RwLock::new(Foo(Arc::clone(&drops)));
        assert_eq!(drops.load(Ordering::SeqCst), 0);
        {
            let _value = lock.into_inner();
            assert_eq!(drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(drops.load(Ordering::SeqCst), 1);
    }

    // Each force_read_decrement undoes exactly one leaked read guard.
    #[test]
    fn test_force_read_decrement() {
        let lock = RwLock::new(());
        for _ in 0..3 {
            forget(lock.read());
        }
        assert!(lock.try_write().is_none());
        unsafe {
            lock.force_read_decrement();
            lock.force_read_decrement();
        }
        assert!(lock.try_write().is_none());
        unsafe {
            lock.force_read_decrement();
        }
        assert!(lock.try_write().is_some());
    }

    // force_write_unlock releases a leaked write guard.
    #[test]
    fn test_force_write_unlock() {
        let lock = RwLock::new(());
        forget(lock.write());
        assert!(lock.try_read().is_none());
        unsafe {
            lock.force_write_unlock();
        }
        assert!(lock.try_read().is_some());
    }

    // Interaction between plain read, upgradeable read, and write guards.
    #[test]
    fn test_upgrade_downgrade() {
        let lock = RwLock::new(());
        {
            let _reader = lock.read();
            let upgradable = lock.try_upgradeable_read().unwrap();
            assert!(lock.try_read().is_none());
            assert!(lock.try_write().is_none());
            assert!(upgradable.try_upgrade().is_err());
        }
        {
            let writer = lock.write();
            assert!(lock.try_upgradeable_read().is_none());
            let _reader = writer.downgrade();
            assert!(lock.try_upgradeable_read().is_some());
            assert!(lock.try_read().is_some());
            assert!(lock.try_write().is_none());
        }
        {
            let _upgradable = lock.upgradeable_read();
            assert!(lock.try_upgradeable_read().is_none());
        }

        assert!(lock.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}