static_locks/rwlock.rs

// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.

use parking_lot::{
    lock_api::{
        RawRwLock as _,
        RawRwLockFair as _,
        RawRwLockDowngrade as _,
        RawRwLockTimed as _,
        RawRwLockRecursive as _,
        RawRwLockRecursiveTimed as _,
        RawRwLockUpgrade as _,
        RawRwLockUpgradeFair as _,
        RawRwLockUpgradeDowngrade as _,
        RawRwLockUpgradeTimed as _,
    },
    RawRwLock,
};
use core::cell::UnsafeCell;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::time::Duration;
use std::time::Instant;

#[cfg(feature = "owning_ref_support")]
use owning_ref::StableAddress;

#[cfg(feature = "serde_support")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across threads and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
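///
/// # Example
///
/// A minimal usage sketch; the importing crate name `static_locks` is an
/// assumption here, so the example is not compiled:
///
/// ```ignore
/// use static_locks::RwLock; // crate name assumed
///
/// let lock = RwLock::new(5);
///
/// // Any number of readers can coexist.
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1 + *r2, 10);
/// } // both read guards released here
///
/// // Only one writer at a time, with exclusive access.
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// }
/// ```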
pub struct RwLock<T: ?Sized> {
    raw: RawRwLock,
    data: UnsafeCell<T>,
}

// Copied and modified from serde
#[cfg(feature = "serde_support")]
impl<T> Serialize for RwLock<T>
where
    T: Serialize + ?Sized,
{
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.read().serialize(serializer)
    }
}

#[cfg(feature = "serde_support")]
impl<'de, T> Deserialize<'de> for RwLock<T>
where
    T: Deserialize<'de> + ?Sized,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(RwLock::new)
    }
}

unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

impl<T> RwLock<T> {
    /// Creates a new instance of an `RwLock<T>` which is unlocked.
    #[inline]
    pub const fn new(val: T) -> RwLock<T> {
        RwLock {
            data: UnsafeCell::new(val),
            raw: RawRwLock::INIT,
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
    #[inline]
    #[allow(unused_unsafe)]
    pub fn into_inner(self) -> T {
        unsafe { self.data.into_inner() }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn read_guard(&self) -> RwLockReadGuard<'_, T> {
        RwLockReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn write_guard(&self) -> RwLockWriteGuard<'_, T> {
        RwLockWriteGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Note that attempts to recursively acquire a read lock on a `RwLock` when
    /// the current thread already holds one may result in a deadlock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, T> {
        self.raw.lock_shared();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
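    ///
    /// # Example
    ///
    /// A sketch of the non-blocking behavior (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::RwLock; // crate name assumed
    ///
    /// let lock = RwLock::new(1);
    /// let writer = lock.write();
    /// // A writer is active, so shared access cannot be granted.
    /// assert!(lock.try_read().is_none());
    /// drop(writer);
    /// // With the writer gone, try_read succeeds immediately.
    /// assert_eq!(*lock.try_read().unwrap(), 1);
    /// ```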
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Locks this `RwLock` with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this `RwLock`
    /// when dropped.
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, T> {
        self.raw.lock_exclusive();
        // SAFETY: The lock is held, as required.
        unsafe { self.write_guard() }
    }

    /// Attempts to lock this `RwLock` with exclusive write access.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the lock when
    /// it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T>> {
        if self.raw.try_lock_exclusive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place---the mutable borrow statically guarantees no locks exist.
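    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::RwLock; // crate name assumed
    ///
    /// let mut lock = RwLock::new(0);
    /// *lock.get_mut() = 10; // no locking: we hold `&mut RwLock<T>`
    /// assert_eq!(*lock.read(), 10);
    /// ```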
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data.get() }
    }

    /// Forcibly unlocks a read lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
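    ///
    /// # Example
    ///
    /// A sketch of the intended `mem::forget` pattern (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::RwLock; // crate name assumed
    /// use std::mem;
    ///
    /// let lock = RwLock::new(0);
    /// mem::forget(lock.read()); // the shared lock stays held, guard is gone
    /// // ... later, e.g. after a round-trip through FFI ...
    /// // SAFETY: this thread logically owns the read lock forgotten above.
    /// unsafe { lock.force_unlock_read() };
    /// ```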
    #[inline]
    pub unsafe fn force_unlock_read(&self) {
        self.raw.unlock_shared();
    }

    /// Forcibly unlocks a write lock.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write(&self) {
        self.raw.unlock_exclusive();
    }

    /// Returns the underlying raw reader-writer lock object.
    ///
    /// Note that you will most likely need to import the `RawRwLock` trait from
    /// `lock_api` to be able to call functions on the raw
    /// reader-writer lock.
    ///
    /// # Safety
    ///
    /// This method is unsafe because it allows unlocking a lock while
    /// still holding a reference to a lock guard.
    pub unsafe fn raw(&self) -> &RawRwLock {
        &self.raw
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Forcibly unlocks a read lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockReadGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockReadGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is read-unlocked when not read-locked.
    #[inline]
    pub unsafe fn force_unlock_read_fair(&self) {
        self.raw.unlock_shared_fair();
    }

    /// Forcibly unlocks a write lock using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `RwLockWriteGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `RwLockWriteGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a rwlock is write-unlocked when not write-locked.
    #[inline]
    pub unsafe fn force_unlock_write_fair(&self) {
        self.raw.unlock_exclusive_fair();
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
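    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::RwLock; // crate name assumed
    /// use std::time::Duration;
    ///
    /// let lock = RwLock::new(1);
    /// // No writer holds the lock, so this succeeds without waiting.
    /// let guard = lock.try_read_for(Duration::from_millis(10)).unwrap();
    /// assert_eq!(*guard, 1);
    /// ```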
    #[inline]
    pub fn try_read_for(&self, timeout: Duration) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_until(&self, timeout: Instant) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_for(&self, timeout: Duration) -> Option<RwLockWriteGuard<'_, T>> {
        if self.raw.try_lock_exclusive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with exclusive write access until a
    /// timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the exclusive access when it is dropped.
    #[inline]
    pub fn try_write_until(&self, timeout: Instant) -> Option<RwLockWriteGuard<'_, T>> {
        if self.raw.try_lock_exclusive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.write_guard() })
        } else {
            None
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this `RwLock` with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns.
    ///
    /// Unlike `read`, this method is guaranteed to succeed without blocking if
    /// another read lock is held at the time of the call. This allows a thread
    /// to recursively lock a `RwLock`. However, using this method can cause
    /// writers to starve since readers no longer block if a writer is waiting
    /// for the lock.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
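    ///
    /// # Example
    ///
    /// A sketch of recursive acquisition (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::RwLock; // crate name assumed
    ///
    /// let lock = RwLock::new(1);
    /// let first = lock.read_recursive();
    /// // Unlike `read`, this cannot block even if a writer is queued.
    /// let second = lock.read_recursive();
    /// assert_eq!(*first, *second);
    /// ```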
    #[inline]
    pub fn read_recursive(&self) -> RwLockReadGuard<'_, T> {
        self.raw.lock_shared_recursive();
        // SAFETY: The lock is held, as required.
        unsafe { self.read_guard() }
    }

    /// Attempts to acquire this `RwLock` with shared read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This method is guaranteed to succeed if another read lock is held at the
    /// time of the call. See the documentation for `read_recursive` for details.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared_recursive() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    ///
    /// This method is guaranteed to succeed without blocking if another read
    /// lock is held at the time of the call. See the documentation for
    /// `read_recursive` for details.
    #[inline]
    pub fn try_read_recursive_for(
        &self,
        timeout: Duration,
    ) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared_recursive_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with shared read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_read_recursive_until(
        &self,
        timeout: Instant,
    ) -> Option<RwLockReadGuard<'_, T>> {
        if self.raw.try_lock_shared_recursive_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.read_guard() })
        } else {
            None
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// # Safety
    ///
    /// The lock must be held when calling this method.
    #[inline]
    unsafe fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, T> {
        RwLockUpgradableReadGuard {
            rwlock: self,
            marker: PhantomData,
        }
    }

    /// Locks this `RwLock` with upgradable read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers or other
    /// upgradable reads which hold the lock. There may be other readers currently
    /// inside the lock when this method returns.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
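    ///
    /// # Example
    ///
    /// A sketch of an upgradable read followed by an upgrade (crate name
    /// assumed):
    ///
    /// ```ignore
    /// use static_locks::{RwLock, RwLockUpgradableReadGuard}; // crate name assumed
    ///
    /// let lock = RwLock::new(vec![1, 2]);
    /// let guard = lock.upgradable_read();
    /// if guard.len() < 3 {
    ///     // Atomically trade shared access for exclusive access.
    ///     let mut guard = RwLockUpgradableReadGuard::upgrade(guard);
    ///     guard.push(3);
    /// }
    /// assert_eq!(lock.read().len(), 3);
    /// ```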
    #[inline]
    pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, T> {
        self.raw.lock_upgradable();
        // SAFETY: The lock is held, as required.
        unsafe { self.upgradable_guard() }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access.
    ///
    /// If the access could not be granted at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned which will release the shared access
    /// when it is dropped.
    ///
    /// This function does not block.
    #[inline]
    pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, T>> {
        if self.raw.try_lock_upgradable() {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_for(
        &self,
        timeout: Duration,
    ) -> Option<RwLockUpgradableReadGuard<'_, T>> {
        if self.raw.try_lock_upgradable_for(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }

    /// Attempts to acquire this `RwLock` with upgradable read access until a timeout
    /// is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// `None` is returned. Otherwise, an RAII guard is returned which will
    /// release the shared access when it is dropped.
    #[inline]
    pub fn try_upgradable_read_until(
        &self,
        timeout: Instant,
    ) -> Option<RwLockUpgradableReadGuard<'_, T>> {
        if self.raw.try_lock_upgradable_until(timeout) {
            // SAFETY: The lock is held, as required.
            Some(unsafe { self.upgradable_guard() })
        } else {
            None
        }
    }
}

impl<T: ?Sized + Default> Default for RwLock<T> {
    #[inline]
    fn default() -> RwLock<T> {
        RwLock::new(Default::default())
    }
}

impl<T> From<T> for RwLock<T> {
    #[inline]
    fn from(t: T) -> RwLock<T> {
        RwLock::new(t)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self.try_read() {
            Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
            None => {
                struct LockedPlaceholder;
                impl fmt::Debug for LockedPlaceholder {
                    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
                        f.write_str("<locked>")
                    }
                }

                f.debug_struct("RwLock")
                    .field("data", &LockedPlaceholder)
                    .finish()
            }
        }
    }
}

/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, T: ?Sized> {
    rwlock: &'a RwLock<T>,
    marker: PhantomData<(&'a T, *mut ())>,
}

unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for RwLockReadGuard<'a, T> {}

impl<'a, T: ?Sized + 'a> RwLockReadGuard<'a, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
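    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::{RwLock, RwLockReadGuard}; // crate name assumed
    ///
    /// struct Point { x: i32, y: i32 }
    ///
    /// let lock = RwLock::new(Point { x: 1, y: 2 });
    /// // Narrow the guard down to just the `x` field.
    /// let x = RwLockReadGuard::map(lock.read(), |p| &p.x);
    /// assert_eq!(*x, 1);
    /// ```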
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &*s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &*s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }
}

impl<'a, T: ?Sized + 'a> RwLockReadGuard<'a, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockReadGuard` normally.
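    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::{RwLock, RwLockReadGuard}; // crate name assumed
    ///
    /// let lock = RwLock::new(0);
    /// let guard = lock.read();
    /// // Hand the lock to a waiting thread, if any, instead of letting this
    /// // thread race to re-acquire it.
    /// RwLockReadGuard::unlock_fair(guard);
    /// ```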
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_shared_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_shared_fair();
        defer!(s.rwlock.raw.lock_shared());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `read`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_shared();
    }
}

impl<'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_shared();
    }
}

impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockReadGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref_support")]
unsafe impl<'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, T> {}

/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, T: ?Sized> {
    rwlock: &'a RwLock<T>,
    marker: PhantomData<(&'a mut T, *mut ())>,
}

unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for RwLockWriteGuard<'a, T> {}

impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<T> {
        s.rwlock
    }

    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = &s.rwlock.raw;
        let data = f(unsafe { &mut *s.rwlock.data.get() });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `RwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `RwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = &s.rwlock.raw;
        let data = match f(unsafe { &mut *s.rwlock.data.get() }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }
}

impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
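    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::{RwLock, RwLockWriteGuard}; // crate name assumed
    ///
    /// let lock = RwLock::new(0);
    /// let mut writer = lock.write();
    /// *writer += 1;
    /// // Keep shared access without ever fully releasing the lock.
    /// let reader = RwLockWriteGuard::downgrade(writer);
    /// assert_eq!(*reader, 1);
    /// ```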
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
        s.rwlock.raw.downgrade();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
    /// Atomically downgrades a write lock into an upgradable read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, T> {
        s.rwlock.raw.downgrade_to_upgradable();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockUpgradableReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, T: ?Sized + 'a> RwLockWriteGuard<'a, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_exclusive_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_exclusive_fair();
        defer!(s.rwlock.raw.lock_exclusive());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `write`; however, it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_exclusive();
    }
}

impl<'a, T: ?Sized + 'a> Deref for RwLockWriteGuard<'a, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.rwlock.data.get() }
    }
}

impl<'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_exclusive();
    }
}

impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockWriteGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref_support")]
unsafe impl<'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, T> {}

/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockUpgradableReadGuard<'a, T: ?Sized> {
    rwlock: &'a RwLock<T>,
    marker: PhantomData<(&'a T, *mut ())>,
}

unsafe impl<'a, T: ?Sized + Sync + 'a> Sync
    for RwLockUpgradableReadGuard<'a, T>
{
}

impl<'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, T> {
    /// Returns a reference to the original reader-writer lock object.
    pub fn rwlock(s: &Self) -> &'a RwLock<T> {
        s.rwlock
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_upgradable();
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Atomically upgrades an upgradable read lock into an exclusive write lock,
    /// blocking the current thread until it can be acquired.
    pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, T> {
        s.rwlock.raw.upgrade();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockWriteGuard {
            rwlock,
            marker: PhantomData,
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive write lock.
    ///
    /// If the access could not be granted at this time, then the current guard is returned.
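    ///
    /// # Example
    ///
    /// A sketch (crate name assumed):
    ///
    /// ```ignore
    /// use static_locks::{RwLock, RwLockUpgradableReadGuard}; // crate name assumed
    ///
    /// let lock = RwLock::new(0);
    /// let guard = lock.upgradable_read();
    /// match RwLockUpgradableReadGuard::try_upgrade(guard) {
    ///     Ok(mut writer) => *writer += 1,
    ///     // On failure, the upgradable guard is handed back unchanged.
    ///     Err(guard) => assert_eq!(*guard, 0),
    /// }
    /// ```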
    pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, T>, Self> {
        if s.rwlock.raw.try_upgrade() {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.rwlock.raw.unlock_upgradable_fair();
        mem::forget(s);
    }

    /// Temporarily unlocks the `RwLock` to execute the given function.
    ///
    /// The `RwLock` is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the `RwLock`.
    #[inline]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        s.rwlock.raw.unlock_upgradable_fair();
        defer!(s.rwlock.raw.lock_upgradable());
        f()
    }

    /// Temporarily yields the `RwLock` to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `upgradable_read`; however, it can be much more efficient in the case
    /// where there are no waiting threads.
    #[inline]
    pub fn bump(s: &mut Self) {
        s.rwlock.raw.bump_upgradable();
    }
}

impl<'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, T> {
    /// Atomically downgrades an upgradable read lock into a shared read lock
    /// without allowing any writers to take exclusive access of the lock in the
    /// meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
        s.rwlock.raw.downgrade_upgradable();
        let rwlock = s.rwlock;
        mem::forget(s);
        RwLockReadGuard {
            rwlock,
            marker: PhantomData,
        }
    }
}

impl<'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, T> {
    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    pub fn try_upgrade_for(
        s: Self,
        timeout: Duration,
    ) -> Result<RwLockWriteGuard<'a, T>, Self> {
        if s.rwlock.raw.try_upgrade_for(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }

    /// Tries to atomically upgrade an upgradable read lock into an exclusive
    /// write lock, until a timeout is reached.
    ///
    /// If the access could not be granted before the timeout expires, then
    /// the current guard is returned.
    #[inline]
    pub fn try_upgrade_until(
        s: Self,
        timeout: Instant,
    ) -> Result<RwLockWriteGuard<'a, T>, Self> {
        if s.rwlock.raw.try_upgrade_until(timeout) {
            let rwlock = s.rwlock;
            mem::forget(s);
            Ok(RwLockWriteGuard {
                rwlock,
                marker: PhantomData,
            })
        } else {
            Err(s)
        }
    }
}

impl<'a, T: ?Sized + 'a> Deref for RwLockUpgradableReadGuard<'a, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.rwlock.data.get() }
    }
}

impl<'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.rwlock.raw.unlock_upgradable();
    }
}

impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for RwLockUpgradableReadGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for RwLockUpgradableReadGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref_support")]
unsafe impl<'a, T: ?Sized + 'a> StableAddress
    for RwLockUpgradableReadGuard<'a, T>
{
}

/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockReadGuard` and `RwLockReadGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, T: ?Sized> {
    raw: &'a RawRwLock,
    data: *const T,
    marker: PhantomData<&'a T>,
}

unsafe impl<'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, T> {}

impl<'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, T> {
    /// Make a new `MappedRwLockReadGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockReadGuard<'a, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        let data = f(unsafe { &*s.data });
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockReadGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedRwLockReadGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockReadGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockReadGuard<'a, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockReadGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_shared_fair();
        mem::forget(s);
    }
}

impl<'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_shared();
    }
}

impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockReadGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockReadGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref_support")]
unsafe impl<'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockReadGuard<'a, T>
{
}

/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedRwLockWriteGuard` and `RwLockWriteGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, T: ?Sized> {
    raw: &'a RawRwLock,
    data: *mut T,
    marker: PhantomData<&'a mut T>,
}

unsafe impl<'a, T: ?Sized + Sync + 'a> Sync
    for MappedRwLockWriteGuard<'a, T>
{
}

impl<'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, T> {
    /// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedRwLockWriteGuard<'a, U>
    where
        F: FnOnce(&mut T) -> &mut U,
    {
        let raw = s.raw;
        let data = f(unsafe { &mut *s.data });
        mem::forget(s);
        MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedRwLockWriteGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedRwLockWriteGuard` passed
    /// in already locked the data.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedRwLockWriteGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedRwLockWriteGuard<'a, U>, Self>
    where
        F: FnOnce(&mut T) -> Option<&mut U>,
    {
        let raw = s.raw;
        let data = match f(unsafe { &mut *s.data }) {
            Some(data) => data,
            None => return Err(s),
        };
        mem::forget(s);
        Ok(MappedRwLockWriteGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}

impl<'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, T> {
    /// Atomically downgrades a write lock into a read lock without allowing any
    /// writers to take exclusive access of the lock in the meantime.
    ///
    /// Note that if there are any writers currently waiting to take the lock
    /// then other readers may not be able to acquire the lock even if it was
    /// downgraded.
    pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, T> {
        s.raw.downgrade();
        let raw = s.raw;
        let data = s.data;
        mem::forget(s);
        MappedRwLockReadGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }
}

impl<'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, T> {
    /// Unlocks the `RwLock` using a fair unlock protocol.
    ///
    /// By default, `RwLock` is unfair and allows the current thread to re-lock
    /// the `RwLock` before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the `RwLock` for a long time. This is
    /// the default because it allows much higher throughput as it avoids
    /// forcing a context switch on every `RwLock` unlock. This can result in one
    /// thread acquiring a `RwLock` many more times than other threads.
    ///
    /// However, in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
    #[inline]
    pub fn unlock_fair(s: Self) {
        s.raw.unlock_exclusive_fair();
        mem::forget(s);
    }
}

impl<'a, T: ?Sized + 'a> Deref for MappedRwLockWriteGuard<'a, T> {
    type Target = T;
    #[inline]
    fn deref(&self) -> &T {
        unsafe { &*self.data }
    }
}

impl<'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<'a, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        unsafe { &mut *self.data }
    }
}

impl<'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        self.raw.unlock_exclusive();
    }
}

impl<'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
    for MappedRwLockWriteGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'a, T: fmt::Display + ?Sized + 'a> fmt::Display
    for MappedRwLockWriteGuard<'a, T>
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[cfg(feature = "owning_ref_support")]
unsafe impl<'a, T: ?Sized + 'a> StableAddress
    for MappedRwLockWriteGuard<'a, T>
{
}
1408
1409
1410#[cfg(test)]
1411mod tests {
1412    use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
1413    use rand::Rng;
1414    use std::sync::atomic::{AtomicUsize, Ordering};
1415    use std::sync::mpsc::channel;
1416    use std::sync::Arc;
1417    use std::thread;
1418    use std::time::Duration;
1419
1420    #[cfg(feature = "serde_support")]
1421    use bincode::{deserialize, serialize};
1422
1423    #[derive(Eq, PartialEq, Debug)]
1424    struct NonCopy(i32);
1425
1426    #[test]
1427    fn smoke() {
1428        let l = RwLock::new(());
1429        drop(l.read());
1430        drop(l.write());
1431        drop(l.upgradable_read());
1432        drop((l.read(), l.read()));
1433        drop((l.read(), l.upgradable_read()));
1434        drop(l.write());
1435    }
1436
1437    #[test]
1438    fn frob() {
1439        const N: u32 = 10;
1440        const M: u32 = 1000;
1441
1442        let r = Arc::new(RwLock::new(()));
1443
1444        let (tx, rx) = channel::<()>();
1445        for _ in 0..N {
1446            let tx = tx.clone();
1447            let r = r.clone();
1448            thread::spawn(move || {
1449                let mut rng = rand::thread_rng();
1450                for _ in 0..M {
1451                    if rng.gen_bool(1.0 / N as f64) {
1452                        drop(r.write());
1453                    } else {
1454                        drop(r.read());
1455                    }
1456                }
1457                drop(tx);
1458            });
1459        }
1460        drop(tx);
1461        let _ = rx.recv();
1462    }
1463
1464    #[test]
1465    fn test_rw_arc_no_poison_wr() {
1466        let arc = Arc::new(RwLock::new(1));
1467        let arc2 = arc.clone();
1468        let _: Result<(), _> = thread::spawn(move || {
1469            let _lock = arc2.write();
1470            panic!();
1471        })
1472        .join();
1473        let lock = arc.read();
1474        assert_eq!(*lock, 1);
1475    }
1476
1477    #[test]
1478    fn test_rw_arc_no_poison_ww() {
1479        let arc = Arc::new(RwLock::new(1));
1480        let arc2 = arc.clone();
1481        let _: Result<(), _> = thread::spawn(move || {
1482            let _lock = arc2.write();
1483            panic!();
1484        })
1485        .join();
1486        let lock = arc.write();
1487        assert_eq!(*lock, 1);
1488    }
1489
1490    #[test]
1491    fn test_rw_arc_no_poison_rr() {
1492        let arc = Arc::new(RwLock::new(1));
1493        let arc2 = arc.clone();
1494        let _: Result<(), _> = thread::spawn(move || {
1495            let _lock = arc2.read();
1496            panic!();
1497        })
1498        .join();
1499        let lock = arc.read();
1500        assert_eq!(*lock, 1);
1501    }
1502
1503    #[test]
1504    fn test_rw_arc_no_poison_rw() {
1505        let arc = Arc::new(RwLock::new(1));
1506        let arc2 = arc.clone();
1507        let _: Result<(), _> = thread::spawn(move || {
1508            let _lock = arc2.read();
1509            panic!()
1510        })
1511        .join();
1512        let lock = arc.write();
1513        assert_eq!(*lock, 1);
1514    }
1515
    #[test]
    fn test_ruw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            for _ in 0..10 {
                let mut lock = arc2.write();
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        let mut children = Vec::new();

        // Upgradable readers try to catch the writer in the act and also
        // try to touch the value
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.upgradable_read();
                let tmp = *lock;
                assert!(tmp >= 0);
                thread::yield_now();
                let mut lock = RwLockUpgradableReadGuard::upgrade(lock);
                assert_eq!(tmp, *lock);
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }));
        }

        // Readers try to catch the writers in the act
        for _ in 0..5 {
            let arc4 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc4.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 15);
    }

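    // The writer repeatedly leaves the value in a temporarily invalid state
    // (-1) while holding the lock; concurrent readers must never observe it.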
    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

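    // The lock must remain usable from a `Drop` impl that runs while the
    // owning thread is unwinding from a panic.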
    #[test]
    fn test_rw_arc_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

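    // `RwLock` supports unsized contents such as slices, via unsizing
    // coercion of `&RwLock<[i32; 3]>` to `&RwLock<[i32]>`.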
    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

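    // The next three tests pin down which guard combinations may coexist:
    // readers share with other readers and with one upgradable reader,
    // while a writer excludes everything else.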
    #[test]
    fn test_rwlock_try_read() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let read_result = lock.try_read();
            assert!(
                read_result.is_some(),
                "try_read should succeed while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let read_result = lock.try_read();
            assert!(
                read_result.is_none(),
                "try_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_write() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let write_result = lock.try_write();
            assert!(
                write_result.is_none(),
                "try_write should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_rwlock_try_upgrade() {
        let lock = RwLock::new(0isize);
        {
            let read_guard = lock.read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_some(),
                "try_upgradable_read should succeed while read_guard is in scope"
            );

            drop(read_guard);
        }
        {
            let upgrade_guard = lock.upgradable_read();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while upgrade_guard is in scope"
            );

            drop(upgrade_guard);
        }
        {
            let write_guard = lock.write();

            let upgrade_result = lock.try_upgradable_read();
            assert!(
                upgrade_result.is_none(),
                "try_upgradable_read should fail while write_guard is in scope"
            );

            drop(write_guard);
        }
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

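    // `into_inner` must move the value out without running its destructor;
    // the drop count only increases once the returned value itself is dropped.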
    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_get_mut() {
        let mut m = RwLock::new(NonCopy(10));
        *m.get_mut() = NonCopy(20);
        assert_eq!(m.into_inner(), NonCopy(20));
    }

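    // Read and write guards are `Sync`, so they can be shared across
    // threads by reference.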
    #[test]
    fn test_rwlockguard_sync() {
        fn sync<T: Sync>(_: T) {}

        let rwlock = RwLock::new(());
        sync(rwlock.read());
        sync(rwlock.write());
    }

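    // Downgrading a write guard to a read guard must be atomic: the former
    // writer has to observe its own update before any other writer can run.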
    #[test]
    fn test_rwlock_downgrade() {
        let x = Arc::new(RwLock::new(0));
        let mut handles = Vec::new();
        for _ in 0..8 {
            let x = x.clone();
            handles.push(thread::spawn(move || {
                for _ in 0..100 {
                    let mut writer = x.write();
                    *writer += 1;
                    let cur_val = *writer;
                    let reader = RwLockWriteGuard::downgrade(writer);
                    assert_eq!(cur_val, *reader);
                }
            }));
        }
        for handle in handles {
            handle.join().unwrap();
        }
        assert_eq!(*x.read(), 800);
    }

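    // `read_recursive` must not block even when a writer is queued, which
    // avoids deadlock for a thread that already holds a read lock.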
    #[test]
    fn test_rwlock_recursive() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _lock1 = arc.read();
        thread::spawn(move || {
            let _lock = arc2.write();
        });

        if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
            thread::sleep(Duration::from_millis(100));
        } else {
            // FIXME: https://github.com/fortanix/rust-sgx/issues/31
            for _ in 0..100 {
                thread::yield_now();
            }
        }

        // A normal read would block here since there is a pending writer
        let _lock2 = arc.read_recursive();
    }

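    // The `Debug` impl shows the protected data while the lock is free and
    // `<locked>` while a writer holds it.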
    #[test]
    fn test_rwlock_debug() {
        let x = RwLock::new(vec![0u8, 10]);

        assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
        let _lock = x.write();
        assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
    }

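    // Guards do not implement `Clone`, so `a.clone()` resolves through
    // `Deref` and clones the inner `Arc`, bumping its strong count.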
    #[test]
    fn test_clone() {
        let rwlock = RwLock::new(Arc::new(1));
        let a = rwlock.read_recursive();
        let b = a.clone();
        assert_eq!(Arc::strong_count(&b), 2);
    }

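    // Round-trips the protected value through serde and checks that the
    // contents survive intact.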
    #[cfg(feature = "serde_support")]
    #[test]
    fn test_serde() {
        let contents: Vec<u8> = vec![0, 1, 2];
        let rwlock = RwLock::new(contents.clone());

        let serialized = serialize(&rwlock).unwrap();
        let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();

        assert_eq!(*(rwlock.read()), *(deserialized.read()));
        assert_eq!(contents, *(deserialized.read()));
    }
}