// spin_sync/rwlock.rs
1// Copyright 2020 Shin Yoshida
2//
3// "LGPL-3.0-or-later OR Apache-2.0 OR BSD-2-Clause"
4//
5// This is part of spin-sync
6//
7// spin-sync is free software: you can redistribute it and/or modify
8// it under the terms of the GNU Lesser General Public License as published by
9// the Free Software Foundation, either version 3 of the License, or
10// (at your option) any later version.
11//
12// spin-sync is distributed in the hope that it will be useful,
13// but WITHOUT ANY WARRANTY; without even the implied warranty of
14// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15// GNU Lesser General Public License for more details.
16//
17// You should have received a copy of the GNU Lesser General Public License
18// along with spin-sync. If not, see <http://www.gnu.org/licenses/>.
19//
20//
21// Licensed under the Apache License, Version 2.0 (the "License");
22// you may not use this file except in compliance with the License.
23// You may obtain a copy of the License at
24//
25// http://www.apache.org/licenses/LICENSE-2.0
26//
27// Unless required by applicable law or agreed to in writing, software
28// distributed under the License is distributed on an "AS IS" BASIS,
29// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
30// See the License for the specific language governing permissions and
31// limitations under the License.
32//
33//
34// Redistribution and use in source and binary forms, with or without modification, are permitted
35// provided that the following conditions are met:
36//
37// 1. Redistributions of source code must retain the above copyright notice, this list of
38// conditions and the following disclaimer.
39// 2. Redistributions in binary form must reproduce the above copyright notice, this
40// list of conditions and the following disclaimer in the documentation and/or other
41// materials provided with the distribution.
42//
43// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
44// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
45// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
46// IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
47// INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
48// NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
49// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
50// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
51// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
52// POSSIBILITY OF SUCH DAMAGE.
53
54use std::cell::UnsafeCell;
55use std::fmt;
56use std::ops::{Deref, DerefMut};
57use std::panic::{RefUnwindSafe, UnwindSafe};
58use std::sync::atomic::{AtomicU64, Ordering};
59
60use crate::misc::{PhantomRwLock, PhantomRwLockReadGuard, PhantomRwLockWriteGuard};
61use crate::result::{LockResult, PoisonError, TryLockError, TryLockResult};
62
63/// A reader-writer lock.
64///
65/// It behaves like std::sync::RwLock except for using spinlock.
66/// What is more, the constructor is a const function; i.e. it is possible to declare
67/// `static RwLock<T>` variable as long as the inner data can be built statically.
68///
69/// This type of lock allows either a number of readers or at most one writer
70/// at the same time. Readers are allowed read-only access (shared access)
71/// to the underlying data while the writer is allowed read/write access
72/// (exclusive access.)
73///
74/// In comparison, a [`Mutex`] does not distinguish between readers and writers,
75/// therefore blocking any threads waiting for the lock to become available.
76/// An `RwLock` will allow any number of readers to acquire the lock as long as
77/// a writer is not holding the lock.
78///
79/// There is no priority difference with respect to the ordering of
80/// whether contentious readers or writers will acquire the lock first.
81///
82/// # Poisoning
83///
84/// An `RwLock`, like [`Mutex`], will become poisoned on a panic. Note, however,
85/// that an `RwLock` may only be poisoned if a panic occurs while it is locked
86/// exclusively (write mode). If a panic occurs in any reader, then the lock
87/// will not be poisoned.
88///
89/// [`Mutex`]: struct.Mutex.html
90///
91/// # Examples
92///
93/// Create a variable protected by a RwLock, increment it by 2 in worker threads
94/// at the same time, and check the variable was updated rightly.
95///
96/// ```
97/// use spin_sync::RwLock;
98/// use std::sync::Arc;
99/// use std::thread;
100///
101/// const WORKER_NUM: usize = 10;
102/// let mut handles = Vec::with_capacity(WORKER_NUM);
103///
104/// // We can declare static RwLock<usize> variable because RwLock::new is a const function.
105/// static RWLOCK: RwLock<usize> = RwLock::new(0);
106///
107/// // Create worker threads to increment the value by 2.
108/// for _ in 0..WORKER_NUM {
109/// let handle = thread::spawn(move || {
110/// let mut num = RWLOCK.write().unwrap();
111/// *num += 2;
112/// });
113///
114/// handles.push(handle);
115/// }
116///
117/// // Make sure the value is always a multiple of 2 even if some worker threads
118/// // are working.
119/// //
120/// // Enclosing the lock with `{}` to drop it before waiting for the worker
121/// // threads; otherwise, deadlocks could occur.
122/// {
123/// let num = RWLOCK.read().unwrap();
124/// assert_eq!(0, *num % 2);
125/// }
126///
127/// // Wait for the all worker threads are finished.
128/// for handle in handles {
129/// handle.join().unwrap();
130/// }
131///
132/// // Make sure the value is incremented by 2 times the worker count.
133/// let num = RWLOCK.read().unwrap();
134/// assert_eq!(2 * WORKER_NUM, *num);
135/// ```
136pub struct RwLock<T: ?Sized> {
 137    // Lock status word. Each bit represents as follows.
 138    // - The most significant bit: poison flag
 139    // - The 2nd most significant bit: exclusive write lock flag
 140    // - The others: shared read lock count
 141    // Use helper functions for lock state.
 142    lock: AtomicU64,
 143
 144    // Phantom marker; carries no data. See `crate::misc` for its purpose.
    _phantom: PhantomRwLock<T>,
 145    // The protected data. `UnsafeCell` is required because the guards hand
    // out `&T`/`&mut T` through a shared `&RwLock<T>`.
    data: UnsafeCell<T>,
 146}
147
148impl<T> RwLock<T> {
149 /// Creates a new instance in unlocked state ready for use.
150 ///
151 /// # Examples
152 ///
153 /// Declare as a static variable.
154 ///
155 /// ```
156 /// use spin_sync::RwLock;
157 ///
158 /// static LOCK: RwLock<i32> = RwLock::new(5);
159 /// ```
160 ///
161 /// Declare as a local variable.
162 ///
163 /// ```
164 /// use spin_sync::RwLock;
165 ///
166 /// let lock = RwLock::new(5);
167 /// ```
168 pub const fn new(t: T) -> Self {
169 Self {
170 lock: AtomicU64::new(INIT),
171 data: UnsafeCell::new(t),
172 _phantom: PhantomRwLock {},
173 }
174 }
175
176 /// Consumes this instance and returns the underlying data.
177 ///
178 /// Note that this method won't acquire any lock because we know there is
179 /// no other references to `self`.
180 ///
181 /// # Errors
182 ///
183 /// If another user panicked while holding the exclusive write lock of this instance,
184 /// this method call wraps the guard in an error and returns it.
185 ///
186 /// # Examples
187 ///
188 /// ```
189 /// use spin_sync::RwLock;
190 ///
191 /// let rwlock = RwLock::new(0);
192 /// assert_eq!(0, rwlock.into_inner().unwrap());
193 /// ```
194 pub fn into_inner(self) -> LockResult<T> {
195 // We know statically that there are no outstanding references to
196 // `self` so there's no need to lock the inner lock.
197 let is_err = self.is_poisoned();
198 let data = self.data.into_inner();
199
200 if is_err {
201 Err(PoisonError::new(data))
202 } else {
203 Ok(data)
204 }
205 }
206}
207
208impl<T: ?Sized> RwLock<T> {
209 /// The maximum shared read locks of each instance.
210 pub const MAX_READ_LOCK_COUNT: u64 = SHARED_LOCK_MASK;
211
212 /// Blocks the current thread until acquiring a shared read lock, and
213 /// returns an RAII guard object.
214 ///
215 /// The actual flow will be as follows.
216 ///
217 /// 1. User calls this method.
218 /// 1. Blocks until this thread acquires a shared read lock
219 /// (i.e. until the exclusive write lock is held.)
220 /// 1. Creates an RAII guard object.
221 /// 1. Wrapps the guard in `Result` and returns it. If this instance has been
222 /// poisoned, it is wrapped in an `Err`; otherwise wrapped in an `Ok`.
223 /// 1. User accesses to the underlying data to read through the guard.
224 /// (No write access is then.)
225 /// 1. The guard is dropped (falls out of scope) and the lock is released.
226 ///
227 /// # Errors
228 ///
229 /// If another user panicked while holding the exclusive write lock of this instance,
230 /// this method call wraps the guard in an error and returns it.
231 ///
232 /// # Panics
233 ///
234 /// This method panics if `MAX_READ_LOCK_COUNT` shared locks are.
235 ///
236 /// # Examples
237 ///
238 /// ```
239 /// use spin_sync::RwLock;
240 ///
241 /// let lock = RwLock::new(1);
242 ///
243 /// let guard1 = lock.read().unwrap();
244 /// assert_eq!(1, *guard1);
245 ///
246 /// let guard2 = lock.read().unwrap();
247 /// assert_eq!(1, *guard2);
248 /// ```
249 pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
250 loop {
251 match self.try_lock(acquire_shared_lock, is_locked_exclusively) {
252 s if is_locked_exclusively(s) => std::thread::yield_now(),
253 s if is_poisoned(s) => return Err(PoisonError::new(RwLockReadGuard::new(self))),
254 _ => return Ok(RwLockReadGuard::new(self)),
255 }
256 }
257 }
258
259 /// Attempts to acquire a shared read lock and returns an RAII guard object if succeeded.
260 ///
261 /// Behaves like [`read`] except for this method returns an error immediately
262 /// if the exclusive write lock is being held.
263 ///
264 /// This function does not block.
265 ///
266 /// The actual flow will be as follows.
267 ///
268 /// 1. User calls this method.
269 /// 1. Tries to acquire a shared read lock. If failed (i.e. if the exclusive write
270 /// lock is being held,) returns an error immediately and this flow is finished here.
271 /// 1. Creates an RAII guard object.
272 /// 1. Wrapps the guard in `Result` and returns it. If this instance has been poisoned,
273 /// it is wrapped in an `Err`; otherwise wrapped in an `Ok`.
274 /// 1. User accesses to the underlying data to read through the guard.
275 /// (No write access is at then.)
276 /// 1. The guard is dropped (falls out of scope) and the lock is released.
277 ///
278 /// [`read`]: #method.read
279 ///
280 /// # Panics
281 ///
282 /// This method panics if `MAX_READ_LOCK` shared read locks are.
283 ///
284 /// # Errors
285 ///
286 /// - If another user is holding the exclusive write lock,
287 /// [`TryLockError::WouldBlock`] is returned.
288 /// - If this method call succeeded to acquire a shared read lock, and if another
289 /// user had panicked while holding the exclusive write lock,
290 /// [`TryLockError::Poisoned`] is returned.
291 ///
292 /// [`TryLockError::WouldBlock`]: type.TryLockError.html
293 /// [`TryLockError::Poisoned`]: type.TryLockError.html
294 ///
295 /// # Examples
296 ///
297 /// ```
298 /// use spin_sync::RwLock;
299 ///
300 /// let lock = RwLock::new(1);
301 ///
302 /// let guard0 = lock.try_read().unwrap();
303 /// assert_eq!(1, *guard0);
304 ///
305 /// let guard1 = lock.try_read().unwrap();
306 /// assert_eq!(1, *guard1);
307 /// ```
308 pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<T>> {
309 match self.try_lock(acquire_shared_lock, is_locked_exclusively) {
310 s if is_locked_exclusively(s) => Err(TryLockError::WouldBlock),
311 s if is_poisoned(s) => Err(TryLockError::Poisoned(PoisonError::new(
312 RwLockReadGuard::new(self),
313 ))),
314 _ => Ok(RwLockReadGuard::new(self)),
315 }
316 }
317
318 /// Attempts to acquire the exclusive write lock and returns an RAII guard object
319 /// if succeeded.
320 ///
321 /// Behaves like [`write`] except for this method returns an error immediately
322 /// if any other lock (either read lock or write lock) is being held.
323 ///
324 /// This method does not block.
325 ///
326 /// The actual flow will be as follows.
327 ///
328 /// 1. User calls this method.
329 /// 1. Tries to acquire the exclusive write lock. If failed (i.e. if any other lock is
330 /// being held,) returns an error immediately and this flow is finished here.
331 /// 1. Creates an RAII guard object.
332 /// 1. Wraps the guard in `Result` and returns it. If this instance has been poisoned,
333 /// it is wrapped in an `Err`; otherwise wrapped in an `Ok`.
334 /// 1. User accesses to the underlying data to read/write through the guard.
335 /// (No other access is then.)
336 /// 1. The guard is dropped (falls out of scope) and the lock is released.
337 ///
338 /// [`write`]: #method.write
339 ///
340 /// # Errors
341 ///
342 /// - If another user is holding any other lock (either read lock or write lock),
343 /// [`TryLockError::WouldBlock`] is returned.
344 /// - If this method call succeeded to acquire the lock, and if another user had panicked
345 /// while holding the exclusive write lock, [`TryLockError::Poisoned`] is returned.
346 ///
347 /// [`TryLockError::WouldBlock`]: type.TryLockError.html
348 /// [`TryLockError::Poisoned`]: type.TryLockError.html
349 ///
350 /// # Examples
351 ///
352 /// ```
353 /// use spin_sync::RwLock;
354 ///
355 /// let lock = RwLock::new(1);
356 ///
357 /// let mut guard = lock.try_write().unwrap();
358 /// assert_eq!(1, *guard);
359 ///
360 /// *guard += 1;
361 /// assert_eq!(2, *guard);
362 ///
363 /// assert!(lock.try_write().is_err());
364 /// assert!(lock.try_read().is_err());
365 /// ```
366 pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<T>> {
367 match self.try_lock(acquire_exclusive_lock, is_locked) {
368 s if is_locked(s) => Err(TryLockError::WouldBlock),
369 s if is_poisoned(s) => Err(TryLockError::Poisoned(PoisonError::new(
370 RwLockWriteGuard::new(self),
371 ))),
372 _ => Ok(RwLockWriteGuard::new(self)),
373 }
374 }
375
376 /// Blocks the current thread until acquiring the exclusive write lock, and
377 /// returns an RAII guard object.
378 ///
379 /// The actual flow will be as follows.
380 ///
381 /// 1. User calls this method.
382 /// 1. Blocks until this thread acquires the exclusive write lock
383 /// (i.e. until any other lock is held.)
384 /// 1. Creates an RAII guard object.
385 /// 1. Wrapps the guard in Result and returns it. If this instance has been
386 /// poisoned, it is wrapped in an `Err`; otherwise wrapped in an `Ok`.
387 /// 1. User accesses to the underlying data to read/write through the guard.
388 /// (No other access is then.)
389 /// 1. The guard is dropped (falls out of scope) and the lock is released.
390 ///
391 /// # Errors
392 ///
393 /// If another user panicked while holding the exclusive write lock of this instance,
394 /// this method call wraps the guard in an error and returns it.
395 ///
396 /// # Examples
397 ///
398 /// ```
399 /// use spin_sync::RwLock;
400 ///
401 /// let lock = RwLock::new(0);
402 ///
403 /// let mut guard = lock.write().unwrap();
404 /// assert_eq!(0, *guard);
405 ///
406 /// *guard += 1;
407 /// assert_eq!(1, *guard);
408 ///
409 /// assert_eq!(true, lock.try_read().is_err());
410 /// assert_eq!(true, lock.try_write().is_err());
411 /// ```
412 pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
413 loop {
414 match self.try_lock(acquire_exclusive_lock, is_locked) {
415 s if is_locked(s) => std::thread::yield_now(),
416 s if is_poisoned(s) => return Err(PoisonError::new(RwLockWriteGuard::new(self))),
417 _ => return Ok(RwLockWriteGuard::new(self)),
418 }
419 }
420 }
421
422 /// Try to acquire lock and return the lock status before updated.
423 fn try_lock<AcqFn, LockCheckFn>(&self, acq_fn: AcqFn, lock_check_fn: LockCheckFn) -> LockStatus
424 where
425 AcqFn: Fn(LockStatus) -> LockStatus,
426 LockCheckFn: Fn(LockStatus) -> bool,
427 {
428 // Assume not poisoned, no user is holding the lock at first.
429 let mut expected = INIT;
430
431 loop {
432 // Try to acquire the lock.
433 let desired = acq_fn(expected);
434 let current = self
435 .lock
436 .compare_and_swap(expected, desired, Ordering::Acquire);
437
438 // Succeeded.
439 if current == expected {
440 return current;
441 }
442
443 // Locked.
444 if lock_check_fn(current) {
445 return current;
446 }
447
448 // - The first assumption was wrong.
449 // - Another user changes the lock status at the same time.
450 // Try again soon.
451 expected = current;
452 }
453 }
454
455 /// Determines whether the lock is poisoned or not.
456 ///
457 /// # Warning
458 ///
459 /// This function won't acquire any lock. If another thread is active,
460 /// the rwlock can become poisoned at any time. You should not trust a `false`
461 /// value for program correctness without additional synchronization.
462 ///
463 /// # Examples
464 ///
465 /// ```
466 /// use spin_sync::RwLock;
467 /// use std::sync::Arc;
468 /// use std::thread;
469 ///
470 /// let lock = Arc::new(RwLock::new(0));
471 /// assert_eq!(false, lock.is_poisoned());
472 ///
473 /// {
474 /// let lock = lock.clone();
475 ///
476 /// let _ = thread::spawn(move || {
477 /// // This panic while holding the lock (`_guard` is in scope) will poison
478 /// // the instance.
479 /// let _guard = lock.write().unwrap();
480 /// panic!("Poison here");
481 /// }).join();
482 /// }
483 ///
484 /// assert_eq!(true, lock.is_poisoned());
485 /// ```
486 pub fn is_poisoned(&self) -> bool {
487 let status = self.lock.load(Ordering::Relaxed);
488 is_poisoned(status)
489 }
490
491 /// Returns a mutable reference to the underlying data.
492 ///
493 /// Note that this method won't acquire any lock because we know there is
494 /// no other references to `self`.
495 ///
496 /// # Errors
497 ///
498 /// If another user panicked while holding the exclusive write lock of this instance,
499 /// this method call wraps the guard in an error and returns it.
500 ///
501 /// # Examples
502 ///
503 /// ```
504 /// use spin_sync::RwLock;
505 ///
506 /// let mut lock = RwLock::new(0);
507 /// *lock.get_mut().unwrap() = 10;
508 /// assert_eq!(*lock.read().unwrap(), 10);
509 /// ```
510 pub fn get_mut(&mut self) -> LockResult<&mut T> {
511 // We know statically that there are no other references to `self`, so
512 // there's no need to lock the inner lock.
513 let data = unsafe { &mut *self.data.get() };
514 if self.is_poisoned() {
515 Err(PoisonError::new(data))
516 } else {
517 Ok(data)
518 }
519 }
520}
521
522impl<T> From<T> for RwLock<T> {
523 fn from(t: T) -> Self {
524 RwLock::new(t)
525 }
526}
527
528impl<T: Default> Default for RwLock<T> {
529 fn default() -> Self {
530 RwLock::new(T::default())
531 }
532}
533
534impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
535 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
536 match self.try_read() {
537 Ok(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
538 Err(TryLockError::Poisoned(err)) => f
539 .debug_struct("RwLock")
540 .field("data", &&**err.get_ref())
541 .finish(),
542 Err(TryLockError::WouldBlock) => {
543 struct LockedPlaceholder;
544 impl fmt::Debug for LockedPlaceholder {
545 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
546 f.write_str("<locked>")
547 }
548 }
549
550 f.debug_struct("RwLock")
551 .field("data", &LockedPlaceholder)
552 .finish()
553 }
554 }
555 }
556}
557
558/// An RAII implementation of a "scoped shared read lock" of a RwLock.
559///
560/// When this instance is dropped (falls out of scope), the lock will be released.
561///
562/// The data protected by the RwLock can be accessed to read
563/// through this guard via its `Deref` implementation.
564///
565/// This instance is created by [`read`] and [`try_read`] methods on
566/// [`RwLock`].
567///
568/// [`read`]: struct.RwLock.html#method.read
569/// [`try_read`]: struct.RwLock.html#method.try_read
570/// [`RwLock`]: struct.RwLock.html
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, T: ?Sized + 'a> {
    // The rwlock this guard holds a shared read lock of.
    rwlock: &'a RwLock<T>,
    _phantom: PhantomRwLockReadGuard<'a, T>, // To implement !Send.
}
576
577impl<'a, T: ?Sized> RwLockReadGuard<'a, T> {
578 fn new(rwlock: &'a RwLock<T>) -> Self {
579 Self {
580 rwlock,
581 _phantom: Default::default(),
582 }
583 }
584}
585
impl<T: ?Sized> Deref for RwLockReadGuard<'_, T> {
    type Target = T;

    /// Gives read access to the protected data.
    fn deref(&self) -> &Self::Target {
        // SAFETY: this guard holds a shared read lock of `self.rwlock`, so no
        // exclusive (write) access to the data exists while `self` is alive.
        unsafe { &*self.rwlock.data.get() }
    }
}
593
594impl<T: ?Sized> Drop for RwLockReadGuard<'_, T> {
595 /// Make sure to release the shared read lock.
596 /// This function will never poison the rwlock.
597 fn drop(&mut self) {
598 // Assume not poisoned and no other user is holding the lock at first.
599 let mut expected = acquire_shared_lock(INIT);
600
601 loop {
602 let desired = release_shared_lock(expected);
603 let current = self
604 .rwlock
605 .lock
606 .compare_and_swap(expected, desired, Ordering::Release);
607
608 // Succeeded to release the lock.
609 if current == expected {
610 return;
611 }
612
613 // - Assumption was wrong.
614 // - Another user release the lock at the same time.
615 // Try again.
616 expected = current;
617 }
618 }
619}
620
621impl<T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'_, T> {
622 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
623 (**self).fmt(f)
624 }
625}
626
627impl<T: fmt::Debug> fmt::Debug for RwLockReadGuard<'_, T> {
628 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
629 f.debug_struct("RwLockReadGuard")
630 .field("lock", &self.rwlock)
631 .finish()
632 }
633}
634
635/// An RAII implementation of a "scoped exclusive write lock" of a RwLock.
636///
637/// When this instance is dropped (falls out of scope), the lock will be released.
638///
639/// The data protected by the RwLock can be accessed to read/write
640/// through this guard via its `Deref` and `DerefMut` implementation.
641///
642/// This instance is created by [`write`] and [`try_write`] methods on
643/// [`RwLock`].
644///
645/// [`write`]: struct.RwLock.html#method.write
646/// [`try_write`]: struct.RwLock.html#method.try_write
647/// [`RwLock`]: struct.RwLock.html
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, T: ?Sized + 'a> {
    // The rwlock this guard holds the exclusive write lock of.
    rwlock: &'a RwLock<T>,
    _phantom: PhantomRwLockWriteGuard<'a, T>, // To implement !Send.
}
653
654impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
655 fn new(rwlock: &'a RwLock<T>) -> Self {
656 Self {
657 rwlock,
658 _phantom: Default::default(),
659 }
660 }
661}
662
impl<T: ?Sized> Deref for RwLockWriteGuard<'_, T> {
    type Target = T;

    /// Gives read access to the protected data.
    fn deref(&self) -> &Self::Target {
        // SAFETY: this guard holds the exclusive write lock of `self.rwlock`,
        // so no other access to the data exists while `self` is alive.
        unsafe { &*self.rwlock.data.get() }
    }
}
670
impl<T: ?Sized> DerefMut for RwLockWriteGuard<'_, T> {
    /// Gives write access to the protected data.
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: this guard holds the exclusive write lock of `self.rwlock`,
        // so no other access to the data exists while `self` is alive.
        unsafe { &mut *self.rwlock.data.get() }
    }
}
676
677impl<T: ?Sized> Drop for RwLockWriteGuard<'_, T> {
678 /// Make sure to release the exclusive write lock.
679 ///
680 /// If this user panicked, poison the lock.
681 fn drop(&mut self) {
682 let old_status = self.rwlock.lock.load(Ordering::Relaxed);
683
684 let mut new_status = release_exclusive_lock(old_status);
685 if std::thread::panicking() {
686 new_status = set_poison_flag(new_status);
687 }
688
689 self.rwlock.lock.store(new_status, Ordering::Release);
690 }
691}
692
693impl<T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'_, T> {
694 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
695 (**self).fmt(f)
696 }
697}
698
699impl<T: fmt::Debug> fmt::Debug for RwLockWriteGuard<'_, T> {
700 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
701 f.debug_struct("RwLockWriteGuard")
702 .field("lock", &self.rwlock)
703 .finish()
704 }
705}
706
707//
708// Marker Traits
709//
710impl<T: ?Sized> UnwindSafe for RwLock<T> {}
711impl<T: ?Sized> RefUnwindSafe for RwLock<T> {}
712
713unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
714unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
715
716unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}
717unsafe impl<T: ?Sized + Sync> Sync for RwLockWriteGuard<'_, T> {}
718
719//
720// Helpers for Lock State
721//
722type LockStatus = u64;
723
724const INIT: LockStatus = 0;
725const SHARED_LOCK_MASK: LockStatus = 0x3fffffffffffffff;
726const EXCLUSIVE_LOCK_FLAG: LockStatus = 0x4000000000000000;
727const POISON_FLAG: LockStatus = 0x8000000000000000;
728
729#[must_use]
730#[inline]
731fn is_poisoned(s: LockStatus) -> bool {
732 (s & POISON_FLAG) != 0
733}
734
/// Returns `s` with the poison bit set; all the other bits are kept as they are.
#[must_use]
#[inline]
fn set_poison_flag(s: LockStatus) -> LockStatus {
    s | POISON_FLAG
}
740
741#[must_use]
742#[inline]
743fn is_locked(s: LockStatus) -> bool {
744 s & (!POISON_FLAG) != 0
745}
746
747#[must_use]
748#[inline]
749fn is_locked_exclusively(s: LockStatus) -> bool {
750 let ret = (s & EXCLUSIVE_LOCK_FLAG) != 0;
751
752 if ret {
753 debug_assert_eq!(0, s & SHARED_LOCK_MASK);
754 }
755
756 ret
757}
758
/// Returns `s` with the exclusive write lock flag set.
///
/// The caller must make sure no lock (neither shared nor exclusive) is held in `s`.
#[must_use]
#[inline]
fn acquire_exclusive_lock(s: LockStatus) -> LockStatus {
    debug_assert_eq!(false, is_locked(s));
    s | EXCLUSIVE_LOCK_FLAG
}
765
/// Returns `s` with the exclusive write lock flag cleared.
///
/// The caller must make sure the exclusive write lock is held in `s`.
#[must_use]
#[inline]
fn release_exclusive_lock(s: LockStatus) -> LockStatus {
    debug_assert_eq!(true, is_locked_exclusively(s));
    s & (!EXCLUSIVE_LOCK_FLAG)
}
772
773#[must_use]
774#[inline]
775fn count_shared_locks(s: LockStatus) -> u64 {
776 let ret = s & SHARED_LOCK_MASK;
777
778 if 0 < ret {
779 debug_assert_eq!(0, s & EXCLUSIVE_LOCK_FLAG);
780 }
781
782 ret
783}
784
/// Returns `s` with the shared read lock count incremented by one.
///
/// The caller must make sure the exclusive write lock is not held in `s`.
///
/// # Panics
///
/// Causes panic if the maximum count of shared locks are being held. (The
/// maximum number is 0x3fffffffffffffff.)
#[must_use]
#[inline]
fn acquire_shared_lock(s: LockStatus) -> LockStatus {
    debug_assert_eq!(false, is_locked_exclusively(s));

    if count_shared_locks(s) == SHARED_LOCK_MASK {
        panic!("rwlock maximum reader count exceeded");
    }

    s + 1
}
800
/// Returns `s` with the shared read lock count decremented by one.
///
/// The caller must make sure at least one shared read lock is held in `s`.
#[must_use]
#[inline]
fn release_shared_lock(s: LockStatus) -> LockStatus {
    debug_assert!(0 < count_shared_locks(s));
    s - 1
}
807
#[cfg(test)]
mod rwlock_tests {
    use super::*;

    // Single-threaded interleaving checks: a held write lock rejects every
    // other attempt, while held read locks only reject writers.
    #[test]
    fn try_many_times() {
        let lock = RwLock::new(0);

        // Try to write at first.
        {
            let mut guard0 = lock.try_write().unwrap();
            assert_eq!(0, *guard0);

            *guard0 += 1;
            assert_eq!(1, *guard0);

            // While the write guard is alive, both readers and writers fail.
            let result1 = lock.try_read();
            assert!(result1.is_err());

            let result2 = lock.try_write();
            assert!(result2.is_err());

            let result3 = lock.try_read();
            assert!(result3.is_err());

            let result4 = lock.try_write();
            assert!(result4.is_err());
        }

        // Try to read at first.
        {
            let guard0 = lock.try_read().unwrap();
            assert_eq!(1, *guard0);

            // While read guards are alive, writers fail but more readers succeed.
            let result1 = lock.try_write();
            assert!(result1.is_err());

            let guard2 = lock.try_read().unwrap();
            assert_eq!(1, *guard2);

            let result3 = lock.try_write();
            assert!(result3.is_err());

            let guard4 = lock.try_read().unwrap();
            assert_eq!(1, *guard4);

            let result5 = lock.try_write();
            assert!(result5.is_err());
        }
    }
}
859
#[cfg(test)]
mod lock_state_tests {
    use super::*;

    // The three bit fields of `LockStatus` must not overlap one another.
    #[test]
    fn flag_duplication() {
        assert_eq!(0, INIT & SHARED_LOCK_MASK);
        assert_eq!(0, INIT & EXCLUSIVE_LOCK_FLAG);
        assert_eq!(0, INIT & POISON_FLAG);
        assert_eq!(0, SHARED_LOCK_MASK & EXCLUSIVE_LOCK_FLAG);
        assert_eq!(0, SHARED_LOCK_MASK & POISON_FLAG);
        assert_eq!(0, EXCLUSIVE_LOCK_FLAG & POISON_FLAG);
    }

    // Together the bit fields must cover all 64 bits of the status word.
    #[test]
    fn flag_uses_all_bits() {
        // `u64::MAX` replaces the legacy `std::u64::MAX` module path.
        assert_eq!(
            u64::MAX,
            INIT | SHARED_LOCK_MASK | EXCLUSIVE_LOCK_FLAG | POISON_FLAG
        );
    }
}
881}