// lock_api/remutex.rs
// Copyright 2018 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
7
8use crate::{
9 mutex::{RawMutex, RawMutexFair, RawMutexTimed},
10 GuardNoSend,
11};
12use core::{
13 cell::{Cell, UnsafeCell},
14 fmt,
15 marker::PhantomData,
16 mem,
17 num::NonZeroUsize,
18 ops::Deref,
19 sync::atomic::{AtomicUsize, Ordering},
20};
21
22#[cfg(feature = "arc_lock")]
23use alloc::sync::Arc;
24#[cfg(feature = "arc_lock")]
25use core::mem::ManuallyDrop;
26#[cfg(feature = "arc_lock")]
27use core::ptr;
28
29#[cfg(feature = "owning_ref")]
30use owning_ref::StableAddress;
31
32#[cfg(feature = "serde")]
33use serde::{Deserialize, Deserializer, Serialize, Serializer};
34
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
/// thread-local variable.
///
/// # Safety
///
/// Implementations of this trait must ensure that no two active threads share
/// the same thread ID. However the ID of a thread that has exited can be
/// re-used since that thread is no longer active.
pub unsafe trait GetThreadId {
    /// Initial value.
    // A "non-constant" const item is a legacy way to supply an initialized value to downstream
    // static items. Can hopefully be replaced with `const fn new() -> Self` at some point.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self;

    /// Returns a non-zero thread ID which identifies the current thread of
    /// execution.
    ///
    /// The ID must be non-zero because `RawReentrantMutex` reserves 0 to mean
    /// "no owner".
    fn nonzero_thread_id(&self) -> NonZeroUsize;
}
56
/// A raw mutex type that wraps another raw mutex to provide reentrancy.
///
/// Although this has the same methods as the [`RawMutex`] trait, it does
/// not implement it, and should not be used in the same way, since this
/// mutex can successfully acquire a lock multiple times in the same thread.
/// Only use this when you know you want a raw mutex that can be locked
/// reentrantly; you probably want [`ReentrantMutex`] instead.
pub struct RawReentrantMutex<R, G> {
    // ID of the thread currently holding `mutex`, or 0 when unlocked.
    owner: AtomicUsize,
    // Number of nested acquisitions by the owning thread.
    lock_count: Cell<usize>,
    // The underlying, non-reentrant raw mutex.
    mutex: R,
    // Helper used to obtain the current thread's non-zero ID.
    get_thread_id: G,
}
70
// SAFETY: NOTE(review): `owner` is atomic and `lock_count` appears to be
// mutated only by the thread that holds the inner mutex — confirm against the
// lock/unlock paths below before relying on this elsewhere.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
73
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Initial value for an unlocked mutex.
    #[allow(clippy::declare_interior_mutable_const)]
    pub const INIT: Self = RawReentrantMutex {
        owner: AtomicUsize::new(0),
        lock_count: Cell::new(0),
        mutex: R::INIT,
        get_thread_id: G::INIT,
    };

    // Common acquisition path shared by `lock`, `try_lock` and the timed
    // variants. `try_lock` is the strategy used to acquire the inner mutex
    // when the current thread does not already own it. Returns `true` if the
    // lock is held by the current thread on return.
    #[inline]
    fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        if self.owner.load(Ordering::Relaxed) == id {
            // Re-entrant acquisition: we already hold the inner mutex, so just
            // bump the counter. Overflow is a hard error rather than a silent
            // wrap-around (which would cause a premature unlock later).
            self.lock_count.set(
                self.lock_count
                    .get()
                    .checked_add(1)
                    .expect("ReentrantMutex lock count overflow"),
            );
        } else {
            if !try_lock() {
                return false;
            }
            // NOTE(review): Relaxed looks sufficient here because `owner` is
            // only ever compared for equality against the reader's *own*
            // thread id; cross-thread publication of the protected state is
            // provided by the inner mutex acquisition — confirm.
            self.owner.store(id, Ordering::Relaxed);
            debug_assert_eq!(self.lock_count.get(), 0);
            self.lock_count.set(1);
        }
        true
    }

    /// Acquires this mutex, blocking if it's held by another thread.
    #[inline]
    pub fn lock(&self) {
        self.lock_internal(|| {
            self.mutex.lock();
            true
        });
    }

    /// Attempts to acquire this mutex without blocking. Returns `true`
    /// if the lock was successfully acquired and `false` otherwise.
    #[inline]
    pub fn try_lock(&self) -> bool {
        self.lock_internal(|| self.mutex.try_lock())
    }

    /// Unlocks this mutex. The inner mutex may not be unlocked if
    /// this mutex was acquired previously in the current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock(&self) {
        // The subtraction relies on the safety precondition: a holder always
        // has `lock_count >= 1`.
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Outermost unlock: clear ownership before releasing the inner
            // mutex so a new owner never observes our stale id.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock();
        }
    }

    /// Checks whether the mutex is currently locked.
    #[inline]
    pub fn is_locked(&self) -> bool {
        self.mutex.is_locked()
    }

    /// Checks whether the mutex is currently held by the current thread.
    #[inline]
    pub fn is_owned_by_current_thread(&self) -> bool {
        let id = self.get_thread_id.nonzero_thread_id().get();
        self.owner.load(Ordering::Relaxed) == id
    }
}
150
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
    /// Unlocks this mutex using a fair unlock protocol. The inner mutex
    /// may not be unlocked if this mutex was acquired previously in the
    /// current thread.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn unlock_fair(&self) {
        // Mirrors `unlock`, but releases the inner mutex with the fair
        // protocol on the outermost unlock.
        let lock_count = self.lock_count.get() - 1;
        self.lock_count.set(lock_count);
        if lock_count == 0 {
            // Clear ownership before releasing the inner mutex so a new owner
            // never observes our stale id.
            self.owner.store(0, Ordering::Relaxed);
            self.mutex.unlock_fair();
        }
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    ///
    /// # Safety
    ///
    /// This method may only be called if the mutex is held by the current thread.
    #[inline]
    pub unsafe fn bump(&self) {
        // Only yield when this is the outermost acquisition; with nested
        // holds the inner mutex cannot be released at all.
        if self.lock_count.get() == 1 {
            let id = self.owner.load(Ordering::Relaxed);
            // Mark the mutex unowned while the inner mutex may be handed to
            // another thread, then restore our ownership afterwards.
            self.owner.store(0, Ordering::Relaxed);
            self.lock_count.set(0);
            self.mutex.bump();
            self.owner.store(id, Ordering::Relaxed);
            self.lock_count.set(1);
        }
    }
}
190
191impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
192 /// Attempts to acquire this lock until a timeout is reached.
193 #[inline]
194 pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
195 self.lock_internal(|| self.mutex.try_lock_until(timeout))
196 }
197
198 /// Attempts to acquire this lock until a timeout is reached.
199 #[inline]
200 pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
201 self.lock_internal(|| self.mutex.try_lock_for(timeout))
202 }
203}
204
/// A mutex which can be recursively locked by a single thread.
///
/// This type is identical to `Mutex` except for the following points:
///
/// - Locking multiple times from the same thread will work correctly instead of
///   deadlocking.
/// - `ReentrantMutexGuard` does not give mutable references to the locked data.
///   Use a `RefCell` if you need this.
///
/// See [`Mutex`](crate::Mutex) for more details about the underlying mutex
/// primitive.
pub struct ReentrantMutex<R, G, T: ?Sized> {
    // Reentrancy-aware wrapper around the raw mutex `R`.
    raw: RawReentrantMutex<R, G>,
    // The protected data; `UnsafeCell` because access is synchronized manually
    // through `raw` rather than by the type system.
    data: UnsafeCell<T>,
}
220
// NOTE(review): `Sync` only requires `T: Send` (not `T: Sync`) — the guard is
// `!Send` (see `GuardNoSend` marker on the guard types), so `&T` is only ever
// handed out on the single thread that holds the lock.
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
    for ReentrantMutex<R, G, T>
{
}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
    for ReentrantMutex<R, G, T>
{
}
229
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex in an unlocked state ready for use.
    #[inline]
    pub const fn new(val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                // 0 is reserved to mean "no owner" (thread IDs are non-zero).
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: R::INIT,
                get_thread_id: G::INIT,
            },
        }
    }

    /// Consumes this mutex, returning the underlying data.
    #[inline]
    pub fn into_inner(self) -> T {
        self.data.into_inner()
    }
}
251
impl<R, G, T> ReentrantMutex<R, G, T> {
    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    #[inline]
    pub const fn from_raw(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        ReentrantMutex {
            data: UnsafeCell::new(val),
            raw: RawReentrantMutex {
                // Starts unowned and with a zero nesting count.
                owner: AtomicUsize::new(0),
                lock_count: Cell::new(0),
                mutex: raw_mutex,
                get_thread_id,
            },
        }
    }

    /// Creates a new reentrant mutex based on a pre-existing raw mutex and a
    /// helper to get the thread ID.
    ///
    /// This allows creating a reentrant mutex in a constant context on stable
    /// Rust.
    ///
    /// This method is a legacy alias for [`from_raw`](Self::from_raw).
    #[inline]
    pub const fn const_new(raw_mutex: R, get_thread_id: G, val: T) -> ReentrantMutex<R, G, T> {
        Self::from_raw(raw_mutex, get_thread_id, val)
    }
}
280
281impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
282 /// Creates a new `ReentrantMutexGuard` without checking if the lock is held.
283 ///
284 /// # Safety
285 ///
286 /// This method must only be called if the thread logically holds the lock.
287 ///
288 /// Calling this function when a guard has already been produced is undefined behaviour unless
289 /// the guard was forgotten with `mem::forget`.
290 #[inline]
291 pub unsafe fn make_guard_unchecked(&self) -> ReentrantMutexGuard<'_, R, G, T> {
292 ReentrantMutexGuard {
293 remutex: &self,
294 marker: PhantomData,
295 }
296 }
297
298 /// Acquires a reentrant mutex, blocking the current thread until it is able
299 /// to do so.
300 ///
301 /// If the mutex is held by another thread then this function will block the
302 /// local thread until it is available to acquire the mutex. If the mutex is
303 /// already held by the current thread then this function will increment the
304 /// lock reference count and return immediately. Upon returning,
305 /// the thread is the only thread with the mutex held. An RAII guard is
306 /// returned to allow scoped unlock of the lock. When the guard goes out of
307 /// scope, the mutex will be unlocked.
308 #[inline]
309 #[track_caller]
310 pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
311 self.raw.lock();
312 // SAFETY: The lock is held, as required.
313 unsafe { self.make_guard_unchecked() }
314 }
315
316 /// Attempts to acquire this lock.
317 ///
318 /// If the lock could not be acquired at this time, then `None` is returned.
319 /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
320 /// guard is dropped.
321 ///
322 /// This function does not block.
323 #[inline]
324 #[track_caller]
325 pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
326 if self.raw.try_lock() {
327 // SAFETY: The lock is held, as required.
328 Some(unsafe { self.make_guard_unchecked() })
329 } else {
330 None
331 }
332 }
333
334 /// Returns a mutable reference to the underlying data.
335 ///
336 /// Since this call borrows the `ReentrantMutex` mutably, no actual locking needs to
337 /// take place---the mutable borrow statically guarantees no locks exist.
338 #[inline]
339 pub fn get_mut(&mut self) -> &mut T {
340 unsafe { &mut *self.data.get() }
341 }
342
343 /// Checks whether the mutex is currently locked.
344 #[inline]
345 #[track_caller]
346 pub fn is_locked(&self) -> bool {
347 self.raw.is_locked()
348 }
349
350 /// Checks whether the mutex is currently held by the current thread.
351 #[inline]
352 #[track_caller]
353 pub fn is_owned_by_current_thread(&self) -> bool {
354 self.raw.is_owned_by_current_thread()
355 }
356
357 /// Forcibly unlocks the mutex.
358 ///
359 /// This is useful when combined with `mem::forget` to hold a lock without
360 /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
361 /// dealing with FFI.
362 ///
363 /// # Safety
364 ///
365 /// This method must only be called if the current thread logically owns a
366 /// `ReentrantMutexGuard` but that guard has be discarded using `mem::forget`.
367 /// Behavior is undefined if a mutex is unlocked when not locked.
368 #[inline]
369 #[track_caller]
370 pub unsafe fn force_unlock(&self) {
371 self.raw.unlock();
372 }
373
374 /// Returns the underlying raw mutex object.
375 ///
376 /// Note that you will most likely need to import the `RawMutex` trait from
377 /// `lock_api` to be able to call functions on the raw mutex.
378 ///
379 /// # Safety
380 ///
381 /// This method is unsafe because it allows unlocking a mutex while
382 /// still holding a reference to a `ReentrantMutexGuard`.
383 #[inline]
384 pub unsafe fn raw(&self) -> &R {
385 &self.raw.mutex
386 }
387
388 /// Returns a raw pointer to the underlying data.
389 ///
390 /// This is useful when combined with `mem::forget` to hold a lock without
391 /// the need to maintain a `ReentrantMutexGuard` object alive, for example
392 /// when dealing with FFI.
393 ///
394 /// # Safety
395 ///
396 /// You must ensure that there are no data races when dereferencing the
397 /// returned pointer, for example if the current thread logically owns a
398 /// `ReentrantMutexGuard` but that guard has been discarded using
399 /// `mem::forget`.
400 #[inline]
401 pub fn data_ptr(&self) -> *mut T {
402 self.data.get()
403 }
404
405 /// Creates a new `ArcReentrantMutexGuard` without checking if the lock is held.
406 ///
407 /// # Safety
408 ///
409 /// This method must only be called if the thread logically holds the lock.
410 ///
411 /// Calling this function when a guard has already been produced is undefined behaviour unless
412 /// the guard was forgotten with `mem::forget`.
413 #[cfg(feature = "arc_lock")]
414 #[inline]
415 pub unsafe fn make_arc_guard_unchecked(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
416 ArcReentrantMutexGuard {
417 remutex: self.clone(),
418 marker: PhantomData,
419 }
420 }
421
422 /// Acquires a reentrant mutex through an `Arc`.
423 ///
424 /// This method is similar to the `lock` method; however, it requires the `ReentrantMutex` to be inside of an
425 /// `Arc` and the resulting mutex guard has no lifetime requirements.
426 #[cfg(feature = "arc_lock")]
427 #[inline]
428 #[track_caller]
429 pub fn lock_arc(self: &Arc<Self>) -> ArcReentrantMutexGuard<R, G, T> {
430 self.raw.lock();
431 // SAFETY: locking guarantee is upheld
432 unsafe { self.make_arc_guard_unchecked() }
433 }
434
435 /// Attempts to acquire a reentrant mutex through an `Arc`.
436 ///
437 /// This method is similar to the `try_lock` method; however, it requires the `ReentrantMutex` to be inside
438 /// of an `Arc` and the resulting mutex guard has no lifetime requirements.
439 #[cfg(feature = "arc_lock")]
440 #[inline]
441 #[track_caller]
442 pub fn try_lock_arc(self: &Arc<Self>) -> Option<ArcReentrantMutexGuard<R, G, T>> {
443 if self.raw.try_lock() {
444 // SAFETY: locking guarantee is upheld
445 Some(unsafe { self.make_arc_guard_unchecked() })
446 } else {
447 None
448 }
449 }
450}
451
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
    /// Forcibly unlocks the mutex using a fair unlock protocol.
    ///
    /// This is useful when combined with `mem::forget` to hold a lock without
    /// the need to maintain a `ReentrantMutexGuard` object alive, for example when
    /// dealing with FFI.
    ///
    /// # Safety
    ///
    /// This method must only be called if the current thread logically owns a
    /// `ReentrantMutexGuard` but that guard has been discarded using `mem::forget`.
    /// Behavior is undefined if a mutex is unlocked when not locked.
    #[inline]
    #[track_caller]
    pub unsafe fn force_unlock_fair(&self) {
        self.raw.unlock_fair();
    }
}
470
471impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
472 /// Attempts to acquire this lock until a timeout is reached.
473 ///
474 /// If the lock could not be acquired before the timeout expired, then
475 /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
476 /// be unlocked when the guard is dropped.
477 #[inline]
478 #[track_caller]
479 pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
480 if self.raw.try_lock_for(timeout) {
481 // SAFETY: The lock is held, as required.
482 Some(unsafe { self.make_guard_unchecked() })
483 } else {
484 None
485 }
486 }
487
488 /// Attempts to acquire this lock until a timeout is reached.
489 ///
490 /// If the lock could not be acquired before the timeout expired, then
491 /// `None` is returned. Otherwise, an RAII guard is returned. The lock will
492 /// be unlocked when the guard is dropped.
493 #[inline]
494 #[track_caller]
495 pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
496 if self.raw.try_lock_until(timeout) {
497 // SAFETY: The lock is held, as required.
498 Some(unsafe { self.make_guard_unchecked() })
499 } else {
500 None
501 }
502 }
503
504 /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
505 ///
506 /// This method is similar to the `try_lock_for` method; however, it requires the `ReentrantMutex` to be
507 /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
508 #[cfg(feature = "arc_lock")]
509 #[inline]
510 #[track_caller]
511 pub fn try_lock_arc_for(
512 self: &Arc<Self>,
513 timeout: R::Duration,
514 ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
515 if self.raw.try_lock_for(timeout) {
516 // SAFETY: locking guarantee is upheld
517 Some(unsafe { self.make_arc_guard_unchecked() })
518 } else {
519 None
520 }
521 }
522
523 /// Attempts to acquire this lock until a timeout is reached, through an `Arc`.
524 ///
525 /// This method is similar to the `try_lock_until` method; however, it requires the `ReentrantMutex` to be
526 /// inside of an `Arc` and the resulting mutex guard has no lifetime requirements.
527 #[cfg(feature = "arc_lock")]
528 #[inline]
529 #[track_caller]
530 pub fn try_lock_arc_until(
531 self: &Arc<Self>,
532 timeout: R::Instant,
533 ) -> Option<ArcReentrantMutexGuard<R, G, T>> {
534 if self.raw.try_lock_until(timeout) {
535 // SAFETY: locking guarantee is upheld
536 Some(unsafe { self.make_arc_guard_unchecked() })
537 } else {
538 None
539 }
540 }
541}
542
543impl<R: RawMutex, G: GetThreadId, T: ?Sized + Default> Default for ReentrantMutex<R, G, T> {
544 #[inline]
545 fn default() -> ReentrantMutex<R, G, T> {
546 ReentrantMutex::new(Default::default())
547 }
548}
549
550impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
551 #[inline]
552 fn from(t: T) -> ReentrantMutex<R, G, T> {
553 ReentrantMutex::new(t)
554 }
555}
556
557impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
558 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
559 match self.try_lock() {
560 Some(guard) => f
561 .debug_struct("ReentrantMutex")
562 .field("data", &&*guard)
563 .finish(),
564 None => {
565 struct LockedPlaceholder;
566 impl fmt::Debug for LockedPlaceholder {
567 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
568 f.write_str("<locked>")
569 }
570 }
571
572 f.debug_struct("ReentrantMutex")
573 .field("data", &LockedPlaceholder)
574 .finish()
575 }
576 }
577 }
578}
579
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Serialize + ?Sized,
{
    // Serializes the protected data. Note: this blocks until the lock can be
    // acquired.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.lock().serialize(serializer)
    }
}
595
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
    R: RawMutex,
    G: GetThreadId,
    T: Deserialize<'de> + ?Sized,
{
    // Deserializes a `T` and wraps it in a fresh, unlocked mutex.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
    }
}
610
/// An RAII implementation of a "scoped lock" of a reentrant mutex. When this structure
/// is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The mutex this guard will unlock on drop.
    remutex: &'a ReentrantMutex<R, G, T>,
    // `GuardNoSend` keeps the guard on the locking thread; reentrant unlock
    // must happen on the thread that locked.
    marker: PhantomData<(&'a T, GuardNoSend)>,
}
622
// SAFETY: the guard only exposes `&T`, so sharing the guard across threads
// requires `T: Sync` (plus shareable raw mutex and thread-id helper).
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for ReentrantMutexGuard<'a, R, G, T>
{
}
627
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
    /// Returns a reference to the original `ReentrantMutex` object.
    pub fn remutex(s: &Self) -> &'a ReentrantMutex<R, G, T> {
        s.remutex
    }

    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = &s.remutex.raw;
        let data = f(unsafe { &*s.remutex.data.get() });
        // Forget `s` so its Drop impl does not unlock; ownership of the lock
        // transfers to the mapped guard returned below.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Some(data) => data,
            // Closure declined: hand the intact guard (still locked) back.
            None => return Err(s),
        };
        // Forget `s` so its Drop impl does not unlock; the lock moves into
        // the mapped guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned alongside arbitrary user data
    /// if the closure returns `Err`.
    ///
    /// This operation cannot fail as the `ReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `ReentrantMutexGuard::try_map_or_err(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map_or_err<U: ?Sized, F, E>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, (Self, E)>
    where
        F: FnOnce(&T) -> Result<&U, E>,
    {
        let raw = &s.remutex.raw;
        let data = match f(unsafe { &*s.remutex.data.get() }) {
            Ok(data) => data,
            // Closure failed: return the intact guard plus the caller's error.
            Err(e) => return Err((s, e)),
        };
        // Forget `s` so its Drop impl does not unlock; the lock moves into
        // the mapped guard.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    #[track_caller]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-acquire on every exit path, including when `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }
}
736
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    ReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    #[track_caller]
    pub fn unlock_fair(s: Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Forget the guard so its Drop impl does not unlock a second time.
        mem::forget(s);
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// The mutex is unlocked using a fair unlock protocol.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    #[track_caller]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-acquire on every exit path, including when `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This method is functionally equivalent to calling `unlock_fair` followed
    /// by `lock`, however it can be much more efficient in the case where there
    /// are no waiting threads.
    #[inline]
    #[track_caller]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
796
797impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
798 for ReentrantMutexGuard<'a, R, G, T>
799{
800 type Target = T;
801 #[inline]
802 fn deref(&self) -> &T {
803 unsafe { &*self.remutex.data.get() }
804 }
805}
806
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for ReentrantMutexGuard<'a, R, G, T>
{
    // Releases one level of the reentrant lock when the guard goes out of
    // scope.
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
818
819impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
820 for ReentrantMutexGuard<'a, R, G, T>
821{
822 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
823 fmt::Debug::fmt(&**self, f)
824 }
825}
826
827impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
828 for ReentrantMutexGuard<'a, R, G, T>
829{
830 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
831 (**self).fmt(f)
832 }
833}
834
// SAFETY: the guard derefs to data stored inside the mutex it borrows; that
// address cannot change while the guard is alive.
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for ReentrantMutexGuard<'a, R, G, T>
{
}
840
/// An RAII mutex guard returned by the `Arc` locking operations on `ReentrantMutex`.
///
/// This is similar to the `ReentrantMutexGuard` struct, except instead of using a reference to unlock the
/// `Mutex` it uses an `Arc<ReentrantMutex>`. This has several advantages, most notably that it has a `'static`
/// lifetime.
#[cfg(feature = "arc_lock")]
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ArcReentrantMutexGuard<R: RawMutex, G: GetThreadId, T: ?Sized> {
    // Owned handle to the mutex; keeps it alive and is unlocked on drop.
    remutex: Arc<ReentrantMutex<R, G, T>>,
    // `GuardNoSend` keeps the guard on the locking thread.
    marker: PhantomData<GuardNoSend>,
}
853
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Returns a reference to the `ReentrantMutex` this object is guarding, contained in its `Arc`.
    pub fn remutex(s: &Self) -> &Arc<ReentrantMutex<R, G, T>> {
        &s.remutex
    }

    /// Unlocks the mutex and returns the `Arc` that was held by the [`ArcReentrantMutexGuard`].
    #[inline]
    pub fn into_arc(s: Self) -> Arc<ReentrantMutex<R, G, T>> {
        // SAFETY: Skip our Drop impl and manually unlock the mutex.
        let s = ManuallyDrop::new(s);
        unsafe {
            s.remutex.raw.unlock();
            // Move the `Arc` out of `s`; `ManuallyDrop` guarantees the source
            // is never dropped, so the refcount is not decremented twice.
            ptr::read(&s.remutex)
        }
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is safe because `&mut` guarantees that there exist no other
    /// references to the data protected by the mutex.
    #[inline]
    #[track_caller]
    pub fn unlocked<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            s.remutex.raw.unlock();
        }
        // Re-acquire on every exit path, including when `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }
}
890
#[cfg(feature = "arc_lock")]
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ArcReentrantMutexGuard<R, G, T> {
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// This is functionally identical to the `unlock_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn unlock_fair(s: Self) {
        // Delegates to `into_arc_fair` and immediately drops the returned Arc.
        drop(Self::into_arc_fair(s));
    }

    /// Unlocks the mutex using a fair unlock protocol and returns the `Arc` that was held by the [`ArcReentrantMutexGuard`].
    #[inline]
    pub fn into_arc_fair(s: Self) -> Arc<ReentrantMutex<R, G, T>> {
        // SAFETY: Skip our Drop impl and manually unlock the mutex.
        let s = ManuallyDrop::new(s);
        unsafe {
            s.remutex.raw.unlock_fair();
            // Move the `Arc` out of `s`; `ManuallyDrop` guarantees the source
            // is never dropped, so the refcount is not decremented twice.
            ptr::read(&s.remutex)
        }
    }

    /// Temporarily unlocks the mutex to execute the given function.
    ///
    /// This is functionally identical to the `unlocked_fair` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn unlocked_fair<F, U>(s: &mut Self, f: F) -> U
    where
        F: FnOnce() -> U,
    {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.unlock_fair();
        }
        // Re-acquire on every exit path, including when `f` panics.
        defer!(s.remutex.raw.lock());
        f()
    }

    /// Temporarily yields the mutex to a waiting thread if there is one.
    ///
    /// This is functionally equivalent to the `bump` method on [`ReentrantMutexGuard`].
    #[inline]
    #[track_caller]
    pub fn bump(s: &mut Self) {
        // Safety: A ReentrantMutexGuard always holds the lock
        unsafe {
            s.remutex.raw.bump();
        }
    }
}
942
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Deref for ArcReentrantMutexGuard<R, G, T> {
    type Target = T;
    /// Gives shared access to the protected data for the guard's lifetime.
    #[inline]
    fn deref(&self) -> &T {
        // SAFETY: the existence of this guard proves the lock is held by the
        // current thread, so a shared reference may be handed out.
        let data = self.remutex.data.get();
        unsafe { &*data }
    }
}
951
#[cfg(feature = "arc_lock")]
impl<R: RawMutex, G: GetThreadId, T: ?Sized> Drop for ArcReentrantMutexGuard<R, G, T> {
    // Releases one level of the reentrant lock when the guard goes out of
    // scope; the `Arc` itself is dropped afterwards as usual.
    #[inline]
    fn drop(&mut self) {
        // Safety: A ReentrantMutexGuard always holds the lock.
        unsafe {
            self.remutex.raw.unlock();
        }
    }
}
962
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
///
/// The main difference between `MappedReentrantMutexGuard` and `ReentrantMutexGuard` is that the
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[clippy::has_significant_drop]
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
    // The raw mutex that is unlocked when this guard is dropped.
    raw: &'a RawReentrantMutex<R, G>,
    // Pointer to the mapped component of the locked data.
    data: *const T,
    marker: PhantomData<&'a T>,
}
977
// Safety: the guard only hands out `&T` (via `Deref`), never `&mut T`, so
// sharing it between threads is sound when `T: Sync`. `R` and `G` are
// reachable through the shared guard and therefore must also be `Sync`.
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}
982
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Makes a new `MappedReentrantMutexGuard` for a component of the locked data.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn map<U: ?Sized, F>(s: Self, f: F) -> MappedReentrantMutexGuard<'a, R, G, U>
    where
        F: FnOnce(&T) -> &U,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to borrow.
        let data = f(unsafe { &*s.data });
        // Skip `s`'s Drop impl (which would unlock): ownership of the lock is
        // transferred to the new guard returned below.
        mem::forget(s);
        MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        }
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned if the closure returns `None`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map<U: ?Sized, F>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
    where
        F: FnOnce(&T) -> Option<&U>,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to borrow.
        let data = match f(unsafe { &*s.data }) {
            Some(data) => data,
            // Mapping declined: hand the original guard (still locked) back.
            None => return Err(s),
        };
        // Skip `s`'s Drop impl (which would unlock): ownership of the lock is
        // transferred to the new guard returned below.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }

    /// Attempts to make a new `MappedReentrantMutexGuard` for a component of the
    /// locked data. The original guard is returned alongside arbitrary user data
    /// if the closure returns `Err`.
    ///
    /// This operation cannot fail as the `MappedReentrantMutexGuard` passed
    /// in already locked the mutex.
    ///
    /// This is an associated function that needs to be
    /// used as `MappedReentrantMutexGuard::try_map_or_err(...)`. A method would interfere with methods of
    /// the same name on the contents of the locked data.
    #[inline]
    pub fn try_map_or_err<U: ?Sized, F, E>(
        s: Self,
        f: F,
    ) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, (Self, E)>
    where
        F: FnOnce(&T) -> Result<&U, E>,
    {
        let raw = s.raw;
        // Safety: the guard holds the lock, so `s.data` is valid to borrow.
        let data = match f(unsafe { &*s.data }) {
            Ok(data) => data,
            // Mapping declined: hand back the original guard plus user error.
            Err(e) => return Err((s, e)),
        };
        // Skip `s`'s Drop impl (which would unlock): ownership of the lock is
        // transferred to the new guard returned below.
        mem::forget(s);
        Ok(MappedReentrantMutexGuard {
            raw,
            data,
            marker: PhantomData,
        })
    }
}
1070
impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
    MappedReentrantMutexGuard<'a, R, G, T>
{
    /// Unlocks the mutex using a fair unlock protocol.
    ///
    /// By default, mutexes are unfair and allow the current thread to re-lock
    /// the mutex before another has the chance to acquire the lock, even if
    /// that thread has been blocked on the mutex for a long time. This is the
    /// default because it allows much higher throughput as it avoids forcing a
    /// context switch on every mutex unlock. This can result in one thread
    /// acquiring a mutex many more times than other threads.
    ///
    /// However in some cases it can be beneficial to ensure fairness by forcing
    /// the lock to pass on to a waiting thread if there is one. This is done by
    /// using this method instead of dropping the `ReentrantMutexGuard` normally.
    #[inline]
    #[track_caller]
    pub fn unlock_fair(s: Self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock
        unsafe {
            s.raw.unlock_fair();
        }
        // Skip the guard's Drop impl, which would otherwise unlock again.
        mem::forget(s);
    }
}
1096
1097impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Deref
1098 for MappedReentrantMutexGuard<'a, R, G, T>
1099{
1100 type Target = T;
1101 #[inline]
1102 fn deref(&self) -> &T {
1103 unsafe { &*self.data }
1104 }
1105}
1106
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
    for MappedReentrantMutexGuard<'a, R, G, T>
{
    #[inline]
    fn drop(&mut self) {
        // Safety: A MappedReentrantMutexGuard always holds the lock.
        // Releases the lock with the default (unfair) protocol; callers that
        // want fairness use `unlock_fair`, which skips this Drop.
        unsafe {
            self.raw.unlock();
        }
    }
}
1118
1119impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
1120 for MappedReentrantMutexGuard<'a, R, G, T>
1121{
1122 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1123 fmt::Debug::fmt(&**self, f)
1124 }
1125}
1126
1127impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
1128 for MappedReentrantMutexGuard<'a, R, G, T>
1129{
1130 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1131 (**self).fmt(f)
1132 }
1133}
1134
#[cfg(feature = "owning_ref")]
// Safety: `Deref` returns `*self.data`, and the `data` pointer is never
// modified for the life of the guard, so the deref target has a stable address.
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
    for MappedReentrantMutexGuard<'a, R, G, T>
{
}