tracing_mutex/stdsync/tracing.rs

use std::fmt;
use std::ops::Deref;
use std::ops::DerefMut;
use std::sync;
use std::sync::LockResult;
use std::sync::OnceState;
use std::sync::PoisonError;
use std::sync::TryLockError;
use std::sync::TryLockResult;
use std::sync::WaitTimeoutResult;
use std::time::Duration;

use crate::BorrowedMutex;
use crate::LazyMutexId;
use crate::util::PrivateTraced;

#[cfg(has_std__sync__LazyLock)]
pub use lazy_lock::LazyLock;

#[cfg(has_std__sync__LazyLock)]
mod lazy_lock;

/// Wrapper for [`std::sync::Mutex`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
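///
/// # Examples
///
/// A minimal usage sketch; apart from the import path this mirrors plain [`std::sync::Mutex`]
/// usage:
///
/// ```
/// use tracing_mutex::stdsync::tracing::Mutex;
///
/// let mutex = Mutex::new(0);
///
/// *mutex.lock().unwrap() += 1;
/// assert_eq!(*mutex.lock().unwrap(), 1);
/// ```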
#[derive(Debug, Default)]
pub struct Mutex<T: ?Sized> {
    id: LazyMutexId,
    inner: sync::Mutex<T>,
}

/// Wrapper for [`std::sync::MutexGuard`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct and
/// the one it wraps.
pub struct MutexGuard<'a, T: ?Sized> {
    inner: sync::MutexGuard<'a, T>,
    _mutex: BorrowedMutex<'a>,
}

fn map_lockresult<T, I, F>(result: LockResult<I>, mapper: F) -> LockResult<T>
where
    F: FnOnce(I) -> T,
{
    match result {
        Ok(inner) => Ok(mapper(inner)),
        Err(poisoned) => Err(PoisonError::new(mapper(poisoned.into_inner()))),
    }
}

fn map_trylockresult<T, I, F>(result: TryLockResult<I>, mapper: F) -> TryLockResult<T>
where
    F: FnOnce(I) -> T,
{
    match result {
        Ok(inner) => Ok(mapper(inner)),
        Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock),
        Err(TryLockError::Poisoned(poisoned)) => {
            Err(PoisonError::new(mapper(poisoned.into_inner())).into())
        }
    }
}

impl<T> Mutex<T> {
    /// Create a new tracing mutex with the provided value.
    pub const fn new(t: T) -> Self {
        Self {
            inner: sync::Mutex::new(t),
            id: LazyMutexId::new(),
        }
    }
}

impl<T: ?Sized> Mutex<T> {
    /// Wrapper for [`std::sync::Mutex::lock`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
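    ///
    /// # Examples
    ///
    /// A small sketch of the cycle detection: the first block establishes the ordering `a`
    /// before `b`, and violating that ordering afterwards panics (this mirrors the unit test at
    /// the bottom of this file):
    ///
    /// ```should_panic
    /// use tracing_mutex::stdsync::tracing::Mutex;
    ///
    /// let a = Mutex::new(());
    /// let b = Mutex::new(());
    ///
    /// {
    ///     let _a = a.lock().unwrap();
    ///     let _b = b.lock().unwrap();
    /// }
    ///
    /// // Locking b before a now introduces a dependency cycle and panics.
    /// let _b = b.lock().unwrap();
    /// let _a = a.lock().unwrap();
    /// ```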
    #[track_caller]
    pub fn lock(&self) -> LockResult<MutexGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.lock();

        let mapper = |guard| MutexGuard {
            _mutex: mutex,
            inner: guard,
        };

        map_lockresult(result, mapper)
    }

    /// Wrapper for [`std::sync::Mutex::try_lock`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    #[track_caller]
    pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.try_lock();

        let mapper = |guard| MutexGuard {
            _mutex: mutex,
            inner: guard,
        };

        map_trylockresult(result, mapper)
    }

    /// Wrapper for [`std::sync::Mutex::is_poisoned`].
    pub fn is_poisoned(&self) -> bool {
        self.inner.is_poisoned()
    }

    /// Return a mutable reference to the underlying data.
    ///
    /// This method does not block as the locking is handled compile-time by the type system.
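    ///
    /// # Examples
    ///
    /// A minimal sketch, mirroring [`std::sync::Mutex::get_mut`]:
    ///
    /// ```
    /// use tracing_mutex::stdsync::tracing::Mutex;
    ///
    /// let mut mutex = Mutex::new(0);
    /// *mutex.get_mut().unwrap() = 10;
    /// assert_eq!(*mutex.lock().unwrap(), 10);
    /// ```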
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        self.inner.get_mut()
    }

    /// Unwrap the mutex and return its inner value.
    pub fn into_inner(self) -> LockResult<T>
    where
        T: Sized,
    {
        self.inner.into_inner()
    }
}

impl<T: ?Sized> PrivateTraced for Mutex<T> {
    fn get_id(&self) -> &crate::MutexId {
        &self.id
    }
}

impl<T> From<T> for Mutex<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<T: ?Sized> Deref for MutexGuard<'_, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T: ?Sized> DerefMut for MutexGuard<'_, T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'_, T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

impl<T: fmt::Display + ?Sized> fmt::Display for MutexGuard<'_, T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

/// Wrapper around [`std::sync::Condvar`].
///
/// Allows [`MutexGuard`] to be used with a `Condvar`. Unlike other structs in this module,
/// this wrapper does not add any additional dependency tracking or other overhead on top of the
/// primitive it wraps. All dependency tracking happens through the mutexes themselves.
///
/// # Panics
///
/// This struct does not add any panics over the base implementation of `Condvar`, but panics due to
/// dependency tracking may poison associated mutexes.
///
/// # Examples
///
/// ```
/// use std::sync::Arc;
/// use std::thread;
///
/// use tracing_mutex::stdsync::tracing::{Condvar, Mutex};
///
/// let pair = Arc::new((Mutex::new(false), Condvar::new()));
/// let pair2 = Arc::clone(&pair);
///
/// // Spawn a thread that will unlock the condvar
/// thread::spawn(move || {
///     let (lock, condvar) = &*pair2;
///     *lock.lock().unwrap() = true;
///     condvar.notify_one();
/// });
///
/// // Wait until the thread unlocks the condvar
/// let (lock, condvar) = &*pair;
/// let guard = lock.lock().unwrap();
/// let guard = condvar.wait_while(guard, |started| !*started).unwrap();
///
/// // Guard should read true now
/// assert!(*guard);
/// ```
#[derive(Debug, Default)]
pub struct Condvar(sync::Condvar);

impl Condvar {
    /// Creates a new condition variable which is ready to be waited on and notified.
    pub const fn new() -> Self {
        Self(sync::Condvar::new())
    }

    /// Wrapper for [`std::sync::Condvar::wait`].
    pub fn wait<'a, T>(&self, guard: MutexGuard<'a, T>) -> LockResult<MutexGuard<'a, T>> {
        let MutexGuard { _mutex, inner } = guard;

        map_lockresult(self.0.wait(inner), |inner| MutexGuard { _mutex, inner })
    }

    /// Wrapper for [`std::sync::Condvar::wait_while`].
    pub fn wait_while<'a, T, F>(
        &self,
        guard: MutexGuard<'a, T>,
        condition: F,
    ) -> LockResult<MutexGuard<'a, T>>
    where
        F: FnMut(&mut T) -> bool,
    {
        let MutexGuard { _mutex, inner } = guard;

        map_lockresult(self.0.wait_while(inner, condition), |inner| MutexGuard {
            _mutex,
            inner,
        })
    }

    /// Wrapper for [`std::sync::Condvar::wait_timeout`].
    pub fn wait_timeout<'a, T>(
        &self,
        guard: MutexGuard<'a, T>,
        dur: Duration,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> {
        let MutexGuard { _mutex, inner } = guard;

        map_lockresult(self.0.wait_timeout(inner, dur), |(inner, result)| {
            (MutexGuard { _mutex, inner }, result)
        })
    }

    /// Wrapper for [`std::sync::Condvar::wait_timeout_while`].
    pub fn wait_timeout_while<'a, T, F>(
        &self,
        guard: MutexGuard<'a, T>,
        dur: Duration,
        condition: F,
    ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)>
    where
        F: FnMut(&mut T) -> bool,
    {
        let MutexGuard { _mutex, inner } = guard;

        map_lockresult(
            self.0.wait_timeout_while(inner, dur, condition),
            |(inner, result)| (MutexGuard { _mutex, inner }, result),
        )
    }

    /// Wrapper for [`std::sync::Condvar::notify_one`].
    pub fn notify_one(&self) {
        self.0.notify_one();
    }

    /// Wrapper for [`std::sync::Condvar::notify_all`].
    pub fn notify_all(&self) {
        self.0.notify_all();
    }
}

/// Wrapper for [`std::sync::RwLock`].
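///
/// # Examples
///
/// A minimal usage sketch; only one guard is held at a time, so no lock ordering is established:
///
/// ```
/// use tracing_mutex::stdsync::tracing::RwLock;
///
/// let lock = RwLock::new(5);
///
/// assert_eq!(*lock.read().unwrap(), 5);
///
/// *lock.write().unwrap() += 1;
/// assert_eq!(*lock.read().unwrap(), 6);
/// ```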
#[derive(Debug, Default)]
pub struct RwLock<T: ?Sized> {
    id: LazyMutexId,
    inner: sync::RwLock<T>,
}

/// Hybrid wrapper for both [`std::sync::RwLockReadGuard`] and [`std::sync::RwLockWriteGuard`].
///
/// Please refer to [`RwLockReadGuard`] and [`RwLockWriteGuard`] for usable types.
pub struct TracingRwLockGuard<'a, L> {
    inner: L,
    _mutex: BorrowedMutex<'a>,
}

/// Wrapper around [`std::sync::RwLockReadGuard`].
pub type RwLockReadGuard<'a, T> = TracingRwLockGuard<'a, sync::RwLockReadGuard<'a, T>>;
/// Wrapper around [`std::sync::RwLockWriteGuard`].
pub type RwLockWriteGuard<'a, T> = TracingRwLockGuard<'a, sync::RwLockWriteGuard<'a, T>>;

impl<T> RwLock<T> {
    /// Create a new tracing `RwLock` with the provided value.
    pub const fn new(t: T) -> Self {
        Self {
            inner: sync::RwLock::new(t),
            id: LazyMutexId::new(),
        }
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Wrapper for [`std::sync::RwLock::read`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    #[track_caller]
    pub fn read(&self) -> LockResult<RwLockReadGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.read();

        map_lockresult(result, |inner| TracingRwLockGuard {
            inner,
            _mutex: mutex,
        })
    }

    /// Wrapper for [`std::sync::RwLock::write`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    #[track_caller]
    pub fn write(&self) -> LockResult<RwLockWriteGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.write();

        map_lockresult(result, |inner| TracingRwLockGuard {
            inner,
            _mutex: mutex,
        })
    }

    /// Wrapper for [`std::sync::RwLock::try_read`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    #[track_caller]
    pub fn try_read(&self) -> TryLockResult<RwLockReadGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.try_read();

        map_trylockresult(result, |inner| TracingRwLockGuard {
            inner,
            _mutex: mutex,
        })
    }

    /// Wrapper for [`std::sync::RwLock::try_write`].
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    #[track_caller]
    pub fn try_write(&self) -> TryLockResult<RwLockWriteGuard<'_, T>> {
        let mutex = self.id.get_borrowed();
        let result = self.inner.try_write();

        map_trylockresult(result, |inner| TracingRwLockGuard {
            inner,
            _mutex: mutex,
        })
    }

    /// Return a mutable reference to the underlying data.
    ///
    /// This method does not block as the locking is handled compile-time by the type system.
    pub fn get_mut(&mut self) -> LockResult<&mut T> {
        self.inner.get_mut()
    }

    /// Unwrap the lock and return its inner value.
    pub fn into_inner(self) -> LockResult<T>
    where
        T: Sized,
    {
        self.inner.into_inner()
    }
}

impl<T: ?Sized> PrivateTraced for RwLock<T> {
    fn get_id(&self) -> &crate::MutexId {
        &self.id
    }
}

impl<T> From<T> for RwLock<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<L, T> Deref for TracingRwLockGuard<'_, L>
where
    T: ?Sized,
    L: Deref<Target = T>,
{
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<L, T> DerefMut for TracingRwLockGuard<'_, L>
where
    T: ?Sized,
    L: Deref<Target = T> + DerefMut,
{
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.deref_mut()
    }
}

impl<L> fmt::Debug for TracingRwLockGuard<'_, L>
where
    L: fmt::Debug,
{
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

impl<L> fmt::Display for TracingRwLockGuard<'_, L>
where
    L: fmt::Display,
{
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

/// Wrapper around [`std::sync::Once`].
///
/// Refer to the [crate-level][`crate`] documentation for the differences between this struct
/// and the one it wraps.
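///
/// # Examples
///
/// A minimal usage sketch:
///
/// ```
/// use tracing_mutex::stdsync::tracing::Once;
///
/// static INIT: Once = Once::new();
///
/// let mut value = 0;
/// INIT.call_once(|| value = 1);
/// INIT.call_once(|| value = 2); // Skipped: initialization already ran.
/// assert_eq!(value, 1);
/// ```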
#[derive(Debug)]
pub struct Once {
    inner: sync::Once,
    mutex_id: LazyMutexId,
}

// New without default is intentional, `std::sync::Once` doesn't implement it either
#[allow(clippy::new_without_default)]
impl Once {
    /// Create a new `Once` value.
    pub const fn new() -> Self {
        Self {
            inner: sync::Once::new(),
            mutex_id: LazyMutexId::new(),
        }
    }

    /// Wrapper for [`std::sync::Once::call_once`].
    ///
    /// # Panics
    ///
    /// In addition to the panics that `Once` can cause, this method will panic if calling it
    /// introduces a cycle in the lock dependency graph.
    pub fn call_once<F>(&self, f: F)
    where
        F: FnOnce(),
    {
        self.mutex_id.with_held(|| self.inner.call_once(f))
    }

    /// Performs the same operation as [`call_once`][Once::call_once] except it ignores
    /// poisoning.
    ///
    /// # Panics
    ///
    /// This method participates in lock dependency tracking. If acquiring this lock introduces a
    /// dependency cycle, this method will panic.
    pub fn call_once_force<F>(&self, f: F)
    where
        F: FnOnce(&OnceState),
    {
        self.mutex_id.with_held(|| self.inner.call_once_force(f))
    }

    /// Returns `true` if some `call_once` has completed successfully.
    pub fn is_completed(&self) -> bool {
        self.inner.is_completed()
    }
}

impl PrivateTraced for Once {
    fn get_id(&self) -> &crate::MutexId {
        &self.mutex_id
    }
}

/// Wrapper for [`std::sync::OnceLock`].
///
/// The exact locking behaviour of [`std::sync::OnceLock`] is currently undefined, but it may
/// deadlock in the event of reentrant initialization attempts. This wrapper participates in
/// cycle detection as normal and will therefore panic in the event of reentrancy.
///
/// Most of this primitive's methods do not involve locking and as such are simply passed
/// through to the inner implementation.
///
/// # Examples
///
/// ```
/// use tracing_mutex::stdsync::tracing::OnceLock;
///
/// static LOCK: OnceLock<i32> = OnceLock::new();
/// assert!(LOCK.get().is_none());
///
/// std::thread::spawn(|| {
///     let value: &i32 = LOCK.get_or_init(|| 42);
///     assert_eq!(value, &42);
/// }).join().unwrap();
///
/// let value: Option<&i32> = LOCK.get();
/// assert_eq!(value, Some(&42));
/// ```
#[derive(Debug)]
pub struct OnceLock<T> {
    id: LazyMutexId,
    inner: sync::OnceLock<T>,
}

// N.B. this impl inlines everything that directly calls the inner implementation as there
// should be 0 overhead to doing so.
impl<T> OnceLock<T> {
    /// Creates a new empty cell.
    pub const fn new() -> Self {
        Self {
            id: LazyMutexId::new(),
            inner: sync::OnceLock::new(),
        }
    }

    /// Gets a reference to the underlying value.
    ///
    /// This method does not attempt to lock and therefore does not participate in cycle
    /// detection.
    #[inline]
    pub fn get(&self) -> Option<&T> {
        self.inner.get()
    }

    /// Gets a mutable reference to the underlying value.
    ///
    /// This method does not attempt to lock and therefore does not participate in cycle
    /// detection.
    #[inline]
    pub fn get_mut(&mut self) -> Option<&mut T> {
        self.inner.get_mut()
    }

    /// Sets the contents of this cell to `value`.
    ///
    /// As this method may block until initialization is complete, it participates in cycle
    /// detection.
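    ///
    /// # Examples
    ///
    /// A small sketch of the `Result` contract, which follows [`std::sync::OnceLock::set`]:
    ///
    /// ```
    /// use tracing_mutex::stdsync::tracing::OnceLock;
    ///
    /// let cell = OnceLock::new();
    /// assert_eq!(cell.set(92), Ok(()));
    /// assert_eq!(cell.set(62), Err(62));
    /// assert_eq!(cell.get(), Some(&92));
    /// ```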
    pub fn set(&self, value: T) -> Result<(), T> {
        self.id.with_held(|| self.inner.set(value))
    }

    /// Gets the contents of the cell, initializing it with `f` if the cell was empty.
    ///
    /// This method participates in cycle detection. Reentrancy is considered a cycle.
    pub fn get_or_init<F>(&self, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        self.id.with_held(|| self.inner.get_or_init(f))
    }

    /// Takes the value out of this `OnceLock`, moving it back to an uninitialized state.
    ///
    /// This method does not attempt to lock and therefore does not participate in cycle
    /// detection.
    #[inline]
    pub fn take(&mut self) -> Option<T> {
        self.inner.take()
    }

    /// Consumes the `OnceLock`, returning the wrapped value. Returns `None` if the cell was
    /// empty.
    ///
    /// This method does not attempt to lock and therefore does not participate in cycle
    /// detection.
    #[inline]
    pub fn into_inner(mut self) -> Option<T> {
        self.take()
    }
}

impl<T> PrivateTraced for OnceLock<T> {
    fn get_id(&self) -> &crate::MutexId {
        &self.id
    }
}

impl<T> Default for OnceLock<T> {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl<T: PartialEq> PartialEq for OnceLock<T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner
    }
}

impl<T: Eq> Eq for OnceLock<T> {}

impl<T: Clone> Clone for OnceLock<T> {
    fn clone(&self) -> Self {
        Self {
            id: LazyMutexId::new(),
            inner: self.inner.clone(),
        }
    }
}

impl<T> From<T> for OnceLock<T> {
    #[inline]
    fn from(value: T) -> Self {
        Self {
            id: LazyMutexId::new(),
            inner: sync::OnceLock::from(value),
        }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[test]
    fn test_mutex_usage() {
        let mutex = Arc::new(Mutex::new(0));

        assert_eq!(*mutex.lock().unwrap(), 0);
        *mutex.lock().unwrap() = 1;
        assert_eq!(*mutex.lock().unwrap(), 1);

        let mutex_clone = mutex.clone();

        let _guard = mutex.lock().unwrap();

        // Now try to trigger a WouldBlock error from another thread
        let handle = thread::spawn(move || {
            let result = mutex_clone.try_lock().unwrap_err();

            assert!(matches!(result, TryLockError::WouldBlock));
        });

        handle.join().unwrap();
    }

    #[test]
    fn test_rwlock_usage() {
        let rwlock = Arc::new(RwLock::new(0));

        assert_eq!(*rwlock.read().unwrap(), 0);
        assert_eq!(*rwlock.write().unwrap(), 0);
        *rwlock.write().unwrap() = 1;
        assert_eq!(*rwlock.read().unwrap(), 1);
        assert_eq!(*rwlock.write().unwrap(), 1);

        let rwlock_clone = rwlock.clone();

        let _read_lock = rwlock.read().unwrap();

        // Now try to trigger a WouldBlock error from another thread
        let handle = thread::spawn(move || {
            let write_result = rwlock_clone.try_write().unwrap_err();

            assert!(matches!(write_result, TryLockError::WouldBlock));

            // Should be able to get a read lock just fine.
            let _read_lock = rwlock_clone.read().unwrap();
        });

        handle.join().unwrap();
    }

    #[test]
    fn test_once_usage() {
        let once = Arc::new(Once::new());
        let once_clone = once.clone();

        assert!(!once.is_completed());

        let handle = thread::spawn(move || {
            assert!(!once_clone.is_completed());

            once_clone.call_once(|| {});

            assert!(once_clone.is_completed());
        });

        handle.join().unwrap();

        assert!(once.is_completed());
    }

    #[test]
    #[should_panic(expected = "Found cycle in mutex dependency graph")]
    fn test_detect_cycle() {
        let a = Mutex::new(());
        let b = Mutex::new(());

        let hold_a = a.lock().unwrap();
        let _ = b.lock();

        drop(hold_a);

        let _hold_b = b.lock().unwrap();
        let _ = a.lock();
    }
}