lilos_rwlock/lib.rs
//! A [read-write lock] for use with [`lilos`].
//!
//! There's a small family of related types in this here crate:
//!
//! - [`RwLock<T>`] contains some data of type `T` and allows either multiple
//!   shared references, or one exclusive reference, but not both simultaneously.
//! - [`SharedGuard<T>`] represents a shared reference to the data guarded by a
//!   `RwLock` and allows access to it (via `Deref`).
//! - [`ActionPermit<T>`] represents an _exclusive_ reference to the data
//!   guarded by a `RwLock`, but once you start doing something that can modify
//!   the data, you can't `await`, to ensure that cancellation won't corrupt the
//!   guarded data.
//! - [`ExclusiveGuard<T>`] allows arbitrary exclusive access, even across
//!   `await` points, but you have to promise the library that the data is
//!   inherently cancel-safe (by using the [`lilos::util::CancelSafe`] marker
//!   type).
//!
//! See the docs on [`RwLock`] for more details.
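//!
//! A quick usage sketch (inside a `lilos` task; assumes the usual
//! `core::pin::pin!` import, and the guarded value is hypothetical):
//!
//! ```ignore
//! let lock = pin!(RwLock::new(0_u32));
//! let lock = lock.into_ref();
//!
//! // Any number of shared claims may coexist:
//! let r = lock.lock_shared().await;
//! assert_eq!(*r, 0);
//! drop(r);
//!
//! // Exclusive access hands out an ActionPermit; mutation happens in a
//! // non-async closure, so cancellation can't interrupt it:
//! lock.lock_exclusive().await.perform(|n| *n += 1);
//! ```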
//!
//! [`lilos`]: https://docs.rs/lilos/
//! [read-write lock]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock

#![no_std]
#![warn(
    elided_lifetimes_in_paths,
    explicit_outlives_requirements,
    missing_debug_implementations,
    missing_docs,
    semicolon_in_expressions_from_macros,
    single_use_lifetimes,
    trivial_casts,
    trivial_numeric_casts,
    unreachable_pub,
    unsafe_op_in_unsafe_fn,
    unused_qualifications
)]

use core::cell::{Cell, UnsafeCell};
use core::ops::{Deref, DerefMut};
use core::pin::Pin;
use lilos_list::{List, Meta};
use lilos::util::CancelSafe;
use pin_project::pin_project;
use scopeguard::ScopeGuard;

/// A lock that guards data of type `T` and allows, at any one time, shared
/// access by many readers, or exclusive access by one writer, but not both.
///
/// This is similar to [`RefCell`][core::cell::RefCell], but allows programs to
/// block while waiting for access, and ensures _fairness_ among blocked
/// processes.
///
/// Fairness, in this case, means that access is granted in the order it is
/// requested.
///
/// - If the lock is currently claimed for shared access, _and_ nobody is
///   waiting for exclusive access, further attempts to lock it for shared
///   access will succeed immediately --- but an attempt to lock it for
///   exclusive access will need to wait.
///
/// - Once there is at least one exclusive access claim waiting, further
///   shared access claims (and additional exclusive access claims) now have to
///   wait behind it.
///
/// - Once all outstanding shared access claims are released, the first
///   exclusive access claim in the wait queue is granted.
///
/// - Once _that_ gets released, the next claim(s) in the queue are granted ---
///   a single exclusive claim, or any number of (consecutive) shared claims.
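///
/// For example, a sketch using the non-blocking variants (assuming a pinned
/// `lock`):
///
/// ```ignore
/// let r1 = lock.try_lock_shared().unwrap();      // no other claims: granted
/// let r2 = lock.try_lock_shared().unwrap();      // shared claims coexist
/// assert!(lock.try_lock_exclusive().is_err());   // readers outstanding
/// drop((r1, r2));
/// assert!(lock.try_lock_exclusive().is_ok());    // uncontended again
/// ```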
///
///
/// # Getting an `RwLock`
///
/// `RwLock` needs to be pinned to be useful, so you'll generally write:
///
/// ```ignore
/// let my_lock = pin!(RwLock::new(initial_data_here()));
/// // Drop &mut:
/// let my_lock = my_lock.into_ref();
///
/// let guard = my_lock.lock_shared().await;
/// guard.do_stuff();
/// ```
///
/// There is also the [`create_rwlock!`] macro that wraps up those first two
/// lines, if you prefer.
///
///
/// # Using the guarded data
///
/// [`RwLock::lock_shared`] places a _shared claim_ on the data, and returns a
/// future that will resolve when that claim can be granted. That produces a
/// [`SharedGuard`] that allows shared (`&` reference) access to the `T`
/// contained in the `RwLock`. (The non-blocking equivalent is
/// [`RwLock::try_lock_shared`].)
///
/// [`RwLock::lock_exclusive`] places an _exclusive claim_ on the data, and
/// returns a future that will resolve when that claim can be granted. That
/// produces an [`ActionPermit`] that allows exclusive (`&mut` reference) access
/// to the `T` --- but without the ability to `await`. See
/// [`ActionPermit::perform`] for information on how this works. (The
/// non-blocking equivalent is [`RwLock::try_lock_exclusive`].)
///
/// In general, holding a `&mut` to guarded data across `await` points means
/// that there's a risk your future will be cancelled and leave the guarded data
/// in an invalid state. (This problem exists with all async Mutex-like data
/// structures in Rust.) You can still do it, as long as you're very careful not
/// to `await` while the guarded data is in an unacceptable state.
///
/// To do this, you wrap the data in a [`CancelSafe`][lilos::util::CancelSafe]
/// wrapper, which causes [`RwLock::lock_exclusive_assuming_cancel_safe`] to
/// become available. This returns an [`ExclusiveGuard`] which acts like the
/// mutex guards in `std`.
#[derive(Debug)]
#[pin_project]
pub struct RwLock<T> {
    #[pin]
    lock: LockImpl,
    contents: UnsafeCell<T>,
}

impl<T> RwLock<T> {
    /// Creates a future that will resolve when it successfully locks `self` for
    /// shared access. Until then, the future will remain pending (i.e. block).
    ///
    /// Shared access can be obtained when no other code currently has exclusive
    /// access, or is waiting for exclusive access.
    ///
    /// # Cancellation
    ///
    /// Cancel-safe but affects your position in line, to maintain fairness.
    ///
    /// If you drop the returned future before it resolves...
    /// - If it had not successfully locked, nothing happens.
    /// - If it had, the lock is released.
    ///
    /// Dropping the future and re-calling `lock_shared` bumps the caller to the
    /// back of the priority list, to maintain fairness. Otherwise, the result
    /// is indistinguishable.
    pub async fn lock_shared(self: Pin<&Self>) -> SharedGuard<'_, T> {
        let lock = self.project_ref().lock.lock_shared().await;
        SharedGuard {
            _lock: lock,
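            // Safety: `lock` records a granted shared claim, which keeps any
            // exclusive claim from being granted while this reference lives.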
            contents: unsafe { &*self.contents.get() },
        }
    }

    /// Attempts to lock `self` for shared access.
    ///
    /// This will succeed if there are either no other claims on the lock, or
    /// only shared claims with nobody waiting for exclusive access. In this
    /// case, it will return `Ok(guard)`, where `guard` allows shared access to
    /// the guarded data.
    ///
    /// If there is an outstanding exclusive claim on the lock, or anyone
    /// waiting in line for access, this will fail with `Err(InUse)`.
    pub fn try_lock_shared(
        self: Pin<&Self>,
    ) -> Result<SharedGuard<'_, T>, InUse> {
        let lock = self.project_ref().lock.try_lock_shared()?;
        Ok(SharedGuard {
            _lock: lock,
            contents: unsafe { &*self.contents.get() },
        })
    }

    /// Creates a future that will resolve when it successfully locks `self` for
    /// exclusive access. Until then, the future will remain pending (i.e.
    /// block).
    ///
    /// Exclusive access can be obtained when other code currently has neither
    /// shared nor exclusive access.
    ///
    /// This returns an [`ActionPermit`] that lets you perform non-`async`
    /// actions on the guarded data. If you need a version that can be held
    /// across an `await` point, and you've read and understood the caveats
    /// described on the [`CancelSafe`] type, then have a look at
    /// [`Self::lock_exclusive_assuming_cancel_safe`].
    ///
    /// # Cancellation
    ///
    /// Cancel-safe but affects your position in line, to maintain fairness.
    ///
    /// If you drop the returned future before it resolves...
    /// - If it had not successfully locked, nothing happens.
    /// - If it had, the lock is released.
    ///
    /// Dropping the future and re-calling `lock_exclusive` bumps the caller to
    /// the back of the priority list, to maintain fairness. Otherwise, the
    /// result is indistinguishable.
    pub async fn lock_exclusive(self: Pin<&Self>) -> ActionPermit<'_, T> {
        let lock = self.project_ref().lock.lock_exclusive().await;
        ActionPermit {
            _lock: lock,
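            // Safety: `lock` records a granted exclusive claim, which keeps
            // all other claims from being granted while this reference lives.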
            contents: unsafe { &mut *self.contents.get() },
        }
    }

    /// Attempts to lock `self` for exclusive access.
    ///
    /// This will succeed if there are no other claims on the lock of any kind.
    /// In this case, it will return `Ok(permit)`, where `permit` is an
    /// [`ActionPermit`] allowing you to take non-`async` actions on the guarded
    /// data.
    ///
    /// If there is any outstanding claim on the lock, shared or exclusive, this
    /// will fail with `Err(InUse)`.
    ///
    /// If you need an exclusive lock that can be held across `await` points,
    /// and you have read and understood the caveats described on the
    /// [`CancelSafe`] type, then have a look at
    /// [`Self::try_lock_exclusive_assuming_cancel_safe`].
    pub fn try_lock_exclusive(
        self: Pin<&Self>,
    ) -> Result<ActionPermit<'_, T>, InUse> {
        let lock = self.project_ref().lock.try_lock_exclusive()?;
        Ok(ActionPermit {
            _lock: lock,
            contents: unsafe { &mut *self.contents.get() },
        })
    }

    /// Returns an `RwLock` containing `contents`.
    ///
    /// The result needs to be pinned to be useful, so you'll generally write:
    ///
    /// ```ignore
    /// let my_rwlock = pin!(RwLock::new(contents));
    /// let my_rwlock = my_rwlock.into_ref();
    /// ```
    pub const fn new(contents: T) -> Self {
        Self {
            lock: LockImpl {
                readers: Cell::new(0),
                waiters: List::new(),
            },
            contents: UnsafeCell::new(contents),
        }
    }
}

#[derive(Debug)]
#[pin_project]
struct LockImpl {
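    /// Claim count. Positive values count granted shared claims, zero means
    /// unlocked, and negative values count granted exclusive claims (there
    /// can be more than one because of `map_split`).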
    readers: Cell<isize>,
    #[pin]
    waiters: List<Meta<Access>>,
}

impl LockImpl {
    async fn lock_shared(self: Pin<&Self>) -> SharedInternal<'_> {
        // Fast path:
        if let Ok(guard) = self.try_lock_shared() {
            return guard;
        }

        // Add ourselves to the wait list. Unlike `lock_exclusive` below, we
        // have no need to set a cancellation trap here, because simply getting
        // evicted from the wait list doesn't grant us any access we'd need to
        // give up -- the case where access _was_ granted is handled by the
        // cleanup action below.
        self.project_ref()
            .waiters
            .join_with_cleanup(Meta(Access::Shared), || {
                // The release routine advances the reader count _for us_ so
                // that our access is assured even if we're not promptly polled.
                // This means we have to reverse that change if we're cancelled.
                unsafe {
                    self.release_shared();
                }
            })
            .await;

        // Having been detached from the list means that we are among the
        // observers who have been granted shared access -- otherwise we'd be
        // left in the queue.
        SharedInternal { lock: self }
    }

    fn try_lock_shared(self: Pin<&Self>) -> Result<SharedInternal<'_>, InUse> {
        // Interestingly, this next check appears to be the entire difference
        // between a writer-biased and a reader-biased lock. Are reader-biased
        // locks a thing people need? If so this could be configurable...
        if !self.waiters.is_empty() {
            // If there's anything on the list, we can't lock it right now.
            return Err(InUse);
        }

        let r = self.readers.get();
        if (0..isize::MAX).contains(&r) {
            self.readers.set(r + 1);
            Ok(SharedInternal { lock: self })
        } else {
            Err(InUse)
        }
    }

    fn process_exclusive_cancellation(self: Pin<&Self>) {
        if self.readers.get() >= 0 {
            // Wake any number of pending _shared_ users and record their
            // count, to keep them from getting scooped before they're
            // polled.
            self.project_ref().waiters.wake_while(|Meta(access)| {
                if access == &Access::Shared {
                    let r = self.readers.get();
                    // We do not want to overflow the reader count during this
                    // wake-frenzy.
                    if r < isize::MAX {
                        self.readers.set(r + 1);
                        return true;
                    }
                }
                false
            });
        }
    }

    async fn lock_exclusive(self: Pin<&Self>) -> ExclusiveInternal<'_> {
        // Fast path...
        if let Ok(permit) = self.try_lock_exclusive() {
            return permit;
        }

        // Cancellation behavior here is slightly subtle because we can have
        // effects even _before_ we're detached, by blocking processing of a
        // sequence of shared waiters. So we set a trap: on drop, we will invoke
        // the `process_exclusive_cancellation` hook to restore the invariant
        // that any sequence of shared claims is coalesced.
        //
        // We do not need to do this once we've been detached, so we disarm the
        // trap in both places that can run after detach:
        //
        // 1. The cleanup hook, for the case where we're detached but then
        //    dropped before being polled.
        // 2. The code after the `await`, for the case where we're detached and
        //    then polled.
        let mut trap = Some(scopeguard::guard((), |_| {
            self.process_exclusive_cancellation();
        }));

        self.project_ref()
            .waiters
            .join_with_cleanup(Meta(Access::Exclusive), || {
                // Disarm the trap.
                ScopeGuard::into_inner(trap.take().unwrap());
                // The release routine decrements the reader count _for us_ so
                // that our access is assured even if we're not promptly polled.
                // This means we have to reverse that change if we're cancelled.
                unsafe {
                    self.release_exclusive();
                }
            })
            .await;
        // Disarm the trap.
        ScopeGuard::into_inner(trap.take().unwrap());

        // The fact that we have been detached means that we have exclusive
        // control.
        ExclusiveInternal { lock: self }
    }

    fn try_lock_exclusive(
        self: Pin<&Self>,
    ) -> Result<ExclusiveInternal<'_>, InUse> {
        let r = self.readers.get();
        if r == 0 {
            self.readers.set(-1);
            Ok(ExclusiveInternal { lock: self })
        } else {
            Err(InUse)
        }
    }

    // # Safety
    //
    // Must only be called in contexts where one previously obtained exclusive
    // access permit is being retired. Use in any other context may cause the
    // lock to grant access to another observer, resulting in aliasing.
    unsafe fn release_exclusive(self: Pin<&Self>) {
        let prev = self.readers.get();
        debug_assert!(
            prev < 0,
            "release_exclusive used with no exclusive lock outstanding"
        );
        self.readers.set(prev + 1);

        if prev == -1 {
            // We are the last exclusive lock being released. (Yes, there can be
            // more than one exclusive lock because of map_split.)

            let p = self.project_ref();

            // Wake a _single_ exclusive lock attempt if one exists at the head
            // of the queue.
            if p.waiters.wake_head_if(|Meta(access)| access == &Access::Exclusive) {
                // Record it, to keep it from getting scooped.
                self.readers.set(-1);
            } else {
                // Wake any number of pending _shared_ users and record their
                // count, to keep them from getting scooped before they're
                // polled.
                p.waiters.wake_while(|Meta(access)| {
                    if access == &Access::Shared {
                        let r = self.readers.get();
                        // We do not want to overflow the reader count during
                        // this wake-frenzy.
                        if r < isize::MAX {
                            self.readers.set(r + 1);
                            return true;
                        }
                    }
                    false
                });
            }
        }
    }

    // # Safety
    //
    // Must only be called in contexts where one previously obtained shared
    // access permit is being retired. Use in any other context may cause the
    // lock to grant exclusive access to another observer, resulting in
    // aliasing.
    unsafe fn release_shared(self: Pin<&Self>) {
        let prev = self.readers.get();
        debug_assert!(
            prev > 0,
            "release_shared used with no shared lock outstanding"
        );
        self.readers.set(prev - 1);
        match prev {
            1 => {
                // It's our job to try and wake a writer, if one exists. (The
                // list should, at this point, either be empty or have an
                // exclusive claim at its head, followed by an arbitrary mix of
                // claims.)
                if self
                    .project_ref()
                    .waiters
                    .wake_head_if(|Meta(access)| access == &Access::Exclusive)
                {
                    // Found one. Record its count to ensure that nobody scoops
                    // it before it gets polled.
                    self.readers.set(-1);
                }
            }
            isize::MAX => {
                // We somehow filled up the reader count, so it's our job to
                // attempt to wake a _reader,_ weirdly.
                if self
                    .project_ref()
                    .waiters
                    .wake_head_if(|Meta(access)| access == &Access::Shared)
                {
                    // Set the count back to saturated.
                    self.readers.set(isize::MAX);
                }
            }
            _ => (),
        }
    }
}

/// Internal type for marking the sort of access a node is after.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum Access {
    /// An observer who wants shared access.
    Shared,
    /// An observer who wants exclusive access.
    Exclusive,
}

#[derive(Debug)]
struct SharedInternal<'a> {
    lock: Pin<&'a LockImpl>,
}

impl Clone for SharedInternal<'_> {
    fn clone(&self) -> Self {
        let prev = self.lock.readers.get();
        if prev == isize::MAX {
            panic!("reader count overflow");
        }
        self.lock.readers.set(prev + 1);
        Self { lock: self.lock }
    }
}

impl Drop for SharedInternal<'_> {
    fn drop(&mut self) {
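        // Safety: this SharedInternal represents exactly one granted shared
        // claim, which we are retiring here, satisfying release_shared's
        // contract.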
        unsafe {
            self.lock.release_shared();
        }
    }
}

/// Resource object that grants shared access to guarded data of type `T`.
#[derive(Debug)]
#[must_use = "simply dropping SharedGuard unlocks the RwLock immediately"]
pub struct SharedGuard<'a, T: ?Sized> {
    _lock: SharedInternal<'a>,
    contents: &'a T,
}

impl<'a, T> SharedGuard<'a, T>
where
    T: ?Sized,
{
    /// Converts a `SharedGuard<T>` into a `SharedGuard<U>` by applying a
    /// projection function to select a sub-component of the guarded data.
    ///
    /// The `SharedGuard<U>` this produces will keep the whole `T` locked for
    /// shared access, but won't be able to access anything but the chosen
    /// sub-component `U`.
    ///
    /// By transforming an existing reader, this leaves the reader count
    /// unchanged.
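    ///
    /// For example, a sketch assuming guarded data with a (hypothetical)
    /// `name: String` field:
    ///
    /// ```ignore
    /// let guard = lock.lock_shared().await;
    /// let name = SharedGuard::map(guard, |data| &data.name);
    /// // `name` derefs to the `String`, and the whole `T` stays locked.
    /// ```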
    pub fn map<U>(guard: Self, f: impl FnOnce(&T) -> &U) -> SharedGuard<'a, U>
    where
        U: ?Sized,
    {
        SharedGuard {
            _lock: guard._lock,
            contents: f(guard.contents),
        }
    }

    /// Converts a `SharedGuard<T>` into a pair `SharedGuard<U>` and
    /// `SharedGuard<V>` by applying a projection function to select two
    /// sub-components of the guarded data.
    ///
    /// Both of the returned guards will keep the whole `T` locked for shared
    /// access, but won't be able to access anything but the chosen
    /// sub-components `U` and `V` (respectively).
    ///
    /// This increases the total reader count of the lock by 1.
    ///
    /// # Panics
    ///
    /// There is a maximum number of supported readers on a lock, which is at
    /// least `usize::MAX/2`. This number is very difficult to reach in
    /// non-pathological code. However, if you reach it by splitting a reader,
    /// this will panic.
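    ///
    /// For example, a sketch assuming guarded data with (hypothetical) `left`
    /// and `right` fields:
    ///
    /// ```ignore
    /// let guard = lock.lock_shared().await;
    /// let (left, right) =
    ///     SharedGuard::map_split(guard, |d| (&d.left, &d.right));
    /// ```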
    pub fn map_split<U, V>(
        guard: Self,
        f: impl FnOnce(&T) -> (&U, &V),
    ) -> (SharedGuard<'a, U>, SharedGuard<'a, V>)
    where
        U: ?Sized,
        V: ?Sized,
    {
        let (u, v) = f(guard.contents);
        (
            SharedGuard {
                _lock: guard._lock.clone(),
                contents: u,
            },
            SharedGuard {
                _lock: guard._lock,
                contents: v,
            },
        )
    }
}

impl<T: ?Sized> Deref for SharedGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.contents
    }
}

/// Error produced by [`RwLock::try_lock_shared`] and related non-blocking
/// locking operations.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct InUse;

/// Internal implementation of both `ActionPermit` and `ExclusiveGuard`,
/// ensuring that we only need one copy of the `Drop` impl and contents
/// accessors, and that `ActionPermit` can be conveniently destructured to turn
/// it into an `ExclusiveGuard`. (Since having a `Drop` impl would otherwise
/// prevent this.)
#[derive(Debug)]
#[must_use = "internal implementation issue"]
struct ExclusiveInternal<'a> {
    lock: Pin<&'a LockImpl>,
}

impl Clone for ExclusiveInternal<'_> {
    fn clone(&self) -> Self {
        let prev = self.lock.readers.get();
        if prev == isize::MIN {
            panic!("writer count overflow");
        }
        self.lock.readers.set(prev - 1);
        Self { lock: self.lock }
    }
}

impl Drop for ExclusiveInternal<'_> {
    fn drop(&mut self) {
        // Safety: because we exist, we know the lock is locked-exclusive, and
        // that no other code thinks _they_ have locked it exclusive, so we can
        // use release_exclusive trivially.
        unsafe {
            self.lock.release_exclusive();
        }
    }
}

/// Permit returned by [`RwLock::lock_exclusive`] or
/// [`RwLock::try_lock_exclusive`] that indicates that the holder has exclusive
/// access to the lock, and that permits non-`async` alterations to the guarded
/// data.
///
/// See [`ActionPermit::perform`].
#[derive(Debug)]
#[must_use = "simply dropping ActionPermit unlocks the RwLock immediately"]
pub struct ActionPermit<'a, T> {
    _lock: ExclusiveInternal<'a>,
    contents: &'a mut T,
}

impl<'a, T> ActionPermit<'a, T> {
    /// Run `action` with access to the guarded data.
    ///
    /// This function takes a closure to ensure that the code can't `await`.
    /// This means the code in `action` can break invariants in `T` as long as
    /// it restores them before returning, without risk of cancellation.
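    ///
    /// For example, a sketch assuming a lock guarding a `Vec<u8>`:
    ///
    /// ```ignore
    /// let permit = lock.lock_exclusive().await;
    /// permit.perform(|data| data.push(42));
    /// // The lock is released when `perform` returns.
    /// ```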
    pub fn perform<R>(self, action: impl FnOnce(&mut T) -> R) -> R {
        let Self { _lock, contents } = self;
        action(contents)

        // Note: we're relying on the Drop impl for _lock to unlock.
    }

    /// Get a shared reference to the guarded data.
    ///
    /// This makes an `ActionPermit` behave like an awkward and expensive
    /// [`SharedGuard`], but this may be useful for code that wants to check
    /// properties of the data before committing with [`ActionPermit::perform`].
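    ///
    /// A sketch of the check-then-commit pattern this enables (`needs_update`
    /// and `update` are hypothetical methods on the guarded type):
    ///
    /// ```ignore
    /// let permit = lock.lock_exclusive().await;
    /// if permit.inspect().needs_update() {
    ///     permit.perform(|data| data.update());
    /// }
    /// ```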
    pub fn inspect(&self) -> &T {
        self.contents
    }

    /// Converts an `ActionPermit<T>` into an `ActionPermit<U>` using a
    /// projection function to select a sub-component of `T`.
    ///
    /// `projection` selects a part of `T` and returns an exclusive reference to
    /// it. By applying `map`, you give up the ability to use this
    /// `ActionPermit` to affect parts of `T` outside the returned sub-component
    /// `U`.
    ///
    /// By transforming an existing writer, this leaves the writer count of the
    /// lock unchanged.
    pub fn map<U>(
        self,
        projection: impl FnOnce(&mut T) -> &mut U,
    ) -> ActionPermit<'a, U> {
        let Self { _lock, contents } = self;
        ActionPermit {
            _lock,
            contents: projection(contents),
        }
    }

    /// Converts an `ActionPermit<T>` into a pair `ActionPermit<U>` and
    /// `ActionPermit<V>` using a projection function to select two
    /// non-overlapping sub-components of `T`.
    ///
    /// This increases the writer count of the lock by 1; writers still have
    /// exclusive access because they don't overlap.
    ///
    /// # Panics
    ///
    /// There is a maximum writer count on the lock, which is at least
    /// `usize::MAX/2`. It's difficult to reach it in non-pathological code.
    /// However, if you do manage to reach it by splitting, this will panic.
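    ///
    /// For example, a sketch assuming guarded data with (hypothetical) `Vec`
    /// fields `left` and `right`:
    ///
    /// ```ignore
    /// let permit = lock.lock_exclusive().await;
    /// let (l, r) =
    ///     ActionPermit::map_split(permit, |d| (&mut d.left, &mut d.right));
    /// l.perform(|left| left.clear());
    /// r.perform(|right| right.clear());
    /// ```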
    pub fn map_split<U, V>(
        self,
        split: impl FnOnce(&mut T) -> (&mut U, &mut V),
    ) -> (ActionPermit<'a, U>, ActionPermit<'a, V>) {
        let Self { _lock, contents } = self;
        let (u, v) = split(contents);
        (
            ActionPermit {
                _lock: _lock.clone(),
                contents: u,
            },
            ActionPermit { _lock, contents: v },
        )
    }
}

impl<T> RwLock<CancelSafe<T>> {
    /// Attempts to lock `self` for exclusive access, succeeding if there are no
    /// other claims on `self`.
    ///
    /// On success, this returns `Ok(guard)`, where `guard` allows direct access
    /// to the guarded data. The `guard` can be held across `await` points,
    /// which are also potential cancellation points. This means it's not safe
    /// to use `guard` to break any invariants in `T` unless you're careful to
    /// restore them before an `await` --- the compiler will not help you with
    /// this.
    ///
    /// On error, this returns `Err(InUse)`.
    ///
    /// See the docs on [`CancelSafe`] for more information.
    pub fn try_lock_exclusive_assuming_cancel_safe(
        self: Pin<&Self>,
    ) -> Result<ExclusiveGuard<'_, T>, InUse> {
        let ActionPermit { _lock, contents } = self.try_lock_exclusive()?;
        Ok(ExclusiveGuard {
            _lock,
            contents: &mut contents.0,
        })
    }

    /// Returns a future that resolves once it is able to lock `self` for
    /// exclusive access.
    ///
    /// On success, this returns a `guard` which allows direct access to the
    /// guarded data. The `guard` can be held across `await` points, which are
    /// also potential cancellation points. This means it's not safe to use
    /// `guard` to break any invariants in `T` unless you're careful to restore
    /// them before an `await` --- the compiler will not help you with this.
    ///
    /// See the docs on [`CancelSafe`] for more information.
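    ///
    /// For example, a sketch (note the `CancelSafe` wrapper, which is what
    /// makes this method available):
    ///
    /// ```ignore
    /// let lock = pin!(RwLock::new(CancelSafe(Vec::new())));
    /// let lock = lock.into_ref();
    ///
    /// let mut guard = lock.lock_exclusive_assuming_cancel_safe().await;
    /// guard.push(1); // DerefMut gives direct access to the Vec.
    /// // Unlike an ActionPermit, this guard may be held across `await`s.
    /// ```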
    pub async fn lock_exclusive_assuming_cancel_safe(
        self: Pin<&Self>,
    ) -> ExclusiveGuard<'_, T> {
        let ActionPermit {
            _lock: lock,
            contents,
        } = self.lock_exclusive().await;
        ExclusiveGuard {
            _lock: lock,
            contents: &mut contents.0,
        }
    }
}

/// A resource object that grants read/write access to the data guarded by an
/// [`RwLock`].
///
/// Read/write access means you can arbitrarily break invariants in the guarded
/// data, even in safe Rust. As a result, this is only available for locks that
/// explicitly use the wrapper type [`CancelSafe`], e.g.
/// `RwLock<CancelSafe<MyStruct>>`. See the docs on `CancelSafe` for more
/// details.
#[derive(Debug)]
pub struct ExclusiveGuard<'a, T> {
    _lock: ExclusiveInternal<'a>,
    contents: &'a mut T,
}

impl<'a, T> ExclusiveGuard<'a, T> {
    /// Converts an `ExclusiveGuard<T>` into an `ExclusiveGuard<U>` using a
    /// projection function to select a sub-component of `T`.
    ///
    /// `projection` selects a part of `T` and returns an exclusive reference to
    /// it. By applying `map`, you give up the ability to use this
    /// `ExclusiveGuard` to affect parts of `T` outside the returned
    /// sub-component `U`.
    ///
    /// The returned guard keeps the entire `T` locked for exclusive access.
    pub fn map<U>(
        self,
        projection: impl FnOnce(&mut T) -> &mut U,
    ) -> ExclusiveGuard<'a, U> {
        let Self { _lock, contents } = self;
        ExclusiveGuard {
            _lock,
            contents: projection(contents),
        }
    }

    /// Converts an `ExclusiveGuard<T>` into a pair `ExclusiveGuard<U>` and
    /// `ExclusiveGuard<V>` using a projection function to select two
    /// non-overlapping sub-components of `T`.
    ///
    /// # Panics
    ///
    /// There is a maximum writer count on the lock, which is at least
    /// `usize::MAX/2`. It's difficult to reach it in non-pathological code.
    /// However, if you do manage to reach it by splitting, this will panic.
    pub fn map_split<U, V>(
        self,
        split: impl FnOnce(&mut T) -> (&mut U, &mut V),
    ) -> (ExclusiveGuard<'a, U>, ExclusiveGuard<'a, V>) {
        let Self { _lock, contents } = self;
        let (u, v) = split(contents);
        (
            ExclusiveGuard {
                _lock: _lock.clone(),
                contents: u,
            },
            ExclusiveGuard { _lock, contents: v },
        )
    }
}

impl<T> Deref for ExclusiveGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.contents
    }
}

impl<T> DerefMut for ExclusiveGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.contents
    }
}

/// Convenience macro for creating an [`RwLock`].
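///
/// For example:
///
/// ```ignore
/// create_rwlock!(my_lock, 0_u32);
/// let value = *my_lock.lock_shared().await;
/// ```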
#[macro_export]
macro_rules! create_rwlock {
    ($var:ident, $contents:expr) => {
        let $var = core::pin::pin!($crate::RwLock::new($contents));
        let $var = $var.into_ref();
    };
}