range_mutex/lib.rs
#![deny(unsafe_op_in_unsafe_fn)]
use parking_lot::Mutex;
#[cfg(feature = "async")]
use std::{
    future::Future,
    task::{Poll, Waker},
};
use std::{
    cell::UnsafeCell,
    cmp::Ordering,
    marker::PhantomData,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut, Range, RangeBounds},
    ptr::NonNull,
    thread::Thread,
};

#[cfg(any(test, doctest))]
mod tests;
mod util;

enum Waiter {
    Thread(Thread),
    #[cfg(feature = "async")]
    Task(Waker),
}

impl Waiter {
    fn unpark(&self) {
        match self {
            Waiter::Thread(thread) => thread.unpark(),
            #[cfg(feature = "async")]
            Waiter::Task(waker) => waker.wake_by_ref(),
        }
    }
}

#[derive(Default)]
struct RangesUsed {
    /// Invariants:
    /// * For each range: `range.len() > 0`
    /// * For each adjacent pair of ranges: `r0.end <= r1.start` (i.e. ranges
    ///   are sorted and do not overlap)
    ///
    /// This allows performing a binary search on this vec for overlapping
    /// ranges.
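    ///
    /// For example, if `ranges` is `[0..2, 4..6, 9..12]`, a search for a
    /// range overlapping `5..10` may report either `4..6` or `9..12`; any
    /// overlapping index is an acceptable result.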
    ranges: Vec<Range<usize>>,
    waiting: Vec<(Waiter, Range<usize>)>,
}

struct Locked;
struct NotLocked;

impl RangesUsed {
    const fn new() -> Self {
        Self { ranges: Vec::new(), waiting: Vec::new() }
    }

    fn overlapping_range_idx(
        &self,
        range: &Range<usize>,
    ) -> Result<usize, usize> {
        debug_assert!(
            !range.is_empty(),
            "empty ranges should be handled already"
        );
        self.ranges.binary_search_by(|locked_range| {
            if locked_range.end <= range.start {
                Ordering::Less
            } else if locked_range.start >= range.end {
                Ordering::Greater
            } else {
                // This means the range overlaps
                Ordering::Equal
            }
        })
    }

    /// If `make_waiter` is `None`, no waiter will be inserted.
    fn lock_range(
        &mut self,
        range: &Range<usize>,
        make_waiter: Option<impl FnOnce() -> Waiter>,
    ) -> Result<Locked, NotLocked> {
        let idx = self.overlapping_range_idx(range);
        match idx {
            Ok(_overlapping_range_idx) => {
                if let Some(waiter) = make_waiter {
                    self.waiting.push((waiter(), range.clone()));
                }
                Err(NotLocked)
            }
            Err(insert_idx) => {
                self.ranges.insert(insert_idx, range.clone());
                Ok(Locked)
            }
        }
    }

    fn unlock_range(&mut self, range: &Range<usize>) {
        let (Ok(idx) | Err(idx)) = self.overlapping_range_idx(range);
        debug_assert_eq!(&self.ranges[idx], range, "range is locked");
        self.ranges.remove(idx);
        self.waiting.retain(|(unparker, waiting_range)| {
            // TODO: more precise unpark selection
            // e.g. don't unpark two overlapping waiters,
            // don't unpark a waiter that overlaps with another existing lock.
            let should_unpark_and_remove = util::overlaps(range, waiting_range);
            if should_unpark_and_remove {
                unparker.unpark();
            }
            // `retain` keeps entries for which this closure returns `true`,
            // so return whether the waiter should *not* be removed.
            !should_unpark_and_remove
        })
    }

    fn split_locked_range(
        &mut self,
        range: &Range<usize>,
        mid: usize,
    ) -> (Range<usize>, Range<usize>) {
        debug_assert!(mid <= range.len());
        let (head, tail) =
            (range.start..range.start + mid, range.start + mid..range.end);
        let (Ok(idx) | Err(idx)) = self.overlapping_range_idx(range);
        debug_assert_eq!(&self.ranges[idx], range, "range is locked");
        self.ranges.splice(idx..=idx, [head.clone(), tail.clone()]);
        (head, tail)
    }
}

/// The trait for types which can be used as the backing store for a
/// [`RangeMutex`].
///
/// # Safety
///
/// * `AsUnsafeCell` must be safe to send and share between threads if `T` is
///   `Send` (it does not need to implement `Send` or `Sync`)
/// * `Self`'s and `AsUnsafeCell`'s `AsRef::as_ref` and `AsMut::as_mut`
///   implementations must be "well-behaved", similar to
///   [`std::ops::DerefPure`]
pub unsafe trait RangeMutexBackingStorage<T>:
    AsMut<[T]> + AsRef<[T]>
{
    type AsUnsafeCell: AsMut<[UnsafeCell<T>]> + AsRef<[UnsafeCell<T>]>;
    fn into_unsafecell(self) -> Self::AsUnsafeCell;
    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self;
}

unsafe impl<'a, T, const N: usize> RangeMutexBackingStorage<T>
    for &'a mut [T; N]
{
    type AsUnsafeCell = &'a mut [UnsafeCell<T>; N];

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        util::wrap_unsafecell_slice(self).try_into().unwrap()
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        util::unwrap_unsafecell_slice(value).try_into().unwrap()
    }
}

unsafe impl<T, const N: usize> RangeMutexBackingStorage<T> for [T; N] {
    type AsUnsafeCell = [UnsafeCell<T>; N];

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        util::wrap_unsafecell_array(self)
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        util::unwrap_unsafecell_array(value)
    }
}

unsafe impl<'a, T> RangeMutexBackingStorage<T> for &'a mut [T] {
    type AsUnsafeCell = &'a mut [UnsafeCell<T>];

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        util::wrap_unsafecell_slice(self)
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        util::unwrap_unsafecell_slice(value)
    }
}

unsafe impl<T> RangeMutexBackingStorage<T> for Box<[T]> {
    type AsUnsafeCell = Box<[UnsafeCell<T>]>;

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        util::wrap_unsafecell_vec(self.into_vec()).into_boxed_slice()
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        util::unwrap_unsafecell_vec(value.into_vec()).into_boxed_slice()
    }
}

unsafe impl<T> RangeMutexBackingStorage<T> for Vec<T> {
    type AsUnsafeCell = Vec<UnsafeCell<T>>;

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        util::wrap_unsafecell_vec(self)
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        util::unwrap_unsafecell_vec(value)
    }
}

unsafe impl<'a, T> RangeMutexBackingStorage<T> for RangeMutexGuard<'a, T> {
    type AsUnsafeCell = RangeMutexGuard<'a, UnsafeCell<T>>;

    fn into_unsafecell(self) -> Self::AsUnsafeCell {
        let this = ManuallyDrop::new(self);
        RangeMutexGuard {
            data: NonNull::new(this.data.as_ptr() as _).unwrap(),
            range: this.range.clone(),
            used: this.used,
            _variance: PhantomData,
        }
    }

    fn from_unsafecell(value: Self::AsUnsafeCell) -> Self {
        let this = ManuallyDrop::new(value);
        RangeMutexGuard {
            data: NonNull::new(this.data.as_ptr() as _).unwrap(),
            range: this.range.clone(),
            used: this.used,
            _variance: PhantomData,
        }
    }
}

/// A `Mutex`-like type for slices and slice-like containers.
///
/// This type acts similarly to [`std::sync::Mutex<[T]>`][std::sync::Mutex],
/// except that nonoverlapping ranges of the slice can be locked separately.
///
/// # Example
///
/// ```
/// use std::thread;
/// use range_mutex::RangeMutex;
///
/// const N: usize = 10;
///
/// // Spawn a few threads to increment ranges of a shared vector (non-atomically).
/// let mut data = RangeMutex::new(vec![0; N + 1]);
///
/// thread::scope(|scope| {
///     let data = &data;
///     for i in 0..N {
///         scope.spawn(move || {
///             // The shared state can only be accessed once the lock is held.
///             // Our non-atomic increment is safe because we're the only thread
///             // which can access our range of the shared state when the lock is held.
///             let mut data = data.lock(i..=i+1);
///             data[0] += 1;
///             data[1] += 1;
///             // the lock is unlocked here when `data` goes out of scope.
///         });
///     }
/// });
///
/// assert_eq!(data.get_mut(), [1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]);
/// ```
///
/// ## Zero-Length Ranges
///
/// Attempts to lock zero-length ranges of a [`RangeMutex`] will always succeed
/// (assuming they are not out-of-bounds). Zero-length ranges are not considered
/// to overlap with any other ranges, including themselves. For example, having
/// a lock on the (half-open) range `2..6` will not block an attempt to lock
/// the (half-open) range `4..4`, and vice versa, since the range `4..4` is
/// zero-length, and thus empty.
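///
/// ```
/// use range_mutex::RangeMutex;
///
/// let mutex = RangeMutex::new(vec![0u8; 8]);
/// let _guard = mutex.lock(2..6);
/// // `4..4` is empty, so it does not conflict with the lock on `2..6`.
/// assert!(mutex.try_lock(4..4).is_some());
/// ```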
pub struct RangeMutex<T, B: RangeMutexBackingStorage<T>> {
    used: Mutex<RangesUsed>,
    data: B::AsUnsafeCell,
}

unsafe impl<T: Send, B: RangeMutexBackingStorage<T> + Sync> Sync
    for RangeMutex<T, B>
{
}
unsafe impl<T: Send, B: RangeMutexBackingStorage<T> + Send> Send
    for RangeMutex<T, B>
{
}

impl<T, B: RangeMutexBackingStorage<T>> RangeMutex<T, B> {
    /// Creates a new `RangeMutex` in an unlocked state ready for use.
    pub fn new(values: B) -> Self {
        let data = B::into_unsafecell(values);

        Self { data, used: Mutex::new(RangesUsed::new()) }
    }

    /// Consumes this `RangeMutex`, returning the underlying data.
    pub fn into_inner(self) -> B {
        B::from_unsafecell(self.data)
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RangeMutex` mutably, no actual locking
    /// needs to take place – the mutable borrow statically guarantees no
    /// locks exist.
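    ///
    /// ```
    /// use range_mutex::RangeMutex;
    ///
    /// let mut mutex = RangeMutex::new(vec![0u8; 4]);
    /// mutex.get_mut()[0] = 1;
    /// assert_eq!(mutex.lock(..)[0], 1);
    /// ```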
    pub fn get_mut(&mut self) -> &mut [T] {
        util::unwrap_unsafecell_slice(self.data.as_mut())
    }

    /// Undo the effect of leaked guards on the borrow state of the
    /// `RangeMutex`.
    ///
    /// This call is similar to [`get_mut`](Self::get_mut) but more
    /// specialized. It borrows the `RangeMutex` mutably to ensure no locks
    /// exist and then resets the state tracking locks. This is relevant if
    /// some `RangeMutexGuard`s have been leaked.
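    ///
    /// ```
    /// use range_mutex::RangeMutex;
    ///
    /// let mut mutex = RangeMutex::new(vec![0u8; 4]);
    /// std::mem::forget(mutex.lock(0..2));
    /// // The leaked guard left `0..2` locked...
    /// assert!(mutex.try_lock(0..2).is_none());
    /// // ...until the lock state is reset.
    /// mutex.undo_leak();
    /// assert!(mutex.try_lock(0..2).is_some());
    /// ```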
    pub fn undo_leak(&mut self) -> &mut [T] {
        let used = self.used.get_mut();
        used.ranges.clear();
        used.waiting.clear();
        self.get_mut()
    }

    /// Attempts to acquire a lock for a range of this slice.
    ///
    /// If the lock could not be acquired at this time, then `None` is
    /// returned. Otherwise, an RAII guard is returned. The lock will be
    /// unlocked when the guard is dropped.
    ///
    /// This function does not block.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if the
    /// end point is greater than the length of the slice.
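    ///
    /// # Example
    ///
    /// ```
    /// use range_mutex::RangeMutex;
    ///
    /// let mutex = RangeMutex::new(vec![0u8; 8]);
    /// let _guard = mutex.lock(0..4);
    /// // Overlapping range: fails immediately instead of blocking.
    /// assert!(mutex.try_lock(2..6).is_none());
    /// // Disjoint range: succeeds.
    /// assert!(mutex.try_lock(4..8).is_some());
    /// ```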
    #[inline]
    pub fn try_lock(
        &self,
        range: impl RangeBounds<usize>,
    ) -> Option<RangeMutexGuard<'_, T>> {
        // panics if out of range
        let range = util::range(self.len(), range);
        if range.is_empty() {
            return Some(RangeMutexGuard::empty());
        }
        // SAFETY: util::range panics on invalid ranges, and range is not empty
        unsafe { self.outlined_try_lock(range) }
    }

    /// Safety:
    /// * `range` is non-empty
    /// * `range.start <= range.end`
    /// * `range.end <= self.len()`
    unsafe fn outlined_try_lock(
        &self,
        range: Range<usize>,
    ) -> Option<RangeMutexGuard<'_, T>> {
        debug_assert!(
            !range.is_empty()
                && range.start <= range.end
                && range.end <= self.len()
        );

        let mut used = self.used.lock();

        match used.lock_range(&range, None::<fn() -> Waiter>) {
            Err(NotLocked) => None,
            Ok(Locked) => {
                let data = &self.data.as_ref()[range.clone()];
                let data = util::transpose_unsafecell_slice(data);
                Some(RangeMutexGuard {
                    data: NonNull::new(data.get()).unwrap(),
                    range,
                    used: Some(&self.used),
                    _variance: PhantomData,
                })
            }
        }
    }

    /// Acquires a lock for a range of this slice, blocking the current thread
    /// until it is able to do so.
    ///
    /// This function will block the local thread until it is available to
    /// acquire the lock. Upon returning, the thread is the only thread with
    /// the lock held for the given range. An RAII guard is returned to allow
    /// scoped unlock of the lock. When the guard goes out of scope, the
    /// lock will be unlocked.
    ///
    /// The exact behavior on locking a range in a thread which already holds
    /// a lock on an overlapping range is left unspecified. However, this
    /// function will not return on the second call (it might panic or
    /// deadlock, for example).
    ///
    /// Simultaneous attempts by multiple threads to lock overlapping ranges
    /// may result in a deadlock. To avoid this, have all threads lock ranges
    /// in ascending or descending order consistently.
    ///
    /// ```rust,ignore
    /// # use range_mutex::RangeMutex;
    /// # let mutex = RangeMutex::new(vec![0; 8]);
    /// # std::thread::scope(|scope| {
    /// # scope.spawn(|| {
    /// // Thread 1:
    /// let _g1 = mutex.lock(0..=2);
    /// let _g2 = mutex.lock(3..=5); // Thread 1 may deadlock here if thread 1 holds 0..=2 and thread 2 holds 4..=7.
    /// # });
    ///
    /// # scope.spawn(|| {
    /// // Thread 2:
    /// let _g1 = mutex.lock(4..=7);
    /// let _g2 = mutex.lock(0..=3); // Thread 2 may deadlock here if thread 1 holds 0..=2 and thread 2 holds 4..=7.
    /// # });
    /// # });
    /// ```
    ///
    /// ```rust
    /// # use range_mutex::RangeMutex;
    /// # let mutex = RangeMutex::new(vec![0; 8]);
    /// # std::thread::scope(|scope| {
    /// # scope.spawn(|| {
    /// // Thread 1:
    /// let _g1 = mutex.lock(0..=2); // Either thread 1 will get 0..=2 first, or thread 2 will get 0..=3 first, and then that thread will continue.
    /// let _g2 = mutex.lock(3..=5);
    /// # });
    ///
    /// # scope.spawn(|| {
    /// // Thread 2:
    /// let _g1 = mutex.lock(0..=3); // Either thread 1 will get 0..=2 first, or thread 2 will get 0..=3 first, and then that thread will continue.
    /// let _g2 = mutex.lock(4..=7);
    /// # });
    /// # });
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if the
    /// end point is greater than the length of the slice.
    #[inline]
    pub fn lock(
        &self,
        range: impl RangeBounds<usize>,
    ) -> RangeMutexGuard<'_, T> {
        // panics if out of range
        let range = util::range(self.len(), range);
        if range.is_empty() {
            return RangeMutexGuard::empty();
        }
        // SAFETY: util::range panics on invalid ranges, and range is not empty
        unsafe { self.outlined_lock(range) }
    }

    /// Safety:
    /// * `range` is non-empty
    /// * `range.start <= range.end`
    /// * `range.end <= self.len()`
    unsafe fn outlined_lock(
        &self,
        range: Range<usize>,
    ) -> RangeMutexGuard<'_, T> {
        debug_assert!(
            !range.is_empty()
                && range.start <= range.end
                && range.end <= self.len()
        );

        let mut used = self.used.lock();
        loop {
            match used.lock_range(
                &range,
                Some(|| Waiter::Thread(std::thread::current())),
            ) {
                Err(NotLocked) => {
                    // Don't hold the mutex while parked
                    drop(used);
                    std::thread::park();
                    used = self.used.lock();
                }
                Ok(Locked) => {
                    let data = &self.data.as_ref()[range.clone()];
                    let data = util::transpose_unsafecell_slice(data);
                    return RangeMutexGuard {
                        data: NonNull::new(data.get()).unwrap(),
                        range,
                        used: Some(&self.used),
                        _variance: PhantomData,
                    };
                }
            }
        }
    }

    /// Asynchronously acquires a lock for a range of this slice, blocking the
    /// current task until it is able to do so.
    ///
    /// This function will block the local task until it is available to
    /// acquire the lock. Upon returning, the task is the only task with
    /// the lock held for the given range. An RAII guard is returned to allow
    /// scoped unlock of the lock. When the guard goes out of scope, the
    /// lock will be unlocked.
    ///
    /// The exact behavior on locking a range in a task which already holds
    /// a lock on an overlapping range is left unspecified. However, this
    /// function will not return on the second call (it might panic or
    /// deadlock, for example).
    ///
    /// Simultaneous attempts by multiple tasks to lock overlapping ranges
    /// may result in a deadlock. To avoid this, have all tasks lock ranges in
    /// ascending or descending order consistently. See [`lock`](Self::lock)
    /// for examples of this.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if the
    /// end point is greater than the length of the slice.
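    ///
    /// # Example
    ///
    /// A sketch, assuming some async executor's `block_on` (this crate does
    /// not provide one):
    ///
    /// ```rust,ignore
    /// use range_mutex::RangeMutex;
    ///
    /// let mutex = RangeMutex::new(vec![0u8; 8]);
    /// block_on(async {
    ///     let mut guard = mutex.lock_async(0..4).await;
    ///     guard[0] = 1;
    /// });
    /// ```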
    #[cfg(feature = "async")]
    #[inline]
    pub fn lock_async(
        &self,
        range: impl RangeBounds<usize>,
    ) -> impl Future<Output = RangeMutexGuard<'_, T>> {
        let range = util::range(self.len(), range);
        // SAFETY: util::range panics on invalid ranges
        unsafe { self.outlined_lock_async(range) }
    }

    /// Safety:
    /// * `range.start <= range.end`
    /// * `range.end <= self.len()`
    ///
    /// `range` *can* be empty
    #[cfg(feature = "async")]
    async unsafe fn outlined_lock_async(
        &self,
        range: Range<usize>,
    ) -> RangeMutexGuard<'_, T> {
        debug_assert!(range.start <= range.end && range.end <= self.len());
        std::future::poll_fn(move |ctx| {
            if range.is_empty() {
                return Poll::Ready(RangeMutexGuard::empty());
            }
            // Don't hold the mutex while waiting, only hold during the poll
            // call.
            let mut used = self.used.lock();
            match used
                .lock_range(&range, Some(|| Waiter::Task(ctx.waker().clone())))
            {
                Err(NotLocked) => Poll::Pending,
                Ok(Locked) => {
                    let data = &self.data.as_ref()[range.clone()];
                    let data = util::transpose_unsafecell_slice(data);
                    Poll::Ready(RangeMutexGuard {
                        data: NonNull::new(data.get()).unwrap(),
                        range: range.clone(),
                        used: Some(&self.used),
                        _variance: PhantomData,
                    })
                }
            }
        })
        .await
    }

    /// Returns the number of elements in the slice.
    pub fn len(&self) -> usize {
        self.data.as_ref().len()
    }

    /// Returns `true` if the slice has a length of 0.
    pub fn is_empty(&self) -> bool {
        self.data.as_ref().len() == 0
    }
}

/// An RAII implementation of a “scoped lock” of a slice of a [`RangeMutex`].
/// When this structure is dropped (falls out of scope), the lock will be
/// unlocked.
///
/// The slice protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`lock`][RangeMutex::lock] and
/// [`try_lock`][RangeMutex::try_lock] methods on [`RangeMutex`].
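///
/// # Example
///
/// ```
/// use range_mutex::RangeMutex;
///
/// let mut mutex = RangeMutex::new(vec![0u8; 4]);
/// {
///     let mut guard = mutex.lock(1..3);
///     // Indexing is relative to the start of the locked range.
///     guard[0] = 7;
/// }
/// assert_eq!(mutex.get_mut(), [0, 7, 0, 0]);
/// ```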
pub struct RangeMutexGuard<'l, T> {
    data: NonNull<[T]>,
    /// `RangeMutexGuard<'l, T>` should be covariant in `'l`, but invariant
    /// in `T`.
    _variance: PhantomData<&'l mut [T]>,
    /// `range.is_empty()` if and only if `used.is_none()`
    range: Range<usize>,
    used: Option<&'l Mutex<RangesUsed>>,
}

unsafe impl<T: Send> Send for RangeMutexGuard<'_, T> {}
unsafe impl<T: Sync> Sync for RangeMutexGuard<'_, T> {}

impl<T> Default for RangeMutexGuard<'_, T> {
    fn default() -> Self {
        Self::empty()
    }
}

impl<'l, T> RangeMutexGuard<'l, T> {
    /// A `RangeMutexGuard` pointing to an empty slice.
    pub fn empty() -> Self {
        Self {
            data: NonNull::<[T; 0]>::dangling(),
            range: 0..0,
            used: None,
            _variance: PhantomData,
        }
    }

    /// Divide this `RangeMutexGuard` into two at an index.
    ///
    /// The first will contain all indices from `[0, mid)` (excluding the
    /// index `mid` itself) and the second will contain all indices from
    /// `[mid, len)` (excluding the index `len` itself).
    ///
    /// # Panics
    ///
    /// Panics if `mid > len`.
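    ///
    /// # Example
    ///
    /// ```
    /// use range_mutex::{RangeMutex, RangeMutexGuard};
    ///
    /// let mutex = RangeMutex::new(vec![1, 2, 3, 4]);
    /// let guard = mutex.lock(..);
    /// let (head, tail) = RangeMutexGuard::split_at(guard, 2);
    /// assert_eq!(&*head, &[1, 2]);
    /// assert_eq!(&*tail, &[3, 4]);
    /// // The two halves are now independent locks.
    /// drop(head);
    /// assert!(mutex.try_lock(0..2).is_some());
    /// ```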
    pub fn split_at(this: Self, mid: usize) -> (Self, Self) {
        assert!(mid <= this.len());
        if mid == 0 {
            return (Self::empty(), this);
        } else if mid == this.len() {
            return (this, Self::empty());
        }
        let this = ManuallyDrop::new(this);
        let mut used = this.used.as_ref().expect("this.len() > 0").lock();
        // SAFETY: `mid <= this.len()`, so `this.data.add(mid)` is defined.
        let (head_data, tail_data) =
            unsafe { util::split_slice_at(this.data, mid) };
        let (head, tail) = used.split_locked_range(&this.range, mid);
        (
            Self {
                data: head_data,
                range: head,
                used: this.used,
                _variance: PhantomData,
            },
            Self {
                data: tail_data,
                range: tail,
                used: this.used,
                _variance: PhantomData,
            },
        )
    }

    /// Reduce the extent of this `RangeMutexGuard` to a subrange. Elements
    /// outside of `self[range]` are unlocked, and a guard for `self[range]`
    /// is returned.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if the
    /// end point is greater than the length of the slice.
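    ///
    /// # Example
    ///
    /// ```
    /// use range_mutex::{RangeMutex, RangeMutexGuard};
    ///
    /// let mutex = RangeMutex::new(vec![1, 2, 3, 4, 5]);
    /// let guard = mutex.lock(..);
    /// let middle = RangeMutexGuard::slice(guard, 1..4);
    /// assert_eq!(&*middle, &[2, 3, 4]);
    /// // The elements outside `1..4` are unlocked again.
    /// assert!(mutex.try_lock(0..1).is_some());
    /// assert!(mutex.try_lock(4..5).is_some());
    /// ```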
    pub fn slice(this: Self, range: impl RangeBounds<usize>) -> Self {
        // TODO: make more efficient
        let range = util::range(this.len(), range);
        let (this, _tail) = Self::split_at(this, range.end);
        let (_head, this) = Self::split_at(this, range.start);
        this
    }
}

impl<T> Deref for RangeMutexGuard<'_, T> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        // SAFETY: self.data is uniquely accessible to this RangeMutexGuard, so
        // it is sound to dereference it shared-ly from &self. (Or it is empty
        // and dangling and thus sound to dereference.)
        unsafe { self.data.as_ref() }
    }
}

impl<T> DerefMut for RangeMutexGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: self.data is uniquely accessible to this RangeMutexGuard, so
        // it is sound to dereference it mutably from &mut self. (Or it is empty
        // and dangling and thus sound to dereference.)
        unsafe { self.data.as_mut() }
    }
}

impl<'l, T> Drop for RangeMutexGuard<'l, T> {
    fn drop(&mut self) {
        if let Some(used) = self.used {
            let mut used = used.lock();
            used.unlock_range(&self.range);
        } else {
            // `range.is_empty()` if and only if `used.is_none()`
            debug_assert_eq!(self.range.len(), 0)
        }
    }
}

impl<T> AsRef<[T]> for RangeMutexGuard<'_, T> {
    fn as_ref(&self) -> &[T] {
        self
    }
}

impl<T> AsMut<[T]> for RangeMutexGuard<'_, T> {
    fn as_mut(&mut self) -> &mut [T] {
        self
    }
}