rav1d_disjoint_mut/lib.rs
1//! Provably safe abstraction for concurrent, disjoint mutation of contiguous storage.
2//!
3//! [`DisjointMut`] wraps a collection and allows non-overlapping mutable borrows
4//! through a shared `&` reference. Like [`RefCell`](std::cell::RefCell), it enforces
5//! borrowing rules at runtime — but instead of whole-container borrows, it tracks
6//! *ranges* and panics only on truly overlapping access.
7//!
8//! # Safety Model
9//!
10//! By default, every `.index()` and `.index_mut()` call validates that the requested
11//! range doesn't overlap with any outstanding borrow. This makes `DisjointMut` a
12//! **sound safe abstraction**: safe code cannot cause undefined behavior.
13//!
14//! For performance-critical code that has been audited for correctness, the
15//! [`DisjointMut::dangerously_unchecked()`] `unsafe` constructor skips runtime
16//! tracking. The `unsafe` boundary ensures that opting out of tracking is an
17//! explicit, auditable decision — not something that can happen via feature
18//! unification or accident.
19//!
20//! # Example
21//!
22//! ```
23//! use rav1d_disjoint_mut::DisjointMut;
24//!
25//! let mut buf = DisjointMut::new(vec![0u8; 100]);
26//! // Borrow two non-overlapping regions simultaneously through &buf:
27//! let a = buf.index(0..50);
28//! let b = buf.index(50..100);
29//! assert_eq!(a.len() + b.len(), 100);
30//! ```
31
32#![no_std]
33#![deny(unsafe_op_in_unsafe_fn)]
34
35extern crate alloc;
36#[cfg(feature = "std")]
37extern crate std;
38
39#[cfg(feature = "aligned")]
40pub mod align;
41
42use alloc::boxed::Box;
43use alloc::sync::Arc;
44use alloc::vec::Vec;
45use core::cell::UnsafeCell;
46use core::fmt;
47use core::fmt::Debug;
48use core::fmt::Display;
49use core::fmt::Formatter;
50use core::marker::PhantomData;
51use core::mem;
52#[cfg(feature = "zerocopy")]
53use core::mem::ManuallyDrop;
54use core::ops::Deref;
55use core::ops::DerefMut;
56use core::ops::Index;
57use core::ops::Range;
58use core::ops::RangeFrom;
59use core::ops::RangeFull;
60use core::ops::RangeInclusive;
61use core::ops::RangeTo;
62use core::ops::RangeToInclusive;
63use core::ptr;
64use core::ptr::addr_of_mut;
65#[cfg(feature = "zerocopy")]
66use zerocopy::FromBytes;
67#[cfg(feature = "zerocopy")]
68use zerocopy::Immutable;
69#[cfg(feature = "zerocopy")]
70use zerocopy::IntoBytes;
71#[cfg(feature = "zerocopy")]
72use zerocopy::KnownLayout;
73
74// =============================================================================
75// Core types
76// =============================================================================
77
/// Wraps an indexable collection to allow unchecked concurrent mutable borrows.
///
/// This wrapper allows users to concurrently mutably borrow disjoint regions or
/// elements from a collection. This is necessary to allow multiple threads to
/// concurrently read and write to disjoint pixel data from the same arrays and
/// vectors.
///
/// Indexing returns a guard which acts as a lock for the borrowed region.
/// By default, borrows are validated at runtime to ensure that mutably borrowed
/// regions are actually disjoint with all other borrows for the lifetime of the
/// returned guard. This makes `DisjointMut` a provably safe abstraction (like `RefCell`).
///
/// For audited hot paths, use
/// [`DisjointMut::dangerously_unchecked`] to skip tracking.
pub struct DisjointMut<T: ?Sized + AsMutPtr> {
    /// Runtime borrow tracker. `Some` in checked mode ([`DisjointMut::new`]),
    /// `None` in unchecked mode ([`DisjointMut::dangerously_unchecked`]).
    tracker: Option<checked::BorrowTracker>,

    /// The wrapped collection. `UnsafeCell` is what permits handing out
    /// mutable access through a shared `&DisjointMut<T>`.
    inner: UnsafeCell<T>,
}
97
/// SAFETY: If `T: Send`, then sending `DisjointMut<T>` across threads is safe.
/// There is no non-`Sync` state that is left on another thread
/// when `DisjointMut` gets sent to another thread.
unsafe impl<T: ?Sized + AsMutPtr + Send> Send for DisjointMut<T> {}

/// SAFETY: `DisjointMut` only provides disjoint mutable access
/// to `T`'s elements through a shared `&DisjointMut<T>` reference.
/// Thus, sharing/`Send`ing a `&DisjointMut<T>` across threads is safe.
///
/// (The `UnsafeCell` field suppresses the auto-`Sync` impl; this restores it
/// under the invariants documented below.)
///
/// In checked mode (default), the borrow tracker prevents overlapping borrows,
/// so no data races are possible. In unchecked mode (`dangerously_unchecked`),
/// the caller guarantees disjointness via the `unsafe` constructor contract.
unsafe impl<T: ?Sized + AsMutPtr + Sync> Sync for DisjointMut<T> {}
111
112impl<T: AsMutPtr + Default> Default for DisjointMut<T> {
113 fn default() -> Self {
114 Self::new(T::default())
115 }
116}
117
118impl<T: ?Sized + AsMutPtr> Debug for DisjointMut<T> {
119 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
120 f.debug_struct("DisjointMut")
121 .field("len", &self.len())
122 .field("checked", &self.is_checked())
123 .finish_non_exhaustive()
124 }
125}
126
impl<T: ?Sized + AsMutPtr> DisjointMut<T> {
    /// Returns `true` if this instance performs runtime overlap checking.
    ///
    /// `true` for instances built with [`DisjointMut::new`], `false` for
    /// [`DisjointMut::dangerously_unchecked`].
    pub const fn is_checked(&self) -> bool {
        self.tracker.is_some()
    }

    /// Returns a raw pointer to the inner container, bypassing the borrow tracker.
    ///
    /// **Prefer `as_mut_ptr()` or `as_mut_slice()` for element access.** This
    /// method exists only for reading container metadata (e.g. stride) that
    /// lives outside the element data. Writing through this pointer or creating
    /// `&mut T` from it can violate the tracker's guarantees.
    ///
    /// # Safety
    ///
    /// The returned ptr has the safety requirements of [`UnsafeCell::get`].
    /// In particular, the ptr returned by [`AsMutPtr::as_mut_ptr`] may be in use.
    #[doc(hidden)]
    pub const fn inner(&self) -> *mut T {
        self.inner.get()
    }
}
149
150impl<T: AsMutPtr> DisjointMut<T> {
151 /// Creates a new `DisjointMut` with runtime borrow tracking enabled.
152 ///
153 /// Every `.index()` and `.index_mut()` call will validate that the
154 /// requested range doesn't overlap with any outstanding borrow.
155 pub const fn new(value: T) -> Self {
156 Self {
157 inner: UnsafeCell::new(value),
158 tracker: Some(checked::BorrowTracker::new()),
159 }
160 }
161
162 /// Creates a new `DisjointMut` **without** runtime borrow tracking.
163 ///
164 /// This skips all overlap checking — `.index_mut()` will create `&mut`
165 /// references without verifying that they don't alias. This is faster
166 /// but the caller must manually ensure that all borrows are disjoint.
167 ///
168 /// # Safety
169 ///
170 /// The caller must guarantee that all borrows through this instance
171 /// are non-overlapping. Overlapping mutable borrows cause undefined
172 /// behavior (aliasing `&mut` references). Verify correctness by
173 /// running the full test suite with a tracked (`new()`) instance first.
174 pub const unsafe fn dangerously_unchecked(value: T) -> Self {
175 Self {
176 inner: UnsafeCell::new(value),
177 tracker: None,
178 }
179 }
180
181 pub fn into_inner(self) -> T {
182 self.inner.into_inner()
183 }
184}
185
186// =============================================================================
187// Guard types
188// =============================================================================
189
/// Scope guard that poisons the `DisjointMut` if the indexing operation panics
/// (e.g., out-of-bounds). Disarmed via `mem::forget` on success.
///
/// Rather than cleaning up the leaked borrow record (which would allow the range
/// to be re-borrowed in potentially inconsistent state), we poison the entire
/// data structure. This follows the `std::sync::Mutex` pattern: after a panic,
/// fail loudly on all subsequent access.
struct BorrowCleanup<'a, T: ?Sized + AsMutPtr> {
    /// `Some` only for tracked (checked) parents; `None` makes the guard a no-op.
    parent: Option<&'a DisjointMut<T>>,
}
200
201impl<T: ?Sized + AsMutPtr> Drop for BorrowCleanup<'_, T> {
202 fn drop(&mut self) {
203 // This only fires on panic (mem::forget on success path).
204 // Poison rather than clean up — the data structure is compromised.
205 if let Some(parent) = self.parent {
206 parent.tracker.as_ref().unwrap().poison();
207 }
208 }
209}
210
/// Mutable borrow guard returned by [`DisjointMut::index_mut`].
///
/// Holds the borrowed `&mut V` and keeps the corresponding range registered
/// with the parent's tracker for as long as the guard is alive.
pub struct DisjointMutGuard<'a, T: ?Sized + AsMutPtr, V: ?Sized> {
    /// The mutably borrowed region.
    slice: &'a mut V,

    /// Ties the guard's lifetime to the parent `DisjointMut`.
    phantom: PhantomData<&'a DisjointMut<T>>,

    /// Reference to parent for borrow removal on drop.
    /// `None` when parent was created with `dangerously_unchecked`.
    parent: Option<&'a DisjointMut<T>>,
    /// Unique ID for this borrow registration.
    borrow_id: checked::BorrowId,
}
222
#[cfg(feature = "zerocopy")]
impl<'a, T: AsMutPtr> DisjointMutGuard<'a, T, [u8]> {
    /// Reinterprets a `[u8]` guard as a `[V]` guard.
    ///
    /// The borrow registration is untouched: the tracker still records the
    /// same byte range; only the guard's element type changes. Panics (via
    /// `unwrap`) if the byte slice's length or alignment doesn't fit `[V]`.
    #[inline] // Inline to see alignment to potentially elide checks.
    fn cast_slice<V: IntoBytes + FromBytes + KnownLayout>(self) -> DisjointMutGuard<'a, T, [V]> {
        // We don't want to drop the old guard, because we aren't changing or
        // removing the borrow from parent here.
        let mut old_guard = ManuallyDrop::new(self);
        // `mem::take` moves the `&'a mut [u8]` out, leaving an empty slice
        // behind in the (never-dropped) old guard.
        let bytes = mem::take(&mut old_guard.slice);
        DisjointMutGuard {
            slice: <[V]>::mut_from_bytes(bytes).unwrap(),
            phantom: old_guard.phantom,
            parent: old_guard.parent,
            borrow_id: old_guard.borrow_id,
        }
    }

    /// Reinterprets a `[u8]` guard as a guard of a single `V`.
    ///
    /// Same borrow-preservation logic as [`Self::cast_slice`]; panics if the
    /// byte slice's length or alignment doesn't fit exactly one `V`.
    #[inline] // Inline to see alignment to potentially elide checks.
    fn cast<V: IntoBytes + FromBytes + KnownLayout>(self) -> DisjointMutGuard<'a, T, V> {
        let mut old_guard = ManuallyDrop::new(self);
        let bytes = mem::take(&mut old_guard.slice);
        DisjointMutGuard {
            slice: V::mut_from_bytes(bytes).unwrap(),
            phantom: old_guard.phantom,
            parent: old_guard.parent,
            borrow_id: old_guard.borrow_id,
        }
    }
}
251
252impl<'a, T: ?Sized + AsMutPtr, V: ?Sized> Deref for DisjointMutGuard<'a, T, V> {
253 type Target = V;
254
255 fn deref(&self) -> &Self::Target {
256 self.slice
257 }
258}
259
260impl<'a, T: ?Sized + AsMutPtr, V: ?Sized> DerefMut for DisjointMutGuard<'a, T, V> {
261 fn deref_mut(&mut self) -> &mut Self::Target {
262 self.slice
263 }
264}
265
/// Immutable borrow guard returned by [`DisjointMut::index`].
///
/// Holds the borrowed `&V` and keeps the corresponding range registered
/// with the parent's tracker for as long as the guard is alive.
pub struct DisjointImmutGuard<'a, T: ?Sized + AsMutPtr, V: ?Sized> {
    /// The immutably borrowed region.
    slice: &'a V,

    /// Ties the guard's lifetime to the parent `DisjointMut`.
    phantom: PhantomData<&'a DisjointMut<T>>,

    /// `None` when the parent was created with `dangerously_unchecked`.
    parent: Option<&'a DisjointMut<T>>,
    /// Unique ID for this borrow registration.
    borrow_id: checked::BorrowId,
}
274
#[cfg(feature = "zerocopy")]
impl<'a, T: AsMutPtr> DisjointImmutGuard<'a, T, [u8]> {
    /// Reinterprets a `[u8]` guard as a `[V]` guard.
    ///
    /// As with the mutable-guard casts, the old guard is wrapped in
    /// `ManuallyDrop` so the borrow registration is carried over rather than
    /// released. Panics if length or alignment doesn't fit `[V]`.
    #[inline]
    fn cast_slice<V: FromBytes + KnownLayout + Immutable>(self) -> DisjointImmutGuard<'a, T, [V]> {
        let mut old_guard = ManuallyDrop::new(self);
        // `mem::take` replaces the `&'a [u8]` with an empty slice in the
        // (never-dropped) old guard.
        let bytes = mem::take(&mut old_guard.slice);
        DisjointImmutGuard {
            slice: <[V]>::ref_from_bytes(bytes).unwrap(),
            phantom: old_guard.phantom,
            parent: old_guard.parent,
            borrow_id: old_guard.borrow_id,
        }
    }

    /// Reinterprets a `[u8]` guard as a guard of a single `V`.
    /// Panics if length or alignment doesn't fit exactly one `V`.
    #[inline]
    fn cast<V: FromBytes + KnownLayout + Immutable>(self) -> DisjointImmutGuard<'a, T, V> {
        let mut old_guard = ManuallyDrop::new(self);
        let bytes = mem::take(&mut old_guard.slice);
        DisjointImmutGuard {
            slice: V::ref_from_bytes(bytes).unwrap(),
            phantom: old_guard.phantom,
            parent: old_guard.parent,
            borrow_id: old_guard.borrow_id,
        }
    }
}
301
302impl<'a, T: ?Sized + AsMutPtr, V: ?Sized> Deref for DisjointImmutGuard<'a, T, V> {
303 type Target = V;
304
305 fn deref(&self) -> &Self::Target {
306 self.slice
307 }
308}
309
310// =============================================================================
311// AsMutPtr trait (sealed — only implemented for types in this crate)
312// =============================================================================
313
mod sealed {
    use alloc::boxed::Box;
    use alloc::vec::Vec;

    /// Sealing trait — prevents external implementations of [`AsMutPtr`](super::AsMutPtr).
    ///
    /// This is critical for soundness: an incorrect `AsMutPtr` impl could return
    /// a pointer to invalid memory, causing UB that the runtime checker cannot catch.
    /// By sealing the trait, we ensure only audited impls in this crate exist.
    pub trait Sealed {}

    // The `V: Copy` bounds mirror `AsMutPtr::Target: Copy`: per the crate's
    // safety model, element types must be plain copyable data.
    impl<V: Copy> Sealed for Vec<V> {}
    impl<V: Copy, const N: usize> Sealed for [V; N] {}
    impl<V: Copy> Sealed for [V] {}
    impl<V: Copy> Sealed for Box<[V]> {}
}
330
/// Convert from a mutable pointer to a collection to a mutable pointer to the
/// underlying slice without ever creating a mutable reference to the slice.
///
/// This trait exists for the same reason as [`Vec::as_mut_ptr`] - we want to
/// create a mutable pointer to the underlying slice without ever creating a
/// mutable reference to the slice.
///
/// # Safety
///
/// This trait must not ever create a mutable reference to the underlying slice,
/// as it may be (partially) immutably borrowed concurrently.
///
/// # Sealed
///
/// This trait is sealed and cannot be implemented outside of this crate.
/// External types can use the [`ExternalAsMutPtr`] unsafe trait to opt in,
/// which requires `Copy` element types for data-race safety.
pub unsafe trait AsMutPtr: sealed::Sealed {
    /// The element type of the collection.
    type Target: Copy;

    /// Convert a mutable pointer to a collection to a mutable pointer to the
    /// underlying slice.
    ///
    /// # Safety
    ///
    /// This method may dereference `ptr` as an immutable reference, so this
    /// pointer must be safely dereferenceable.
    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // SAFETY: The safety precondition of this method requires that we can
        // immutably dereference `ptr`.
        let len = unsafe { (*ptr).len() };
        // SAFETY: Mutably dereferencing and calling `.as_mut_ptr()` does not
        // materialize a mutable reference to the underlying slice according to
        // its documented behavior, so we can still allow concurrent immutable
        // references into that underlying slice.
        let data = unsafe { Self::as_mut_ptr(ptr) };
        ptr::slice_from_raw_parts_mut(data, len)
    }

    /// Convert a mutable pointer to a collection to a mutable pointer to the
    /// first element of the collection.
    ///
    /// # Safety
    ///
    /// This method may dereference `ptr` as an immutable reference, so this
    /// pointer must be safely dereferenceable.
    ///
    /// The returned pointer is only safe to dereference within the bounds of
    /// the underlying collection.
    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target;

    /// Returns the number of elements in the collection.
    fn len(&self) -> usize;

    /// Returns `true` if the collection has no elements.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
388
/// Opt-in trait for external types to participate in [`DisjointMut`].
///
/// Implement this trait for your container type so it can be used with
/// `DisjointMut<YourType>`. The `Target` type must be `Copy` to ensure
/// data races cannot cause memory safety issues beyond producing incorrect
/// values (no torn reads on non-`Copy` types).
///
/// # Safety
///
/// Implementors must uphold all of the following:
///
/// 1. **No mutable references to the container or its data.** The
///    `as_mut_ptr` implementation must not create `&mut Self` or
///    `&mut [Self::Target]`. Creating `&mut` causes a Stacked Borrows
///    retag that invalidates concurrent borrows on other threads.
///    Use only shared references (`&Self`) or raw pointer operations.
///
/// 2. **No shared references to element data.** Even `&[Self::Target]`
///    conflicts with `&mut [Self::Target]` guards under Stacked Borrows.
///    If you need the length, read it from container metadata (which lives
///    in a separate allocation from the elements), or override
///    [`ExternalAsMutPtr::as_mut_slice`] with raw pointer metadata.
///
/// 3. **Valid pointer.** The returned `*mut Self::Target` must be valid
///    for reads and writes over `0..self.len()` elements.
///
/// 4. **Stable length.** `len()` must return a consistent value for the
///    lifetime of any outstanding borrow guard.
///
/// 5. **Inline data requires `as_mut_slice` override.** The default
///    `as_mut_slice` calls `(*ptr).len()` which creates `&Self`. For
///    types where element data is stored inline (e.g. `[V; N]` wrapped
///    in a newtype), this creates a SharedReadOnly tag covering the
///    data, which is UB under Stacked Borrows when concurrent mutable
///    guards exist. **You MUST override `as_mut_slice`** for inline-data
///    types using `ptr::slice_from_raw_parts_mut(ptr.cast(), N)`.
///
/// See the `Vec<V>` and `Aligned<A, [V; N]>` implementations in this
/// crate for reference patterns.
pub unsafe trait ExternalAsMutPtr {
    /// The element type of the container.
    type Target: Copy;

    /// Returns a mutable pointer to the first element.
    ///
    /// # Safety
    ///
    /// `ptr` must be safely dereferenceable. The implementation must not
    /// create `&mut Self` or `&mut [Self::Target]` — only shared references
    /// to container metadata or raw pointer operations. See the trait-level
    /// safety docs for full requirements.
    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target;

    /// Returns a mutable pointer to the underlying slice.
    ///
    /// For types where data lives on the heap (e.g. `Vec`-like), creating
    /// `&Self` to read `len()` is fine — `&Self` only covers the container
    /// metadata, not the heap allocation:
    ///
    /// ```ignore
    /// unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
    ///     let this = unsafe { &*ptr };
    ///     ptr::slice_from_raw_parts_mut(this.as_ptr().cast_mut(), this.len())
    /// }
    /// ```
    ///
    /// For types where data is stored **inline** (e.g. `[V; N]` in a newtype),
    /// you must avoid creating `&Self` because it produces a SharedReadOnly
    /// tag covering the element data, invalidating concurrent `&mut` guards
    /// under Stacked Borrows:
    ///
    /// ```ignore
    /// unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
    ///     ptr::slice_from_raw_parts_mut(ptr.cast(), N) // no reference created
    /// }
    /// ```
    ///
    /// # Safety
    ///
    /// `ptr` must be safely dereferenceable.
    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target];

    /// Returns the number of elements in the container.
    fn len(&self) -> usize;

    /// Returns `true` if the container has no elements.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
476
// Blanket seal for external types: anything implementing `ExternalAsMutPtr`
// is automatically `Sealed`, so the blanket `AsMutPtr` impl below applies.
impl<T: ExternalAsMutPtr> sealed::Sealed for T {}

// Blanket AsMutPtr for external types.
//
// SAFETY: `ExternalAsMutPtr`'s safety contract (no `&mut` references to
// element data, valid pointer, stable `len`) covers everything `AsMutPtr`
// requires, so pure delegation preserves the invariants.
unsafe impl<T: ExternalAsMutPtr> AsMutPtr for T {
    type Target = T::Target;

    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // SAFETY: Caller upholds the same precondition that
        // `ExternalAsMutPtr::as_mut_slice` requires: `ptr` is dereferenceable.
        unsafe { <T as ExternalAsMutPtr>::as_mut_slice(ptr) }
    }

    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target {
        // SAFETY: Same precondition as `ExternalAsMutPtr::as_mut_ptr`.
        unsafe { <T as ExternalAsMutPtr>::as_mut_ptr(ptr) }
    }

    fn len(&self) -> usize {
        <T as ExternalAsMutPtr>::len(self)
    }
}
496
497// =============================================================================
498// Core index/index_mut methods
499// =============================================================================
500
impl<T: ?Sized + AsMutPtr> DisjointMut<T> {
    /// Returns the number of elements in the underlying collection.
    pub fn len(&self) -> usize {
        // Use as_mut_slice to get a fat *mut [T] pointer and read length from
        // the fat pointer metadata. This avoids creating &T which for some
        // container types (e.g. Box<[V]>) would create &[V] to the heap data,
        // conflicting with concurrent &mut [V] guards under Stacked Borrows.
        self.as_mut_slice().len()
    }

    /// Returns `true` if the underlying collection has no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a raw pointer to the underlying element data, bypassing the tracker.
    ///
    /// # Why this exists (instead of using guard `.as_ptr()`)
    ///
    /// FFI boundaries (assembly calls, C interop) need raw pointers. Creating a
    /// tracked guard for the entire buffer would be wrong — assembly code may only
    /// touch a subset, and pointer arithmetic happens on the callee side. This
    /// method provides the base pointer for such offset calculations.
    ///
    /// Similarly, some code needs pointer identity checks (e.g. `ptr == other_ptr`)
    /// without actually borrowing data.
    ///
    /// The pointer requires `unsafe` to dereference, so the caller accepts
    /// responsibility for disjointness — same as any raw pointer in Rust.
    pub fn as_mut_slice(&self) -> *mut [<T as AsMutPtr>::Target] {
        // SAFETY: The inner cell is safe to access immutably. We never create a
        // mutable reference to the inner value.
        unsafe { AsMutPtr::as_mut_slice(self.inner.get()) }
    }

    /// Returns a raw pointer to the first element. See [`Self::as_mut_slice`] for rationale.
    pub fn as_mut_ptr(&self) -> *mut <T as AsMutPtr>::Target {
        // SAFETY: The inner cell is safe to access immutably. We never create a
        // mutable reference to the inner value.
        unsafe { AsMutPtr::as_mut_ptr(self.inner.get()) }
    }

    /// Exclusive access to the whole collection.
    ///
    /// `&mut self` statically proves no guards are outstanding, so no runtime
    /// tracking is involved.
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.get_mut()
    }

    /// Mutably borrow a slice or element.
    ///
    /// Validates that the requested range doesn't overlap with any outstanding
    /// borrow, then creates the `&mut` reference. Panics on overlap, OOB, or
    /// if the data structure has been poisoned by a prior panic.
    #[inline] // Inline to see bounds checks in order to potentially elide them.
    #[track_caller]
    pub fn index_mut<'a, I>(&'a self, index: I) -> DisjointMutGuard<'a, T, I::Output>
    where
        I: Into<Bounds> + Clone,
        I: DisjointMutIndex<[<T as AsMutPtr>::Target]>,
    {
        let bounds = index.clone().into();
        // Register the borrow BEFORE creating the reference.
        // This prevents a TOCTOU gap where two threads could both create
        // references to overlapping ranges before either registers.
        let borrow_id = match &self.tracker {
            Some(tracker) => tracker.add_mut(&bounds),
            None => checked::BorrowId::UNCHECKED,
        };
        // `parent` is `Some` iff this instance is tracked.
        let parent = self.tracker.as_ref().map(|_| self);
        // Scope guard: if get_mut panics (OOB), poison the data structure.
        // We don't try to clean up the leaked borrow — poisoning is stricter
        // and prevents all future access, following std::sync::Mutex semantics.
        let cleanup = BorrowCleanup { parent };
        // SAFETY: The borrow has been registered (or we're unchecked).
        // The indexed region is guaranteed disjoint from all other active borrows.
        let slice = unsafe { &mut *index.get_mut(self.as_mut_slice()) };
        // Success — disarm the cleanup guard.
        mem::forget(cleanup);
        DisjointMutGuard {
            slice,
            parent,
            borrow_id,
            phantom: PhantomData,
        }
    }

    /// Immutably borrow a slice or element.
    ///
    /// Validates that the requested range doesn't overlap with any outstanding
    /// mutable borrow, then creates the `&` reference. Panics on overlap, OOB,
    /// or if the data structure has been poisoned by a prior panic.
    #[inline] // Inline to see bounds checks in order to potentially elide them.
    #[track_caller]
    pub fn index<'a, I>(&'a self, index: I) -> DisjointImmutGuard<'a, T, I::Output>
    where
        I: Into<Bounds> + Clone,
        I: DisjointMutIndex<[<T as AsMutPtr>::Target]>,
    {
        let bounds = index.clone().into();
        // Register the borrow before creating the reference (see `index_mut`).
        let borrow_id = match &self.tracker {
            Some(tracker) => tracker.add_immut(&bounds),
            None => checked::BorrowId::UNCHECKED,
        };
        let parent = self.tracker.as_ref().map(|_| self);
        // Poison on panic during the indexing itself (see `index_mut`).
        let cleanup = BorrowCleanup { parent };
        // SAFETY: The borrow has been registered (or we're unchecked).
        let slice = unsafe { &*index.get_mut(self.as_mut_slice()).cast_const() };
        mem::forget(cleanup);
        DisjointImmutGuard {
            slice,
            parent,
            borrow_id,
            phantom: PhantomData,
        }
    }
}
613
614// =============================================================================
615// Zerocopy cast methods (for u8 buffers → typed access)
616// =============================================================================
617
#[cfg(feature = "zerocopy")]
impl<T: AsMutPtr<Target = u8>> DisjointMut<T> {
    /// Check that a casted slice has the expected length.
    ///
    /// `index` is the original (unscaled) index in units of `V`; the number of
    /// whole `V` elements available is the byte length divided by
    /// `size_of::<V>()`.
    #[inline]
    fn check_cast_slice_len<I, V>(&self, index: I, slice: &[V])
    where
        I: SliceBounds,
    {
        let range = index.to_range(self.len() / mem::size_of::<V>());
        let range_len = range.end - range.start;
        assert!(slice.len() == range_len);
    }

    /// Mutably borrow a slice of a convertible type.
    ///
    /// Scales `index` from `V` units to byte units, takes a byte guard, and
    /// reinterprets it as `[V]`. Panics on misalignment or length mismatch
    /// (via the cast's `unwrap` and `check_cast_slice_len`).
    #[inline]
    #[track_caller]
    pub fn mut_slice_as<'a, I, V>(&'a self, index: I) -> DisjointMutGuard<'a, T, [V]>
    where
        I: SliceBounds,
        V: IntoBytes + FromBytes + KnownLayout,
    {
        let slice = self.index_mut(index.mul(mem::size_of::<V>())).cast_slice();
        self.check_cast_slice_len(index, &slice);
        slice
    }

    /// Mutably borrow an element of a convertible type.
    #[inline]
    #[track_caller]
    pub fn mut_element_as<'a, V>(&'a self, index: usize) -> DisjointMutGuard<'a, T, V>
    where
        V: IntoBytes + FromBytes + KnownLayout,
    {
        // `index..index + 1` scaled by `size_of::<V>()` is exactly one `V`.
        self.index_mut((index..index + 1).mul(mem::size_of::<V>()))
            .cast()
    }

    /// Immutably borrow a slice of a convertible type.
    #[inline]
    #[track_caller]
    pub fn slice_as<'a, I, V>(&'a self, index: I) -> DisjointImmutGuard<'a, T, [V]>
    where
        I: SliceBounds,
        V: FromBytes + KnownLayout + Immutable,
    {
        let slice = self.index(index.mul(mem::size_of::<V>())).cast_slice();
        self.check_cast_slice_len(index, &slice);
        slice
    }

    /// Immutably borrow an element of a convertible type.
    #[inline]
    #[track_caller]
    pub fn element_as<'a, V>(&'a self, index: usize) -> DisjointImmutGuard<'a, T, V>
    where
        V: FromBytes + KnownLayout + Immutable,
    {
        self.index((index..index + 1).mul(mem::size_of::<V>()))
            .cast()
    }
}
679
680// =============================================================================
681// DisjointMutIndex trait (stable SliceIndex equivalent)
682// =============================================================================
683
/// This trait is a stable implementation of [`std::slice::SliceIndex`] to allow
/// for indexing into mutable slice raw pointers.
pub trait DisjointMutIndex<T: ?Sized> {
    /// The result of the indexing operation: a single element for `usize`
    /// indices, a slice for range indices.
    type Output: ?Sized;

    /// Returns a mutable pointer to the output at this indexed location.
    ///
    /// Implementations in this crate panic when the index is out of bounds
    /// for `slice`.
    ///
    /// # Safety
    ///
    /// `slice` must be a valid, dereferenceable pointer.
    unsafe fn get_mut(self, slice: *mut T) -> *mut Self::Output;
}
696
697// =============================================================================
698// Range translation traits
699// =============================================================================
700
/// Scales an index or range by a constant factor.
///
/// Used to translate element-typed indices into byte indices when
/// reinterpreting a byte buffer as a slice of larger elements.
pub trait TranslateRange {
    /// Returns `self` with every bound multiplied by `by`.
    fn mul(&self, by: usize) -> Self;
}

impl TranslateRange for usize {
    fn mul(&self, by: usize) -> Self {
        by * *self
    }
}

impl TranslateRange for Range<usize> {
    fn mul(&self, by: usize) -> Self {
        Range {
            start: by * self.start,
            end: by * self.end,
        }
    }
}

impl TranslateRange for RangeFrom<usize> {
    fn mul(&self, by: usize) -> Self {
        RangeFrom {
            start: by * self.start,
        }
    }
}

impl TranslateRange for RangeInclusive<usize> {
    fn mul(&self, by: usize) -> Self {
        // An inclusive end names an element, so the scaled inclusive end is
        // the *last byte* of that element: 3..=5 with by=4 covers elements
        // 3, 4, 5, i.e. bytes 12..=23 (not 12..=20).
        let start = *self.start() * by;
        let end = (*self.end() + 1) * by - 1;
        start..=end
    }
}

impl TranslateRange for RangeTo<usize> {
    fn mul(&self, by: usize) -> Self {
        RangeTo { end: by * self.end }
    }
}

impl TranslateRange for RangeToInclusive<usize> {
    fn mul(&self, by: usize) -> Self {
        // ..=5 with by=4 covers elements 0..=5, i.e. bytes 0..=23 (not 0..=20).
        RangeToInclusive {
            end: (self.end + 1) * by - 1,
        }
    }
}

impl TranslateRange for RangeFull {
    fn mul(&self, _by: usize) -> Self {
        // `..` scales to itself; both sides stay unbounded.
        RangeFull
    }
}

impl TranslateRange for (RangeFrom<usize>, RangeTo<usize>) {
    fn mul(&self, by: usize) -> Self {
        let (from, to) = self;
        (
            RangeFrom {
                start: from.start * by,
            },
            RangeTo { end: to.end * by },
        )
    }
}
756
757// =============================================================================
758// Bounds type
759// =============================================================================
760
/// A resolved byte range used by the borrow tracker to test for overlap.
#[derive(Clone, Default, PartialEq, Eq)]
pub struct Bounds {
    /// A [`Range::end`]` == `[`usize::MAX`] is considered unbounded,
    /// as lengths need to be less than [`isize::MAX`] already.
    pub(crate) range: Range<usize>,
}
767
768impl Display for Bounds {
769 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
770 let Range { start, end } = self.range;
771 if start != 0 {
772 write!(f, "{start}")?;
773 }
774 write!(f, "..")?;
775 if end != usize::MAX {
776 write!(f, "{end}")?;
777 }
778 Ok(())
779 }
780}
781
782impl Debug for Bounds {
783 fn fmt(&self, f: &mut Formatter) -> fmt::Result {
784 write!(f, "{}", self)
785 }
786}
787
788impl Bounds {
789 #[cfg(test)]
790 fn is_empty(&self) -> bool {
791 self.range.start >= self.range.end
792 }
793
794 #[cfg(test)]
795 fn overlaps(&self, other: &Bounds) -> bool {
796 // Empty ranges borrow zero bytes and never conflict.
797 if self.is_empty() || other.is_empty() {
798 return false;
799 }
800 let a = &self.range;
801 let b = &other.range;
802 a.start < b.end && b.start < a.end
803 }
804}
805
impl From<usize> for Bounds {
    /// A single index `i` borrows exactly the one element `i..i + 1`.
    fn from(index: usize) -> Self {
        Self {
            range: index..index.checked_add(1).expect("index overflow in Bounds"),
        }
    }
}

/// Converts any slice-range form into [`Bounds`].
///
/// Open-ended ranges resolve with `len = usize::MAX`, which [`Bounds`] treats
/// as the "unbounded" sentinel (see [`Bounds::range`]); real lengths are
/// bounded by `isize::MAX`, so the sentinel can't collide with a valid end.
impl<T: SliceBounds> From<T> for Bounds {
    fn from(range: T) -> Self {
        Self {
            range: range.to_range(usize::MAX),
        }
    }
}
821
822pub trait SliceBounds: TranslateRange + Clone {
823 fn to_range(&self, len: usize) -> Range<usize>;
824}
825
826impl SliceBounds for Range<usize> {
827 fn to_range(&self, _len: usize) -> Range<usize> {
828 let Self { start, end } = *self;
829 start..end
830 }
831}
832
833impl SliceBounds for RangeFrom<usize> {
834 fn to_range(&self, len: usize) -> Range<usize> {
835 let Self { start } = *self;
836 start..len
837 }
838}
839
840impl SliceBounds for RangeInclusive<usize> {
841 fn to_range(&self, _len: usize) -> Range<usize> {
842 *self.start()..self.end().checked_add(1).expect("range end overflow")
843 }
844}
845
846impl SliceBounds for RangeTo<usize> {
847 fn to_range(&self, _len: usize) -> Range<usize> {
848 let Self { end } = *self;
849 0..end
850 }
851}
852
853impl SliceBounds for RangeToInclusive<usize> {
854 fn to_range(&self, _len: usize) -> Range<usize> {
855 let Self { end } = *self;
856 0..end.checked_add(1).expect("range end overflow")
857 }
858}
859
860impl SliceBounds for RangeFull {
861 fn to_range(&self, len: usize) -> Range<usize> {
862 0..len
863 }
864}
865
866/// A majority of slice ranges are of the form `[start..][..len]`.
867/// This is easy to express with normal slices where we can do the slicing multiple times,
868/// but with [`DisjointMut`], that's harder, so this adds support for
869/// `.index((start.., ..len))` to achieve the same.
870impl SliceBounds for (RangeFrom<usize>, RangeTo<usize>) {
871 fn to_range(&self, _len: usize) -> Range<usize> {
872 let (RangeFrom { start }, RangeTo { end: range_len }) = *self;
873 start..start + range_len
874 }
875}
876
877// =============================================================================
878// DisjointMutIndex implementations
879// =============================================================================
880
impl<T> DisjointMutIndex<[T]> for usize {
    type Output = <[T] as Index<usize>>::Output;

    /// Returns a pointer to element `self`, panicking with the standard slice
    /// indexing message when `self >= slice.len()`.
    #[inline]
    #[track_caller]
    unsafe fn get_mut(self, slice: *mut [T]) -> *mut Self::Output {
        let index = self;
        // Length is read from the fat-pointer metadata; no reference created.
        let len = slice.len();
        if index < len {
            // SAFETY: We have checked that `self` is less than the allocation
            // length therefore cannot overflow.
            unsafe { (slice as *mut T).add(index) }
        } else {
            // Out-of-line cold panic path keeps the hot path small.
            #[inline(never)]
            #[track_caller]
            fn out_of_bounds(index: usize, len: usize) -> ! {
                panic!("index out of bounds: the len is {len} but the index is {index}")
            }
            out_of_bounds(index, len);
        }
    }
}
903
impl<T, I> DisjointMutIndex<[T]> for I
where
    I: SliceBounds,
{
    type Output = <[T] as Index<Range<usize>>>::Output;

    /// Resolves `self` against the slice length and returns a pointer to the
    /// subslice, panicking with `std`-style messages on an inverted or
    /// out-of-range request.
    #[inline]
    #[track_caller]
    unsafe fn get_mut(self, slice: *mut [T]) -> *mut Self::Output {
        // Length is read from the fat-pointer metadata; no reference created.
        let len = slice.len();
        let Range { start, end } = self.to_range(len);
        if start <= end && end <= len {
            // SAFETY: We have checked bounds.
            let data = unsafe { (slice as *mut T).add(start) };
            ptr::slice_from_raw_parts_mut(data, end - start)
        } else {
            // Out-of-line cold panic path keeps the hot path small.
            #[inline(never)]
            #[track_caller]
            fn out_of_bounds(start: usize, end: usize, len: usize) -> ! {
                if start > end {
                    panic!("slice index starts at {start} but ends at {end}");
                }
                if end > len {
                    panic!("range end index {end} out of range for slice of length {len}");
                }
                // One of the two conditions above must have failed.
                unreachable!();
            }
            out_of_bounds(start, end, len);
        }
    }
}
935
936// =============================================================================
937// Bounds tracking (single mutex, holds lock during reference creation)
938// =============================================================================
939
940mod checked {
941 use super::*;
942 use core::panic::Location;
943 use core::sync::atomic::{AtomicBool, Ordering};
944
945 /// Lightweight spinlock for borrow tracking.
946 ///
947 /// Uses a simple `AtomicBool::swap` for the lock — cheaper than
948 /// `compare_exchange` on the uncontended fast path because `swap`
949 /// is an unconditional store (no branch on old value). For the
950 /// single-threaded case (rav1d `threads=1`), this never spins.
951 ///
952 struct TinyLock(AtomicBool);
953
954 impl TinyLock {
955 const fn new() -> Self {
956 Self(AtomicBool::new(false))
957 }
958
959 #[inline(always)]
960 fn lock(&self) -> TinyGuard<'_> {
961 // Fast path: swap is cheaper than compare_exchange for uncontended locks.
962 // On x86_64, `xchg` is simpler than `lock cmpxchg`.
963 if self.0.swap(true, Ordering::Acquire) {
964 // Contended — spin. This should essentially never happen in rav1d.
965 self.lock_slow();
966 }
967 TinyGuard(&self.0)
968 }
969
970 #[cold]
971 #[inline(never)]
972 fn lock_slow(&self) {
973 loop {
974 // Spin-wait: read without writing to avoid cache line bouncing
975 while self.0.load(Ordering::Relaxed) {
976 core::hint::spin_loop();
977 }
978 if !self.0.swap(true, Ordering::Acquire) {
979 return;
980 }
981 }
982 }
983 }
984
985 struct TinyGuard<'a>(&'a AtomicBool);
986
987 impl<'a> Drop for TinyGuard<'a> {
988 #[inline(always)]
989 fn drop(&mut self) {
990 self.0.store(false, Ordering::Release);
991 }
992 }
993
    /// Inline capacity: 64 slots with a u64 bitmask for O(1) free-slot finding.
    const INLINE_SLOTS: usize = 64;

    /// A unique identifier for a borrow registration.
    ///
    /// Encoding:
    /// - `0..63`: inline slot index
    /// - `64..254`: overflow Vec index (value - INLINE_SLOTS)
    /// - `EMPTY_SLOT` (254): empty-range borrow, no real slot
    /// - `UNCHECKED` (255): unchecked guard, no tracking
    #[derive(Clone, Copy, PartialEq, Eq, Debug, Default)]
    pub(super) struct BorrowId(u8);

    impl BorrowId {
        /// Sentinel value for unchecked guards (no tracking).
        /// `BorrowTracker::remove` is a no-op for this id.
        pub const UNCHECKED: Self = Self(u8::MAX);
    }
1011
    /// Borrow record storage with 64 inline slots and Vec overflow.
    ///
    /// The fast path uses a u64 bitmask for O(1) allocation/deallocation.
    /// If all 64 inline slots are occupied, borrows spill into a Vec that
    /// is allocated on demand. The Vec is never touched if inline capacity
    /// suffices.
    struct BorrowSlots {
        // Parallel arrays for cache efficiency during overlap scans.
        /// Inclusive start of each inline borrow range.
        starts: [usize; INLINE_SLOTS],
        /// Exclusive end of each inline borrow range.
        ends: [usize; INLINE_SLOTS],
        /// Whether each inline borrow is mutable.
        mutable: [bool; INLINE_SLOTS],
        /// Bitmask of occupied inline slots. Bit `i` set iff slot `i` is active.
        occupied: u64,
        /// Overflow storage, allocated only when >64 concurrent borrows.
        /// Entries are `(start, end, mutable)`; a freed (tombstoned) entry
        /// has `start >= end`.
        overflow: Vec<(usize, usize, bool)>,
    }
1028
    impl BorrowSlots {
        /// Sentinel slot index for empty-range borrows (no real slot allocated).
        const EMPTY_SLOT: u8 = u8::MAX - 1; // 254

        /// All-empty state: no inline slot occupied, overflow Vec unallocated.
        const fn new() -> Self {
            Self {
                starts: [0; INLINE_SLOTS],
                ends: [0; INLINE_SLOTS],
                mutable: [false; INLINE_SLOTS],
                occupied: 0,
                overflow: Vec::new(),
            }
        }

        /// Allocate a slot and return its BorrowId encoding.
        ///
        /// Empty ranges (`start >= end`) get the `EMPTY_SLOT` sentinel and
        /// consume no storage.
        #[inline(always)]
        fn alloc(&mut self, start: usize, end: usize, is_mutable: bool) -> u8 {
            if start >= end {
                return Self::EMPTY_SLOT;
            }
            // `trailing_ones` of the occupancy mask is the index of the
            // lowest free slot (64 when every inline slot is taken).
            let free = self.occupied.trailing_ones() as usize;
            if free < INLINE_SLOTS {
                // Fast path: inline slot available.
                self.starts[free] = start;
                self.ends[free] = end;
                self.mutable[free] = is_mutable;
                self.occupied |= 1u64 << free;
                free as u8
            } else {
                // Slow path: spill to Vec.
                self.alloc_overflow(start, end, is_mutable)
            }
        }

        /// Overflow allocation — cold path, never inlined.
        #[cold]
        #[inline(never)]
        fn alloc_overflow(&mut self, start: usize, end: usize, is_mutable: bool) -> u8 {
            // Find a free slot in the overflow Vec (tombstoned entries have start >= end).
            for (i, entry) in self.overflow.iter_mut().enumerate() {
                if entry.0 >= entry.1 {
                    // Reuse tombstoned slot.
                    *entry = (start, end, is_mutable);
                    return (INLINE_SLOTS + i) as u8;
                }
            }
            let idx = self.overflow.len();
            // BorrowId is u8, with 254/255 reserved. Max overflow index:
            // INLINE_SLOTS + idx must be < 254, so idx < 254 - 64 = 190.
            assert!(
                INLINE_SLOTS + idx < Self::EMPTY_SLOT as usize,
                "DisjointMut: too many concurrent borrows (max {})",
                Self::EMPTY_SLOT as usize
            );
            self.overflow.push((start, end, is_mutable));
            (INLINE_SLOTS + idx) as u8
        }

        /// Free a slot by BorrowId encoding.
        #[inline(always)]
        fn free(&mut self, slot: u8) {
            if slot == Self::EMPTY_SLOT {
                // Empty-range borrows never allocated storage.
                return;
            }
            let idx = slot as usize;
            if idx < INLINE_SLOTS {
                debug_assert!(
                    (self.occupied & (1u64 << idx)) != 0,
                    "BorrowId slot {slot} not occupied"
                );
                // Clearing the bit suffices; the parallel arrays are
                // overwritten by the next `alloc` of this slot.
                self.occupied &= !(1u64 << idx);
            } else {
                // Overflow slot — tombstone it (set start >= end).
                let ov_idx = idx - INLINE_SLOTS;
                debug_assert!(ov_idx < self.overflow.len(), "overflow index out of range");
                self.overflow[ov_idx] = (1, 0, false); // tombstone
            }
        }

        /// Check if the range [start, end) overlaps any active borrow.
        ///
        /// Returns the first overlapping record as `(start, end, mutable)`.
        #[inline(always)]
        fn find_overlap_any(&self, start: usize, end: usize) -> Option<(usize, usize, bool)> {
            if start >= end {
                // An empty range cannot overlap anything.
                return None;
            }
            // Scan inline slots.
            let mut mask = self.occupied;
            while mask != 0 {
                let i = mask.trailing_zeros() as usize;
                // Half-open interval overlap test.
                if self.starts[i] < end && start < self.ends[i] {
                    return Some((self.starts[i], self.ends[i], self.mutable[i]));
                }
                // Clear the lowest set bit to move to the next occupied slot.
                mask &= mask - 1;
            }
            // Scan overflow (cold — only reached if overflow is non-empty).
            if !self.overflow.is_empty() {
                return self.find_overlap_any_overflow(start, end);
            }
            None
        }

        /// Overflow half of [`Self::find_overlap_any`] — cold path.
        #[cold]
        #[inline(never)]
        fn find_overlap_any_overflow(
            &self,
            start: usize,
            end: usize,
        ) -> Option<(usize, usize, bool)> {
            for &(s, e, m) in &self.overflow {
                // `s < e` filters out tombstoned entries.
                if s < e && s < end && start < e {
                    return Some((s, e, m));
                }
            }
            None
        }

        /// Check if the range [start, end) overlaps any active MUTABLE borrow.
        #[inline(always)]
        fn find_overlap_mut(&self, start: usize, end: usize) -> Option<(usize, usize, bool)> {
            if start >= end {
                return None;
            }
            let mut mask = self.occupied;
            while mask != 0 {
                let i = mask.trailing_zeros() as usize;
                if self.mutable[i] && self.starts[i] < end && start < self.ends[i] {
                    return Some((self.starts[i], self.ends[i], true));
                }
                mask &= mask - 1;
            }
            if !self.overflow.is_empty() {
                return self.find_overlap_mut_overflow(start, end);
            }
            None
        }

        /// Overflow half of [`Self::find_overlap_mut`] — cold path.
        #[cold]
        #[inline(never)]
        fn find_overlap_mut_overflow(
            &self,
            start: usize,
            end: usize,
        ) -> Option<(usize, usize, bool)> {
            for &(s, e, m) in &self.overflow {
                if m && s < e && s < end && start < e {
                    return Some((s, e, true));
                }
            }
            None
        }
    }
1180
    /// All active borrows for a single `DisjointMut` instance.
    ///
    /// Uses 64 inline slots with a u64 bitmask for O(1) allocation and
    /// deallocation. If more than 64 concurrent borrows are needed, spills
    /// to a heap-allocated Vec (up to 254 total). Overlap checking scans
    /// only occupied slots.
    ///
    /// Like `std::sync::Mutex`, the tracker poisons the data structure when a
    /// thread panics while holding a mutable borrow guard. This prevents
    /// subsequent access to potentially corrupted data.
    pub(super) struct BorrowTracker {
        /// Spinlock serializing all access to `slots`.
        lock: TinyLock,
        /// Borrow records; only touched while `lock` is held.
        slots: UnsafeCell<BorrowSlots>,
        /// Sticky flag set by `poison()`; checked before registering borrows.
        poisoned: AtomicBool,
    }

    // SAFETY: BorrowSlots is only accessed while TinyLock is held.
    unsafe impl Send for BorrowTracker {}
    unsafe impl Sync for BorrowTracker {}
1200
    impl Default for BorrowTracker {
        /// Equivalent to [`BorrowTracker::new`]: unlocked, empty, unpoisoned.
        fn default() -> Self {
            Self::new()
        }
    }
1206
    impl BorrowTracker {
        /// Create an empty, unpoisoned tracker. `const` so containing types
        /// can construct it in `const` contexts.
        pub const fn new() -> Self {
            Self {
                lock: TinyLock::new(),
                slots: UnsafeCell::new(BorrowSlots::new()),
                poisoned: AtomicBool::new(false),
            }
        }

        /// Mark this tracker as poisoned. All future borrow attempts will panic.
        pub fn poison(&self) {
            self.poisoned.store(true, Ordering::Release);
        }

        /// Panic if the tracker has been poisoned.
        #[inline(always)]
        fn check_poisoned(&self) {
            if self.poisoned.load(Ordering::Acquire) {
                Self::poisoned_panic();
            }
        }

        /// Out-of-line panic so `check_poisoned` stays tiny and inlinable.
        #[cold]
        #[inline(never)]
        fn poisoned_panic() -> ! {
            panic!("DisjointMut poisoned: a thread panicked while holding a mutable borrow");
        }

        /// Report an overlap violation with diagnostic info.
        #[cold]
        #[inline(never)]
        #[track_caller]
        fn overlap_panic(
            new_start: usize,
            new_end: usize,
            new_mutable: bool,
            existing_start: usize,
            existing_end: usize,
            existing_mutable: bool,
        ) -> ! {
            let new_mut_str = if new_mutable { "&mut" } else { " &" };
            let existing_mut_str = if existing_mutable { "&mut" } else { " &" };
            let caller = Location::caller();
            #[cfg(feature = "std")]
            {
                let thread = std::thread::current().id();
                panic!(
                    "\toverlapping DisjointMut:\n current: {new_mut_str} _[{new_start}..{new_end}] \
                    on {thread:?} at {caller}\nexisting: {existing_mut_str} _[{existing_start}..{existing_end}]",
                );
            }
            #[cfg(not(feature = "std"))]
            panic!(
                "\toverlapping DisjointMut:\n current: {new_mut_str} _[{new_start}..{new_end}] \
                at {caller}\nexisting: {existing_mut_str} _[{existing_start}..{existing_end}]",
            );
        }

        /// Register a mutable borrow. Checks against ALL existing borrows.
        ///
        /// Panics if the range overlaps any outstanding borrow. Empty ranges
        /// are never registered (they cannot alias anything).
        #[inline]
        #[track_caller]
        pub fn add_mut(&self, bounds: &Bounds) -> BorrowId {
            let start = bounds.range.start;
            let end = bounds.range.end;
            if start >= end {
                // Empty borrow: skip poison check, lock, and slot storage.
                return BorrowId(BorrowSlots::EMPTY_SLOT);
            }
            self.check_poisoned();
            let _guard = self.lock.lock();
            // SAFETY: TinyLock is held, so we have exclusive access to slots.
            let slots = unsafe { &mut *self.slots.get() };
            if let Some((es, ee, em)) = slots.find_overlap_any(start, end) {
                Self::overlap_panic(start, end, true, es, ee, em);
            }
            BorrowId(slots.alloc(start, end, true))
        }

        /// Register an immutable borrow. Only checks against mutable borrows.
        ///
        /// Shared borrows may overlap each other freely; only an outstanding
        /// mutable borrow conflicts.
        #[inline]
        #[track_caller]
        pub fn add_immut(&self, bounds: &Bounds) -> BorrowId {
            let start = bounds.range.start;
            let end = bounds.range.end;
            if start >= end {
                return BorrowId(BorrowSlots::EMPTY_SLOT);
            }
            self.check_poisoned();
            let _guard = self.lock.lock();
            // SAFETY: TinyLock is held, so we have exclusive access to slots.
            let slots = unsafe { &mut *self.slots.get() };
            if let Some((es, ee, em)) = slots.find_overlap_mut(start, end) {
                Self::overlap_panic(start, end, false, es, ee, em);
            }
            BorrowId(slots.alloc(start, end, false))
        }

        /// Remove a borrow by slot index. O(1).
        ///
        /// No-op for the `EMPTY_SLOT` and `UNCHECKED` sentinels.
        #[inline]
        pub fn remove(&self, id: BorrowId) {
            if id.0 == BorrowSlots::EMPTY_SLOT || id == BorrowId::UNCHECKED {
                return;
            }
            let _guard = self.lock.lock();
            // SAFETY: TinyLock is held, so we have exclusive access to slots.
            let slots = unsafe { &mut *self.slots.get() };
            slots.free(id.0);
        }
    }
1315}
1316
1317// =============================================================================
1318// Guard Drop impls — deregister borrow on drop
1319// =============================================================================
1320
1321impl<'a, T: ?Sized + AsMutPtr, V: ?Sized> Drop for DisjointMutGuard<'a, T, V> {
1322 fn drop(&mut self) {
1323 if let Some(parent) = self.parent {
1324 let tracker = parent.tracker.as_ref().unwrap();
1325 // If the thread is panicking while we hold a mutable guard,
1326 // the data may be partially written / inconsistent.
1327 // Poison the data structure so all future borrows fail.
1328 #[cfg(feature = "std")]
1329 if std::thread::panicking() {
1330 tracker.poison();
1331 }
1332 tracker.remove(self.borrow_id);
1333 }
1334 }
1335}
1336
1337impl<'a, T: ?Sized + AsMutPtr, V: ?Sized> Drop for DisjointImmutGuard<'a, T, V> {
1338 fn drop(&mut self) {
1339 if let Some(parent) = self.parent {
1340 parent.tracker.as_ref().unwrap().remove(self.borrow_id);
1341 }
1342 }
1343}
1344
1345// =============================================================================
1346// Generic convenience methods via traits (so external types can opt in)
1347// =============================================================================
1348
/// Trait for types that support `resize(len, value)`. Implement this for your
/// container type so that `DisjointMut<YourType>` gains a `.resize()` method.
pub trait Resizable {
    type Value;
    fn resize(&mut self, new_len: usize, value: Self::Value);
}

impl<V: Clone> Resizable for Vec<V> {
    type Value = V;

    /// Forward to the inherent [`Vec::resize`]; the fully-qualified call
    /// makes it explicit that the inherent method is meant.
    fn resize(&mut self, new_len: usize, fill: V) {
        Vec::resize(self, new_len, fill)
    }
}
1362
impl<T: AsMutPtr + Resizable> DisjointMut<T> {
    /// Resize the wrapped collection to `new_len`, filling with `value`.
    ///
    /// Takes `&mut self`, so no borrow guards can be outstanding.
    pub fn resize(&mut self, new_len: usize, value: T::Value) {
        self.inner.get_mut().resize(new_len, value)
    }
}
1368
/// Fallible version of [`Resizable`]. Returns `Err` on allocation failure.
pub trait TryResizable {
    type Value;
    fn try_resize(
        &mut self,
        new_len: usize,
        value: Self::Value,
    ) -> Result<(), alloc::collections::TryReserveError>;
}

impl<V: Clone> TryResizable for Vec<V> {
    type Value = V;

    /// Grow (or shrink) to `new_len`, fallibly reserving any additional
    /// capacity first so an allocation failure surfaces as `Err`.
    fn try_resize(
        &mut self,
        new_len: usize,
        fill: V,
    ) -> Result<(), alloc::collections::TryReserveError> {
        let additional = new_len.saturating_sub(self.len());
        if additional > 0 {
            self.try_reserve(additional)?;
        }
        // Capacity is already sufficient at this point, so the resize
        // itself performs no allocation.
        self.resize(new_len, fill);
        Ok(())
    }
}
1393
impl<T: AsMutPtr + TryResizable> DisjointMut<T> {
    /// Fallibly resize the wrapped collection to `new_len`, filling with `value`.
    ///
    /// Takes `&mut self`, so no borrow guards can be outstanding.
    ///
    /// # Errors
    ///
    /// Returns `Err` if the underlying reservation fails.
    pub fn try_resize(
        &mut self,
        new_len: usize,
        value: T::Value,
    ) -> Result<(), alloc::collections::TryReserveError> {
        self.inner.get_mut().try_resize(new_len, value)
    }
}
1403
/// Fallible version of [`ResizableWith`]. Returns `Err` on allocation failure.
pub trait TryResizableWith {
    type Item;
    fn try_resize_with<F: FnMut() -> Self::Item>(
        &mut self,
        new_len: usize,
        f: F,
    ) -> Result<(), alloc::collections::TryReserveError>;
}

impl<V> TryResizableWith for Vec<V> {
    type Item = V;

    /// Grow (or shrink) to `new_len`, fallibly reserving extra capacity
    /// before filling new slots with values produced by `make_item`.
    fn try_resize_with<F: FnMut() -> V>(
        &mut self,
        new_len: usize,
        make_item: F,
    ) -> Result<(), alloc::collections::TryReserveError> {
        let additional = new_len.saturating_sub(self.len());
        if additional > 0 {
            self.try_reserve(additional)?;
        }
        // Capacity already reserved above, so no allocation happens here.
        self.resize_with(new_len, make_item);
        Ok(())
    }
}
1428
1429impl<T: AsMutPtr + TryResizableWith> DisjointMut<T> {
1430 pub fn try_resize_with<F>(
1431 &mut self,
1432 new_len: usize,
1433 f: F,
1434 ) -> Result<(), alloc::collections::TryReserveError>
1435 where
1436 F: FnMut() -> T::Item,
1437 T: TryResizableWith,
1438 {
1439 self.inner.get_mut().try_resize_with(new_len, f)
1440 }
1441}
1442
/// Trait for types that support `clear()`.
pub trait Clearable {
    fn clear(&mut self);
}

impl<V> Clearable for Vec<V> {
    /// Forward to the inherent [`Vec::clear`], which drops all elements
    /// while keeping the allocation.
    fn clear(&mut self) {
        Vec::clear(self)
    }
}
1453
impl<T: AsMutPtr + Clearable> DisjointMut<T> {
    /// Clear the wrapped collection.
    ///
    /// Takes `&mut self`, so no borrow guards can be outstanding.
    pub fn clear(&mut self) {
        self.inner.get_mut().clear()
    }
}
1459
/// Trait for types that support `resize_with(len, f)`.
pub trait ResizableWith {
    type Item;
    fn resize_with<F: FnMut() -> Self::Item>(&mut self, new_len: usize, f: F);
}

impl<V> ResizableWith for Vec<V> {
    type Item = V;

    /// Forward to the inherent [`Vec::resize_with`].
    fn resize_with<F: FnMut() -> V>(&mut self, new_len: usize, make_item: F) {
        Vec::resize_with(self, new_len, make_item)
    }
}
1472
1473impl<T: AsMutPtr + ResizableWith> DisjointMut<T> {
1474 pub fn resize_with<F>(&mut self, new_len: usize, f: F)
1475 where
1476 F: FnMut() -> T::Item,
1477 T: ResizableWith,
1478 {
1479 self.inner.get_mut().resize_with(new_len, f)
1480 }
1481}
1482
1483// =============================================================================
1484// AsMutPtr implementations for standard types
1485// =============================================================================
1486
/// SAFETY: We only create `&Vec<V>` (shared reference), never `&mut Vec<V>`.
/// This is critical for Stacked Borrows: `&mut Vec` creates a retag-write
/// (Unique) on the Vec struct allocation, which conflicts with concurrent
/// `&Vec` reads of `len`/`as_ptr` from other threads. Using only shared
/// references avoids this data race.
///
/// The returned `*mut V` pointer retains write provenance from the original
/// allocator, not from the reference we read it through. The `UnsafeCell`
/// wrapper in `DisjointMut` provides the permission for concurrent writes
/// to the heap data.
unsafe impl<V: Copy> AsMutPtr for Vec<V> {
    type Target = V;

    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // SAFETY: Only creates &Vec (SharedReadOnly). The data pointer value
        // stored inside Vec retains its original allocator provenance.
        let vec_ref = unsafe { &*ptr };
        ptr::slice_from_raw_parts_mut(vec_ref.as_ptr().cast_mut(), vec_ref.len())
    }

    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target {
        // SAFETY: Only creates &Vec (SharedReadOnly), not &mut Vec.
        unsafe { (*ptr).as_ptr().cast_mut() }
    }

    /// Element count of the Vec (not a byte count).
    fn len(&self) -> usize {
        self.len()
    }
}
1516
/// SAFETY: Pure pointer operations only — no references created.
/// The array data is inline (same allocation as the UnsafeCell), so we
/// must not create `&[V; N]` or `&[V]` which would conflict with guards.
/// Length is the compile-time constant `N`.
unsafe impl<V: Copy, const N: usize> AsMutPtr for [V; N] {
    type Target = V;

    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // A `*mut [V; N]` addresses its first element; attach length `N`.
        ptr::slice_from_raw_parts_mut(ptr.cast::<V>(), N)
    }

    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut V {
        // Thin cast: the array's address is its first element's address.
        ptr.cast()
    }

    fn len(&self) -> usize {
        // Compile-time constant; no data access needed.
        N
    }
}
1536
/// SAFETY: Pure pointer operations only — no references created.
/// Like arrays, the slice data IS the allocation, so `&[V]` would conflict.
unsafe impl<V: Copy> AsMutPtr for [V] {
    type Target = V;

    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // *mut [V] is already the right type — just pass it through.
        ptr
    }

    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target {
        // Drop the length metadata to get a thin element pointer.
        ptr.cast()
    }

    fn len(&self) -> usize {
        self.len()
    }
}
1555
/// SAFETY: Uses `addr_of_mut!` to obtain `*mut [V]` through the raw pointer
/// chain `*mut Box<[V]>` → `*mut [V]` without creating `&[V]` or `&mut [V]`.
/// Box deref through a raw-pointer-derived place is a compiler built-in
/// operation that does not create intermediate references.
///
/// This is critical for Stacked Borrows: creating `&[V]` to the heap would
/// conflict with concurrent `&mut [V]` guards from other threads.
unsafe impl<V: Copy> AsMutPtr for Box<[V]> {
    type Target = V;

    unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [Self::Target] {
        // SAFETY: addr_of_mut! through raw pointer chain — no &[V] created.
        // Box deref from a raw-pointer place is a compiler intrinsic that
        // follows the Box's internal pointer without creating references.
        unsafe { addr_of_mut!(**ptr) }
    }

    unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut Self::Target {
        // SAFETY: Same raw pointer chain as as_mut_slice, then cast to thin pointer.
        unsafe { addr_of_mut!(**ptr) }.cast()
    }

    fn len(&self) -> usize {
        // Deref the Box to the slice and read its length.
        (**self).len()
    }
}
1582
1583// =============================================================================
1584// DisjointMutSlice and DisjointMutArcSlice
1585// =============================================================================
1586
/// `DisjointMut` always has tracking fields, so we use `Box<[T]>` as the
/// backing store for slice-based DisjointMut instances.
/// See [`DisjointMutArcSlice`] for the `Arc`-shared variant.
pub type DisjointMutSlice<T> = DisjointMut<Box<[T]>>;
1590
/// A wrapper around an [`Arc`] of a [`DisjointMut`] slice.
/// An `Arc<[_]>` can be created, but adding a [`DisjointMut`] in between
/// requires boxing since `DisjointMut` has tracking fields.
///
/// Cloning is cheap: only the `Arc` is cloned, not the slice data.
#[derive(Clone)]
pub struct DisjointMutArcSlice<T: Copy> {
    /// Use `Deref` instead: `arc_slice.index_mut(0..50)` works directly.
    #[doc(hidden)]
    pub inner: Arc<DisjointMutSlice<T>>,
}
1600
impl<T: Copy> Deref for DisjointMutArcSlice<T> {
    type Target = DisjointMutSlice<T>;

    /// Forward to the shared inner [`DisjointMutSlice`].
    #[inline]
    fn deref(&self) -> &DisjointMutSlice<T> {
        &self.inner
    }
}
1609
1610impl<T: Copy> DisjointMutArcSlice<T> {
1611 /// Create a new `DisjointMutArcSlice` with `n` elements, all set to `value`.
1612 ///
1613 /// Returns `Err` on allocation failure instead of panicking.
1614 pub fn try_new(n: usize, value: T) -> Result<Self, alloc::collections::TryReserveError> {
1615 let mut v = Vec::new();
1616 v.try_reserve(n)?;
1617 v.resize(n, value);
1618 Ok(Self {
1619 inner: Arc::new(DisjointMut::new(v.into_boxed_slice())),
1620 })
1621 }
1622
1623 /// Like [`try_new`](Self::try_new) but without borrow tracking.
1624 ///
1625 /// # Safety
1626 ///
1627 /// See [`DisjointMut::dangerously_unchecked()`].
1628 pub unsafe fn try_new_unchecked(
1629 n: usize,
1630 value: T,
1631 ) -> Result<Self, alloc::collections::TryReserveError> {
1632 let mut v = Vec::new();
1633 v.try_reserve(n)?;
1634 v.resize(n, value);
1635 Ok(Self {
1636 inner: Arc::new(unsafe { DisjointMut::dangerously_unchecked(v.into_boxed_slice()) }),
1637 })
1638 }
1639}
1640
1641impl<T: Copy> FromIterator<T> for DisjointMutArcSlice<T> {
1642 fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
1643 let box_slice = iter.into_iter().collect::<Box<[_]>>();
1644 Self {
1645 inner: Arc::new(DisjointMut::new(box_slice)),
1646 }
1647 }
1648}
1649
1650impl<T: Copy> Default for DisjointMutArcSlice<T> {
1651 fn default() -> Self {
1652 [].into_iter().collect()
1653 }
1654}
1655
1656// =============================================================================
1657// StridedBuf: raw buffer for use in DisjointMut without external unsafe
1658// =============================================================================
1659
1660#[cfg(feature = "pic-buf")]
1661mod pic_buf {
1662 use super::*;
1663
1664 /// An owned byte buffer for use in [`DisjointMut`].
1665 ///
1666 /// Stores a `Vec<u8>` with an alignment offset and usable length.
1667 /// The `Vec` owns the heap allocation, so `PicBuf` is automatically
1668 /// `Send + Sync` (no raw pointers stored, no manual impls needed).
1669 ///
1670 /// For picture data: created via [`from_vec_aligned`](Self::from_vec_aligned)
1671 /// with pool-allocated Vecs.
1672 ///
1673 /// For scratch buffers: created via [`from_slice_copy`](Self::from_slice_copy)
1674 /// which copies the data into an owned Vec.
1675 #[derive(Default)]
1676 pub struct PicBuf {
1677 buf: Vec<u8>,
1678 /// Byte offset from start of Vec to first usable byte (for alignment).
1679 align_offset: usize,
1680 /// Number of usable bytes starting from `align_offset`.
1681 usable_len: usize,
1682 }
1683
1684 // No manual Send/Sync needed — Vec<u8> and usize are both Send + Sync.
1685
1686 impl PicBuf {
1687 /// Create an owned buffer from a Vec with alignment offset.
1688 ///
1689 /// Takes ownership of the Vec. Computes the alignment offset from the
1690 /// Vec's data pointer so that the usable region starts at the next
1691 /// `alignment`-byte boundary.
1692 ///
1693 /// # Panics
1694 ///
1695 /// Panics if `align_offset + usable_len > vec.len()`.
1696 pub fn from_vec_aligned(vec: Vec<u8>, alignment: usize, usable_len: usize) -> Self {
1697 if usable_len == 0 {
1698 return Self {
1699 buf: vec,
1700 align_offset: 0,
1701 usable_len: 0,
1702 };
1703 }
1704 let align_offset = vec.as_ptr().align_offset(alignment);
1705 assert!(
1706 align_offset + usable_len <= vec.len(),
1707 "PicBuf: aligned region ({} + {}) exceeds Vec length ({})",
1708 align_offset,
1709 usable_len,
1710 vec.len()
1711 );
1712 Self {
1713 buf: vec,
1714 align_offset,
1715 usable_len,
1716 }
1717 }
1718
1719 /// Create an owned buffer by copying a byte slice.
1720 ///
1721 /// Used for scratch buffers: the data is copied into an owned Vec,
1722 /// so no raw pointers or lifetime concerns.
1723 pub fn from_slice_copy(data: &[u8]) -> Self {
1724 let vec = data.to_vec();
1725 let len = vec.len();
1726 Self {
1727 buf: vec,
1728 align_offset: 0,
1729 usable_len: len,
1730 }
1731 }
1732
1733 /// Access the usable byte region as a slice.
1734 ///
1735 /// Used to copy data back from a scratch-dst component after writing.
1736 pub fn as_usable_bytes(&self) -> &[u8] {
1737 &self.buf[self.align_offset..self.align_offset + self.usable_len]
1738 }
1739
1740 /// Take the owned Vec out of this buffer.
1741 ///
1742 /// After this call, the buffer is left in a default (empty) state.
1743 /// Used by picture data to return buffers to a memory pool on drop.
1744 pub fn take_buf(&mut self) -> Option<Vec<u8>> {
1745 if self.buf.is_empty() && self.usable_len == 0 {
1746 None
1747 } else {
1748 self.usable_len = 0;
1749 self.align_offset = 0;
1750 Some(core::mem::take(&mut self.buf))
1751 }
1752 }
1753 }
1754
    /// SAFETY: Pointer is derived from the Vec's heap allocation through a shared
    /// reference (`&PicBuf` → `&Vec<u8>` → `as_ptr()`). This avoids creating
    /// `&mut Vec` (which would cause a Unique retag under Stacked Borrows).
    /// The heap data is a separate allocation from the Vec header, so the
    /// SharedReadOnly tag on the PicBuf struct does not cover the heap bytes.
    unsafe impl AsMutPtr for PicBuf {
        type Target = u8;

        unsafe fn as_mut_ptr(ptr: *mut Self) -> *mut u8 {
            // SAFETY: SharedReadOnly reference to PicBuf. Reading Vec::as_ptr()
            // only touches the Vec header (ptr, len, cap), not the heap data.
            let this = unsafe { &*ptr };
            // `wrapping_add` keeps the offset arithmetic well-defined even
            // for an empty/default buffer.
            this.buf.as_ptr().wrapping_add(this.align_offset).cast_mut()
        }

        unsafe fn as_mut_slice(ptr: *mut Self) -> *mut [u8] {
            // SAFETY: Same shared-reference discipline as `as_mut_ptr`.
            let this = unsafe { &*ptr };
            let data_ptr = this.buf.as_ptr().wrapping_add(this.align_offset).cast_mut();
            core::ptr::slice_from_raw_parts_mut(data_ptr, this.usable_len)
        }

        fn len(&self) -> usize {
            // Only the aligned usable region is exposed, not the whole Vec.
            self.usable_len
        }
    }
1780
1781 impl sealed::Sealed for PicBuf {}
1782}
1783
1784#[cfg(feature = "pic-buf")]
1785#[doc(hidden)]
1786pub use pic_buf::PicBuf;
1787
1788// =============================================================================
1789// Tests
1790// =============================================================================
1791
#[test]
fn test_overlapping_immut() {
    // Two overlapping *shared* borrows are allowed and must observe the
    // same underlying bytes.
    let mut v: DisjointMut<Vec<u8>> = Default::default();
    v.resize(10, 0u8);

    let left = v.index(0..5);
    let right = v.index(2..);

    assert_eq!(left[2], right[0]);
}
1802
#[test]
#[should_panic]
fn test_overlapping_mut() {
    let mut v: DisjointMut<Vec<u8>> = Default::default();
    v.resize(10, 0u8);

    let shared = v.index(0..5);
    // Overlaps the outstanding shared borrow — this must panic.
    let mut exclusive = v.index_mut(2..);

    exclusive[0] = 42;
    assert_eq!(shared[2], 42);
}
1815
#[test]
fn test_range_overlap() {
    // Assert the overlap verdict for one ordered pair of bounds.
    fn check(a: impl Into<Bounds>, b: impl Into<Bounds>, expected: bool) {
        assert_eq!(a.into().overlaps(&b.into()), expected);
    }

    // Overlapping pairs, each checked in both argument orders.
    check(5..7, 4..10, true); // Range
    check(4..10, 5..7, true);
    check(5.., 4..10, true); // RangeFrom
    check(4..10, 5.., true);
    check(..7, 4..10, true); // RangeTo
    check(4..10, ..7, true);
    check(5..=7, 7..10, true); // RangeInclusive
    check(7..10, 5..=7, true);
    check(..=7, 7..10, true); // RangeToInclusive
    check(7..10, ..=7, true);

    // Disjoint pairs, each checked in both argument orders.
    check(5..7, 10..20, false); // Range
    check(10..20, 5..7, false);
    check(15.., 4..10, false); // RangeFrom
    check(4..10, 15.., false);
    check(..7, 10..20, false); // RangeTo
    check(10..20, ..7, false);
    check(5..=7, 8..10, false); // RangeInclusive
    check(8..10, 5..=7, false);
    check(..=7, 8..10, false); // RangeToInclusive
    check(8..10, ..=7, false);
}
1863}
1864
#[test]
fn test_dangerously_unchecked_skips_tracking() {
    use alloc::vec;
    // dangerously_unchecked creates an instance without borrow tracking.
    // Overlapping borrows don't panic (but would be UB in real code).
    let v = unsafe { DisjointMut::dangerously_unchecked(vec![0u8; 100]) };
    assert!(!v.is_checked());

    // This would panic on a tracked instance, but succeeds here:
    // NOTE(review): holding two overlapping mutable guards is itself UB if
    // the guards materialize `&mut` references at creation time — confirm
    // the guard implementation only holds raw pointers until dereferenced.
    let _g1 = v.index_mut(0..50);
    let _g2 = v.index_mut(25..75); // overlaps with g1 — only safe because this is a test
}
1877
#[test]
fn test_new_always_tracked() {
    use alloc::vec;
    // `DisjointMut::new` never opts out of borrow tracking.
    let tracked = DisjointMut::new(vec![0u8; 100]);
    assert!(tracked.is_checked());
}
1884
#[test]
fn test_range_inclusive_mul() {
    // Each case: (element range, scale, expected inclusive byte start/end).
    for (range, by, start, end) in [
        (3..=5usize, 4, 12, 23), // elements 3,4,5 → bytes 12..=23
        (0..=0usize, 4, 0, 3),   // element 0 → bytes 0..=3
        (0..=2usize, 1, 0, 2),   // scale 1 is the identity
    ] {
        let scaled = range.mul(by);
        assert_eq!((*scaled.start(), *scaled.end()), (start, end));
    }
}
1902
#[test]
fn test_range_to_inclusive_mul() {
    // ..=5 scaled by 4 covers elements 0..=5 → bytes 0..=23.
    assert_eq!((..=5usize).mul(4).end, 23);
    // ..=0 scaled by 4 covers element 0 → bytes 0..=3.
    assert_eq!((..=0usize).mul(4).end, 3);
}
1913
1914// NOTE: Tests for aligned/aligned-vec integration are in align.rs