// biome_rowan/arc.rs

1//! Vendored and stripped down version of triomphe
2use std::{
3    alloc::{self, Layout},
4    cmp::Ordering,
5    hash::{Hash, Hasher},
6    marker::PhantomData,
7    mem::{self, offset_of, ManuallyDrop},
8    ops::Deref,
9    ptr::{self, NonNull},
10    sync::atomic::{
11        self,
12        Ordering::{Acquire, Relaxed, Release},
13    },
14};
15
/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Exceeding this limit aborts the program (although not necessarily at
/// _exactly_ `MAX_REFCOUNT + 1` references, since the check in `clone` is
/// racy by design).
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
21
22/// The object allocated by an Arc<T>
/// The object allocated by an Arc<T>
///
/// `#[repr(C)]` fixes the field order so that `count` always precedes `data`
/// at a stable offset — `Arc::from_raw`/`Arc::into_raw` rely on this layout
/// to recover the `ArcInner` pointer from a pointer to `data`.
#[repr(C)]
pub(crate) struct ArcInner<T: ?Sized> {
    // Number of `Arc` handles sharing this allocation.
    pub(crate) count: atomic::AtomicUsize,
    // The user payload; last field so `T` may be unsized.
    pub(crate) data: T,
}
28
// SAFETY: sending or sharing an `ArcInner<T>` across threads exposes `T` to
// multiple threads at once, so both impls require `T: Send + Sync` — the same
// bounds the standard library's `Arc` uses.
unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
31
32/// An atomically reference counted shared pointer
33///
34/// See the documentation for [`Arc`] in the standard library. Unlike the
35/// standard library `Arc`, this `Arc` does not support weak reference counting.
36///
37/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
// `#[repr(transparent)]`: an `Arc<T>` is exactly one non-null pointer wide.
#[repr(transparent)]
pub(crate) struct Arc<T: ?Sized> {
    // Pointer to the shared, reference-counted allocation.
    pub(crate) p: ptr::NonNull<ArcInner<T>>,
    // Marks logical ownership of a `T` (drop check / variance).
    pub(crate) phantom: PhantomData<T>,
}
43
// SAFETY: an `Arc<T>` hands out `&T` from any thread holding a clone, so both
// impls require `T: Send + Sync`, mirroring `std::sync::Arc`.
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
46
impl<T> Arc<T> {
    /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw()
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
    ///
    /// It is recommended to use OffsetArc for this
    ///
    /// # Safety
    ///
    /// `ptr` must have been produced by [`Arc::into_raw`] — i.e. it points at
    /// the `data` field of a live `ArcInner<T>` — and the reference count that
    /// `into_raw` kept alive must not already have been reclaimed by a
    /// previous `from_raw` call.
    #[inline]
    pub(crate) unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        // (`ArcInner` is `#[repr(C)]`, so this offset is stable.)
        let ptr = (ptr as *const u8).sub(offset_of!(ArcInner<T>, data));
        Arc {
            p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>),
            phantom: PhantomData,
        }
    }
}
65
impl<T: ?Sized> Arc<T> {
    /// Borrows the shared `ArcInner` (refcount + payload).
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    // Non-inlined part of `drop`. Just invokes the destructor.
    //
    // SAFETY(caller): must be called at most once, after the refcount has
    // reached zero — it reconstructs the owning `Box` and frees the allocation.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        let _ = Box::from_raw(self.ptr());
    }

    /// Test pointer equality between the two Arcs, i.e. they must be the _same_
    /// allocation
    #[inline]
    pub(crate) fn ptr_eq(this: &Self, other: &Self) -> bool {
        // `addr_eq` compares only the addresses, ignoring any fat-pointer
        // metadata (relevant when `T` is unsized).
        std::ptr::addr_eq(this.ptr(), other.ptr())
    }

    /// Raw pointer to the start of the `ArcInner` allocation.
    pub(crate) fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }

    /// Consumes the `Arc` and returns a pointer to its `data` field (offset
    /// into the allocation, *not* the allocation start). The refcount is not
    /// decremented; pair with `from_raw` to avoid leaking.
    #[inline]
    pub(crate) fn into_raw(self) -> NonNull<T> {
        let ptr = NonNull::from(&self.inner().data);
        // Skip `Drop` so the reference we held stays owned by the raw pointer.
        mem::forget(self);
        ptr
    }
}
101
impl<T: ?Sized> Clone for Arc<T> {
    /// Makes another handle to the same allocation by bumping the refcount.
    #[inline]
    fn clone(&self) -> Self {
        // Using a relaxed ordering is alright here, as knowledge of the
        // original reference prevents other threads from erroneously deleting
        // the object.
        //
        // As explained in the [Boost documentation][1], Increasing the
        // reference counter can always be done with memory_order_relaxed: New
        // references to an object can only be formed from an existing
        // reference, and passing an existing reference from one thread to
        // another must already provide any required synchronization.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        let old_size = self.inner().count.fetch_add(1, Relaxed);

        // However we need to guard against massive refcounts in case someone
        // is `mem::forget`ing Arcs. If we don't do this the count can overflow
        // and users will use-after free. We racily saturate to `isize::MAX` on
        // the assumption that there aren't ~2 billion threads incrementing
        // the reference count at once. This branch will never be taken in
        // any realistic program.
        //
        // We abort because such a program is incredibly degenerate, and we
        // don't care to support it.
        if old_size > MAX_REFCOUNT {
            std::process::abort();
        }

        // SAFETY: `self.ptr()` comes from a live `Arc`, so it is non-null and
        // valid; the `fetch_add` above gave the new handle its own count.
        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(self.ptr()),
                phantom: PhantomData,
            }
        }
    }
}
139
140impl<T: ?Sized> Deref for Arc<T> {
141    type Target = T;
142
143    #[inline]
144    fn deref(&self) -> &T {
145        &self.inner().data
146    }
147}
148
impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
    #[inline]
    pub(crate) fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                // NOTE(review): `make_mut` is not present in this vendored,
                // stripped copy; the `Acquire` load in `is_unique` below is
                // what synchronizes with other handles' `Release` decrements
                // and makes this exclusive access sound.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?).
    pub(crate) fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1] https://github.com/servo/servo/issues/21186
        self.inner().count.load(Acquire) == 1
    }
}
171
impl<T: ?Sized> Drop for Arc<T> {
    /// Decrements the refcount; frees the allocation when it reaches zero.
    #[inline]
    fn drop(&mut self) {
        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        // `fetch_sub` returns the *previous* value, so `!= 1` means other
        // handles remain and we are done.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data.  Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        // SAFETY: the count just hit zero and the acquire load above ordered
        // all other threads' accesses before this point, so we are the sole
        // owner and may free the allocation exactly once.
        unsafe {
            self.drop_slow();
        }
    }
}
208
209impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
210    fn eq(&self, other: &Arc<T>) -> bool {
211        Self::ptr_eq(self, other) || *(*self) == *(*other)
212    }
213}
214
215impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
216    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
217        (**self).partial_cmp(&**other)
218    }
219
220    fn lt(&self, other: &Arc<T>) -> bool {
221        *(*self) < *(*other)
222    }
223
224    fn le(&self, other: &Arc<T>) -> bool {
225        *(*self) <= *(*other)
226    }
227
228    fn gt(&self, other: &Arc<T>) -> bool {
229        *(*self) > *(*other)
230    }
231
232    fn ge(&self, other: &Arc<T>) -> bool {
233        *(*self) >= *(*other)
234    }
235}
236
237impl<T: ?Sized + Ord> Ord for Arc<T> {
238    fn cmp(&self, other: &Arc<T>) -> Ordering {
239        (**self).cmp(&**other)
240    }
241}
242
// `Arc<T>` compares by payload, so equality is a total equivalence whenever
// `T: Eq`.
impl<T: ?Sized + Eq> Eq for Arc<T> {}
244
245impl<T: ?Sized + Hash> Hash for Arc<T> {
246    fn hash<H: Hasher>(&self, state: &mut H) {
247        (**self).hash(state)
248    }
249}
250
/// A header `H` followed by an inline slice of `T`s.
///
/// `#[repr(C)]` fixes the field order so that `length` sits at a stable
/// offset (readable through a thin pointer — see `thin_to_thick`) and
/// `slice` is the trailing, possibly unsized, member.
#[derive(Debug, Eq, PartialEq, Hash, PartialOrd)]
#[repr(C)]
pub(crate) struct HeaderSlice<H, T: ?Sized> {
    pub(crate) header: H,
    // Number of elements in `slice`, stored inline so the fat-pointer
    // metadata can be reconstructed from a thin pointer.
    length: usize,
    // Trailing storage: `[T; 0]` in the thin form, `[T]` in the fat form.
    slice: T,
}
258
259impl<H, T> HeaderSlice<H, [T]> {
260    pub(crate) fn slice(&self) -> &[T] {
261        &self.slice
262    }
263
264    /// Returns the number of items
265    pub(crate) fn len(&self) -> usize {
266        self.length
267    }
268}
269
impl<H, T> Deref for HeaderSlice<H, [T; 0]> {
    type Target = HeaderSlice<H, [T]>;

    /// Converts the "thin" form (`[T; 0]` tail) into the "fat" form (`[T]`
    /// tail) by re-attaching the stored `length` as pointer metadata.
    fn deref(&self) -> &Self::Target {
        // SAFETY: `self` heads an allocation that really does contain
        // `length` trailing `T`s (it is only ever created by
        // `ThinArc::from_header_and_iter`). The fake `*const [T]` exists
        // purely to carry the length metadata; casting it to the DST
        // `HeaderSlice<H, [T]>` keeps the same address and transplants that
        // metadata onto the new pointer type.
        unsafe {
            let len = self.length;
            let fake_slice: *const [T] =
                ptr::slice_from_raw_parts(self as *const _ as *const T, len);
            &*(fake_slice as *const HeaderSlice<H, [T]>)
        }
    }
}
282
283/// A "thin" `Arc` containing dynamically sized data
284///
285/// This is functionally equivalent to `Arc<(H, [T])>`
286///
287/// When you create an `Arc` containing a dynamically sized type
288/// like `HeaderSlice<H, [T]>`, the `Arc` is represented on the stack
289/// as a "fat pointer", where the length of the slice is stored
290/// alongside the `Arc`'s pointer. In some situations you may wish to
291/// have a thin pointer instead, perhaps for FFI compatibility
292/// or space efficiency.
293///
294/// Note that we use `[T; 0]` in order to have the right alignment for `T`.
295///
296/// `ThinArc` solves this by storing the length in the allocation itself,
297/// via `HeaderSlice`.
// `#[repr(transparent)]`: a `ThinArc` is exactly one (thin) pointer wide.
#[repr(transparent)]
pub(crate) struct ThinArc<H, T> {
    // Thin pointer to the allocation; the slice length is stored inside the
    // `HeaderSlice` itself instead of in pointer metadata.
    ptr: ptr::NonNull<ArcInner<HeaderSlice<H, [T; 0]>>>,
    // Marks logical ownership of `H` and `T` (drop check / variance).
    phantom: PhantomData<(H, T)>,
}
303
// SAFETY: a `ThinArc` shares `H` and `[T]` across threads exactly like the
// fat `Arc<HeaderSlice<H, [T]>>` it stands for, so both the header and the
// items must be `Send + Sync`.
unsafe impl<H: Sync + Send, T: Sync + Send> Send for ThinArc<H, T> {}
unsafe impl<H: Sync + Send, T: Sync + Send> Sync for ThinArc<H, T> {}
306
// Synthesize a fat pointer from a thin pointer.
//
// Reads the slice length stored inside the allocation and re-attaches it as
// pointer metadata, yielding a pointer to the unsized form. The address is
// unchanged; only the metadata differs.
fn thin_to_thick<H, T>(
    thin: *mut ArcInner<HeaderSlice<H, [T; 0]>>,
) -> *mut ArcInner<HeaderSlice<H, [T]>> {
    // SAFETY: callers only pass pointers to live allocations created by
    // `ThinArc::from_header_and_iter`, where `data.length` is initialized.
    let len = unsafe { (*thin).data.length };
    // The fake `*mut [T]` exists purely to carry `len` as metadata.
    let fake_slice: *mut [T] = ptr::slice_from_raw_parts_mut(thin as *mut T, len);
    // Transplants metadata.
    fake_slice as *mut ArcInner<HeaderSlice<H, [T]>>
}
316
impl<H, T> ThinArc<H, T> {
    /// Temporarily converts `self` into a bonafide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub(crate) fn with_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&Arc<HeaderSlice<H, [T]>>) -> U,
    {
        // Synthesize transient Arc, which never touches the refcount of the
        // ArcInner. `ManuallyDrop` stops the transient from decrementing the
        // count when it goes out of scope — we never owned a count for it.
        let transient = unsafe {
            ManuallyDrop::new(Arc {
                p: ptr::NonNull::new_unchecked(thin_to_thick(self.ptr.as_ptr())),
                phantom: PhantomData,
            })
        };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        // Forward the result.
        f(&transient)
    }

    /// Creates a `ThinArc` for a HeaderSlice using the given header struct and
    /// iterator to generate the slice.
    ///
    /// Allocates one buffer sized for `ArcInner` + header + length + items,
    /// then initializes it field by field with raw writes.
    pub(crate) fn from_header_and_iter<I>(header: H, mut items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        // A ZST item would make the size/offset arithmetic below degenerate.
        assert_ne!(mem::size_of::<T>(), 0, "Need to think about ZST");

        let num_items = items.len();

        // Offset of the start of the slice in the allocation.
        let inner_to_data_offset = offset_of!(ArcInner<HeaderSlice<H, [T; 0]>>, data);
        let data_to_slice_offset = offset_of!(HeaderSlice<H, [T; 0]>, slice);
        let slice_offset = inner_to_data_offset + data_to_slice_offset;

        // Compute the size of the real payload, guarding against overflow.
        let slice_size = mem::size_of::<T>()
            .checked_mul(num_items)
            .expect("size overflows");
        let usable_size = slice_offset
            .checked_add(slice_size)
            .expect("size overflows");

        // Round up size to alignment. The `wrapping_add` may wrap on
        // overflow, which the `assert!` below then catches.
        let align = mem::align_of::<ArcInner<HeaderSlice<H, [T; 0]>>>();
        let size = usable_size.wrapping_add(align - 1) & !(align - 1);
        assert!(size >= usable_size, "size overflows");
        let layout = Layout::from_size_align(size, align).expect("invalid layout");

        let ptr: *mut ArcInner<HeaderSlice<H, [T; 0]>>;
        unsafe {
            let buffer = alloc::alloc(layout);

            if buffer.is_null() {
                alloc::handle_alloc_error(layout);
            }

            // The thin pointer type `ArcInner<HeaderSlice<H, [T; 0]>>` carries
            // no length metadata, so a plain cast of the buffer suffices; the
            // trailing items are written manually through `current` below.
            ptr = buffer as *mut _;

            // The new allocation starts with a refcount of 1, owned by the
            // `ThinArc` we return.
            let count = atomic::AtomicUsize::new(1);

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            ptr::write(ptr::addr_of_mut!((*ptr).count), count);
            ptr::write(ptr::addr_of_mut!((*ptr).data.header), header);
            ptr::write(ptr::addr_of_mut!((*ptr).data.length), num_items);
            if num_items != 0 {
                // `slice` has type `[T; 0]`, so its address is where the real
                // items start.
                let mut current = ptr::addr_of_mut!((*ptr).data.slice) as *mut T;
                debug_assert_eq!(current as usize - buffer as usize, slice_offset);
                for _ in 0..num_items {
                    ptr::write(
                        current,
                        items
                            .next()
                            .expect("ExactSizeIterator over-reported length"),
                    );
                    current = current.offset(1);
                }
                assert!(
                    items.next().is_none(),
                    "ExactSizeIterator under-reported length"
                );

                // We should have consumed the buffer exactly.
                debug_assert_eq!(current as *mut u8, buffer.add(usable_size));
            }
            // NOTE(review): for `num_items != 0` this is a second `next()`
            // after exhaustion (kept from upstream); harmless for well-behaved
            // iterators, and it covers the `num_items == 0` path.
            assert!(
                items.next().is_none(),
                "ExactSizeIterator under-reported length"
            );
        }

        ThinArc {
            // SAFETY: `ptr` is the (non-null) buffer returned by `alloc`.
            ptr: unsafe { ptr::NonNull::new_unchecked(ptr) },
            phantom: PhantomData,
        }
    }
}
426
impl<H, T> Deref for ThinArc<H, T> {
    type Target = HeaderSlice<H, [T]>;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: while `self` is alive the allocation is valid, and
        // `thin_to_thick` rebuilds the correct fat pointer to it.
        unsafe { &(*thin_to_thick(self.ptr.as_ptr())).data }
    }
}
435
436impl<H, T> Clone for ThinArc<H, T> {
437    #[inline]
438    fn clone(&self) -> Self {
439        ThinArc::with_arc(self, |a| Arc::into_thin(a.clone()))
440    }
441}
442
impl<H, T> Drop for ThinArc<H, T> {
    #[inline]
    fn drop(&mut self) {
        // Rebuild a fat `Arc` over our pointer and let *its* `Drop` impl do
        // the refcount decrement (and the deallocation when the count hits
        // zero). `from_thin` consumes the temporary `ThinArc` with
        // `mem::forget`, so this `ThinArc::drop` does not recurse.
        let _ = Arc::from_thin(ThinArc {
            ptr: self.ptr,
            phantom: PhantomData,
        });
    }
}
452
impl<H, T> Arc<HeaderSlice<H, [T]>> {
    /// Converts an `Arc` into a `ThinArc`. This consumes the `Arc`, so the refcount
    /// is not modified.
    #[inline]
    pub(crate) fn into_thin(a: Self) -> ThinArc<H, T> {
        // The inline `length` must match the fat pointer's metadata, since
        // `thin_to_thick` will later rebuild the metadata from it.
        assert_eq!(
            a.length,
            a.slice.len(),
            "Length needs to be correct for ThinArc to work"
        );
        let fat_ptr: *mut ArcInner<HeaderSlice<H, [T]>> = a.ptr();
        // Skip `Drop`: the reference count now belongs to the `ThinArc`.
        mem::forget(a);
        // Discard the length metadata by casting down to a thin pointer; the
        // length stays available inside the allocation itself.
        let thin_ptr = fat_ptr as *mut [usize] as *mut usize;
        ThinArc {
            // SAFETY: `fat_ptr` came from a live `Arc`, hence is non-null.
            ptr: unsafe {
                ptr::NonNull::new_unchecked(thin_ptr as *mut ArcInner<HeaderSlice<H, [T; 0]>>)
            },
            phantom: PhantomData,
        }
    }

    /// Converts a `ThinArc` into an `Arc`. This consumes the `ThinArc`, so the refcount
    /// is not modified.
    #[inline]
    pub(crate) fn from_thin(a: ThinArc<H, T>) -> Self {
        // Re-attach the slice length as fat-pointer metadata.
        let ptr = thin_to_thick(a.ptr.as_ptr());
        // Skip `ThinArc::drop`: the reference count now belongs to the `Arc`.
        mem::forget(a);
        // SAFETY: `ptr` has the address of a live, non-null allocation.
        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(ptr),
                phantom: PhantomData,
            }
        }
    }
}
488
489impl<H: PartialEq, T: PartialEq> PartialEq for ThinArc<H, T> {
490    #[inline]
491    fn eq(&self, other: &ThinArc<H, T>) -> bool {
492        **self == **other
493    }
494}
495
// Contents-based equality is a total equivalence when header and items are `Eq`.
impl<H: Eq, T: Eq> Eq for ThinArc<H, T> {}
497
498impl<H: Hash, T: Hash> Hash for ThinArc<H, T> {
499    fn hash<HSR: Hasher>(&self, state: &mut HSR) {
500        (**self).hash(state)
501    }
502}