servo_arc/lib.rs

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Fork of Arc for Servo. This has the following advantages over std::sync::Arc:
//!
//! * We don't waste storage on the weak reference count.
//! * We don't do extra RMW (read-modify-write) operations to handle the
//!   possibility of weak references.
//! * We can experiment with arena allocation (todo).
//! * We can add methods to support our custom use cases [1].
//! * We have support for dynamically-sized types (see from_header_and_iter).
//! * We have support for thin arcs to unsized types (see ThinArc).
//! * We have support for references to static data, which don't do any
//!   refcounting.
//!
//! [1]: https://bugzilla.mozilla.org/show_bug.cgi?id=1360883
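//!
//! A minimal usage sketch (marked `ignore` for the same refcount-logging
//! reason as the doctest on `UniqueArc` below):
//!
//! ```rust,ignore
//! use servo_arc::Arc;
//!
//! let a = Arc::new(42);
//! let b = a.clone(); // bumps the refcount; no deep copy
//! assert!(Arc::ptr_eq(&a, &b));
//! assert_eq!(*b, 42);
//! ```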

// The semantics of `Arc` are already documented in the Rust docs, so we don't
// duplicate those here.
#![allow(missing_docs)]

#[cfg(feature = "servo")]
use serde::{Deserialize, Serialize};
use stable_deref_trait::{CloneStableDeref, StableDeref};
use std::alloc::{self, Layout};
use std::borrow;
use std::cmp::Ordering;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::mem::{self, align_of, size_of};
use std::ops::{Deref, DerefMut};
use std::os::raw::c_void;
use std::process;
use std::ptr;
use std::sync::atomic;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use std::{isize, usize};

/// A soft limit on the amount of references that may be made to an `Arc`.
///
/// Going above this limit will abort your program, although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;

/// Special refcount value that means the data is not reference counted,
/// and that the `Arc` is really acting as a read-only static reference.
const STATIC_REFCOUNT: usize = usize::MAX;

/// An atomically reference-counted shared pointer.
///
/// See the documentation for [`Arc`] in the standard library. Unlike the
/// standard library `Arc`, this `Arc` does not support weak reference counting.
///
/// See the discussion in https://github.com/rust-lang/rust/pull/60594 for the
/// usage of PhantomData.
///
/// [`Arc`]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
pub struct Arc<T: ?Sized> {
    p: ptr::NonNull<ArcInner<T>>,
    phantom: PhantomData<T>,
}

/// An `Arc` that is known to be uniquely owned.
///
/// When `Arc`s are constructed, they are known to be
/// uniquely owned. In such a case it is safe to mutate
/// the contents of the `Arc`. Normally, one would just handle
/// this by mutating the data on the stack before allocating the
/// `Arc`; however, it's possible the data is large or unsized
/// and you need to heap-allocate it earlier in such a way
/// that it can be freely converted into a regular `Arc` once you're
/// done.
///
/// `UniqueArc` exists for this purpose: when constructed, it performs
/// the same allocations necessary for an `Arc`, but it allows mutable access.
/// Once the mutation is finished, you can call `.shareable()` and get a regular `Arc`
/// out of it.
///
/// Ignore the doctest below; there's no way to skip building with refcount
/// logging during doc tests (see rust-lang/rust#45599).
///
/// ```rust,ignore
/// # use servo_arc::UniqueArc;
/// let data = [1, 2, 3, 4, 5];
/// let mut x = UniqueArc::new(data);
/// x[4] = 7; // mutate!
/// let y = x.shareable(); // y is an Arc<T>
/// ```
pub struct UniqueArc<T: ?Sized>(Arc<T>);

impl<T> UniqueArc<T> {
    #[inline]
    /// Construct a new UniqueArc
    pub fn new(data: T) -> Self {
        UniqueArc(Arc::new(data))
    }

    /// Construct an uninitialized arc
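    ///
    /// A sketch of the intended flow (ignored doctest, same logging caveat as
    /// the one on the type-level docs above):
    ///
    /// ```rust,ignore
    /// # use servo_arc::UniqueArc;
    /// let mut x = UniqueArc::<u32>::new_uninit();
    /// x.write(5); // `MaybeUninit::write` reached through `DerefMut`
    /// let x = unsafe { UniqueArc::assume_init(x) };
    /// assert_eq!(*x.shareable(), 5);
    /// ```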
    #[inline]
    pub fn new_uninit() -> UniqueArc<mem::MaybeUninit<T>> {
        unsafe {
            let layout = Layout::new::<ArcInner<mem::MaybeUninit<T>>>();
            let ptr = alloc::alloc(layout);
            let mut p = ptr::NonNull::new(ptr)
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<mem::MaybeUninit<T>>>();
            ptr::write(&mut p.as_mut().count, atomic::AtomicUsize::new(1));
            #[cfg(feature = "track_alloc_size")]
            ptr::write(&mut p.as_mut().alloc_size, layout.size());

            #[cfg(feature = "gecko_refcount_logging")]
            {
                NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8)
            }

            UniqueArc(Arc {
                p,
                phantom: PhantomData,
            })
        }
    }

    #[inline]
    /// Convert to a shareable Arc<T> once we're done mutating it
    pub fn shareable(self) -> Arc<T> {
        self.0
    }
}

impl<T> UniqueArc<mem::MaybeUninit<T>> {
    /// Convert to an initialized Arc.
    #[inline]
    pub unsafe fn assume_init(this: Self) -> UniqueArc<T> {
        UniqueArc(Arc {
            p: mem::ManuallyDrop::new(this).0.p.cast(),
            phantom: PhantomData,
        })
    }
}

impl<T> Deref for UniqueArc<T> {
    type Target = T;
    fn deref(&self) -> &T {
        &*self.0
    }
}

impl<T> DerefMut for UniqueArc<T> {
    fn deref_mut(&mut self) -> &mut T {
        // We know this to be uniquely owned
        unsafe { &mut (*self.0.ptr()).data }
    }
}

unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}

/// The object allocated by an `Arc<T>`.
///
/// See https://github.com/mozilla/cbindgen/issues/937 for the derive-{eq,neq}=false. But we don't
/// use those anyway, so we can just disable them.
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[repr(C)]
struct ArcInner<T: ?Sized> {
    count: atomic::AtomicUsize,
    // NOTE(emilio): This needs to be here so that HeaderSlice<> is deallocated properly if the
    // allocator relies on getting the right Layout. We don't need to track the right alignment,
    // since we know that statically.
    //
    // This member could be completely avoided once min_specialization feature is stable (by
    // implementing a trait for HeaderSlice that gives you the right layout). For now, servo-only
    // since Gecko doesn't need it (its allocator doesn't need the size for the alignments we care
    // about). See https://github.com/rust-lang/rust/issues/31844.
    #[cfg(feature = "track_alloc_size")]
    alloc_size: usize,
    data: T,
}

unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}

/// Computes the offset of the data field within ArcInner.
fn data_offset<T>() -> usize {
    let size = size_of::<ArcInner<()>>();
    let align = align_of::<T>();
    // https://github.com/rust-lang/rust/blob/1.36.0/src/libcore/alloc.rs#L187-L207
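    // This rounds the header size up to the next multiple of `align`, e.g. an
    // 8-byte header before a 16-byte-aligned `T` yields a data offset of 16.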
    size.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1)
}

impl<T> Arc<T> {
    /// Construct an `Arc<T>`
    #[inline]
    pub fn new(data: T) -> Self {
        let layout = Layout::new::<ArcInner<T>>();
        let p = unsafe {
            let ptr = ptr::NonNull::new(alloc::alloc(layout))
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<T>>();
            ptr::write(ptr.as_ptr(), ArcInner {
                count: atomic::AtomicUsize::new(1),
                #[cfg(feature = "track_alloc_size")]
                alloc_size: layout.size(),
                data,
            });
            ptr
        };

        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            // FIXME(emilio): Would be so amazing to have
            // std::intrinsics::type_name() around, so that we could also report
            // a real size.
            NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8);
        }

        Arc {
            p,
            phantom: PhantomData,
        }
    }

    /// Construct an intentionally-leaked arc.
    #[inline]
    pub fn new_leaked(data: T) -> Self {
        let arc = Self::new(data);
        arc.mark_as_intentionally_leaked();
        arc
    }

    /// Convert the Arc<T> to a raw pointer, suitable for use across FFI
    ///
    /// Note: This returns a pointer to the data T, which is offset in the allocation.
    #[inline]
    pub fn into_raw(this: Self) -> *const T {
        let ptr = unsafe { &((*this.ptr()).data) as *const _ };
        mem::forget(this);
        ptr
    }

    /// Reconstruct the Arc<T> from a raw pointer obtained from into_raw()
    ///
    /// Note: This raw pointer will be offset in the allocation and must be preceded
    /// by the atomic count.
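    ///
    /// Round-trip sketch (ignored doctest, same logging caveat as above):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// let p = Arc::into_raw(Arc::new(7u32)); // ownership moves into `p`
    /// let x = unsafe { Arc::from_raw(p) }; // and is recovered here
    /// assert_eq!(*x, 7);
    /// ```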
    #[inline]
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // To find the corresponding pointer to the `ArcInner` we need
        // to subtract the offset of the `data` field from the pointer.
        let ptr = (ptr as *const u8).sub(data_offset::<T>());
        Arc {
            p: ptr::NonNull::new_unchecked(ptr as *mut ArcInner<T>),
            phantom: PhantomData,
        }
    }

    /// Like from_raw, but returns an addrefed arc instead.
    #[inline]
    pub unsafe fn from_raw_addrefed(ptr: *const T) -> Self {
        let arc = Self::from_raw(ptr);
        mem::forget(arc.clone());
        arc
    }

    /// Create a new static Arc<T> (one that won't reference count the object)
    /// and place it in the allocation provided by the specified `alloc`
    /// function.
    ///
    /// `alloc` must return a pointer into a static allocation suitable for
    /// storing data with the `Layout` passed into it. The pointer returned by
    /// `alloc` will not be freed.
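    ///
    /// A sketch of a conforming caller (hypothetical; `SLOT` stands in for
    /// whatever suitably sized and aligned static memory the embedder provides):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// static mut SLOT: [usize; 4] = [0; 4];
    /// let arc = unsafe {
    ///     Arc::new_static(
    ///         |layout| {
    ///             assert!(layout.size() <= std::mem::size_of::<[usize; 4]>());
    ///             std::ptr::addr_of_mut!(SLOT) as *mut u8
    ///         },
    ///         42u32,
    ///     )
    /// };
    /// assert!(arc.is_static()); // never refcounted, never freed
    /// ```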
    #[inline]
    pub unsafe fn new_static<F>(alloc: F, data: T) -> Arc<T>
    where
        F: FnOnce(Layout) -> *mut u8,
    {
        let layout = Layout::new::<ArcInner<T>>();
        let ptr = alloc(layout) as *mut ArcInner<T>;

        let x = ArcInner {
            count: atomic::AtomicUsize::new(STATIC_REFCOUNT),
            #[cfg(feature = "track_alloc_size")]
            alloc_size: layout.size(),
            data,
        };

        ptr::write(ptr, x);

        Arc {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom: PhantomData,
        }
    }

    /// Produce a pointer to the data that can be converted back
    /// to an Arc. This is basically an `&Arc<T>`, without the extra indirection.
    /// It has the benefits of an `&T` but also knows about the underlying refcount
    /// and can be converted into more `Arc<T>`s if necessary.
    #[inline]
    pub fn borrow_arc<'a>(&'a self) -> ArcBorrow<'a, T> {
        ArcBorrow(&**self)
    }

    /// Returns the address on the heap of the Arc itself -- not the T within it -- for memory
    /// reporting.
    ///
    /// If this is a static reference, this returns null.
    pub fn heap_ptr(&self) -> *const c_void {
        if self.inner().count.load(Relaxed) == STATIC_REFCOUNT {
            ptr::null()
        } else {
            self.p.as_ptr() as *const ArcInner<T> as *const c_void
        }
    }
}

impl<T: ?Sized> Arc<T> {
    #[inline]
    fn inner(&self) -> &ArcInner<T> {
        // This unsafety is ok because while this arc is alive we're guaranteed
        // that the inner pointer is valid. Furthermore, we know that the
        // `ArcInner` structure itself is `Sync` because the inner data is
        // `Sync` as well, so we're ok loaning out an immutable pointer to these
        // contents.
        unsafe { &*self.ptr() }
    }

    #[inline(always)]
    fn record_drop(&self) {
        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            NS_LogDtor(self.ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8);
        }
    }

    /// Marks this `Arc` as intentionally leaked for the purposes of refcount
    /// logging.
    ///
    /// It's a logic error to call this more than once, but it's not unsafe, as
    /// it'd just report negative leaks.
    #[inline(always)]
    pub fn mark_as_intentionally_leaked(&self) {
        self.record_drop();
    }

    // Non-inlined part of `drop`. Just invokes the destructor and calls the
    // refcount logging machinery if enabled.
    #[inline(never)]
    unsafe fn drop_slow(&mut self) {
        self.record_drop();
        let inner = self.ptr();

        let layout = Layout::for_value(&*inner);
        #[cfg(feature = "track_alloc_size")]
        let layout = Layout::from_size_align_unchecked((*inner).alloc_size, layout.align());

        std::ptr::drop_in_place(inner);
        alloc::dealloc(inner as *mut _, layout);
    }

    /// Test pointer equality between the two Arcs, i.e. they must be the _same_
    /// allocation
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.raw_ptr() == other.raw_ptr()
    }

    fn ptr(&self) -> *mut ArcInner<T> {
        self.p.as_ptr()
    }

    /// Returns a raw ptr to the underlying allocation.
    pub fn raw_ptr(&self) -> ptr::NonNull<()> {
        self.p.cast()
    }
}

#[cfg(feature = "gecko_refcount_logging")]
extern "C" {
    fn NS_LogCtor(
        aPtr: *mut std::os::raw::c_void,
        aTypeName: *const std::os::raw::c_char,
        aSize: u32,
    );
    fn NS_LogDtor(
        aPtr: *mut std::os::raw::c_void,
        aTypeName: *const std::os::raw::c_char,
        aSize: u32,
    );
}

impl<T: ?Sized> Clone for Arc<T> {
    #[inline]
    fn clone(&self) -> Self {
        // NOTE(emilio): If you change anything here, make sure that the
        // implementation in layout/style/ServoStyleConstsInlines.h matches!
        //
        // Using a relaxed ordering to check for STATIC_REFCOUNT is safe, since
        // `count` never changes between STATIC_REFCOUNT and other values.
        if self.inner().count.load(Relaxed) != STATIC_REFCOUNT {
            // Using a relaxed ordering is alright here, as knowledge of the
            // original reference prevents other threads from erroneously deleting
            // the object.
            //
            // As explained in the [Boost documentation][1], increasing the
            // reference counter can always be done with memory_order_relaxed: new
            // references to an object can only be formed from an existing
            // reference, and passing an existing reference from one thread to
            // another must already provide any required synchronization.
            //
            // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
            let old_size = self.inner().count.fetch_add(1, Relaxed);

            // However we need to guard against massive refcounts in case someone
            // is `mem::forget`ing Arcs. If we don't do this the count can overflow
            // and users will use-after-free. We racily saturate to `isize::MAX` on
            // the assumption that there aren't ~2 billion threads incrementing
            // the reference count at once. This branch will never be taken in
            // any realistic program.
            //
            // We abort because such a program is incredibly degenerate, and we
            // don't care to support it.
            if old_size > MAX_REFCOUNT {
                process::abort();
            }
        }

        unsafe {
            Arc {
                p: ptr::NonNull::new_unchecked(self.ptr()),
                phantom: PhantomData,
            }
        }
    }
}

impl<T: ?Sized> Deref for Arc<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        &self.inner().data
    }
}

impl<T: Clone> Arc<T> {
    /// Makes a mutable reference to the contents of the `Arc`, cloning if
    /// necessary.
    ///
    /// This is functionally equivalent to [`Arc::make_mut`][mm] from the standard library.
    ///
    /// If this `Arc` is uniquely owned, `make_mut()` will provide a mutable
    /// reference to the contents. If not, `make_mut()` will create a _new_ `Arc`
    /// with a copy of the contents, update `this` to point to it, and provide
    /// a mutable reference to its contents.
    ///
    /// This is useful for implementing copy-on-write schemes where you wish to
    /// avoid copying things if your `Arc` is not shared.
    ///
    /// [mm]: https://doc.rust-lang.org/stable/std/sync/struct.Arc.html#method.make_mut
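    ///
    /// A copy-on-write sketch (ignored doctest, same logging caveat as above):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// let mut a = Arc::new(vec![1, 2]);
    /// let b = a.clone();
    /// Arc::make_mut(&mut a).push(3); // `a` is shared, so this clones first
    /// assert_eq!(*a, vec![1, 2, 3]);
    /// assert_eq!(*b, vec![1, 2]); // `b` still sees the original data
    /// ```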
    #[inline]
    pub fn make_mut(this: &mut Self) -> &mut T {
        if !this.is_unique() {
            // Another pointer exists; clone
            *this = Arc::new((**this).clone());
        }

        unsafe {
            // This unsafety is ok because we're guaranteed that the pointer
            // returned is the *only* pointer that will ever be returned to T. Our
            // reference count is guaranteed to be 1 at this point, and we required
            // the Arc itself to be `mut`, so we're returning the only possible
            // reference to the inner data.
            &mut (*this.ptr()).data
        }
    }
}

impl<T: ?Sized> Arc<T> {
    /// Provides mutable access to the contents _if_ the `Arc` is uniquely owned.
    #[inline]
    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
        if this.is_unique() {
            unsafe {
                // See make_mut() for documentation of the threadsafety here.
                Some(&mut (*this.ptr()).data)
            }
        } else {
            None
        }
    }

    /// Whether or not the `Arc` is a static reference.
    #[inline]
    pub fn is_static(&self) -> bool {
        // Using a relaxed ordering to check for STATIC_REFCOUNT is safe, since
        // `count` never changes between STATIC_REFCOUNT and other values.
        self.inner().count.load(Relaxed) == STATIC_REFCOUNT
    }

    /// Whether or not the `Arc` is uniquely owned (is the refcount 1?) and not
    /// a static reference.
    #[inline]
    pub fn is_unique(&self) -> bool {
        // See the extensive discussion in [1] for why this needs to be Acquire.
        //
        // [1] https://github.com/servo/servo/issues/21186
        self.inner().count.load(Acquire) == 1
    }
}

impl<T: ?Sized> Drop for Arc<T> {
    #[inline]
    fn drop(&mut self) {
        // NOTE(emilio): If you change anything here, make sure that the
        // implementation in layout/style/ServoStyleConstsInlines.h matches!
        if self.is_static() {
            return;
        }

        // Because `fetch_sub` is already atomic, we do not need to synchronize
        // with other threads unless we are going to delete the object.
        if self.inner().count.fetch_sub(1, Release) != 1 {
            return;
        }

        // FIXME(bholley): Use the updated comment when [2] is merged.
        //
        // This load is needed to prevent reordering of use of the data and
        // deletion of the data.  Because it is marked `Release`, the decreasing
        // of the reference count synchronizes with this `Acquire` load. This
        // means that use of the data happens before decreasing the reference
        // count, which happens before this load, which happens before the
        // deletion of the data.
        //
        // As explained in the [Boost documentation][1],
        //
        // > It is important to enforce any possible access to the object in one
        // > thread (through an existing reference) to *happen before* deleting
        // > the object in a different thread. This is achieved by a "release"
        // > operation after dropping a reference (any access to the object
        // > through this reference must obviously happened before), and an
        // > "acquire" operation before deleting the object.
        //
        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
        // [2]: https://github.com/rust-lang/rust/pull/41714
        self.inner().count.load(Acquire);

        unsafe {
            self.drop_slow();
        }
    }
}

impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
    fn eq(&self, other: &Arc<T>) -> bool {
        Self::ptr_eq(self, other) || *(*self) == *(*other)
    }

    fn ne(&self, other: &Arc<T>) -> bool {
        !Self::ptr_eq(self, other) && *(*self) != *(*other)
    }
}

impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
    fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }

    fn lt(&self, other: &Arc<T>) -> bool {
        *(*self) < *(*other)
    }

    fn le(&self, other: &Arc<T>) -> bool {
        *(*self) <= *(*other)
    }

    fn gt(&self, other: &Arc<T>) -> bool {
        *(*self) > *(*other)
    }

    fn ge(&self, other: &Arc<T>) -> bool {
        *(*self) >= *(*other)
    }
}

impl<T: ?Sized + Ord> Ord for Arc<T> {
    fn cmp(&self, other: &Arc<T>) -> Ordering {
        (**self).cmp(&**other)
    }
}

impl<T: ?Sized + Eq> Eq for Arc<T> {}

impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<T: ?Sized> fmt::Pointer for Arc<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.ptr(), f)
    }
}

impl<T: Default> Default for Arc<T> {
    fn default() -> Arc<T> {
        Arc::new(Default::default())
    }
}

impl<T: ?Sized + Hash> Hash for Arc<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}

impl<T> From<T> for Arc<T> {
    #[inline]
    fn from(t: T) -> Self {
        Arc::new(t)
    }
}

impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
    #[inline]
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T: ?Sized> AsRef<T> for Arc<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        &**self
    }
}

unsafe impl<T: ?Sized> StableDeref for Arc<T> {}
unsafe impl<T: ?Sized> CloneStableDeref for Arc<T> {}

#[cfg(feature = "servo")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Arc<T> {
    fn deserialize<D>(deserializer: D) -> Result<Arc<T>, D::Error>
    where
        D: ::serde::de::Deserializer<'de>,
    {
        T::deserialize(deserializer).map(Arc::new)
    }
}

#[cfg(feature = "servo")]
impl<T: Serialize> Serialize for Arc<T> {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: ::serde::ser::Serializer,
    {
        (**self).serialize(serializer)
    }
}

/// Structure to allow Arc-managing some fixed-sized data and a variably-sized
/// slice in a single allocation.
///
/// cbindgen:derive-eq=false
/// cbindgen:derive-neq=false
#[derive(Eq)]
#[repr(C)]
pub struct HeaderSlice<H, T> {
    /// The fixed-sized data.
    pub header: H,

    /// The length of the slice that trails this header.
    len: usize,

    /// The dynamically-sized data.
    data: [T; 0],
}

impl<H: PartialEq, T: PartialEq> PartialEq for HeaderSlice<H, T> {
    fn eq(&self, other: &Self) -> bool {
        self.header == other.header && self.slice() == other.slice()
    }
}

impl<H, T> Drop for HeaderSlice<H, T> {
    fn drop(&mut self) {
        unsafe {
            let mut ptr = self.data_mut();
            for _ in 0..self.len {
                std::ptr::drop_in_place(ptr);
                ptr = ptr.offset(1);
            }
        }
    }
}

impl<H: fmt::Debug, T: fmt::Debug> fmt::Debug for HeaderSlice<H, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("HeaderSlice")
            .field("header", &self.header)
            .field("slice", &self.slice())
            .finish()
    }
}

impl<H, T> HeaderSlice<H, T> {
    /// Returns the dynamically sized slice in this HeaderSlice.
    #[inline(always)]
    pub fn slice(&self) -> &[T] {
        unsafe { std::slice::from_raw_parts(self.data(), self.len) }
    }

    #[inline(always)]
    fn data(&self) -> *const T {
        std::ptr::addr_of!(self.data) as *const _
    }

    #[inline(always)]
    fn data_mut(&mut self) -> *mut T {
        std::ptr::addr_of_mut!(self.data) as *mut _
    }

    /// Returns the dynamically sized slice in this HeaderSlice, mutably.
    #[inline(always)]
    pub fn slice_mut(&mut self) -> &mut [T] {
        unsafe { std::slice::from_raw_parts_mut(self.data_mut(), self.len) }
    }

    /// Returns the length of the slice.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len
    }
}

impl<H, T> Arc<HeaderSlice<H, T>> {
    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice.
    ///
    /// `is_static` indicates whether to create a static Arc.
    ///
    /// `alloc` is used to get a pointer to the memory into which the
    /// dynamically sized ArcInner<HeaderSlice<H, T>> value will be
    /// written.  If `is_static` is true, then `alloc` must return a
    /// pointer into some static memory allocation.  If it is false,
    /// then `alloc` must return an allocation that can be deallocated
    /// by calling Box::from_raw::<ArcInner<HeaderSlice<H, T>>> on it.
    #[inline]
    pub fn from_header_and_iter_alloc<F, I>(
        alloc: F,
        header: H,
        mut items: I,
        num_items: usize,
        is_static: bool,
    ) -> Self
    where
        F: FnOnce(Layout) -> *mut u8,
        I: Iterator<Item = T>,
    {
        assert_ne!(size_of::<T>(), 0, "Need to think about ZST");

        let layout = Layout::new::<ArcInner<HeaderSlice<H, T>>>();
        debug_assert!(layout.align() >= align_of::<T>());
        debug_assert!(layout.align() >= align_of::<usize>());
        let array_layout = Layout::array::<T>(num_items).expect("Overflow");
        let (layout, _offset) = layout.extend(array_layout).expect("Overflow");
        let p = unsafe {
            // Allocate the buffer.
            let buffer = alloc(layout);
            let mut p = ptr::NonNull::new(buffer)
                .unwrap_or_else(|| alloc::handle_alloc_error(layout))
                .cast::<ArcInner<HeaderSlice<H, T>>>();

            // Write the data.
            //
            // Note that any panics here (i.e. from the iterator) are safe, since
            // we'll just leak the uninitialized memory.
            let count = if is_static {
                atomic::AtomicUsize::new(STATIC_REFCOUNT)
            } else {
                atomic::AtomicUsize::new(1)
            };
            ptr::write(&mut p.as_mut().count, count);
            #[cfg(feature = "track_alloc_size")]
            ptr::write(&mut p.as_mut().alloc_size, layout.size());
            ptr::write(&mut p.as_mut().data.header, header);
            ptr::write(&mut p.as_mut().data.len, num_items);
            if num_items != 0 {
                let mut current = std::ptr::addr_of_mut!(p.as_mut().data.data) as *mut T;
                for _ in 0..num_items {
                    ptr::write(
                        current,
                        items
                            .next()
                            .expect("ExactSizeIterator over-reported length"),
                    );
                    current = current.offset(1);
                }
                // We should have consumed the buffer exactly, maybe accounting
                // for some padding from the alignment.
                debug_assert!(
                    (buffer.add(layout.size()) as usize - current as *mut u8 as usize) < layout.align()
                );
            }
            assert!(
                items.next().is_none(),
                "ExactSizeIterator under-reported length"
            );
            p
        };
        #[cfg(feature = "gecko_refcount_logging")]
        unsafe {
            if !is_static {
                // FIXME(emilio): Would be so amazing to have
                // std::intrinsics::type_name() around.
                NS_LogCtor(p.as_ptr() as *mut _, b"ServoArc\0".as_ptr() as *const _, 8)
            }
        }

        // Return the thin Arc.
        assert_eq!(
            size_of::<Self>(),
            size_of::<usize>(),
            "The Arc should be thin"
        );

        Arc {
            p,
            phantom: PhantomData,
        }
    }

    /// Creates an Arc for a HeaderSlice using the given header struct and iterator to generate the
    /// slice. Panics if num_items doesn't match the number of items.
    #[inline]
    pub fn from_header_and_iter_with_size<I>(header: H, items: I, num_items: usize) -> Self
    where
        I: Iterator<Item = T>,
    {
        Arc::from_header_and_iter_alloc(
            |layout| unsafe { alloc::alloc(layout) },
            header,
            items,
            num_items,
            /* is_static = */ false,
        )
    }

    /// Creates an Arc for a HeaderSlice using the given header struct and
    /// iterator to generate the slice, taking the length from the
    /// `ExactSizeIterator`.
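    ///
    /// For example (ignored doctest, same logging caveat as above):
    ///
    /// ```rust,ignore
    /// # use servo_arc::Arc;
    /// let x = Arc::from_header_and_iter(100u32, 0..3);
    /// assert_eq!(x.header, 100);
    /// assert_eq!(x.slice(), &[0, 1, 2]);
    /// ```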
    #[inline]
    pub fn from_header_and_iter<I>(header: H, items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        let len = items.len();
        Self::from_header_and_iter_with_size(header, items, len)
    }
}

/// This is functionally equivalent to `Arc<(H, [T])>`.
///
/// When you create an `Arc` containing a dynamically sized type like a slice, the `Arc` is
/// represented on the stack as a "fat pointer", where the length of the slice is stored alongside
/// the `Arc`'s pointer. In some situations you may wish to have a thin pointer instead, perhaps
/// for FFI compatibility or space efficiency. `ThinArc` solves this by storing the length in the
/// allocation itself, via `HeaderSlice`.
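///
/// The handle is one pointer wide, as the construction path asserts (a sketch,
/// ignored doctest):
///
/// ```rust,ignore
/// # use servo_arc::ThinArc;
/// assert_eq!(
///     std::mem::size_of::<ThinArc<u32, u8>>(),
///     std::mem::size_of::<usize>(),
/// );
/// ```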
pub type ThinArc<H, T> = Arc<HeaderSlice<H, T>>;

/// See `ArcUnion`. This is a version that works for `ThinArc`s.
pub type ThinArcUnion<H1, T1, H2, T2> = ArcUnion<HeaderSlice<H1, T1>, HeaderSlice<H2, T2>>;

impl<H, T> UniqueArc<HeaderSlice<H, T>> {
    #[inline]
    pub fn from_header_and_iter<I>(header: H, items: I) -> Self
    where
        I: Iterator<Item = T> + ExactSizeIterator,
    {
        Self(Arc::from_header_and_iter(header, items))
    }

    #[inline]
    pub fn from_header_and_iter_with_size<I>(header: H, items: I, num_items: usize) -> Self
    where
        I: Iterator<Item = T>,
    {
        Self(Arc::from_header_and_iter_with_size(
            header, items, num_items,
        ))
    }

    /// Returns a mutable reference to the header.
    pub fn header_mut(&mut self) -> &mut H {
        // We know this to be uniquely owned
        unsafe { &mut (*self.0.ptr()).data.header }
    }

    /// Returns a mutable reference to the slice.
    pub fn data_mut(&mut self) -> &mut [T] {
        // We know this to be uniquely owned
        unsafe { (*self.0.ptr()).data.slice_mut() }
    }
}

/// A "borrowed `Arc`". This is a pointer to
/// a T that is known to have been allocated within an
/// `Arc`.
///
/// This is equivalent in guarantees to `&Arc<T>`; however, it is
/// a bit more flexible. To obtain an `&Arc<T>` you must have
/// an `Arc<T>` instance somewhere pinned down until we're done with it.
/// It's also a direct pointer to `T`, so using this involves less pointer-chasing.
///
/// However, C++ code may hand us refcounted things as pointers to T directly,
/// so we have to conjure up a temporary `Arc` on the stack each time.
///
/// `ArcBorrow` lets us deal with borrows of known-refcounted objects
/// without needing to worry about where the `Arc<T>` is.
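///
/// A sketch of the intended use (ignored doctest; `read_it` is a hypothetical
/// callee):
///
/// ```rust,ignore
/// # use servo_arc::{Arc, ArcBorrow};
/// fn read_it(data: ArcBorrow<u32>) {
///     // Reads like an `&u32`; call `clone_arc()` to keep the data alive.
///     assert_eq!(*data, 42);
/// }
/// let x = Arc::new(42);
/// read_it(x.borrow_arc());
/// ```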
#[derive(Debug, Eq, PartialEq)]
pub struct ArcBorrow<'a, T: 'a>(&'a T);

impl<'a, T> Copy for ArcBorrow<'a, T> {}
impl<'a, T> Clone for ArcBorrow<'a, T> {
    #[inline]
    fn clone(&self) -> Self {
        *self
    }
}

impl<'a, T> ArcBorrow<'a, T> {
    /// Clone this as an `Arc<T>`. This bumps the refcount.
    #[inline]
    pub fn clone_arc(&self) -> Arc<T> {
        let arc = unsafe { Arc::from_raw(self.0) };
        // addref it!
        mem::forget(arc.clone());
        arc
    }

    /// For constructing from a reference known to be Arc-backed,
    /// e.g. if we obtain such a reference over FFI.
    #[inline]
    pub unsafe fn from_ref(r: &'a T) -> Self {
        ArcBorrow(r)
    }

    /// Compare two `ArcBorrow`s via pointer equality. Will only return
    /// true if they come from the same allocation.
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.0 as *const T == other.0 as *const T
    }

    /// Temporarily converts |self| into a bona fide Arc and exposes it to the
    /// provided callback. The refcount is not modified.
    #[inline]
    pub fn with_arc<F, U>(&self, f: F) -> U
    where
        F: FnOnce(&Arc<T>) -> U,
        T: 'static,
    {
        // Synthesize transient Arc, which never touches the refcount.
        let transient = unsafe { mem::ManuallyDrop::new(Arc::from_raw(self.0)) };

        // Expose the transient Arc to the callback, which may clone it if it wants.
        let result = f(&transient);

        // Forward the result.
        result
    }

    /// Similar to deref, but uses the lifetime `'a` rather than the lifetime of
    /// self, which is incompatible with the signature of the Deref trait.
    #[inline]
    pub fn get(&self) -> &'a T {
        self.0
    }
}

impl<'a, T> Deref for ArcBorrow<'a, T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        self.0
    }
}

/// A tagged union that can represent `Arc<A>` or `Arc<B>` while only consuming a
/// single word. The type is also `NonNull`, and thus can be stored in an Option
/// without increasing size.
///
/// This is functionally equivalent to
/// `enum ArcUnion<A, B> { First(Arc<A>), Second(Arc<B>) }`, but only takes
/// up a single word of stack space.
///
/// This could probably be extended to support four types if necessary.
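///
/// A usage sketch (ignored doctest, same logging caveat as above):
///
/// ```rust,ignore
/// # use servo_arc::{Arc, ArcUnion};
/// let u: ArcUnion<u32, f32> = ArcUnion::from_first(Arc::new(7));
/// assert!(u.is_first());
/// assert_eq!(*u.as_first().unwrap(), 7);
/// ```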
pub struct ArcUnion<A, B> {
    p: ptr::NonNull<()>,
    phantom_a: PhantomData<A>,
    phantom_b: PhantomData<B>,
}

unsafe impl<A: Sync + Send, B: Send + Sync> Send for ArcUnion<A, B> {}
unsafe impl<A: Sync + Send, B: Send + Sync> Sync for ArcUnion<A, B> {}

impl<A: PartialEq, B: PartialEq> PartialEq for ArcUnion<A, B> {
    fn eq(&self, other: &Self) -> bool {
        use crate::ArcUnionBorrow::*;
        match (self.borrow(), other.borrow()) {
            (First(x), First(y)) => x == y,
            (Second(x), Second(y)) => x == y,
            (_, _) => false,
        }
    }
}

impl<A: Eq, B: Eq> Eq for ArcUnion<A, B> {}

/// This represents a borrow of an `ArcUnion`.
#[derive(Debug)]
pub enum ArcUnionBorrow<'a, A: 'a, B: 'a> {
    First(ArcBorrow<'a, A>),
    Second(ArcBorrow<'a, B>),
}

impl<A, B> ArcUnion<A, B> {
    unsafe fn new(ptr: *mut ()) -> Self {
        ArcUnion {
            p: ptr::NonNull::new_unchecked(ptr),
            phantom_a: PhantomData,
            phantom_b: PhantomData,
        }
    }

    /// Returns true if the two values are pointer-equal.
    #[inline]
    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
        this.p == other.p
    }

    #[inline]
    pub fn ptr(&self) -> ptr::NonNull<()> {
        self.p
    }

    /// Returns an enum representing a borrow of either A or B.
    #[inline]
    pub fn borrow(&self) -> ArcUnionBorrow<A, B> {
        if self.is_first() {
            let ptr = self.p.as_ptr() as *const ArcInner<A>;
            let borrow = unsafe { ArcBorrow::from_ref(&(*ptr).data) };
            ArcUnionBorrow::First(borrow)
        } else {
            let ptr = ((self.p.as_ptr() as usize) & !0x1) as *const ArcInner<B>;
            let borrow = unsafe { ArcBorrow::from_ref(&(*ptr).data) };
            ArcUnionBorrow::Second(borrow)
        }
    }

    /// Creates an `ArcUnion` from an instance of the first type.
    pub fn from_first(other: Arc<A>) -> Self {
        let union = unsafe { Self::new(other.ptr() as *mut _) };
        mem::forget(other);
        union
    }

    /// Creates an `ArcUnion` from an instance of the second type.
    pub fn from_second(other: Arc<B>) -> Self {
        let union = unsafe { Self::new(((other.ptr() as usize) | 0x1) as *mut _) };
        mem::forget(other);
        union
    }

    /// Returns true if this `ArcUnion` contains the first type.
    pub fn is_first(&self) -> bool {
        self.p.as_ptr() as usize & 0x1 == 0
    }

    /// Returns true if this `ArcUnion` contains the second type.
    pub fn is_second(&self) -> bool {
        !self.is_first()
    }

    /// Returns a borrow of the first type if applicable, otherwise `None`.
    pub fn as_first(&self) -> Option<ArcBorrow<A>> {
        match self.borrow() {
            ArcUnionBorrow::First(x) => Some(x),
            ArcUnionBorrow::Second(_) => None,
        }
    }

    /// Returns a borrow of the second type if applicable, otherwise `None`.
    pub fn as_second(&self) -> Option<ArcBorrow<B>> {
        match self.borrow() {
            ArcUnionBorrow::First(_) => None,
            ArcUnionBorrow::Second(x) => Some(x),
        }
    }
}

impl<A, B> Clone for ArcUnion<A, B> {
    fn clone(&self) -> Self {
        match self.borrow() {
            ArcUnionBorrow::First(x) => ArcUnion::from_first(x.clone_arc()),
            ArcUnionBorrow::Second(x) => ArcUnion::from_second(x.clone_arc()),
        }
    }
}

impl<A, B> Drop for ArcUnion<A, B> {
    fn drop(&mut self) {
        match self.borrow() {
            ArcUnionBorrow::First(x) => unsafe {
                let _ = Arc::from_raw(&*x);
            },
            ArcUnionBorrow::Second(x) => unsafe {
                let _ = Arc::from_raw(&*x);
            },
        }
    }
}

impl<A: fmt::Debug, B: fmt::Debug> fmt::Debug for ArcUnion<A, B> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.borrow(), f)
    }
}

#[cfg(test)]
mod tests {
    use super::{Arc, ThinArc};
    use std::clone::Clone;
    use std::ops::Drop;
    use std::sync::atomic;
    use std::sync::atomic::Ordering::{Acquire, SeqCst};

    #[derive(PartialEq)]
    struct Canary(*mut atomic::AtomicUsize);

    impl Drop for Canary {
        fn drop(&mut self) {
            unsafe {
                (*self.0).fetch_add(1, SeqCst);
            }
        }
    }

    #[test]
    fn empty_thin() {
        let x = Arc::from_header_and_iter(100u32, std::iter::empty::<i32>());
        assert_eq!(x.header, 100);
        assert!(x.slice().is_empty());
    }

    #[test]
    fn thin_assert_padding() {
        #[derive(Clone, Default)]
        #[repr(C)]
        struct Padded {
            i: u16,
        }

        // The header will have more alignment than `Padded`
        let items = vec![Padded { i: 0xdead }, Padded { i: 0xbeef }];
        let a = ThinArc::from_header_and_iter(0i32, items.into_iter());
        assert_eq!(a.len(), 2);
        assert_eq!(a.slice()[0].i, 0xdead);
        assert_eq!(a.slice()[1].i, 0xbeef);
    }

    #[test]
    fn slices_and_thin() {
        let mut canary = atomic::AtomicUsize::new(0);
        let c = Canary(&mut canary as *mut atomic::AtomicUsize);
        let v = vec![5, 6];
        {
            let x = Arc::from_header_and_iter(c, v.into_iter());
            let _ = x.clone();
            let _ = x == x;
        }
        assert_eq!(canary.load(Acquire), 1);
    }
}