portable_atomic_util/arc.rs

1// SPDX-License-Identifier: Apache-2.0 OR MIT
2
3// This module is based on alloc::sync::Arc.
4//
5// The code has been adjusted to work with stable Rust (and optionally support some unstable features).
6//
7// Source: https://github.com/rust-lang/rust/blob/1.93.0/library/alloc/src/sync.rs
8//
9// Copyright & License of the original code:
10// - https://github.com/rust-lang/rust/blob/1.93.0/COPYRIGHT
11// - https://github.com/rust-lang/rust/blob/1.93.0/LICENSE-APACHE
12// - https://github.com/rust-lang/rust/blob/1.93.0/LICENSE-MIT
13
14#![allow(clippy::must_use_candidate)] // align to alloc::sync::Arc
15#![allow(clippy::undocumented_unsafe_blocks)] // TODO: most of the unsafe code was inherited from alloc::sync::Arc
16
17use alloc::{
18    alloc::handle_alloc_error,
19    borrow::{Cow, ToOwned},
20    boxed::Box,
21};
22#[cfg(not(portable_atomic_no_maybe_uninit))]
23use alloc::{string::String, vec::Vec};
24#[cfg(not(portable_atomic_no_min_const_generics))]
25use core::convert::TryFrom;
26use core::{
27    alloc::Layout,
28    any::Any,
29    borrow,
30    cmp::Ordering,
31    fmt,
32    hash::{Hash, Hasher},
33    isize,
34    marker::PhantomData,
35    mem::{self, ManuallyDrop},
36    ops::Deref,
37    pin::Pin,
38    ptr::{self, NonNull},
39    usize,
40};
41#[cfg(not(portable_atomic_no_maybe_uninit))]
42use core::{iter::FromIterator, slice};
43#[cfg(portable_atomic_unstable_coerce_unsized)]
44use core::{marker::Unsize, ops::CoerceUnsized};
45
46use portable_atomic::{
47    self as atomic,
48    Ordering::{Acquire, Relaxed, Release},
49    hint,
50};
51
52use crate::utils::ptr as strict;
53#[cfg(portable_atomic_no_strict_provenance)]
54use crate::utils::ptr::PtrExt as _;
55
56/// A soft limit on the number of references that may be made to an `Arc`.
57///
58/// Going above this limit will abort your program, although not
59/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
60/// Trying to go above it may instead `panic` (without actually exceeding it).
61///
62/// This is a global invariant, and also applies when using a compare-exchange loop.
63///
64/// See comment in `Arc::clone`.
65const MAX_REFCOUNT: usize = isize::MAX as usize;
66
67/// The error message used when either counter goes above `MAX_REFCOUNT` in a place where we can `panic` safely.
68const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
69
70#[cfg(not(portable_atomic_sanitize_thread))]
71macro_rules! acquire {
72    ($x:expr) => {
73        atomic::fence(Acquire)
74    };
75}
76
77// ThreadSanitizer does not support memory fences. To avoid false positive
78// reports in Arc / Weak implementation use atomic loads for synchronization
79// instead.
80#[cfg(portable_atomic_sanitize_thread)]
81macro_rules! acquire {
82    ($x:expr) => {
83        $x.load(Acquire)
84    };
85}
86
87/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
88/// Reference Counted'.
89///
90/// This is an equivalent to [`std::sync::Arc`], but using [portable-atomic] for synchronization.
91/// See the documentation for [`std::sync::Arc`] for more details.
92///
93/// **Note:** Unlike `std::sync::Arc`, coercing `Arc<T>` to `Arc<U>` is only possible if
94/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled, as documented in the crate-level documentation,
95/// and that optional cfg is only supported on nightly Rust.
96/// This is because coercing the pointee requires the
97/// [unstable `CoerceUnsized` trait](https://doc.rust-lang.org/nightly/core/ops/trait.CoerceUnsized.html).
98/// See [this issue comment](https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569)
99/// for a workaround that works without depending on unstable features.
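///
/// As a sketch of one pattern that works on stable Rust (the `Animal` trait below is purely
/// illustrative and is not necessarily the approach from the linked comment): unsize through
/// `Box` first, then convert via the `From<Box<T>>` implementation.
///
/// ```
/// use portable_atomic_util::Arc;
///
/// trait Animal {}
/// struct Dog;
/// impl Animal for Dog {}
///
/// // Unsizing happens in the `Box` cast; `Arc::from` then converts the box into an `Arc`.
/// let animal: Arc<dyn Animal> = Arc::from(Box::new(Dog) as Box<dyn Animal>);
/// # let _ = animal;
/// ```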
100///
101/// [portable-atomic]: https://crates.io/crates/portable-atomic
102///
103/// # Examples
104///
105/// ```
106/// use portable_atomic_util::Arc;
107/// use std::thread;
108///
109/// let five = Arc::new(5);
110///
111/// for _ in 0..10 {
112///     let five = Arc::clone(&five);
113///
114///     thread::spawn(move || {
115///         assert_eq!(*five, 5);
116///     });
117/// }
118/// # if cfg!(miri) { std::thread::sleep(std::time::Duration::from_millis(500)); } // wait for background threads closed: https://github.com/rust-lang/miri/issues/1371
119/// ```
120pub struct Arc<T: ?Sized> {
121    ptr: NonNull<ArcInner<T>>,
122    phantom: PhantomData<ArcInner<T>>,
123}
124
125unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
126unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
127
128#[cfg(not(portable_atomic_no_core_unwind_safe))]
129impl<T: ?Sized + core::panic::RefUnwindSafe> core::panic::UnwindSafe for Arc<T> {}
130#[cfg(all(portable_atomic_no_core_unwind_safe, feature = "std"))]
131impl<T: ?Sized + std::panic::RefUnwindSafe> std::panic::UnwindSafe for Arc<T> {}
132
133#[cfg(portable_atomic_unstable_coerce_unsized)]
134impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
135
136impl<T: ?Sized> Arc<T> {
137    #[inline]
138    fn into_inner_non_null(this: Self) -> NonNull<ArcInner<T>> {
139        let this = mem::ManuallyDrop::new(this);
140        this.ptr
141    }
142
143    #[inline]
144    unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
145        Self { ptr, phantom: PhantomData }
146    }
147
148    #[inline]
149    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
150        // SAFETY: the caller must uphold the safety contract.
151        unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
152    }
153}
154
155/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
156/// managed allocation.
157///
158/// The allocation is accessed by calling [`upgrade`] on the `Weak`
159/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
160///
161/// This is an equivalent to [`std::sync::Weak`], but using [portable-atomic] for synchronization.
162/// See the documentation for [`std::sync::Weak`] for more details.
163///
164/// <!-- TODO: support coercing `Weak<T>` to `Weak<U>` with testing, if optional cfg `portable_atomic_unstable_coerce_unsized` is enabled -->
165/// **Note:** Unlike `std::sync::Weak`, coercing `Weak<T>` to `Weak<U>` is not possible, not even if
166/// the optional cfg `portable_atomic_unstable_coerce_unsized` is enabled.
167///
168/// [`upgrade`]: Weak::upgrade
169/// [portable-atomic]: https://crates.io/crates/portable-atomic
170///
171/// # Examples
172///
173/// ```
174/// use portable_atomic_util::Arc;
175/// use std::thread;
176///
177/// let five = Arc::new(5);
178/// let weak_five = Arc::downgrade(&five);
179///
180/// # let t =
181/// thread::spawn(move || {
182///     let five = weak_five.upgrade().unwrap();
183///     assert_eq!(*five, 5);
184/// });
185/// # t.join().unwrap(); // join thread to avoid https://github.com/rust-lang/miri/issues/1371
186/// ```
187pub struct Weak<T: ?Sized> {
188    // This is a `NonNull` to allow optimizing the size of this type in enums,
189    // but it is not necessarily a valid pointer.
190    // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
191    // to allocate space on the heap. That's not a value a real pointer
192    // will ever have because ArcInner has alignment at least 2.
193    ptr: NonNull<ArcInner<T>>,
194}
195
196unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
197unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
198
199impl<T: ?Sized> fmt::Debug for Weak<T> {
200    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
201        f.write_str("(Weak)")
202    }
203}
204
205// This is repr(C) to future-proof against possible field-reordering, which
206// would interfere with otherwise safe [into|from]_raw() of transmutable
207// inner types.
208// Unlike RcInner, repr(align(2)) is not strictly required because atomic types
209// have alignment equal to their size, but we use it for consistency and clarity.
210#[repr(C, align(2))]
211struct ArcInner<T: ?Sized> {
212    strong: atomic::AtomicUsize,
213
214    // the value usize::MAX acts as a sentinel for temporarily "locking" the
215    // ability to upgrade weak pointers or downgrade strong ones; this is used
216    // to avoid races in `make_mut` and `get_mut`.
217    weak: atomic::AtomicUsize,
218
219    data: T,
220}
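
// Count invariants, for illustration: a fresh `Arc::new(x)` starts with `strong == 1` and
// `weak == 1` (the implicit weak reference collectively held by all strong pointers);
// `Arc::downgrade` bumps `weak` to 2, and `weak == usize::MAX` means a thread holding a
// strong reference has temporarily locked the weak count (see `get_mut`/`make_mut`).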
221
222/// Calculate layout for `ArcInner<T>` using the inner value's layout
223fn arc_inner_layout_for_value_layout(layout: Layout) -> Layout {
224    // Calculate layout using the given value layout.
225    // Previously, layout was calculated on the expression
226    // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
227    // reference (see #54908).
228    layout::pad_to_align(layout::extend(Layout::new::<ArcInner<()>>(), layout).unwrap().0)
229}
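
// Worked example (assuming a typical 64-bit target): for `T = u32`, `Layout::new::<ArcInner<()>>()`
// is 16 bytes with alignment 8 (the two counters); extending it by `u32` places the value at
// offset 16 (size 20), and padding to the alignment gives a final layout of 24 bytes, align 8.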
230
231unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
232unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
233
234impl<T> Arc<T> {
235    /// Constructs a new `Arc<T>`.
236    ///
237    /// # Examples
238    ///
239    /// ```
240    /// use portable_atomic_util::Arc;
241    ///
242    /// let five = Arc::new(5);
243    /// ```
244    #[inline]
245    pub fn new(data: T) -> Self {
246        // Start the weak pointer count at 1; this is the implicit weak pointer
247        // collectively held by all the strong pointers (see std/rc.rs for more info).
248        let x: Box<_> = Box::new(ArcInner {
249            strong: atomic::AtomicUsize::new(1),
250            weak: atomic::AtomicUsize::new(1),
251            data,
252        });
253        unsafe { Self::from_inner(Box::leak(x).into()) }
254    }
255
256    /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
257    /// to allow you to construct a `T` which holds a weak pointer to itself.
258    ///
259    /// Generally, a structure circularly referencing itself, either directly or
260    /// indirectly, should not hold a strong reference to itself to prevent a memory leak.
261    /// Using this function, you get access to the weak pointer during the
262    /// initialization of `T`, before the `Arc<T>` is created, such that you can
263    /// clone and store it inside the `T`.
264    ///
265    /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
266    /// then calls your closure, giving it a `Weak<T>` to this allocation,
267    /// and only afterwards completes the construction of the `Arc<T>` by placing
268    /// the `T` returned from your closure into the allocation.
269    ///
270    /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
271    /// returns, calling [`upgrade`] on the weak reference inside your closure will
272    /// fail and result in a `None` value.
273    ///
274    /// # Panics
275    ///
276    /// If `data_fn` panics, the panic is propagated to the caller, and the
277    /// temporary [`Weak<T>`] is dropped normally.
278    ///
279    /// # Example
280    ///
281    /// ```
282    /// use portable_atomic_util::{Arc, Weak};
283    ///
284    /// struct Gadget {
285    ///     me: Weak<Gadget>,
286    /// }
287    ///
288    /// impl Gadget {
289    ///     /// Constructs a reference counted Gadget.
290    ///     fn new() -> Arc<Self> {
291    ///         // `me` is a `Weak<Gadget>` pointing at the new allocation of the
292    ///         // `Arc` we're constructing.
293    ///         Arc::new_cyclic(|me| {
294    ///             // Create the actual struct here.
295    ///             Gadget { me: me.clone() }
296    ///         })
297    ///     }
298    ///
299    ///     /// Returns a reference counted pointer to Self.
300    ///     fn me(&self) -> Arc<Self> {
301    ///         self.me.upgrade().unwrap()
302    ///     }
303    /// }
304    /// ```
305    /// [`upgrade`]: Weak::upgrade
306    #[inline]
307    pub fn new_cyclic<F>(data_fn: F) -> Self
308    where
309        F: FnOnce(&Weak<T>) -> T,
310    {
311        // Construct the inner in the "uninitialized" state with a single
312        // weak reference.
313        let init_ptr = Weak::new_uninit_ptr();
314
315        let weak = Weak { ptr: init_ptr };
316
317        // It's important we don't give up ownership of the weak pointer, or
318        // else the memory might be freed by the time `data_fn` returns. If
319        // we really wanted to pass ownership, we could create an additional
320        // weak pointer for ourselves, but this would result in additional
321        // updates to the weak reference count which might not be necessary
322        // otherwise.
323        let data = data_fn(&weak);
324
325        // Now we can properly initialize the inner value and turn our weak
326        // reference into a strong reference.
327        unsafe {
328            let inner = init_ptr.as_ptr();
329            ptr::write(data_ptr::<T>(inner, &data), data);
330
331            // The above write to the data field must be visible to any threads which
332            // observe a non-zero strong count. Therefore we need at least "Release" ordering
333            // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
334            //
335            // "Acquire" ordering is not required. When considering the possible behaviors
336            // of `data_fn` we only need to look at what it could do with a reference to a
337            // non-upgradeable `Weak`:
338            // - It can *clone* the `Weak`, increasing the weak reference count.
339            // - It can drop those clones, decreasing the weak reference count (but never to zero).
340            //
341            // These side effects do not impact us in any way, and no other side effects are
342            // possible with safe code alone.
343            let prev_value = (*inner).strong.fetch_add(1, Release);
344            debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
345
346            // Strong references should collectively own a shared weak reference,
347            // so don't run the destructor for our old weak reference.
348            mem::forget(weak);
349
350            Self::from_inner(init_ptr)
351        }
352    }
353
354    /// Constructs a new `Arc` with uninitialized contents.
355    ///
356    /// # Examples
357    ///
358    /// ```
359    /// use portable_atomic_util::Arc;
360    ///
361    /// let mut five = Arc::<u32>::new_uninit();
362    ///
363    /// // Deferred initialization:
364    /// Arc::get_mut(&mut five).unwrap().write(5);
365    ///
366    /// let five = unsafe { five.assume_init() };
367    ///
368    /// assert_eq!(*five, 5)
369    /// ```
370    #[cfg(not(portable_atomic_no_maybe_uninit))]
371    #[inline]
372    #[must_use]
373    pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
374        unsafe {
375            Arc::from_ptr(Arc::allocate_for_layout(
376                Layout::new::<T>(),
377                |layout| Global.allocate(layout),
378                |ptr| ptr as *mut _,
379            ))
380        }
381    }
382
383    /// Constructs a new `Arc` with uninitialized contents, with the memory
384    /// being filled with `0` bytes.
385    ///
386    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
387    /// of this method.
388    ///
389    /// # Examples
390    ///
391    /// ```
392    /// use portable_atomic_util::Arc;
393    ///
394    /// let zero = Arc::<u32>::new_zeroed();
395    /// let zero = unsafe { zero.assume_init() };
396    ///
397    /// assert_eq!(*zero, 0)
398    /// ```
399    ///
400    /// [zeroed]: mem::MaybeUninit::zeroed
401    #[cfg(not(portable_atomic_no_maybe_uninit))]
402    #[inline]
403    #[must_use]
404    pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
405        unsafe {
406            Arc::from_ptr(Arc::allocate_for_layout(
407                Layout::new::<T>(),
408                |layout| Global.allocate_zeroed(layout),
409                |ptr| ptr as *mut _,
410            ))
411        }
412    }
413
414    /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
415    /// `data` will be pinned in memory and unable to be moved.
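    ///
    /// # Examples
    ///
    /// A minimal sketch; the pinned value can still be read through `Deref`:
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let pinned = Arc::pin(5);
    /// assert_eq!(*pinned, 5);
    /// ```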
416    #[must_use]
417    pub fn pin(data: T) -> Pin<Self> {
418        unsafe { Pin::new_unchecked(Self::new(data)) }
419    }
420
421    /// Returns the inner value, if the `Arc` has exactly one strong reference.
422    ///
423    /// Otherwise, an [`Err`] is returned with the same `Arc` that was
424    /// passed in.
425    ///
426    /// This will succeed even if there are outstanding weak references.
427    ///
428    /// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
429    /// keep the `Arc` in the [`Err`] case.
430    /// Immediately dropping the [`Err`]-value, as the expression
431    /// `Arc::try_unwrap(this).ok()` does, can cause the strong count to
432    /// drop to zero and the inner value of the `Arc` to be dropped.
433    /// For instance, if two threads execute such an expression in parallel,
434    /// there is a race condition without the possibility of unsafety:
435    /// The threads could first both check whether they own the last instance
436    /// in `Arc::try_unwrap`, determine that they both do not, and then both
437    /// discard and drop their instance in the call to [`ok`][`Result::ok`].
438    /// In this scenario, the value inside the `Arc` is safely destroyed
439    /// by exactly one of the threads, but neither thread will ever be able
440    /// to use the value.
441    ///
442    /// # Examples
443    ///
444    /// ```
445    /// use portable_atomic_util::Arc;
446    ///
447    /// let x = Arc::new(3);
448    /// assert_eq!(Arc::try_unwrap(x), Ok(3));
449    ///
450    /// let x = Arc::new(4);
451    /// let _y = Arc::clone(&x);
452    /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
453    /// ```
454    #[inline]
455    pub fn try_unwrap(this: Self) -> Result<T, Self> {
456        if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
457            return Err(this);
458        }
459
460        acquire!(this.inner().strong);
461
462        let this = ManuallyDrop::new(this);
463        let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
464
465        // Make a weak pointer to clean up the implicit strong-weak reference
466        let _weak = Weak { ptr: this.ptr };
467
468        Ok(elem)
469    }
470
471    /// Returns the inner value, if the `Arc` has exactly one strong reference.
472    ///
473    /// Otherwise, [`None`] is returned and the `Arc` is dropped.
474    ///
475    /// This will succeed even if there are outstanding weak references.
476    ///
477    /// If `Arc::into_inner` is called on every clone of this `Arc`,
478    /// it is guaranteed that exactly one of the calls returns the inner value.
479    /// This means in particular that the inner value is not dropped.
480    ///
481    /// [`Arc::try_unwrap`] is conceptually similar to `Arc::into_inner`, but it
482    /// is meant for different use-cases. If used as a direct replacement
483    /// for `Arc::into_inner` anyway, such as with the expression
484    /// <code>[Arc::try_unwrap]\(this).[ok][Result::ok]()</code>, then it does
485    /// **not** give the same guarantee as described in the previous paragraph.
486    /// For more information, see the examples below and read the documentation
487    /// of [`Arc::try_unwrap`].
488    ///
489    /// # Examples
490    ///
491    /// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
492    ///
493    /// ```
494    /// use portable_atomic_util::Arc;
495    ///
496    /// let x = Arc::new(3);
497    /// let y = Arc::clone(&x);
498    ///
499    /// // Two threads calling `Arc::into_inner` on both clones of an `Arc`:
500    /// let x_thread = std::thread::spawn(|| Arc::into_inner(x));
501    /// let y_thread = std::thread::spawn(|| Arc::into_inner(y));
502    ///
503    /// let x_inner_value = x_thread.join().unwrap();
504    /// let y_inner_value = y_thread.join().unwrap();
505    ///
506    /// // One of the threads is guaranteed to receive the inner value:
507    /// assert!(matches!((x_inner_value, y_inner_value), (None, Some(3)) | (Some(3), None)));
508    /// // The result could also be `(None, None)` if the threads called
509    /// // `Arc::try_unwrap(x).ok()` and `Arc::try_unwrap(y).ok()` instead.
510    /// ```
511    ///
512    /// A more practical example demonstrating the need for `Arc::into_inner`:
513    /// ```
514    /// use portable_atomic_util::Arc;
515    ///
516    /// // Definition of a simple singly linked list using `Arc`:
517    /// #[derive(Clone)]
518    /// struct LinkedList<T>(Option<Arc<Node<T>>>);
519    /// struct Node<T>(T, Option<Arc<Node<T>>>);
520    ///
521    /// // Dropping a long `LinkedList<T>` relying on the destructor of `Arc`
522    /// // can cause a stack overflow. To prevent this, we can provide a
523    /// // manual `Drop` implementation that does the destruction in a loop:
524    /// impl<T> Drop for LinkedList<T> {
525    ///     fn drop(&mut self) {
526    ///         let mut link = self.0.take();
527    ///         while let Some(arc_node) = link.take() {
528    ///             if let Some(Node(_value, next)) = Arc::into_inner(arc_node) {
529    ///                 link = next;
530    ///             }
531    ///         }
532    ///     }
533    /// }
534    ///
535    /// // Implementation of `new` and `push` omitted
536    /// impl<T> LinkedList<T> {
537    ///     /* ... */
538    /// #   fn new() -> Self {
539    /// #       LinkedList(None)
540    /// #   }
541    /// #   fn push(&mut self, x: T) {
542    /// #       self.0 = Some(Arc::new(Node(x, self.0.take())));
543    /// #   }
544    /// }
545    ///
546    /// // The following code could have still caused a stack overflow
547    /// // despite the manual `Drop` impl if that `Drop` impl had used
548    /// // `Arc::try_unwrap(arc).ok()` instead of `Arc::into_inner(arc)`.
549    ///
550    /// // Create a long list and clone it
551    /// let mut x = LinkedList::new();
552    /// let size = 100000;
553    /// # let size = if cfg!(miri) { 100 } else { size };
554    /// for i in 0..size {
555    ///     x.push(i); // Adds i to the front of x
556    /// }
557    /// let y = x.clone();
558    ///
559    /// // Drop the clones in parallel
560    /// let x_thread = std::thread::spawn(|| drop(x));
561    /// let y_thread = std::thread::spawn(|| drop(y));
562    /// x_thread.join().unwrap();
563    /// y_thread.join().unwrap();
564    /// ```
565    #[inline]
566    pub fn into_inner(this: Self) -> Option<T> {
567        // Make sure that the ordinary `Drop` implementation isn’t called as well
568        let mut this = mem::ManuallyDrop::new(this);
569
570        // Following the implementation of `drop` and `drop_slow`
571        if this.inner().strong.fetch_sub(1, Release) != 1 {
572            return None;
573        }
574
575        acquire!(this.inner().strong);
576
577        // SAFETY: This mirrors the line
578        //
579        //     unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
580        //
581        // in `drop_slow`. Instead of dropping the value behind the pointer,
582        // it is read and eventually returned; `ptr::read` has the same
583        // safety conditions as `ptr::drop_in_place`.
584        let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
585
586        drop(Weak { ptr: this.ptr });
587
588        Some(inner)
589    }
590}
591
592#[cfg(not(portable_atomic_no_maybe_uninit))]
593impl<T> Arc<[T]> {
594    /// Constructs a new atomically reference-counted slice with uninitialized contents.
595    ///
596    /// # Examples
597    ///
598    /// ```
599    /// use portable_atomic_util::Arc;
600    ///
601    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
602    ///
603    /// // Deferred initialization:
604    /// let data = Arc::get_mut(&mut values).unwrap();
605    /// data[0].write(1);
606    /// data[1].write(2);
607    /// data[2].write(3);
608    ///
609    /// let values = unsafe { values.assume_init() };
610    ///
611    /// assert_eq!(*values, [1, 2, 3])
612    /// ```
613    #[inline]
614    #[must_use]
615    pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
616        unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
617    }
618
619    /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
620    /// filled with `0` bytes.
621    ///
622    /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
623    /// incorrect usage of this method.
624    ///
625    /// # Examples
626    ///
627    /// ```
628    /// use portable_atomic_util::Arc;
629    ///
630    /// let values = Arc::<[u32]>::new_zeroed_slice(3);
631    /// let values = unsafe { values.assume_init() };
632    ///
633    /// assert_eq!(*values, [0, 0, 0])
634    /// ```
635    ///
636    /// [zeroed]: mem::MaybeUninit::zeroed
637    #[inline]
638    #[must_use]
639    #[allow(clippy::missing_panics_doc)]
640    pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
641        unsafe {
642            Arc::from_ptr(Arc::allocate_for_layout(
643                layout::array::<T>(len).unwrap(),
644                |layout| Global.allocate_zeroed(layout),
645                |mem| {
646                    // We create a slice just for its metadata (we must use `[mem::MaybeUninit<T>]`
647                    // instead of `[T]` because the values behind `mem` are not valid, initialized `T`s;
648                    // there is no size/alignment issue thanks to layout::array), and create a
649                    // pointer from `mem` and the slice's metadata.
650                    //
651                    // We cannot use other approaches here:
652                    // - ptr::slice_from_raw_parts_mut would be the best way, but it requires Rust 1.42.
653                    // - We cannot use slice::from_raw_parts_mut and then cast its pointer to
654                    //   ArcInner, due to provenance: the actual size of the valid allocation
655                    //   behind `mem` is `layout.size()` bytes (counters + values + padding), but the
656                    //   pointer returned by slice::from_raw_parts_mut is only valid for
657                    //   `size_of::<T> * len` bytes (the values only).
658                    let meta: *const _ =
659                        slice::from_raw_parts(mem as *const mem::MaybeUninit<T>, len);
660                    strict::with_metadata_of(mem, meta) as *mut ArcInner<[mem::MaybeUninit<T>]>
661                },
662            ))
663        }
664    }
665}
666
667#[cfg(not(portable_atomic_no_maybe_uninit))]
668impl<T> Arc<mem::MaybeUninit<T>> {
669    /// Converts to `Arc<T>`.
670    ///
671    /// # Safety
672    ///
673    /// As with [`MaybeUninit::assume_init`],
674    /// it is up to the caller to guarantee that the inner value
675    /// really is in an initialized state.
676    /// Calling this when the content is not yet fully initialized
677    /// causes immediate undefined behavior.
678    ///
679    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
680    ///
681    /// # Examples
682    ///
683    /// ```
684    /// use portable_atomic_util::Arc;
685    ///
686    /// let mut five = Arc::<u32>::new_uninit();
687    ///
688    /// // Deferred initialization:
689    /// Arc::get_mut(&mut five).unwrap().write(5);
690    ///
691    /// let five = unsafe { five.assume_init() };
692    ///
693    /// assert_eq!(*five, 5)
694    /// ```
695    #[inline]
696    #[must_use = "`self` will be dropped if the result is not used"]
697    pub unsafe fn assume_init(self) -> Arc<T> {
698        let ptr = Arc::into_inner_non_null(self);
699        // SAFETY: MaybeUninit<T> has the same layout as T, and
700        // the caller must guarantee that the data is initialized.
701        unsafe { Arc::from_inner(ptr.cast::<ArcInner<T>>()) }
702    }
703}
704
705impl<T: ?Sized + CloneToUninit> Arc<T> {
706    fn clone_from_ref(value: &T) -> Self {
707        // `in_progress` drops the allocation if we panic before finishing initializing it.
708        let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(value);
709
710        // Initialize with clone of value.
711        unsafe {
712            // Clone. If the clone panics, `in_progress` will be dropped and clean up.
713            value.clone_to_uninit(in_progress.data_ptr() as *mut u8);
714            // Cast type of pointer, now that it is initialized.
715            in_progress.into_arc()
716        }
717    }
718}
719
720#[cfg(not(portable_atomic_no_maybe_uninit))]
721impl<T> Arc<[mem::MaybeUninit<T>]> {
722    /// Converts to `Arc<[T]>`.
723    ///
724    /// # Safety
725    ///
726    /// As with [`MaybeUninit::assume_init`],
727    /// it is up to the caller to guarantee that the inner value
728    /// really is in an initialized state.
729    /// Calling this when the content is not yet fully initialized
730    /// causes immediate undefined behavior.
731    ///
732    /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
733    ///
734    /// # Examples
735    ///
736    /// ```
737    /// use portable_atomic_util::Arc;
738    ///
739    /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
740    ///
741    /// // Deferred initialization:
742    /// let data = Arc::get_mut(&mut values).unwrap();
743    /// data[0].write(1);
744    /// data[1].write(2);
745    /// data[2].write(3);
746    ///
747    /// let values = unsafe { values.assume_init() };
748    ///
749    /// assert_eq!(*values, [1, 2, 3])
750    /// ```
751    #[inline]
752    #[must_use = "`self` will be dropped if the result is not used"]
753    pub unsafe fn assume_init(self) -> Arc<[T]> {
754        let ptr = Arc::into_inner_non_null(self);
755        // SAFETY: [MaybeUninit<T>] has the same layout as [T], and
756        // the caller must guarantee that the data is initialized.
757        unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<[T]>) }
758    }
759}
760
761impl<T: ?Sized> Arc<T> {
762    /// Constructs an `Arc<T>` from a raw pointer.
763    ///
764    /// # Safety
765    ///
766    /// The raw pointer must have been previously returned by a call to
767    /// [`Arc<U>::into_raw`][into_raw] with the following requirements:
768    ///
769    /// * If `U` is sized, it must have the same size and alignment as `T`. This
770    ///   is trivially true if `U` is `T`.
771    /// * If `U` is unsized, its data pointer must have the same size and
772    ///   alignment as `T`. This is trivially true if `Arc<U>` was constructed
773    ///   through `Arc<T>` and then converted to `Arc<U>` through an [unsized
774    ///   coercion].
775    ///
776    /// Note that if `U` or `U`'s data pointer is not `T` but has the same size
777    /// and alignment, this is basically like transmuting references of
778    /// different types. See [`mem::transmute`] for more information
779    /// on what restrictions apply in this case.
780    ///
781    /// The raw pointer must point to a block of memory allocated by the global allocator.
782    ///
783    /// The user of `from_raw` has to make sure a specific value of `T` is only
784    /// dropped once.
785    ///
786    /// This function is unsafe because improper use may lead to memory unsafety,
787    /// even if the returned `Arc<T>` is never accessed.
788    ///
789    /// [into_raw]: Arc::into_raw
790    /// [unsized coercion]: https://doc.rust-lang.org/reference/type-coercions.html#unsized-coercions
791    ///
792    /// # Examples
793    ///
794    /// ```
795    /// use portable_atomic_util::Arc;
796    ///
797    /// let x = Arc::new("hello".to_owned());
798    /// let x_ptr = Arc::into_raw(x);
799    ///
800    /// unsafe {
801    ///     // Convert back to an `Arc` to prevent leak.
802    ///     let x = Arc::from_raw(x_ptr);
803    ///     assert_eq!(&*x, "hello");
804    ///
805    ///     // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
806    /// }
807    ///
808    /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
809    /// ```
810    ///
811    /// Convert a slice back into its original array:
812    ///
813    /// ```
814    /// use portable_atomic_util::Arc;
815    ///
816    /// let x: Arc<[u32]> = Arc::from([1, 2, 3]);
817    /// let x_ptr: *const [u32] = Arc::into_raw(x);
818    ///
819    /// unsafe {
820    ///     let x: Arc<[u32; 3]> = Arc::from_raw(x_ptr.cast::<[u32; 3]>());
821    ///     assert_eq!(&*x, &[1, 2, 3]);
822    /// }
823    /// ```
824    #[inline]
825    pub unsafe fn from_raw(ptr: *const T) -> Self {
826        unsafe {
827            let offset = data_offset::<T>(&*ptr);
828
829            // Reverse the offset to find the original ArcInner.
830            let arc_ptr = strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T>;
831
832            Self::from_ptr(arc_ptr)
833        }
834    }
835
836    /// Consumes the `Arc`, returning the wrapped pointer.
837    ///
838    /// To avoid a memory leak the pointer must be converted back to an `Arc` using
839    /// [`Arc::from_raw`].
840    ///
841    /// # Examples
842    ///
843    /// ```
844    /// use portable_atomic_util::Arc;
845    ///
846    /// let x = Arc::new("hello".to_owned());
847    /// let x_ptr = Arc::into_raw(x);
848    /// assert_eq!(unsafe { &*x_ptr }, "hello");
849    /// # // Prevent leaks for Miri.
850    /// # drop(unsafe { Arc::from_raw(x_ptr) });
851    /// ```
852    #[must_use = "losing the pointer will leak memory"]
853    pub fn into_raw(this: Self) -> *const T {
854        let this = ManuallyDrop::new(this);
855        Self::as_ptr(&*this)
856    }
857
858    /// Increments the strong reference count on the `Arc<T>` associated with the
859    /// provided pointer by one.
860    ///
861    /// # Safety
862    ///
863    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
864    /// same layout requirements specified in [`Arc::from_raw`].
865    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
866    /// least 1) for the duration of this method, and `ptr` must point to a block of memory
867    /// allocated by the global allocator.
868    ///
869    /// # Examples
870    ///
871    /// ```
872    /// use portable_atomic_util::Arc;
873    ///
874    /// let five = Arc::new(5);
875    ///
876    /// unsafe {
877    ///     let ptr = Arc::into_raw(five);
878    ///     Arc::increment_strong_count(ptr);
879    ///
880    ///     // This assertion is deterministic because we haven't shared
881    ///     // the `Arc` between threads.
882    ///     let five = Arc::from_raw(ptr);
883    ///     assert_eq!(2, Arc::strong_count(&five));
884    /// #   // Prevent leaks for Miri.
885    /// #   Arc::decrement_strong_count(ptr);
886    /// }
887    /// ```
888    #[inline]
889    pub unsafe fn increment_strong_count(ptr: *const T) {
890        // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
891        let arc = unsafe { mem::ManuallyDrop::new(Self::from_raw(ptr)) };
892        // Now increase refcount, but don't drop new refcount either
893        let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
894    }
895
896    /// Decrements the strong reference count on the `Arc<T>` associated with the
897    /// provided pointer by one.
898    ///
899    /// # Safety
900    ///
901    /// The pointer must have been obtained through `Arc::into_raw` and must satisfy the
902    /// same layout requirements specified in [`Arc::from_raw`].
903    /// The associated `Arc` instance must be valid (i.e. the strong count must be at
904    /// least 1) when invoking this method, and `ptr` must point to a block of memory
905    /// allocated by the global allocator. This method can be used to release the final
906    /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
907    /// released.
908    ///
909    /// # Examples
910    ///
911    /// ```
912    /// use portable_atomic_util::Arc;
913    ///
914    /// let five = Arc::new(5);
915    ///
916    /// unsafe {
917    ///     let ptr = Arc::into_raw(five);
918    ///     Arc::increment_strong_count(ptr);
919    ///
920    ///     // Those assertions are deterministic because we haven't shared
921    ///     // the `Arc` between threads.
922    ///     let five = Arc::from_raw(ptr);
923    ///     assert_eq!(2, Arc::strong_count(&five));
924    ///     Arc::decrement_strong_count(ptr);
925    ///     assert_eq!(1, Arc::strong_count(&five));
926    /// }
927    /// ```
928    #[inline]
929    pub unsafe fn decrement_strong_count(ptr: *const T) {
930        // SAFETY: the caller must uphold the safety contract.
931        unsafe { drop(Self::from_raw(ptr)) }
932    }
933
934    /// Provides a raw pointer to the data.
935    ///
936    /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
937    /// as long as there are strong counts in the `Arc`.
938    ///
939    /// # Examples
940    ///
941    /// ```
942    /// use portable_atomic_util::Arc;
943    ///
944    /// let x = Arc::new("hello".to_owned());
945    /// let y = Arc::clone(&x);
946    /// let x_ptr = Arc::as_ptr(&x);
947    /// assert_eq!(x_ptr, Arc::as_ptr(&y));
948    /// assert_eq!(unsafe { &*x_ptr }, "hello");
949    /// ```
950    #[must_use]
951    pub fn as_ptr(this: &Self) -> *const T {
952        let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
953
954        // SAFETY: This cannot go through Deref::deref or ArcInnerPtr::inner because
955        // this is required to retain raw/mut provenance such that e.g. `get_mut` can
956        // write through the pointer after the Arc is recovered through `from_raw`.
957        unsafe { data_ptr::<T>(ptr, &**this) }
958    }
959
960    /// Creates a new [`Weak`] pointer to this allocation.
961    ///
962    /// # Examples
963    ///
964    /// ```
965    /// use portable_atomic_util::Arc;
966    ///
967    /// let five = Arc::new(5);
968    ///
969    /// let weak_five = Arc::downgrade(&five);
970    /// ```
971    #[must_use = "this returns a new `Weak` pointer, \
972                  without modifying the original `Arc`"]
973    #[allow(clippy::missing_panics_doc)]
974    pub fn downgrade(this: &Self) -> Weak<T> {
975        // This Relaxed is OK because we're checking the value in the CAS
976        // below.
977        let mut cur = this.inner().weak.load(Relaxed);
978
979        loop {
980            // check if the weak counter is currently "locked"; if so, spin.
981            if cur == usize::MAX {
982                hint::spin_loop();
983                cur = this.inner().weak.load(Relaxed);
984                continue;
985            }
986
987            // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
988            assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
989
990            // NOTE: this code currently ignores the possibility of overflow
991            // into usize::MAX; in general both Rc and Arc need to be adjusted
992            // to deal with overflow.
993
994            // Unlike with Clone(), we need this to be an Acquire read to
995            // synchronize with the write coming from `is_unique`, so that the
996            // events prior to that write happen before this read.
997            match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
998                Ok(_) => {
999                    // Make sure we do not create a dangling Weak
1000                    debug_assert!(!is_dangling(this.ptr.as_ptr()));
1001                    return Weak { ptr: this.ptr };
1002                }
1003                Err(old) => cur = old,
1004            }
1005        }
1006    }
1007
1008    /// Gets the number of [`Weak`] pointers to this allocation.
1009    ///
1010    /// # Safety
1011    ///
1012    /// This method by itself is safe, but using it correctly requires extra care.
1013    /// Another thread can change the weak count at any time,
1014    /// including potentially between calling this method and acting on the result.
1015    ///
1016    /// # Examples
1017    ///
1018    /// ```
1019    /// use portable_atomic_util::Arc;
1020    ///
1021    /// let five = Arc::new(5);
1022    /// let _weak_five = Arc::downgrade(&five);
1023    ///
1024    /// // This assertion is deterministic because we haven't shared
1025    /// // the `Arc` or `Weak` between threads.
1026    /// assert_eq!(1, Arc::weak_count(&five));
1027    /// ```
1028    #[inline]
1029    #[must_use]
1030    pub fn weak_count(this: &Self) -> usize {
1031        let cnt = this.inner().weak.load(Relaxed);
1032        // If the weak count is currently locked, the value of the
1033        // count was 0 just before taking the lock.
1034        if cnt == usize::MAX { 0 } else { cnt - 1 }
1035    }
1036
1037    /// Gets the number of strong (`Arc`) pointers to this allocation.
1038    ///
1039    /// # Safety
1040    ///
1041    /// This method by itself is safe, but using it correctly requires extra care.
1042    /// Another thread can change the strong count at any time,
1043    /// including potentially between calling this method and acting on the result.
1044    ///
1045    /// # Examples
1046    ///
1047    /// ```
1048    /// use portable_atomic_util::Arc;
1049    ///
1050    /// let five = Arc::new(5);
1051    /// let _also_five = Arc::clone(&five);
1052    ///
1053    /// // This assertion is deterministic because we haven't shared
1054    /// // the `Arc` between threads.
1055    /// assert_eq!(2, Arc::strong_count(&five));
1056    /// ```
1057    #[inline]
1058    #[must_use]
1059    pub fn strong_count(this: &Self) -> usize {
1060        this.inner().strong.load(Relaxed)
1061    }
1062
1063    #[inline]
1064    fn inner(&self) -> &ArcInner<T> {
1065        // This unsafety is ok because while this arc is alive we're guaranteed
1066        // that the inner pointer is valid. Furthermore, we know that the
1067        // `ArcInner` structure itself is `Sync` because the inner data is
1068        // `Sync` as well, so we're ok loaning out an immutable pointer to these
1069        // contents.
1070        unsafe { self.ptr.as_ref() }
1071    }
1072
1073    // Non-inlined part of `drop`.
1074    #[inline(never)]
1075    unsafe fn drop_slow(&mut self) {
1076        // Drop the weak ref collectively held by all strong references when this
1077        // variable goes out of scope. This ensures that the memory is deallocated
1078        // even if the destructor of `T` panics.
1081        let _weak = Weak { ptr: self.ptr };
1082
1083        // Destroy the data at this time, even though we must not free the box
1084        // allocation itself (there might still be weak pointers lying around).
1085        // We cannot use `get_mut_unchecked` here, because `self.alloc` is borrowed.
1086        unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
1087    }
1088
1089    /// Returns `true` if the two `Arc`s point to the same allocation in a vein similar to
1090    /// [`ptr::eq`]. This function ignores the metadata of `dyn Trait` pointers.
1091    ///
1092    /// # Examples
1093    ///
1094    /// ```
1095    /// use portable_atomic_util::Arc;
1096    ///
1097    /// let five = Arc::new(5);
1098    /// let same_five = Arc::clone(&five);
1099    /// let other_five = Arc::new(5);
1100    ///
1101    /// assert!(Arc::ptr_eq(&five, &same_five));
1102    /// assert!(!Arc::ptr_eq(&five, &other_five));
1103    /// ```
1104    ///
1105    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
1106    #[inline]
1107    #[must_use]
1108    pub fn ptr_eq(this: &Self, other: &Self) -> bool {
1109        ptr::eq(this.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
1110    }
1111}
1112
1113impl<T: ?Sized> Arc<T> {
1114    /// Allocates an `ArcInner<T>` with sufficient space for
1115    /// a possibly-unsized inner value where the value has the layout provided.
1116    ///
1117    /// The function `mem_to_arc_inner` is called with the data pointer
1118    /// and must return a (potentially fat) pointer to the `ArcInner<T>`.
1119    unsafe fn allocate_for_layout(
1120        value_layout: Layout,
1121        allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
1122        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1123    ) -> *mut ArcInner<T> {
1124        let layout = arc_inner_layout_for_value_layout(value_layout);
1125
1126        let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
1127
1128        unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
1129    }
1130
1131    unsafe fn initialize_arc_inner(
1132        ptr: NonNull<u8>,
1133        _layout: Layout,
1134        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
1135    ) -> *mut ArcInner<T> {
1136        let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
1137        // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable
1138
1139        // SAFETY: mem_to_arc_inner returns a valid pointer to an uninitialized ArcInner<T>.
1140        // ArcInner<T> is repr(C), and strong and weak are the first and second fields and
1141        // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and
1142        // `(inner as *mut atomic::AtomicUsize).add(1)` is weak.
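        // Illustrative offsets (assuming a typical 64-bit target where `AtomicUsize` is 8 bytes):
        // `strong` sits at offset 0 and `weak` at offset 8 of `ArcInner<T>`, so the two raw
        // writes below initialize exactly those two counter fields.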
1143        unsafe {
1144            let strong = inner as *mut atomic::AtomicUsize;
1145            strong.write(atomic::AtomicUsize::new(1));
1146            let weak = strong.add(1);
1147            weak.write(atomic::AtomicUsize::new(1));
1148        }
1149
1150        inner
1151    }
1152}
1153
1154impl<T: ?Sized> Arc<T> {
1155    /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
1156    #[inline]
1157    unsafe fn allocate_for_value(value: &T) -> *mut ArcInner<T> {
1158        let ptr: *const T = value;
1159        // Allocate for the `ArcInner<T>` using the given value.
1160        unsafe {
1161            Self::allocate_for_layout(
1162                Layout::for_value(value),
1163                |layout| Global.allocate(layout),
1164                |mem| strict::with_metadata_of(mem, ptr as *const ArcInner<T>),
1165            )
1166        }
1167    }
1168
1169    fn from_box(src: Box<T>) -> Arc<T> {
1170        unsafe {
1171            let value_size = mem::size_of_val(&*src);
1172            let ptr = Self::allocate_for_value(&*src);
1173
1174            // Copy value as bytes
1175            ptr::copy_nonoverlapping(
1176                &*src as *const T as *const u8,
1177                data_ptr::<T>(ptr, &*src) as *mut u8,
1178                value_size,
1179            );
1180
1181            // Free the allocation without dropping its contents
1182            let box_ptr = Box::into_raw(src);
1183            let src = Box::from_raw(box_ptr as *mut mem::ManuallyDrop<T>);
1184            drop(src);
1185
1186            Self::from_ptr(ptr)
1187        }
1188    }
1189}
1190
1191#[cfg(not(portable_atomic_no_maybe_uninit))]
1192impl<T> Arc<[T]> {
1193    /// Allocates an `ArcInner<[mem::MaybeUninit<T>]>` with the given length.
1194    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[mem::MaybeUninit<T>]> {
1195        unsafe {
1196            Arc::allocate_for_layout(
1197                layout::array::<T>(len).unwrap(),
1198                |layout| Global.allocate(layout),
1199                |mem| {
1200                    // We create a slice just for its metadata (we must use `[mem::MaybeUninit<T>]`
1201                    // instead of `[T]` because the values behind `mem` are not valid, initialized `T`s;
1202                    // there is no size/alignment issue thanks to layout::array), and create a
1203                    // pointer from `mem` and the slice's metadata.
1204                    //
1205                    // We cannot use other approaches here:
1206                    // - ptr::slice_from_raw_parts_mut would be the best way, but it requires Rust 1.42.
1207                    // - We cannot use slice::from_raw_parts_mut and then cast its pointer to
1208                    //   ArcInner, due to provenance: the actual size of the valid allocation
1209                    //   behind `mem` is `layout.size()` bytes (counters + values + padding), but the
1210                    //   pointer returned by slice::from_raw_parts_mut is only valid for
1211                    //   `size_of::<T> * len` bytes (the values only).
1212                    let meta: *const _ =
1213                        slice::from_raw_parts(mem as *const mem::MaybeUninit<T>, len);
1214                    strict::with_metadata_of(mem, meta) as *mut ArcInner<[mem::MaybeUninit<T>]>
1215                },
1216            )
1217        }
1218    }
1219
1220    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
1221    ///
1222    /// Behavior is undefined should the size be wrong.
1223    unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Self {
1224        // Panic guard while cloning T elements.
1225        // In the event of a panic, elements that have been written
1226        // into the new ArcInner will be dropped, then the memory freed.
1227        struct Guard<T> {
1228            mem: NonNull<u8>,
1229            elems: *mut T,
1230            layout: Layout,
1231            n_elems: usize,
1232        }
1233
1234        impl<T> Drop for Guard<T> {
1235            fn drop(&mut self) {
1236                unsafe {
1237                    let slice = slice::from_raw_parts_mut(self.elems, self.n_elems);
1238                    ptr::drop_in_place(slice);
1239
1240                    Global.deallocate(self.mem, self.layout);
1241                }
1242            }
1243        }
1244
1245        unsafe {
1246            let ptr: *mut ArcInner<[mem::MaybeUninit<T>]> = Arc::allocate_for_slice(len);
1247
1248            let mem = ptr as *mut _ as *mut u8;
1249            let layout = Layout::for_value(&*ptr);
1250
1251            // Pointer to first element
1252            let elems = (*ptr).data.as_mut_ptr() as *mut T;
1253
1254            let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
1255
1256            for (i, item) in iter.enumerate() {
1257                ptr::write(elems.add(i), item);
1258                guard.n_elems += 1;
1259            }
1260
1261            // All clear. Forget the guard so it doesn't free the new ArcInner.
1262            mem::forget(guard);
1263
1264            Arc::from_ptr(ptr).assume_init()
1265        }
1266    }
1267}
1268
1269impl<T: ?Sized> Clone for Arc<T> {
1270    /// Makes a clone of the `Arc` pointer.
1271    ///
1272    /// This creates another pointer to the same allocation, increasing the
1273    /// strong reference count.
1274    ///
1275    /// # Examples
1276    ///
1277    /// ```
1278    /// use portable_atomic_util::Arc;
1279    ///
1280    /// let five = Arc::new(5);
1281    ///
1282    /// let _ = Arc::clone(&five);
1283    /// ```
1284    #[inline]
1285    fn clone(&self) -> Self {
1286        // Using a relaxed ordering is alright here, as knowledge of the
1287        // original reference prevents other threads from erroneously deleting
1288        // the object.
1289        //
1290        // As explained in the [Boost documentation][1], Increasing the
1291        // reference counter can always be done with memory_order_relaxed: New
1292        // references to an object can only be formed from an existing
1293        // reference, and passing an existing reference from one thread to
1294        // another must already provide any required synchronization.
1295        //
1296        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1297        let old_size = self.inner().strong.fetch_add(1, Relaxed);
1298
1299        // However we need to guard against massive refcounts in case someone is `mem::forget`ing
1300        // Arcs. If we don't do this the count can overflow and users will use-after free. This
1301        // branch will never be taken in any realistic program. We abort because such a program is
1302        // incredibly degenerate, and we don't care to support it.
1303        //
1304        // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
1305        // But we do that check *after* having done the increment, so there is a chance here that
1306        // the worst already happened and we actually do overflow the `usize` counter. However, that
1307        // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
1308        // above and the `abort` below, which seems exceedingly unlikely.
1309        //
1310        // This is a global invariant, and also applies when using a compare-exchange loop to increment
1311        // counters in other methods.
1312        // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
1313        // and then overflow using a few `fetch_add`s.
1314        if old_size > MAX_REFCOUNT {
1315            abort();
1316        }
1317
1318        unsafe { Self::from_inner(self.ptr) }
1319    }
1320}
1321
1322impl<T: ?Sized> Deref for Arc<T> {
1323    type Target = T;
1324
1325    #[inline]
1326    fn deref(&self) -> &Self::Target {
1327        &self.inner().data
1328    }
1329}
1330
1331impl<T: ?Sized + CloneToUninit> Arc<T> {
1332    /// Makes a mutable reference into the given `Arc`.
1333    ///
1334    /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
1335    /// [`clone`] the inner value to a new allocation to ensure unique ownership.  This is also
1336    /// referred to as clone-on-write.
1337    ///
1338    /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
1339    /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
1340    /// be cloned.
1341    ///
1342    /// See also [`get_mut`], which will fail rather than cloning the inner value
1343    /// or dissociating [`Weak`] pointers.
1344    ///
1345    /// [`clone`]: Clone::clone
1346    /// [`get_mut`]: Arc::get_mut
1347    ///
1348    /// # Examples
1349    ///
1350    /// ```
1351    /// use portable_atomic_util::Arc;
1352    ///
1353    /// let mut data = Arc::new(5);
1354    ///
1355    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
1356    /// let mut other_data = Arc::clone(&data); // Won't clone inner data
1357    /// *Arc::make_mut(&mut data) += 1; // Clones inner data
1358    /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
1359    /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
1360    ///
1361    /// // Now `data` and `other_data` point to different allocations.
1362    /// assert_eq!(*data, 8);
1363    /// assert_eq!(*other_data, 12);
1364    /// ```
1365    ///
1366    /// [`Weak`] pointers will be dissociated:
1367    ///
1368    /// ```
1369    /// use portable_atomic_util::Arc;
1370    ///
1371    /// let mut data = Arc::new(75);
1372    /// let weak = Arc::downgrade(&data);
1373    ///
1374    /// assert!(75 == *data);
1375    /// assert!(75 == *weak.upgrade().unwrap());
1376    ///
1377    /// *Arc::make_mut(&mut data) += 1;
1378    ///
1379    /// assert!(76 == *data);
1380    /// assert!(weak.upgrade().is_none());
1381    /// ```
1382    #[inline]
1383    pub fn make_mut(this: &mut Self) -> &mut T {
1384        let size_of_val = mem::size_of_val::<T>(&**this);
1385
1386        // Note that we hold both a strong reference and a weak reference.
1387        // Thus, releasing our strong reference only will not, by itself, cause
1388        // the memory to be deallocated.
1389        //
1390        // Use Acquire to ensure that we see any writes to `weak` that happen
1391        // before release writes (i.e., decrements) to `strong`. Since we hold a
1392        // weak count, there's no chance the ArcInner itself could be
1393        // deallocated.
1394        if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
1395            // Another strong pointer exists, so we must clone.
1396            *this = Arc::clone_from_ref(&**this);
1397        } else if this.inner().weak.load(Relaxed) != 1 {
1398            // Relaxed suffices in the above because this is fundamentally an
1399            // optimization: we are always racing with weak pointers being
1400            // dropped. Worst case, we end up allocating a new Arc unnecessarily.
1401
1402            // We removed the last strong ref, but there are additional weak
1403            // refs remaining. We'll move the contents to a new Arc, and
1404            // invalidate the other weak refs.
1405
1406            // Note that it is not possible for the read of `weak` to yield
1407            // usize::MAX (i.e., locked), since the weak count can only be
1408            // locked by a thread with a strong reference.
1409
1410            // Materialize our own implicit weak pointer, so that it can clean
1411            // up the ArcInner as needed.
1412            let _weak = Weak { ptr: this.ptr };
1413
1414            // Can just steal the data, all that's left is `Weak`s
1415            //
1416            // We don't need panic-protection like the above branch does, but we might as well
1417            // use the same mechanism.
1418            let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(&**this);
1419            unsafe {
1420                // Initialize `in_progress` with move of **this.
1421                // We have to express this in terms of bytes because `T: ?Sized`; there is no
1422                // operation that just copies a value based on its `size_of_val()`.
1423                ptr::copy_nonoverlapping(
1424                    &**this as *const T as *const u8,
1425                    in_progress.data_ptr() as *mut u8,
1426                    size_of_val,
1427                );
1428
1429                ptr::write(this, in_progress.into_arc());
1430            }
1431        } else {
1432            // We were the sole reference of either kind; bump back up the
1433            // strong ref count.
1434            this.inner().strong.store(1, Release);
1435        }
1436
1437        // As with `get_mut()`, the unsafety is ok because our reference was
1438        // either unique to begin with, or became one upon cloning the contents.
1439        unsafe { Self::get_mut_unchecked(this) }
1440    }
1441}
1442
1443impl<T: Clone> Arc<T> {
1444    /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
1445    /// clone.
1446    ///
1447    /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
1448    /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
1449    ///
1450    /// # Examples
1451    ///
1452    /// ```
1453    /// use std::ptr;
1454    ///
1455    /// use portable_atomic_util::Arc;
1456    ///
1457    /// let inner = String::from("test");
1458    /// let ptr = inner.as_ptr();
1459    ///
1460    /// let arc = Arc::new(inner);
1461    /// let inner = Arc::unwrap_or_clone(arc);
1462    /// // The inner value was not cloned
1463    /// assert!(ptr::eq(ptr, inner.as_ptr()));
1464    ///
1465    /// let arc = Arc::new(inner);
1466    /// let arc2 = arc.clone();
1467    /// let inner = Arc::unwrap_or_clone(arc);
1468    /// // Because there were 2 references, we had to clone the inner value.
1469    /// assert!(!ptr::eq(ptr, inner.as_ptr()));
1470    /// // `arc2` is the last reference, so when we unwrap it we get back
1471    /// // the original `String`.
1472    /// let inner = Arc::unwrap_or_clone(arc2);
1473    /// assert!(ptr::eq(ptr, inner.as_ptr()));
1474    /// ```
1475    #[inline]
1476    pub fn unwrap_or_clone(this: Self) -> T {
1477        Self::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
1478    }
1479}
1480
1481impl<T: ?Sized> Arc<T> {
1482    /// Returns a mutable reference into the given `Arc`, if there are
1483    /// no other `Arc` or [`Weak`] pointers to the same allocation.
1484    ///
1485    /// Returns [`None`] otherwise, because it is not safe to
1486    /// mutate a shared value.
1487    ///
1488    /// See also [`make_mut`][make_mut], which will [`clone`][clone]
1489    /// the inner value when there are other `Arc` pointers.
1490    ///
1491    /// [make_mut]: Arc::make_mut
1492    /// [clone]: Clone::clone
1493    ///
1494    /// # Examples
1495    ///
1496    /// ```
1497    /// use portable_atomic_util::Arc;
1498    ///
1499    /// let mut x = Arc::new(3);
1500    /// *Arc::get_mut(&mut x).unwrap() = 4;
1501    /// assert_eq!(*x, 4);
1502    ///
1503    /// let _y = Arc::clone(&x);
1504    /// assert!(Arc::get_mut(&mut x).is_none());
1505    /// ```
1506    #[inline]
1507    pub fn get_mut(this: &mut Self) -> Option<&mut T> {
1508        if Self::is_unique(this) {
1509            // This unsafety is ok because we're guaranteed that the pointer
1510            // returned is the *only* pointer that will ever be returned to T. Our
1511            // reference count is guaranteed to be 1 at this point, and we required
1512            // the Arc itself to be `mut`, so we're returning the only possible
1513            // reference to the inner data.
1514            unsafe { Some(Self::get_mut_unchecked(this)) }
1515        } else {
1516            None
1517        }
1518    }
1519
1520    #[inline]
1521    unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
1522        // We are careful to *not* create a reference covering the "count" fields, as
1523        // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
1524        unsafe { &mut (*this.ptr.as_ptr()).data }
1525    }
1526
1527    #[inline]
1528    fn is_unique(this: &Self) -> bool {
1529        // lock the weak pointer count if we appear to be the sole weak pointer
1530        // holder.
1531        //
1532        // The acquire label here ensures a happens-before relationship with any
1533        // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
1534        // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
1535        // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
1536        if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
1537            // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
1538            // counter in `drop` -- the only access that happens when any but the last reference
1539            // is being dropped.
1540            let unique = this.inner().strong.load(Acquire) == 1;
1541
1542            // The release write here synchronizes with a read in `downgrade`,
1543            // effectively preventing the above read of `strong` from happening
1544            // after the write.
1545            this.inner().weak.store(1, Release); // release the lock
1546            unique
1547        } else {
1548            false
1549        }
1550    }
1551}
1552
1553impl<T: ?Sized> Drop for Arc<T> {
1554    /// Drops the `Arc`.
1555    ///
1556    /// This will decrement the strong reference count. If the strong reference
1557    /// count reaches zero then the only other references (if any) are
1558    /// [`Weak`], so we `drop` the inner value.
1559    ///
1560    /// # Examples
1561    ///
1562    /// ```
1563    /// use portable_atomic_util::Arc;
1564    ///
1565    /// struct Foo;
1566    ///
1567    /// impl Drop for Foo {
1568    ///     fn drop(&mut self) {
1569    ///         println!("dropped!");
1570    ///     }
1571    /// }
1572    ///
1573    /// let foo = Arc::new(Foo);
1574    /// let foo2 = Arc::clone(&foo);
1575    ///
1576    /// drop(foo); // Doesn't print anything
1577    /// drop(foo2); // Prints "dropped!"
1578    /// ```
1579    #[inline]
1580    fn drop(&mut self) {
1581        // Because `fetch_sub` is already atomic, we do not need to synchronize
1582        // with other threads unless we are going to delete the object. This
1583        // same logic applies to the below `fetch_sub` to the `weak` count.
1584        if self.inner().strong.fetch_sub(1, Release) != 1 {
1585            return;
1586        }
1587
1588        // This fence is needed to prevent reordering of use of the data and
1589        // deletion of the data. Because it is marked `Release`, the decreasing
1590        // of the reference count synchronizes with this `Acquire` fence. This
1591        // means that use of the data happens before decreasing the reference
1592        // count, which happens before this fence, which happens before the
1593        // deletion of the data.
1594        //
1595        // As explained in the [Boost documentation][1],
1596        //
1597        // > It is important to enforce any possible access to the object in one
1598        // > thread (through an existing reference) to *happen before* deleting
1599        // > the object in a different thread. This is achieved by a "release"
1600        // > operation after dropping a reference (any access to the object
1601        // > through this reference must obviously happened before), and an
1602        // > "acquire" operation before deleting the object.
1603        //
1604        // In particular, while the contents of an Arc are usually immutable, it's
1605        // possible to have interior writes to something like a Mutex<T>. Since a
1606        // Mutex is not acquired when it is deleted, we can't rely on its
1607        // synchronization logic to make writes in thread A visible to a destructor
1608        // running in thread B.
1609        //
1610        // Also note that the Acquire fence here could probably be replaced with an
1611        // Acquire load, which could improve performance in highly-contended
1612        // situations. See [2].
1613        //
1614        // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1615        // [2]: (https://github.com/rust-lang/rust/pull/41714)
1616        acquire!(self.inner().strong);
1617
1618        unsafe {
1619            self.drop_slow();
1620        }
1621    }
1622}
1623
1624impl Arc<dyn Any + Send + Sync> {
1625    /// Attempts to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
1626    ///
1627    /// # Examples
1628    ///
1629    /// ```
1630    /// use std::any::Any;
1631    ///
1632    /// use portable_atomic_util::Arc;
1633    ///
1634    /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
1635    ///     if let Ok(string) = value.downcast::<String>() {
1636    ///         println!("String ({}): {}", string.len(), string);
1637    ///     }
1638    /// }
1639    ///
1640    /// let my_string = "Hello World".to_string();
1641    /// print_if_string(Arc::from(Box::new(my_string) as Box<dyn Any + Send + Sync>));
1642    /// print_if_string(Arc::from(Box::new(0i8) as Box<dyn Any + Send + Sync>));
1643    /// // or with "--cfg portable_atomic_unstable_coerce_unsized" in RUSTFLAGS (requires Rust nightly):
1644    /// // print_if_string(Arc::new(my_string));
1645    /// // print_if_string(Arc::new(0i8));
1646    /// ```
1647    #[inline]
1648    pub fn downcast<T>(self) -> Result<Arc<T>, Self>
1649    where
1650        T: Any + Send + Sync,
1651    {
1652        if (*self).is::<T>() {
1653            unsafe {
1654                let ptr = Arc::into_inner_non_null(self);
1655                Ok(Arc::from_inner(ptr.cast::<ArcInner<T>>()))
1656            }
1657        } else {
1658            Err(self)
1659        }
1660    }
1661}
1662
1663impl<T> Weak<T> {
1664    /// Constructs a new `Weak<T>`, without allocating any memory.
1665    /// Calling [`upgrade`] on the return value always gives [`None`].
1666    ///
1667    /// [`upgrade`]: Weak::upgrade
1668    ///
1669    /// # Examples
1670    ///
1671    /// ```
1672    /// use portable_atomic_util::Weak;
1673    ///
1674    /// let empty: Weak<i64> = Weak::new();
1675    /// assert!(empty.upgrade().is_none());
1676    /// ```
1677    #[inline]
1678    #[must_use]
1679    pub const fn new() -> Self {
1680        Self {
1681            ptr: unsafe {
1682                NonNull::new_unchecked(strict::without_provenance_mut::<ArcInner<T>>(usize::MAX))
1683            },
1684        }
1685    }
1686
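    // Note (added for clarity): this allocates an `ArcInner<T>` sized for a `T` and leaves the
    // data field uninitialized; the counts are written by `initialize_arc_inner` below
    // (strong = 0, weak = 1).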
1687    #[inline]
1688    #[must_use]
1689    fn new_uninit_ptr() -> NonNull<ArcInner<T>> {
1690        unsafe {
1691            NonNull::new_unchecked(Self::allocate_for_layout(
1692                Layout::new::<T>(),
1693                |layout| Global.allocate(layout),
1694                |ptr| ptr as *mut _,
1695            ))
1696        }
1697    }
1698}
1699
1700/// Helper type to allow accessing the reference counts without
1701/// making any assertions about the data field.
1702struct WeakInner<'a> {
1703    weak: &'a atomic::AtomicUsize,
1704    strong: &'a atomic::AtomicUsize,
1705}
1706
1707// TODO: See todo comment in Weak::from_raw
1708impl<T /*: ?Sized */> Weak<T> {
1709    /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
1710    ///
1711    /// This can be used to safely get a strong reference (by calling [`upgrade`]
1712    /// later) or to deallocate the weak count by dropping the `Weak<T>`.
1713    ///
1714    /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
1715    /// as these don't own anything; the method still works on them).
1716    ///
1717    /// # Safety
1718    ///
1719    /// The pointer must have originated from [`into_raw`], must still own its potential
1720    /// weak reference, and must point to a block of memory allocated by the global allocator.
1721    ///
1722    /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
1723    /// takes ownership of one weak reference currently represented as a raw pointer (the weak
1724    /// count is not modified by this operation) and therefore it must be paired with a previous
1725    /// call to [`into_raw`].
    ///
1726    /// # Examples
1727    ///
1728    /// ```
1729    /// use portable_atomic_util::{Arc, Weak};
1730    ///
1731    /// let strong = Arc::new("hello".to_owned());
1732    ///
1733    /// let raw_1 = Arc::downgrade(&strong).into_raw();
1734    /// let raw_2 = Arc::downgrade(&strong).into_raw();
1735    ///
1736    /// assert_eq!(2, Arc::weak_count(&strong));
1737    ///
1738    /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
1739    /// assert_eq!(1, Arc::weak_count(&strong));
1740    ///
1741    /// drop(strong);
1742    ///
1743    /// // Decrement the last weak count.
1744    /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
1745    /// ```
1746    ///
1747    /// [`new`]: Weak::new
1748    /// [`into_raw`]: Weak::into_raw
1749    /// [`upgrade`]: Weak::upgrade
1750    #[inline]
1751    pub unsafe fn from_raw(ptr: *const T) -> Self {
1752        // See Weak::as_ptr for context on how the input pointer is derived.
1753
1754        let ptr = if is_dangling(ptr) {
1755            // This is a dangling Weak.
1756            ptr as *mut ArcInner<T>
1757        } else {
1758            // Otherwise, we're guaranteed the pointer came from a non-dangling Weak.
1759            // TODO: data_offset calls align_of_val which needs to create a reference
1760            // to data but we cannot create a reference to data here since data in Weak
1761            // can be dropped concurrently from another thread. Therefore, we can
1762            // only support sized types that can avoid references to data
1763            // unless align_of_val_raw is stabilized.
1764            // // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
1765            // let offset = unsafe { data_offset(ptr) };
1766            let offset = data_offset_align(mem::align_of::<T>());
1767            // Thus, we reverse the offset to get the whole ArcInner.
1768            // SAFETY: the pointer originated from a Weak, so this offset is safe.
1769            unsafe { strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T> }
1770        };
1771
1772        // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
1773        Self { ptr: unsafe { NonNull::new_unchecked(ptr) } }
1774    }
1775
1776    /// Consumes the `Weak<T>` and turns it into a raw pointer.
1777    ///
1778    /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
1779    /// one weak reference (the weak count is not modified by this operation). It can be turned
1780    /// back into the `Weak<T>` with [`from_raw`].
1781    ///
1782    /// The same restrictions of accessing the target of the pointer as with
1783    /// [`as_ptr`] apply.
1784    ///
1785    /// # Examples
1786    ///
1787    /// ```
1788    /// use portable_atomic_util::{Arc, Weak};
1789    ///
1790    /// let strong = Arc::new("hello".to_owned());
1791    /// let weak = Arc::downgrade(&strong);
1792    /// let raw = weak.into_raw();
1793    ///
1794    /// assert_eq!(1, Arc::weak_count(&strong));
1795    /// assert_eq!("hello", unsafe { &*raw });
1796    ///
1797    /// drop(unsafe { Weak::from_raw(raw) });
1798    /// assert_eq!(0, Arc::weak_count(&strong));
1799    /// ```
1800    ///
1801    /// [`from_raw`]: Weak::from_raw
1802    /// [`as_ptr`]: Weak::as_ptr
1803    #[must_use = "losing the pointer will leak memory"]
1804    pub fn into_raw(self) -> *const T {
1805        ManuallyDrop::new(self).as_ptr()
1806    }
1807}
1808
1809// TODO: See todo comment in Weak::from_raw
1810impl<T /*: ?Sized */> Weak<T> {
1811    /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
1812    ///
1813    /// The pointer is valid only if there are some strong references. The pointer may be dangling,
1814    /// unaligned or even [`null`] otherwise.
1815    ///
1816    /// # Examples
1817    ///
1818    /// ```
1819    /// use std::ptr;
1820    ///
1821    /// use portable_atomic_util::Arc;
1822    ///
1823    /// let strong = Arc::new("hello".to_owned());
1824    /// let weak = Arc::downgrade(&strong);
1825    /// // Both point to the same object
1826    /// assert!(ptr::eq(&*strong, weak.as_ptr()));
1827    /// // The strong here keeps it alive, so we can still access the object.
1828    /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
1829    ///
1830    /// drop(strong);
1831    /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
1832    /// // undefined behavior.
1833    /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
1834    /// ```
1835    ///
1836    /// [`null`]: core::ptr::null "ptr::null"
1837    #[must_use]
1838    pub fn as_ptr(&self) -> *const T {
1839        let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
1840
1841        if is_dangling(ptr) {
1842            // If the pointer is dangling, we return the sentinel directly. This cannot be
1843            // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
1844            ptr as *const T
1845        } else {
1846            // TODO: See todo comment in Weak::from_raw
1847            // // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
1848            // // The payload may be dropped at this point, and we have to maintain provenance,
1849            // // so use raw pointer manipulation.
1850            // unsafe { data_ptr::<T>(ptr, &(*ptr).data) }
1851            unsafe {
1852                let offset = data_offset_align(mem::align_of::<T>());
1853                strict::byte_add(ptr, offset) as *const T
1854            }
1855        }
1856    }
1857}
1858
1859impl<T: ?Sized> Weak<T> {
1860    /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
1861    /// dropping of the inner value if successful.
1862    ///
1863    /// Returns [`None`] if the inner value has since been dropped.
1864    ///
1865    /// # Examples
1866    ///
1867    /// ```
1868    /// use portable_atomic_util::Arc;
1869    ///
1870    /// let five = Arc::new(5);
1871    ///
1872    /// let weak_five = Arc::downgrade(&five);
1873    ///
1874    /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
1875    /// assert!(strong_five.is_some());
1876    ///
1877    /// // Destroy all strong pointers.
1878    /// drop(strong_five);
1879    /// drop(five);
1880    ///
1881    /// assert!(weak_five.upgrade().is_none());
1882    /// ```
1883    #[must_use = "this returns a new `Arc`, \
1884                  without modifying the original weak pointer"]
1885    pub fn upgrade(&self) -> Option<Arc<T>> {
1886        #[inline]
1887        fn checked_increment(n: usize) -> Option<usize> {
1888            // Any write of 0 we can observe leaves the field in a permanently zero state.
1889            if n == 0 {
1890                return None;
1891            }
1892            // See comments in `Arc::clone` for why we do this (for `mem::forget`).
1893            assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
1894            Some(n + 1)
1895        }
1896
1897        // We use a CAS loop to increment the strong count instead of a
1898        // fetch_add as this function should never take the reference count
1899        // from zero to one.
1900        //
1901        // Relaxed is fine for the failure case because we don't have any expectations about the new state.
1902        // Acquire is necessary for the success case to synchronize with `Arc::new_cyclic`, when the inner
1903        // value can be initialized after `Weak` references have already been created. In that case, we
1904        // expect to observe the fully initialized value.
1905        if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
1906            // SAFETY: pointer is not null, verified in checked_increment
1907            unsafe { Some(Arc::from_inner(self.ptr)) }
1908        } else {
1909            None
1910        }
1911    }
1912
1913    /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
1914    ///
1915    /// If `self` was created using [`Weak::new`], this will return 0.
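    ///
    /// # Examples
    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.strong_count());
    ///
    /// // Dropping the last `Arc` brings the strong count to 0, even though the
    /// // allocation is kept alive by `weak_five`.
    /// drop(five);
    /// assert_eq!(0, weak_five.strong_count());
    /// ```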
1916    #[must_use]
1917    pub fn strong_count(&self) -> usize {
1918        if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
1919    }
1920
1921    /// Gets an approximation of the number of `Weak` pointers pointing to this
1922    /// allocation.
1923    ///
1924    /// If `self` was created using [`Weak::new`], or if there are no remaining
1925    /// strong pointers, this will return 0.
1926    ///
1927    /// # Accuracy
1928    ///
1929    /// Due to implementation details, the returned value can be off by 1 in
1930    /// either direction when other threads are manipulating any `Arc`s or
1931    /// `Weak`s pointing to the same allocation.
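    ///
    /// # Examples
    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let five = Arc::new(5);
    /// let weak_five = Arc::downgrade(&five);
    /// assert_eq!(1, weak_five.weak_count());
    ///
    /// let _also_weak = weak_five.clone();
    /// assert_eq!(2, weak_five.weak_count());
    ///
    /// // Once all strong pointers are gone, this reports 0 even though weak pointers remain.
    /// drop(five);
    /// assert_eq!(0, weak_five.weak_count());
    /// ```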
1932    #[must_use]
1933    pub fn weak_count(&self) -> usize {
1934        if let Some(inner) = self.inner() {
1935            let weak = inner.weak.load(Acquire);
1936            let strong = inner.strong.load(Relaxed);
1937            if strong == 0 {
1938                0
1939            } else {
1940                // Since we observed that there was at least one strong pointer
1941                // after reading the weak count, we know that the implicit weak
1942                // reference (present whenever any strong references are alive)
1943                // was still around when we observed the weak count, and can
1944                // therefore safely subtract it.
1945                weak - 1
1946            }
1947        } else {
1948            0
1949        }
1950    }
1951
1952    /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
1953    /// (i.e., when this `Weak` was created by `Weak::new`).
1954    #[inline]
1955    fn inner(&self) -> Option<WeakInner<'_>> {
1956        let ptr = self.ptr.as_ptr();
1957        if is_dangling(ptr) {
1958            None
1959        } else {
1960            // SAFETY: non-dangling Weak has a valid pointer.
1961            // We are careful to *not* create a reference covering the "data" field, as
1962            // the field may be mutated concurrently (for example, if the last `Arc`
1963            // is dropped, the data field will be dropped in-place).
1964            Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
1965        }
1966    }
1967
1968    /// Returns `true` if the two `Weak`s point to the same allocation (similar to [`ptr::eq`]), or if
1969    /// both don't point to any allocation (because they were created with `Weak::new()`). However,
1970    /// this function ignores the metadata of `dyn Trait` pointers.
1971    ///
1972    /// # Notes
1973    ///
1974    /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare equal to
1975    /// each other, even though they don't point to any allocation.
1976    ///
1977    /// # Examples
1978    ///
1979    /// ```
1980    /// use portable_atomic_util::Arc;
1981    ///
1982    /// let first_rc = Arc::new(5);
1983    /// let first = Arc::downgrade(&first_rc);
1984    /// let second = Arc::downgrade(&first_rc);
1985    ///
1986    /// assert!(first.ptr_eq(&second));
1987    ///
1988    /// let third_rc = Arc::new(5);
1989    /// let third = Arc::downgrade(&third_rc);
1990    ///
1991    /// assert!(!first.ptr_eq(&third));
1992    /// ```
1993    ///
1994    /// Comparing `Weak::new`.
1995    ///
1996    /// ```
1997    /// use portable_atomic_util::{Arc, Weak};
1998    ///
1999    /// let first = Weak::new();
2000    /// let second = Weak::new();
2001    /// assert!(first.ptr_eq(&second));
2002    ///
2003    /// let third_rc = Arc::new(());
2004    /// let third = Arc::downgrade(&third_rc);
2005    /// assert!(!first.ptr_eq(&third));
2006    /// ```
2007    ///
2008    /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
2009    #[inline]
2010    #[must_use]
2011    pub fn ptr_eq(&self, other: &Self) -> bool {
2012        ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
2013    }
2014}
2015
2016impl<T: ?Sized> Weak<T> {
2017    /// Allocates an `ArcInner<T>` with sufficient space for
2018    /// a possibly-unsized inner value where the value has the layout provided.
2019    ///
2020    /// The function `mem_to_arc_inner` is called with the pointer to the newly allocated memory
2021    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
2022    unsafe fn allocate_for_layout(
2023        value_layout: Layout,
2024        allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
2025        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2026    ) -> *mut ArcInner<T> {
2027        let layout = arc_inner_layout_for_value_layout(value_layout);
2028
2029        let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
2030
2031        unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
2032    }
2033
2034    unsafe fn initialize_arc_inner(
2035        ptr: NonNull<u8>,
2036        _layout: Layout,
2037        mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
2038    ) -> *mut ArcInner<T> {
2039        let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
2040        // debug_assert_eq!(unsafe { Layout::for_value_raw(inner) }, layout); // for_value_raw is unstable
2041
2042        // SAFETY: mem_to_arc_inner returns a valid pointer to an uninitialized ArcInner<T>.
2043        // ArcInner<T> is repr(C), and strong and weak are the first and second fields and
2044        // are the same type, so `inner as *mut atomic::AtomicUsize` is strong and
2045        // `(inner as *mut atomic::AtomicUsize).add(1)` is weak.
2046        unsafe {
2047            let strong = inner as *mut atomic::AtomicUsize;
2048            strong.write(atomic::AtomicUsize::new(0));
2049            let weak = strong.add(1);
2050            weak.write(atomic::AtomicUsize::new(1));
2051        }
2052
2053        inner
2054    }
2055}
2056
2057impl<T: ?Sized> Clone for Weak<T> {
2058    /// Makes a clone of the `Weak` pointer that points to the same allocation.
2059    ///
2060    /// # Examples
2061    ///
2062    /// ```
2063    /// use portable_atomic_util::{Arc, Weak};
2064    ///
2065    /// let weak_five = Arc::downgrade(&Arc::new(5));
2066    ///
2067    /// let _ = Weak::clone(&weak_five);
2068    /// ```
2069    #[inline]
2070    fn clone(&self) -> Self {
2071        if let Some(inner) = self.inner() {
2072            // See comments in Arc::clone() for why this is relaxed. This can use a
2073            // fetch_add (ignoring the lock) because the weak count is only locked
2074            // when there are *no other* weak pointers in existence. (So we can't be
2075            // running this code in that case.)
2076            let old_size = inner.weak.fetch_add(1, Relaxed);
2077
2078            // See comments in Arc::clone() for why we do this (for mem::forget).
2079            if old_size > MAX_REFCOUNT {
2080                abort();
2081            }
2082        }
2083
2084        Self { ptr: self.ptr }
2085    }
2086}
2087
2088impl<T> Default for Weak<T> {
2089    /// Constructs a new `Weak<T>`, without allocating memory.
2090    /// Calling [`upgrade`] on the return value always
2091    /// gives [`None`].
2092    ///
2093    /// [`upgrade`]: Weak::upgrade
2094    ///
2095    /// # Examples
2096    ///
2097    /// ```
2098    /// use portable_atomic_util::Weak;
2099    ///
2100    /// let empty: Weak<i64> = Default::default();
2101    /// assert!(empty.upgrade().is_none());
2102    /// ```
2103    fn default() -> Self {
2104        Self::new()
2105    }
2106}
2107
2108impl<T: ?Sized> Drop for Weak<T> {
2109    /// Drops the `Weak` pointer.
2110    ///
2111    /// # Examples
2112    ///
2113    /// ```
2114    /// use portable_atomic_util::{Arc, Weak};
2115    ///
2116    /// struct Foo;
2117    ///
2118    /// impl Drop for Foo {
2119    ///     fn drop(&mut self) {
2120    ///         println!("dropped!");
2121    ///     }
2122    /// }
2123    ///
2124    /// let foo = Arc::new(Foo);
2125    /// let weak_foo = Arc::downgrade(&foo);
2126    /// let other_weak_foo = Weak::clone(&weak_foo);
2127    ///
2128    /// drop(weak_foo); // Doesn't print anything
2129    /// drop(foo); // Prints "dropped!"
2130    ///
2131    /// assert!(other_weak_foo.upgrade().is_none());
2132    /// ```
2133    fn drop(&mut self) {
2134        // If we find out that we were the last weak pointer, then it's time to
2135        // deallocate the data entirely. See the discussion in Arc::drop() about
2136        // the memory orderings.
2137        //
2138        // It's not necessary to check for the locked state here, because the
2139        // weak count can only be locked if there was precisely one weak ref,
2140        // meaning that drop could only subsequently run ON that remaining weak
2141        // ref, which can only happen after the lock is released.
2142        let inner = if let Some(inner) = self.inner() { inner } else { return };
2143
2144        if inner.weak.fetch_sub(1, Release) == 1 {
2145            acquire!(inner.weak);
2146            // Free the allocation without dropping T
2147            let ptr = self.ptr.as_ptr() as *mut ArcInner<mem::ManuallyDrop<T>>;
2148            drop(unsafe { Box::from_raw(ptr) });
2149        }
2150    }
2151}
2152
2153impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
2154    /// Equality for two `Arc`s.
2155    ///
2156    /// Two `Arc`s are equal if their inner values are equal, even if they are
2157    /// stored in different allocations.
2158    ///
2159    /// If `T` also implements `Eq` (implying reflexivity of equality),
2160    /// two `Arc`s that point to the same allocation are always equal.
2161    ///
2162    /// # Examples
2163    ///
2164    /// ```
2165    /// use portable_atomic_util::Arc;
2166    ///
2167    /// let five = Arc::new(5);
2168    ///
2169    /// assert!(five == Arc::new(5));
2170    /// ```
2171    #[inline]
2172    fn eq(&self, other: &Self) -> bool {
2173        **self == **other
2174    }
2175
2176    /// Inequality for two `Arc`s.
2177    ///
2178    /// Two `Arc`s are not equal if their inner values are not equal.
2179    ///
2180    /// If `T` also implements `Eq` (implying reflexivity of equality),
2181    /// two `Arc`s that point to the same value are always equal.
2182    ///
2183    /// # Examples
2184    ///
2185    /// ```
2186    /// use portable_atomic_util::Arc;
2187    ///
2188    /// let five = Arc::new(5);
2189    ///
2190    /// assert!(five != Arc::new(6));
2191    /// ```
2192    #[allow(clippy::partialeq_ne_impl)]
2193    #[inline]
2194    fn ne(&self, other: &Self) -> bool {
2195        **self != **other
2196    }
2197}
2198
2199impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
2200    /// Partial comparison for two `Arc`s.
2201    ///
2202    /// The two are compared by calling `partial_cmp()` on their inner values.
2203    ///
2204    /// # Examples
2205    ///
2206    /// ```
2207    /// use std::cmp::Ordering;
2208    ///
2209    /// use portable_atomic_util::Arc;
2210    ///
2211    /// let five = Arc::new(5);
2212    ///
2213    /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
2214    /// ```
2215    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
2216        (**self).partial_cmp(&**other)
2217    }
2218
2219    /// Less-than comparison for two `Arc`s.
2220    ///
2221    /// The two are compared by calling `<` on their inner values.
2222    ///
2223    /// # Examples
2224    ///
2225    /// ```
2226    /// use portable_atomic_util::Arc;
2227    ///
2228    /// let five = Arc::new(5);
2229    ///
2230    /// assert!(five < Arc::new(6));
2231    /// ```
2232    fn lt(&self, other: &Self) -> bool {
2233        *(*self) < *(*other)
2234    }
2235
2236    /// 'Less than or equal to' comparison for two `Arc`s.
2237    ///
2238    /// The two are compared by calling `<=` on their inner values.
2239    ///
2240    /// # Examples
2241    ///
2242    /// ```
2243    /// use portable_atomic_util::Arc;
2244    ///
2245    /// let five = Arc::new(5);
2246    ///
2247    /// assert!(five <= Arc::new(5));
2248    /// ```
2249    fn le(&self, other: &Self) -> bool {
2250        *(*self) <= *(*other)
2251    }
2252
2253    /// Greater-than comparison for two `Arc`s.
2254    ///
2255    /// The two are compared by calling `>` on their inner values.
2256    ///
2257    /// # Examples
2258    ///
2259    /// ```
2260    /// use portable_atomic_util::Arc;
2261    ///
2262    /// let five = Arc::new(5);
2263    ///
2264    /// assert!(five > Arc::new(4));
2265    /// ```
2266    fn gt(&self, other: &Self) -> bool {
2267        *(*self) > *(*other)
2268    }
2269
2270    /// 'Greater than or equal to' comparison for two `Arc`s.
2271    ///
2272    /// The two are compared by calling `>=` on their inner values.
2273    ///
2274    /// # Examples
2275    ///
2276    /// ```
2277    /// use portable_atomic_util::Arc;
2278    ///
2279    /// let five = Arc::new(5);
2280    ///
2281    /// assert!(five >= Arc::new(5));
2282    /// ```
2283    fn ge(&self, other: &Self) -> bool {
2284        *(*self) >= *(*other)
2285    }
2286}
2287impl<T: ?Sized + Ord> Ord for Arc<T> {
2288    /// Comparison for two `Arc`s.
2289    ///
2290    /// The two are compared by calling `cmp()` on their inner values.
2291    ///
2292    /// # Examples
2293    ///
2294    /// ```
2295    /// use std::cmp::Ordering;
2296    ///
2297    /// use portable_atomic_util::Arc;
2298    ///
2299    /// let five = Arc::new(5);
2300    ///
2301    /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
2302    /// ```
2303    fn cmp(&self, other: &Self) -> Ordering {
2304        (**self).cmp(&**other)
2305    }
2306}
2307impl<T: ?Sized + Eq> Eq for Arc<T> {}
2308
2309impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
2310    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2311        fmt::Display::fmt(&**self, f)
2312    }
2313}
2314
2315impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
2316    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2317        fmt::Debug::fmt(&**self, f)
2318    }
2319}
2320
2321impl<T: ?Sized> fmt::Pointer for Arc<T> {
2322    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2323        fmt::Pointer::fmt(&(&**self as *const T), f)
2324    }
2325}
2326
2327impl<T: Default> Default for Arc<T> {
2328    /// Creates a new `Arc<T>`, with the `Default` value for `T`.
2329    ///
2330    /// # Examples
2331    ///
2332    /// ```
2333    /// use portable_atomic_util::Arc;
2334    ///
2335    /// let x: Arc<i32> = Default::default();
2336    /// assert_eq!(*x, 0);
2337    /// ```
2338    fn default() -> Self {
2339        // TODO: https://github.com/rust-lang/rust/pull/131460 / https://github.com/rust-lang/rust/pull/132031
2340        Self::new(T::default())
2341    }
2342}
2343
2344#[cfg(not(portable_atomic_no_min_const_generics))]
2345impl Default for Arc<str> {
2346    /// Creates an empty str inside an Arc.
2347    ///
2348    /// This may or may not share an allocation with other Arcs.
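    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let s: Arc<str> = Arc::default();
    /// assert_eq!("", &s[..]);
    /// ```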
2349    #[inline]
2350    fn default() -> Self {
2351        let arc: Arc<[u8]> = Arc::default();
2352        debug_assert!(core::str::from_utf8(&arc).is_ok());
2353        let ptr = Arc::into_inner_non_null(arc);
2354        unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<str>) }
2355    }
2356}
2357
2358#[cfg(not(portable_atomic_no_min_const_generics))]
2359impl<T> Default for Arc<[T]> {
2360    /// Creates an empty `[T]` inside an Arc.
2361    ///
2362    /// This may or may not share an allocation with other Arcs.
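    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use portable_atomic_util::Arc;
    ///
    /// let v: Arc<[i32]> = Arc::default();
    /// assert!(v.is_empty());
    /// ```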
2363    #[inline]
2364    fn default() -> Self {
2365        // TODO: we cannot use non-allocation optimization (https://github.com/rust-lang/rust/blob/1.93.0/library/alloc/src/sync.rs#L3807)
2366        // for now since casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized.
2367        let arr: [T; 0] = [];
2368        Arc::from(arr)
2369    }
2370}
2371
2372impl<T> Default for Pin<Arc<T>>
2373where
2374    T: ?Sized,
2375    Arc<T>: Default,
2376{
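    /// Creates a new `Pin<Arc<T>>` using the `Default` value of `Arc<T>`.
    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use std::pin::Pin;
    ///
    /// use portable_atomic_util::Arc;
    ///
    /// let x: Pin<Arc<i32>> = Default::default();
    /// assert_eq!(*x, 0);
    /// ```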
2377    #[inline]
2378    fn default() -> Self {
2379        unsafe { Pin::new_unchecked(Arc::<T>::default()) }
2380    }
2381}
2382
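/// Hashing an `Arc<T>` delegates to the inner value, so it produces the same hash as the `T` it
/// wraps.
///
/// An illustrative example (added here; not taken from the upstream `std` documentation):
///
/// ```
/// use std::collections::hash_map::DefaultHasher;
/// use std::hash::{Hash, Hasher};
///
/// use portable_atomic_util::Arc;
///
/// fn hash_of<T: Hash>(x: &T) -> u64 {
///     let mut hasher = DefaultHasher::new();
///     x.hash(&mut hasher);
///     hasher.finish()
/// }
///
/// assert_eq!(hash_of(&Arc::new(5)), hash_of(&5));
/// ```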
2383impl<T: ?Sized + Hash> Hash for Arc<T> {
2384    fn hash<H: Hasher>(&self, state: &mut H) {
2385        (**self).hash(state);
2386    }
2387}
2388
2389impl<T> From<T> for Arc<T> {
2390    /// Converts a `T` into an `Arc<T>`
2391    ///
2392    /// The conversion moves the value into a
2393    /// newly allocated `Arc`. It is equivalent to
2394    /// calling `Arc::new(t)`.
2395    ///
2396    /// # Example
2397    ///
2398    /// ```
2399    /// use portable_atomic_util::Arc;
2400    /// let x = 5;
2401    /// let arc = Arc::new(5);
2402    ///
2403    /// assert_eq!(Arc::from(x), arc);
2404    /// ```
2405    fn from(t: T) -> Self {
2406        Self::new(t)
2407    }
2408}
2409
2410// This just outputs the input as is, but can be used like an item-level block by using it with cfg.
2411// Note: This macro is items!({ }), not items! { }.
2412// An extra brace is used in input to make contents rustfmt-able.
2413#[cfg(not(portable_atomic_no_min_const_generics))]
2414macro_rules! items {
2415    ({$($tt:tt)*}) => {
2416        $($tt)*
2417    };
2418}
2419
2420#[cfg(not(portable_atomic_no_min_const_generics))]
2421items!({
2422    impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
2423        /// Converts a [`[T; N]`](prim@array) into an `Arc<[T]>`.
2424        ///
2425        /// The conversion moves the array into a newly allocated `Arc`.
2426        ///
2427        /// # Example
2428        ///
2429        /// ```
2430        /// use portable_atomic_util::Arc;
2431        /// let original: [i32; 3] = [1, 2, 3];
2432        /// let shared: Arc<[i32]> = Arc::from(original);
2433        /// assert_eq!(&[1, 2, 3], &shared[..]);
2434        /// ```
2435        #[inline]
2436        fn from(v: [T; N]) -> Self {
2437            // Casting Arc<[T; N]> -> Arc<[T]> requires unstable CoerceUnsized, so we convert via Box.
2438            // Since the compiler knows the actual size and metadata, the intermediate allocation is
2439            // optimized away and this generates the same code as using CoerceUnsized to convert Arc<[T; N]> to Arc<[T]>.
2440            // https://github.com/taiki-e/portable-atomic/issues/143#issuecomment-1866488569
2441            let v: Box<[T]> = Box::<[T; N]>::from(v);
2442            v.into()
2443        }
2444    }
2445});
2446
2447#[cfg(not(portable_atomic_no_maybe_uninit))]
2448impl<T: Clone> From<&[T]> for Arc<[T]> {
2449    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
2450    ///
2451    /// # Example
2452    ///
2453    /// ```
2454    /// use portable_atomic_util::Arc;
2455    /// let original: &[i32] = &[1, 2, 3];
2456    /// let shared: Arc<[i32]> = Arc::from(original);
2457    /// assert_eq!(&[1, 2, 3], &shared[..]);
2458    /// ```
2459    #[inline]
2460    fn from(v: &[T]) -> Self {
2461        unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
2462    }
2463}
2464
2465#[cfg(not(portable_atomic_no_maybe_uninit))]
2466impl<T: Clone> From<&mut [T]> for Arc<[T]> {
2467    /// Allocates a reference-counted slice and fills it by cloning `v`'s items.
2468    ///
2469    /// # Example
2470    ///
2471    /// ```
2472    /// use portable_atomic_util::Arc;
2473    /// let mut original = [1, 2, 3];
2474    /// let original: &mut [i32] = &mut original;
2475    /// let shared: Arc<[i32]> = Arc::from(original);
2476    /// assert_eq!(&[1, 2, 3], &shared[..]);
2477    /// ```
2478    #[inline]
2479    fn from(v: &mut [T]) -> Self {
2480        Self::from(&*v)
2481    }
2482}
2483
2484#[cfg(not(portable_atomic_no_maybe_uninit))]
2485impl From<&str> for Arc<str> {
2486    /// Allocates a reference-counted `str` and copies `v` into it.
2487    ///
2488    /// # Example
2489    ///
2490    /// ```
2491    /// use portable_atomic_util::Arc;
2492    /// let shared: Arc<str> = Arc::from("eggplant");
2493    /// assert_eq!("eggplant", &shared[..]);
2494    /// ```
2495    #[inline]
2496    fn from(v: &str) -> Self {
2497        let arc = Arc::<[u8]>::from(v.as_bytes());
2498        // SAFETY: `str` has the same layout as `[u8]`.
2499        // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout
2500        unsafe { Self::from_raw(Arc::into_raw(arc) as *const str) }
2501    }
2502}
2503
2504#[cfg(not(portable_atomic_no_maybe_uninit))]
2505impl From<&mut str> for Arc<str> {
2506    /// Allocates a reference-counted `str` and copies `v` into it.
2507    ///
2508    /// # Example
2509    ///
2510    /// ```
2511    /// use portable_atomic_util::Arc;
2512    /// let mut original = String::from("eggplant");
2513    /// let original: &mut str = &mut original;
2514    /// let shared: Arc<str> = Arc::from(original);
2515    /// assert_eq!("eggplant", &shared[..]);
2516    /// ```
2517    #[inline]
2518    fn from(v: &mut str) -> Self {
2519        Self::from(&*v)
2520    }
2521}
2522
2523#[cfg(not(portable_atomic_no_maybe_uninit))]
2524impl From<String> for Arc<str> {
2525    /// Allocates a reference-counted `str` and copies `v` into it.
2526    ///
2527    /// # Example
2528    ///
2529    /// ```
2530    /// use portable_atomic_util::Arc;
2531    /// let unique: String = "eggplant".to_owned();
2532    /// let shared: Arc<str> = Arc::from(unique);
2533    /// assert_eq!("eggplant", &shared[..]);
2534    /// ```
2535    #[inline]
2536    fn from(v: String) -> Self {
2537        Self::from(&v[..])
2538    }
2539}
2540
2541impl<T: ?Sized> From<Box<T>> for Arc<T> {
2542    /// Move a boxed object to a new, reference-counted allocation.
2543    ///
2544    /// # Example
2545    ///
2546    /// ```
2547    /// use portable_atomic_util::Arc;
2548    /// let unique: Box<str> = Box::from("eggplant");
2549    /// let shared: Arc<str> = Arc::from(unique);
2550    /// assert_eq!("eggplant", &shared[..]);
2551    /// ```
2552    #[inline]
2553    fn from(v: Box<T>) -> Self {
2554        Self::from_box(v)
2555    }
2556}
2557
2558#[cfg(not(portable_atomic_no_maybe_uninit))]
2559impl<T> From<Vec<T>> for Arc<[T]> {
2560    /// Allocates a reference-counted slice and moves `v`'s items into it.
2561    ///
2562    /// # Example
2563    ///
2564    /// ```
2565    /// use portable_atomic_util::Arc;
2566    /// let unique: Vec<i32> = vec![1, 2, 3];
2567    /// let shared: Arc<[i32]> = Arc::from(unique);
2568    /// assert_eq!(&[1, 2, 3], &shared[..]);
2569    /// ```
2570    #[inline]
2571    fn from(v: Vec<T>) -> Self {
2572        unsafe {
2573            let len = v.len();
2574            let cap = v.capacity();
2575            let vec_ptr = mem::ManuallyDrop::new(v).as_mut_ptr();
2576
2577            let mut arc = Self::new_uninit_slice(len);
2578            let data = Arc::get_mut_unchecked(&mut arc);
2579            ptr::copy_nonoverlapping(vec_ptr, data.as_mut_ptr() as *mut T, len);
2580
2581            // Create a `Vec<T>` with length 0, to deallocate the buffer
2582            // without dropping its contents or the allocator
2583            let _ = Vec::from_raw_parts(vec_ptr, 0, cap);
2584
2585            arc.assume_init()
2586        }
2587    }
2588}
2589
2590impl<'a, B> From<Cow<'a, B>> for Arc<B>
2591where
2592    B: ?Sized + ToOwned,
2593    Arc<B>: From<&'a B> + From<B::Owned>,
2594{
2595    /// Creates an atomically reference-counted pointer from a clone-on-write
2596    /// pointer by copying its content.
2597    ///
2598    /// # Example
2599    ///
2600    /// ```
2601    /// use std::borrow::Cow;
2602    ///
2603    /// use portable_atomic_util::Arc;
2604    ///
2605    /// let cow: Cow<'_, str> = Cow::Borrowed("eggplant");
2606    /// let shared: Arc<str> = Arc::from(cow);
2607    /// assert_eq!("eggplant", &shared[..]);
2608    /// ```
2609    #[inline]
2610    fn from(cow: Cow<'a, B>) -> Self {
2611        match cow {
2612            Cow::Borrowed(s) => Self::from(s),
2613            Cow::Owned(s) => Self::from(s),
2614        }
2615    }
2616}
2617
2618impl From<Arc<str>> for Arc<[u8]> {
2619    /// Converts an atomically reference-counted string slice into a byte slice.
2620    ///
2621    /// # Example
2622    ///
2623    /// ```
2624    /// use portable_atomic_util::Arc;
2625    /// let string: Arc<str> = Arc::from("eggplant");
2626    /// let bytes: Arc<[u8]> = Arc::from(string);
2627    /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
2628    /// ```
2629    #[inline]
2630    fn from(rc: Arc<str>) -> Self {
2631        // SAFETY: `str` has the same layout as `[u8]`.
2632        // https://doc.rust-lang.org/nightly/reference/type-layout.html#str-layout
2633        unsafe { Self::from_raw(Arc::into_raw(rc) as *const [u8]) }
2634    }
2635}
2636
2637#[cfg(not(portable_atomic_no_min_const_generics))]
2638items!({
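    /// Attempts to convert an `Arc<[T]>` into an `Arc<[T; N]>` without copying the elements,
    /// returning the original `Arc<[T]>` as the error if the length does not match `N`.
    ///
    /// An illustrative example (added here; not taken from the upstream `std` documentation):
    ///
    /// ```
    /// use std::convert::TryFrom;
    ///
    /// use portable_atomic_util::Arc;
    ///
    /// let slice: Arc<[i32]> = Arc::from([1, 2, 3]);
    /// let array: Arc<[i32; 3]> = Arc::try_from(slice).unwrap();
    /// assert_eq!([1, 2, 3], *array);
    /// ```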
2639    impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
2640        type Error = Arc<[T]>;
2641
2642        fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
2643            if boxed_slice.len() == N {
2644                let ptr = Arc::into_inner_non_null(boxed_slice);
2645                Ok(unsafe { Self::from_inner(ptr.cast::<ArcInner<[T; N]>>()) })
2646            } else {
2647                Err(boxed_slice)
2648            }
2649        }
2650    }
2651});
2652
2653#[cfg(not(portable_atomic_no_maybe_uninit))]
2654impl<T> FromIterator<T> for Arc<[T]> {
2655    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
2656    ///
2657    /// # Performance characteristics
2658    ///
2659    /// ## The general case
2660    ///
2661    /// In the general case, collecting into `Arc<[T]>` is done by first
2662    /// collecting into a `Vec<T>`. That is, when writing the following:
2663    ///
2664    /// ```
2665    /// use portable_atomic_util::Arc;
2666    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
2667    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2668    /// ```
2669    ///
2670    /// this behaves as if we wrote:
2671    ///
2672    /// ```
2673    /// use portable_atomic_util::Arc;
2674    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
2675    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
2676    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
2677    ///
2678    /// assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
2679    /// ```
2680    ///
2681    /// This will allocate as many times as needed for constructing the `Vec<T>`
2682    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
2683    ///
2684    /// ## Iterators of known length
2685    ///
2686    /// `std::sync::Arc` can make a single allocation here by specializing on `TrustedLen`, but
2687    /// that is unstable, so this implementation currently always collects into a `Vec<T>` first:
2688    ///
2689    /// ```
2690    /// use portable_atomic_util::Arc;
2691    /// let evens: Arc<[u8]> = (0..10).collect(); // Currently allocates a `Vec<u8>` first, then the `Arc<[u8]>`.
2692    ///
2693    /// assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
2694    /// ```
2695    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
2696        iter.into_iter().collect::<Vec<T>>().into()
2697    }
2698}
2699
2700impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
2701    fn borrow(&self) -> &T {
2702        self
2703    }
2704}
2705
2706impl<T: ?Sized> AsRef<T> for Arc<T> {
2707    fn as_ref(&self) -> &T {
2708        self
2709    }
2710}
2711
2712impl<T: ?Sized> Unpin for Arc<T> {}
2713
2714/// Gets the pointer to the data within the given `ArcInner`.
2715///
2716/// # Safety
2717///
2718/// `arc` must uphold the safety requirements for `.byte_add(data_offset)`.
2719/// This is automatically satisfied if it is a pointer to a valid `ArcInner`.
2720unsafe fn data_ptr<T: ?Sized>(arc: *mut ArcInner<T>, data: &T) -> *mut T {
2721    // SAFETY: the caller must uphold the safety contract.
2722    unsafe {
2723        let offset = data_offset::<T>(data);
2724        strict::byte_add(arc, offset) as *mut T
2725    }
2726}
2727
2728/// Gets the offset within an `ArcInner` for the payload behind a pointer.
2729fn data_offset<T: ?Sized>(ptr: &T) -> usize {
2730    // Align the unsized value to the end of the ArcInner.
2731    // Because ArcInner is repr(C), it will always be the last field in memory.
2732    data_offset_align(mem::align_of_val::<T>(ptr))
2733}
2734
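// Illustrative sketch of the computation below (added for clarity; assumes a 64-bit target where
// `ArcInner<()>` is two `usize` counters, i.e. 16 bytes with 8-byte alignment):
// - `align <= 16` (e.g. `u8`, `u64`, `u128`): no padding is needed, so the data offset is 16.
// - `align == 32` (e.g. a `#[repr(align(32))]` type): 16 bytes of padding, so the data offset is 32.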
2735#[inline]
2736fn data_offset_align(align: usize) -> usize {
2737    let layout = Layout::new::<ArcInner<()>>();
2738    layout.size() + layout::padding_needed_for(layout, align)
2739}
2740
2741/// A unique owning pointer to an [`ArcInner`] **that does not imply the contents are initialized,**
2742/// but will deallocate it (without dropping the value) when dropped.
2743///
2744/// This is a helper for [`Arc::make_mut()`] to ensure correct cleanup on panic.
2745struct UniqueArcUninit<T: ?Sized> {
2746    ptr: NonNull<ArcInner<T>>,
2747    layout_for_value: Layout,
2748}
2749
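// Intended lifecycle (a descriptive sketch based on the usage in `Arc::make_mut` above, not an
// additional API guarantee): `new` allocates the `ArcInner`, the caller writes the value through
// `data_ptr`, and `into_arc` takes ownership of the now-initialized allocation. If a panic occurs
// before `into_arc`, `Drop` frees the allocation without attempting to drop the (possibly
// uninitialized) value.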
2750impl<T: ?Sized> UniqueArcUninit<T> {
2751    /// Allocates an ArcInner with layout suitable to contain `for_value` or a clone of it.
2752    fn new(for_value: &T) -> Self {
2753        let layout = Layout::for_value(for_value);
2754        let ptr = unsafe { Arc::allocate_for_value(for_value) };
2755        Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout }
2756    }
2757
2758    /// Returns the pointer to be written into to initialize the [`Arc`].
2759    fn data_ptr(&mut self) -> *mut T {
2760        let offset = data_offset_align(self.layout_for_value.align());
2761        unsafe { strict::byte_add(self.ptr.as_ptr(), offset) as *mut T }
2762    }
2763
2764    /// Upgrade this into a normal [`Arc`].
2765    ///
2766    /// # Safety
2767    ///
2768    /// The data must have been initialized (by writing to [`Self::data_ptr()`]).
2769    unsafe fn into_arc(self) -> Arc<T> {
2770        let this = ManuallyDrop::new(self);
2771        let ptr = this.ptr.as_ptr();
2772
2773        // SAFETY: The pointer is valid as per `UniqueArcUninit::new`, and the caller is responsible
2774        // for having initialized the data.
2775        unsafe { Arc::from_ptr(ptr) }
2776    }
2777}
2778
2779impl<T: ?Sized> Drop for UniqueArcUninit<T> {
2780    fn drop(&mut self) {
2781        // SAFETY:
2782        // * new() produced a pointer safe to deallocate.
2783        // * We own the pointer unless into_arc() was called, which forgets us.
2784        unsafe {
2785            Global.deallocate(
2786                self.ptr.cast::<u8>(),
2787                arc_inner_layout_for_value_layout(self.layout_for_value),
2788            );
2789        }
2790    }
2791}
2792
2793#[cfg(not(portable_atomic_no_error_in_core))]
2794use core::error;
2795#[cfg(all(portable_atomic_no_error_in_core, feature = "std"))]
2796use std::error;
2797#[cfg(any(not(portable_atomic_no_error_in_core), feature = "std"))]
2798impl<T: ?Sized + error::Error> error::Error for Arc<T> {
2799    #[allow(deprecated)]
2800    fn cause(&self) -> Option<&dyn error::Error> {
2801        error::Error::cause(&**self)
2802    }
2803    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
2804        error::Error::source(&**self)
2805    }
2806}
2807
2808#[cfg(feature = "std")]
2809mod std_impls {
2810    // TODO: Other trait implementations that are stable but we currently don't provide:
2811    // - alloc::ffi
2812    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3C%26CStr%3E-for-Arc%3CCStr%3E
2813    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3C%26mut+CStr%3E-for-Arc%3CCStr%3E
2814    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-From%3CCString%3E-for-Arc%3CCStr%3E
2815    //   - https://doc.rust-lang.org/nightly/alloc/sync/struct.Arc.html#impl-Default-for-Arc%3CCStr%3E
2816    //   - Currently, we cannot implement these since CStr layout is not stable.
2817    // - std::ffi
2818    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26OsStr%3E-for-Arc%3COsStr%3E
2819    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26mut+OsStr%3E-for-Arc%3COsStr%3E
2820    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3COsString%3E-for-Arc%3COsStr%3E
2821    //   - Currently, we cannot implement these since OsStr layout is not stable.
2822    // - std::path
2823    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26Path%3E-for-Arc%3CPath%3E
2824    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3C%26mut+Path%3E-for-Arc%3CPath%3E
2825    //   - https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-From%3CPathBuf%3E-for-Arc%3CPath%3E
2826    //   - Currently, we cannot implement these since Path layout is not stable.
2827
2828    use std::io;
2829    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsFd-for-Arc%3CT%3E
2830    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsHandle-for-Arc%3CT%3E
2831    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsRawFd-for-Arc%3CT%3E
2832    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-AsSocket-for-Arc%3CT%3E
2833    // Note:
2834    // - T: ?Sized is currently only allowed on AsFd/AsHandle: https://github.com/rust-lang/rust/pull/114655#issuecomment-1977994288
2835    // - std doesn't implement AsRawHandle/AsRawSocket for Arc as of Rust 1.90.
2836    // - std::os::unix::io::AsRawFd and std::os::windows::io::{AsRawHandle, AsRawSocket} are available in all versions
2837    // - std::os::wasi::prelude::AsRawFd requires 1.56 (https://github.com/rust-lang/rust/commit/e555003e6d6b6d71ce5509a6b6c7a15861208d6c)
2838    // - std::os::unix::io::AsFd, std::os::wasi::prelude::AsFd, and std::os::windows::io::{AsHandle, AsSocket} require Rust 1.63
2839    // - std::os::wasi::io::AsFd requires Rust 1.65 (https://github.com/rust-lang/rust/pull/103308)
2840    // - std::os::fd requires Rust 1.66 (https://github.com/rust-lang/rust/pull/98368)
2841    // - std::os::hermit::io::AsFd requires Rust 1.69 (https://github.com/rust-lang/rust/commit/b5fb4f3d9b1b308d59cab24ef2f9bf23dad948aa)
2842    // - std::os::fd for HermitOS requires Rust 1.81 (https://github.com/rust-lang/rust/pull/126346)
2843    // - std::os::fd for Trusty requires Rust 1.87 (no std support before it, https://github.com/rust-lang/rust/commit/7f6ee12526700e037ef34912b2b0c628028d382c)
2844    // - std::os::solid::io::AsFd is unstable (solid_ext, https://github.com/rust-lang/rust/pull/115159)
2845    // Note: we don't implement unstable ones.
2846    #[cfg(not(portable_atomic_no_io_safety))]
2847    #[cfg(target_os = "trusty")]
2848    use std::os::fd;
2849    #[cfg(not(portable_atomic_no_io_safety))]
2850    #[cfg(target_os = "hermit")]
2851    use std::os::hermit::io as fd;
2852    #[cfg(unix)]
2853    use std::os::unix::io as fd;
2854    #[cfg(not(portable_atomic_no_io_safety))]
2855    #[cfg(target_os = "wasi")]
2856    use std::os::wasi::prelude as fd;
2857
2858    use super::Arc;
2859
2860    /// This impl allows implementing traits that require `AsRawFd` on Arc.
2861    /// ```
2862    /// # #[cfg(target_os = "hermit")]
2863    /// # use std::os::hermit::io::AsRawFd;
2864    /// # #[cfg(target_os = "wasi")]
2865    /// # use std::os::wasi::prelude::AsRawFd;
2866    /// # #[cfg(unix)]
2867    /// # use std::os::unix::io::AsRawFd;
2868    /// use std::net::UdpSocket;
2869    ///
2870    /// use portable_atomic_util::Arc;
2871    ///
2872    /// trait MyTrait: AsRawFd {}
2873    /// impl MyTrait for Arc<UdpSocket> {}
2874    /// ```
2875    #[cfg(any(
2876        unix,
2877        all(
2878            not(portable_atomic_no_io_safety),
2879            any(target_os = "hermit", target_os = "trusty", target_os = "wasi"),
2880        ),
2881    ))]
2882    impl<T: fd::AsRawFd> fd::AsRawFd for Arc<T> {
2883        #[inline]
2884        fn as_raw_fd(&self) -> fd::RawFd {
2885            (**self).as_raw_fd()
2886        }
2887    }
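    // Illustrative sketch (not part of the crate): the impl above simply dereferences to
    // the pointee, so every clone of the `Arc` reports the same raw file descriptor.
    // The loopback address below is an arbitrary placeholder.
    #[cfg(all(test, unix))]
    #[test]
    fn arc_as_raw_fd_delegates_sketch() {
        use std::net::UdpSocket;
        let socket = Arc::new(UdpSocket::bind("127.0.0.1:0").expect("bind"));
        let clone = Arc::clone(&socket);
        // Both handles delegate to the same underlying socket.
        assert_eq!(fd::AsRawFd::as_raw_fd(&socket), fd::AsRawFd::as_raw_fd(&clone));
    }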
2888    /// This impl allows implementing traits that require `AsFd` on Arc.
2889    /// ```
2890    /// # #[cfg(target_os = "hermit")]
2891    /// # use std::os::hermit::io::AsFd;
2892    /// # #[cfg(target_os = "wasi")]
2893    /// # use std::os::wasi::prelude::AsFd;
2894    /// # #[cfg(unix)]
2895    /// # use std::os::unix::io::AsFd;
2896    /// use std::net::UdpSocket;
2897    ///
2898    /// use portable_atomic_util::Arc;
2899    ///
2900    /// trait MyTrait: AsFd {}
2901    /// impl MyTrait for Arc<UdpSocket> {}
2902    /// ```
2903    #[cfg(not(portable_atomic_no_io_safety))]
2904    #[cfg(any(unix, target_os = "hermit", target_os = "trusty", target_os = "wasi"))]
2905    impl<T: ?Sized + fd::AsFd> fd::AsFd for Arc<T> {
2906        #[inline]
2907        fn as_fd(&self) -> fd::BorrowedFd<'_> {
2908            (**self).as_fd()
2909        }
2910    }
2911    /// This impl allows implementing traits that require `AsHandle` on Arc.
2912    /// ```
2913    /// # use std::os::windows::io::AsHandle;
2914    /// use std::fs::File;
2915    ///
2916    /// use portable_atomic_util::Arc;
2917    ///
2918    /// trait MyTrait: AsHandle {}
2919    /// impl MyTrait for Arc<File> {}
2920    /// ```
2921    #[cfg(not(portable_atomic_no_io_safety))]
2922    #[cfg(windows)]
2923    impl<T: ?Sized + std::os::windows::io::AsHandle> std::os::windows::io::AsHandle for Arc<T> {
2924        #[inline]
2925        fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> {
2926            (**self).as_handle()
2927        }
2928    }
2929    /// This impl allows implementing traits that require `AsSocket` on Arc.
2930    /// ```
2931    /// # use std::os::windows::io::AsSocket;
2932    /// use std::net::UdpSocket;
2933    ///
2934    /// use portable_atomic_util::Arc;
2935    ///
2936    /// trait MyTrait: AsSocket {}
2937    /// impl MyTrait for Arc<UdpSocket> {}
2938    /// ```
2939    #[cfg(not(portable_atomic_no_io_safety))]
2940    #[cfg(windows)]
2941    impl<T: std::os::windows::io::AsSocket> std::os::windows::io::AsSocket for Arc<T> {
2942        #[inline]
2943        fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> {
2944            (**self).as_socket()
2945        }
2946    }
2947
2948    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Read-for-Arc%3CFile%3E
2949    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Seek-for-Arc%3CFile%3E
2950    // https://doc.rust-lang.org/nightly/std/sync/struct.Arc.html#impl-Write-for-Arc%3CFile%3E
2951    impl io::Read for Arc<std::fs::File> {
2952        fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
2953            (&**self).read(buf)
2954        }
2955        #[cfg(not(portable_atomic_no_io_vec))]
2956        fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
2957            (&**self).read_vectored(bufs)
2958        }
2959        // fn read_buf(&mut self, cursor: io::BorrowedCursor<'_>) -> io::Result<()> {
2960        //     (&**self).read_buf(cursor)
2961        // }
2962        // #[inline]
2963        // fn is_read_vectored(&self) -> bool {
2964        //     (&**self).is_read_vectored()
2965        // }
2966        fn read_to_end(&mut self, buf: &mut alloc::vec::Vec<u8>) -> io::Result<usize> {
2967            (&**self).read_to_end(buf)
2968        }
2969        fn read_to_string(&mut self, buf: &mut alloc::string::String) -> io::Result<usize> {
2970            (&**self).read_to_string(buf)
2971        }
2972    }
2973    impl io::Write for Arc<std::fs::File> {
2974        fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
2975            (&**self).write(buf)
2976        }
2977        #[cfg(not(portable_atomic_no_io_vec))]
2978        fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
2979            (&**self).write_vectored(bufs)
2980        }
2981        // #[inline]
2982        // fn is_write_vectored(&self) -> bool {
2983        //     (&**self).is_write_vectored()
2984        // }
2985        #[inline]
2986        fn flush(&mut self) -> io::Result<()> {
2987            (&**self).flush()
2988        }
2989    }
2990    impl io::Seek for Arc<std::fs::File> {
2991        fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
2992            (&**self).seek(pos)
2993        }
2994    }
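    // Illustrative sketch (not part of the crate): the impls above compile because `&File`
    // itself implements `Read`/`Write`/`Seek`, so `(&**self)` borrows the shared file.
    // All clones of the `Arc` share one OS-level cursor. The temporary file name is a placeholder.
    #[cfg(test)]
    #[test]
    fn arc_file_read_sketch() {
        use std::io::Read;
        let path = std::env::temp_dir().join("portable_atomic_util_arc_file_read_sketch");
        std::fs::write(&path, b"hello").expect("write");
        let mut file = Arc::new(std::fs::File::open(&path).expect("open"));
        let mut buf = [0u8; 5];
        // Goes through the `Read` impl above, which delegates to `&File`.
        file.read_exact(&mut buf).expect("read");
        assert_eq!(&buf, b"hello");
        let _ = std::fs::remove_file(&path);
    }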
2995    // TODO: TcpStream and UnixStream: https://github.com/rust-lang/rust/pull/134190
2996    // impl io::Read for Arc<std::net::TcpStream> {
2997    //     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
2998    //         (&**self).read(buf)
2999    //     }
3000    //     // fn read_buf(&mut self, buf: io::BorrowedCursor<'_>) -> io::Result<()> {
3001    //     //     (&**self).read_buf(buf)
3002    //     // }
3003    //     #[cfg(not(portable_atomic_no_io_vec))]
3004    //     fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
3005    //         (&**self).read_vectored(bufs)
3006    //     }
3007    //     // #[inline]
3008    //     // fn is_read_vectored(&self) -> bool {
3009    //     //     (&**self).is_read_vectored()
3010    //     // }
3011    // }
3012    // impl io::Write for Arc<std::net::TcpStream> {
3013    //     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
3014    //         (&**self).write(buf)
3015    //     }
3016    //     #[cfg(not(portable_atomic_no_io_vec))]
3017    //     fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
3018    //         (&**self).write_vectored(bufs)
3019    //     }
3020    //     // #[inline]
3021    //     // fn is_write_vectored(&self) -> bool {
3022    //     //     (&**self).is_write_vectored()
3023    //     // }
3024    //     #[inline]
3025    //     fn flush(&mut self) -> io::Result<()> {
3026    //         (&**self).flush()
3027    //     }
3028    // }
3029    // #[cfg(unix)]
3030    // impl io::Read for Arc<std::os::unix::net::UnixStream> {
3031    //     fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
3032    //         (&**self).read(buf)
3033    //     }
3034    //     // fn read_buf(&mut self, buf: io::BorrowedCursor<'_>) -> io::Result<()> {
3035    //     //     (&**self).read_buf(buf)
3036    //     // }
3037    //     #[cfg(not(portable_atomic_no_io_vec))]
3038    //     fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
3039    //         (&**self).read_vectored(bufs)
3040    //     }
3041    //     // #[inline]
3042    //     // fn is_read_vectored(&self) -> bool {
3043    //     //     (&**self).is_read_vectored()
3044    //     // }
3045    // }
3046    // #[cfg(unix)]
3047    // impl io::Write for Arc<std::os::unix::net::UnixStream> {
3048    //     fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
3049    //         (&**self).write(buf)
3050    //     }
3051    //     #[cfg(not(portable_atomic_no_io_vec))]
3052    //     fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
3053    //         (&**self).write_vectored(bufs)
3054    //     }
3055    //     // #[inline]
3056    //     // fn is_write_vectored(&self) -> bool {
3057    //     //     (&**self).is_write_vectored()
3058    //     // }
3059    //     #[inline]
3060    //     fn flush(&mut self) -> io::Result<()> {
3061    //         (&**self).flush()
3062    //     }
3063    // }
3064}
3065
3066use self::clone::CloneToUninit;
3067mod clone {
3068    use core::ptr;
3069    #[cfg(not(portable_atomic_no_maybe_uninit))]
3070    use core::{
3071        mem::{self, MaybeUninit},
3072        slice,
3073    };
3074
3075    #[cfg(not(portable_atomic_no_maybe_uninit))]
3076    use super::strict;
3077
3078    // Based on unstable core::clone::CloneToUninit.
3079    // This trait is private and cannot be implemented for types outside of `portable-atomic-util`.
3080    #[doc(hidden)] // private API
3081    #[allow(unknown_lints, unnameable_types)] // Not public API. unnameable_types is available on Rust 1.79+
3082    pub unsafe trait CloneToUninit {
3083        unsafe fn clone_to_uninit(&self, dest: *mut u8);
3084    }
3085    unsafe impl<T: Clone> CloneToUninit for T {
3086        #[inline]
3087        unsafe fn clone_to_uninit(&self, dest: *mut u8) {
3088            // SAFETY: we're calling a specialization with the same contract
3089            unsafe { clone_one(self, dest as *mut T) }
3090        }
3091    }
3092    #[cfg(not(portable_atomic_no_maybe_uninit))]
3093    unsafe impl<T: Clone> CloneToUninit for [T] {
3094        #[inline]
3095        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
3096        unsafe fn clone_to_uninit(&self, dest: *mut u8) {
3097            let dest: *mut [T] = strict::with_metadata_of(dest, self);
3098            // SAFETY: we're calling a specialization with the same contract
3099            unsafe { clone_slice(self, dest) }
3100        }
3101    }
3102    #[cfg(not(portable_atomic_no_maybe_uninit))]
3103    unsafe impl CloneToUninit for str {
3104        #[inline]
3105        #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
3106        unsafe fn clone_to_uninit(&self, dest: *mut u8) {
3107            // SAFETY: str is just a [u8] with UTF-8 invariant
3108            unsafe { self.as_bytes().clone_to_uninit(dest) }
3109        }
3110    }
3111    // Note: Currently, we cannot implement this for CStr/OsStr/Path since their layout is not stable.
3112
3113    #[inline]
3114    unsafe fn clone_one<T: Clone>(src: &T, dst: *mut T) {
3115        // SAFETY: The safety conditions of clone_to_uninit() are a superset of those of
3116        // ptr::write().
3117        unsafe {
3118            // We hope the optimizer will figure out to create the cloned value in-place,
3119            // avoiding ever storing it on the stack and then copying it to the destination.
3120            ptr::write(dst, src.clone());
3121        }
3122    }
3123    #[cfg(not(portable_atomic_no_maybe_uninit))]
3124    #[inline]
3125    #[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
3126    unsafe fn clone_slice<T: Clone>(src: &[T], dst: *mut [T]) {
3127        let len = src.len();
3128
3129        // SAFETY: The produced `&mut` is valid because:
3130        // * The caller is obligated to provide a pointer which is valid for writes.
3131        // * All bytes pointed to are in MaybeUninit, so we don't care about the memory's
3132        //   initialization status.
3133        let uninit_ref = unsafe { &mut *(dst as *mut [MaybeUninit<T>]) };
3134
3135        // This is the most likely mistake to make, so check it as a debug assertion.
3136        debug_assert_eq!(
3137            len,
3138            uninit_ref.len(), // <*const [T]>::len requires Rust 1.79+
3139            "clone_to_uninit() source and destination must have equal lengths",
3140        );
3141
3142        // Copy the elements
3143        let mut initializing = InitializingSlice::from_fully_uninit(uninit_ref);
3144        for element_ref in src {
3145            // If the clone() panics, `initializing` will take care of the cleanup.
3146            initializing.push(element_ref.clone());
3147        }
3148        // If we reach here, then the entire slice is initialized, and we've satisfied our
3149        // responsibilities to the caller. Disarm the cleanup guard by forgetting it.
3150        mem::forget(initializing);
3151    }
3152
3153    /// Ownership of a collection of values stored in a non-owned `[MaybeUninit<T>]`, some of which
3154    /// are not yet initialized. This is sort of like a `Vec` that doesn't own its allocation.
3155    /// Its responsibility is to provide cleanup on unwind by dropping the values that *are*
3156    /// initialized, unless disarmed by forgetting.
3157    ///
3158    /// This is a helper for `impl<T: Clone> CloneToUninit for [T]`.
3159    #[cfg(not(portable_atomic_no_maybe_uninit))]
3160    struct InitializingSlice<'a, T> {
3161        data: &'a mut [MaybeUninit<T>],
3162        /// Number of elements of `*self.data` that are initialized.
3163        initialized_len: usize,
3164    }
3165    #[cfg(not(portable_atomic_no_maybe_uninit))]
3166    impl<'a, T> InitializingSlice<'a, T> {
3167        #[inline]
3168        fn from_fully_uninit(data: &'a mut [MaybeUninit<T>]) -> Self {
3169            Self { data, initialized_len: 0 }
3170        }
3171        /// Push a value onto the end of the initialized part of the slice.
3172        ///
3173        /// # Panics
3174        ///
3175        /// Panics if the slice is already fully initialized.
3176        #[inline]
3177        fn push(&mut self, value: T) {
3178            self.data[self.initialized_len] = MaybeUninit::new(value);
3179            self.initialized_len += 1;
3180        }
3181    }
3182    #[cfg(not(portable_atomic_no_maybe_uninit))]
3183    impl<T> Drop for InitializingSlice<'_, T> {
3184        #[cold] // will only be invoked on unwind
3185        fn drop(&mut self) {
3186            let initialized_slice = unsafe {
3187                slice::from_raw_parts_mut(self.data.as_mut_ptr() as *mut T, self.initialized_len)
3188            };
3189            // SAFETY:
3190            // * the pointer is valid because it was made from a mutable reference
3191            // * `initialized_len` counts the initialized elements as an invariant of this type,
3192            //   so each of the pointed-to elements is initialized and may be dropped.
3193            unsafe {
3194                ptr::drop_in_place::<[T]>(initialized_slice);
3195            }
3196        }
3197    }
3198}
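// Illustrative sketch (not part of the crate): exercising the private `CloneToUninit`
// trait by cloning a slice into caller-provided uninitialized storage and reading the
// initialized values back out.
#[cfg(all(test, not(portable_atomic_no_maybe_uninit)))]
#[test]
fn clone_to_uninit_slice_sketch() {
    use alloc::string::String;
    use core::mem::MaybeUninit;
    let src = [String::from("a"), String::from("b")];
    let mut dst: [MaybeUninit<String>; 2] = [MaybeUninit::uninit(), MaybeUninit::uninit()];
    // SAFETY: `dst` has the same length, size, and alignment as `src` and is writable.
    unsafe { CloneToUninit::clone_to_uninit(&src[..], dst.as_mut_ptr() as *mut u8) };
    // SAFETY: every element of `dst` was initialized by `clone_to_uninit` above.
    let cloned = unsafe { [dst[0].as_ptr().read(), dst[1].as_ptr().read()] };
    assert_eq!(cloned, src);
}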
3199
3200mod layout {
3201    #[cfg(not(portable_atomic_no_maybe_uninit))]
3202    use core::isize;
3203    use core::{alloc::Layout, cmp, usize};
3204
3205    // Based on unstable Layout::padding_needed_for.
3206    #[inline]
3207    #[must_use]
3208    pub(super) fn padding_needed_for(layout: Layout, align: usize) -> usize {
3209        // FIXME: Can we just change the type on this to `Alignment`?
3210        if !align.is_power_of_two() {
3211            return usize::MAX;
3212        }
3213        let len_rounded_up = size_rounded_up_to_custom_align(layout, align);
3214        // SAFETY: Cannot overflow because the rounded-up value is never less than `layout.size()`.
3215        len_rounded_up.wrapping_sub(layout.size()) // can use unchecked_sub
3216    }
3217
3218    /// Returns the smallest multiple of `align` greater than or equal to `self.size()`.
3219    ///
3220    /// This can return at most `Alignment::MAX` (aka `isize::MAX + 1`)
3221    /// because the original size is at most `isize::MAX`.
3222    #[inline]
3223    fn size_rounded_up_to_custom_align(layout: Layout, align: usize) -> usize {
3224        // Rounded up value is:
3225        //   size_rounded_up = (size + align - 1) & !(align - 1);
3226        //
3227        // The arithmetic we do here can never overflow:
3228        //
3229        // 1. align is guaranteed to be > 0, so align - 1 is always
3230        //    valid.
3231        //
3232        // 2. size is at most `isize::MAX`, so adding `align - 1` (which is at
3233        //    most `isize::MAX`) can never overflow a `usize`.
3234        //
3235        // 3. masking by the alignment can remove at most `align - 1`,
3236        //    which is what we just added, thus the value we return is never
3237        //    less than the original `size`.
3238        //
3239        // (Size 0 Align MAX is already aligned, so stays the same, but things like
3240        // Size 1 Align MAX or Size isize::MAX Align 2 round up to `isize::MAX + 1`.)
3241        let align_m1 = align.wrapping_sub(1);
3242        layout.size().wrapping_add(align_m1) & !align_m1
3243    }
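    // Worked example for `size_rounded_up_to_custom_align` (illustrative): for size = 10
    // and align = 8, (10 + 7) & !7 == 17 & !7 == 16, the smallest multiple of 8 that is >= 10.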
3244
3245    // Based on Layout::pad_to_align stabilized in Rust 1.44.
3246    #[inline]
3247    #[must_use]
3248    pub(super) fn pad_to_align(layout: Layout) -> Layout {
3249        // This cannot overflow. Quoting from the invariant of Layout:
3250        // > `size`, when rounded up to the nearest multiple of `align`,
3251        // > must not overflow isize (i.e., the rounded value must be
3252        // > less than or equal to `isize::MAX`)
3253        let new_size = size_rounded_up_to_custom_align(layout, layout.align());
3254
3255        // SAFETY: padded size is guaranteed to not exceed `isize::MAX`.
3256        unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }
3257    }
3258
3259    // Based on Layout::extend stabilized in Rust 1.44.
3260    #[inline]
3261    pub(super) fn extend(layout: Layout, next: Layout) -> Option<(Layout, usize)> {
3262        let new_align = cmp::max(layout.align(), next.align());
3263        let offset = size_rounded_up_to_custom_align(layout, next.align());
3264
3265        // SAFETY: `offset` is at most `isize::MAX + 1` (such as from aligning
3266        // to `Alignment::MAX`) and `next.size` is at most `isize::MAX` (from the
3267        // `Layout` type invariant).  Thus the largest possible `new_size` is
3268        // `isize::MAX + 1 + isize::MAX`, which is `usize::MAX`, and cannot overflow.
3269        let new_size = offset.wrapping_add(next.size()); // can use unchecked_add
3270
3271        let layout = Layout::from_size_align(new_size, new_align).ok()?;
3272        Some((layout, offset))
3273    }
3274
3275    // Based on Layout::array stabilized in Rust 1.44.
3276    #[cfg(not(portable_atomic_no_maybe_uninit))]
3277    #[inline]
3278    pub(super) fn array<T>(n: usize) -> Option<Layout> {
3279        #[inline(always)]
3280        const fn max_size_for_align(align: usize) -> usize {
3281            // (power-of-two implies align != 0.)
3282
3283            // Rounded up size is:
3284            //   size_rounded_up = (size + align - 1) & !(align - 1);
3285            //
3286            // We know from above that align != 0. If adding (align - 1)
3287            // does not overflow, then rounding up will be fine.
3288            //
3289            // Conversely, &-masking with !(align - 1) will subtract off
3290            // only low-order-bits. Thus if overflow occurs with the sum,
3291            // the &-mask cannot subtract enough to undo that overflow.
3292            //
3293            // Above implies that checking for summation overflow is both
3294            // necessary and sufficient.
3295
3296            // SAFETY: the maximum possible alignment is `isize::MAX + 1`,
3297            // so the subtraction cannot overflow.
3298            (isize::MAX as usize + 1).wrapping_sub(align)
3299        }
3300
3301        #[inline]
3302        fn inner(element_layout: Layout, n: usize) -> Option<Layout> {
3303            let element_size = element_layout.size();
3304            let align = element_layout.align();
3305
3306            // We need to check two things about the size:
3307            //  - That the total size won't overflow a `usize`, and
3308            //  - That the total size still fits in an `isize`.
3309            // By using division we can check them both with a single threshold.
3310            // That'd usually be a bad idea, but thankfully here the element size
3311            // and alignment are constants, so the compiler will fold all of it.
3312            if element_size != 0 && n > max_size_for_align(align) / element_size {
3313                return None;
3314            }
3315
3316            // SAFETY: We just checked that we won't overflow `usize` when we multiply.
3317            // This is a useless hint inside this function, but after inlining this helps
3318            // deduplicate checks for whether the overall capacity is zero (e.g., in `RawVec`'s
3319            // allocation path) before/after this multiplication.
3320            let array_size = element_size.wrapping_mul(n); // can use unchecked_mul
3321
3322            // SAFETY: We just checked above that the `array_size` will not
3323            // exceed `isize::MAX` even when rounded up to the alignment.
3324            // And `Alignment` guarantees it's a power of two.
3325            unsafe { Some(Layout::from_size_align_unchecked(array_size, align)) }
3326        }
3327
3328        // Reduce the amount of code we need to monomorphize per `T`.
3329        inner(Layout::new::<T>(), n)
3330    }
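    // Illustrative sketch (not part of the crate): `extend` + `pad_to_align` reproduce the
    // `#[repr(C)]` layout rules; this is the same kind of arithmetic the `Arc` allocation
    // code uses to place a value after its header.
    #[cfg(test)]
    #[test]
    fn layout_helpers_sketch() {
        #[repr(C)]
        #[allow(dead_code)]
        struct Hdr {
            a: u64,
            b: [u16; 3],
        }
        let (combined, offset) = extend(Layout::new::<u64>(), Layout::new::<[u16; 3]>()).unwrap();
        // The payload offset and the padded total size match the #[repr(C)] struct.
        assert_eq!(offset, core::mem::size_of::<u64>());
        assert_eq!(combined.align(), core::mem::align_of::<Hdr>());
        assert_eq!(pad_to_align(combined).size(), core::mem::size_of::<Hdr>());
        #[cfg(not(portable_atomic_no_maybe_uninit))]
        assert_eq!(array::<u32>(5).unwrap().size(), core::mem::size_of::<[u32; 5]>());
    }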
3331}
3332
3333#[cfg(feature = "std")]
3334use std::process::abort;
3335#[cfg(not(feature = "std"))]
3336#[cold]
3337fn abort() -> ! {
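    // Panicking from `Abort::drop` while the `panic!("abort")` below is already unwinding
    // causes a double panic, which the panic runtime escalates to a process abort even
    // without `std::process::abort`.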
3338    struct Abort;
3339    impl Drop for Abort {
3340        fn drop(&mut self) {
3341            panic!();
3342        }
3343    }
3344
3345    let _abort = Abort;
3346    panic!("abort")
3347}
3348
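// A dangling `Weak` (one created by `Weak::new()` without ever having an allocation) is
// represented by the sentinel address `usize::MAX`, which can never be the address of a
// real `ArcInner` allocation.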
3349fn is_dangling<T: ?Sized>(ptr: *const T) -> bool {
3350    (ptr as *const ()).addr() == usize::MAX
3351}
3352
3353// Based on unstable alloc::alloc::Global.
3354//
3355// Note: unlike alloc::alloc::Global that returns NonNull<[u8]>,
3356// this returns NonNull<u8>.
3357struct Global;
3358#[allow(clippy::unused_self)]
3359impl Global {
3360    #[inline]
3361    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3362    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Option<NonNull<u8>> {
3363        // Layout::dangling is unstable
3364        #[inline]
3365        #[must_use]
3366        fn dangling(layout: Layout) -> NonNull<u8> {
3367            // SAFETY: align is guaranteed to be non-zero
3368            unsafe { NonNull::new_unchecked(strict::without_provenance_mut::<u8>(layout.align())) }
3369        }
3370
3371        match layout.size() {
3372            0 => Some(dangling(layout)),
3373            // SAFETY: `layout` is non-zero in size,
3374            _size => unsafe {
3375                let raw_ptr = if zeroed {
3376                    alloc::alloc::alloc_zeroed(layout)
3377                } else {
3378                    alloc::alloc::alloc(layout)
3379                };
3380                NonNull::new(raw_ptr)
3381            },
3382        }
3383    }
3384    #[inline]
3385    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3386    fn allocate(self, layout: Layout) -> Option<NonNull<u8>> {
3387        self.alloc_impl(layout, false)
3388    }
3389    #[cfg(not(portable_atomic_no_maybe_uninit))]
3390    #[inline]
3391    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3392    fn allocate_zeroed(self, layout: Layout) -> Option<NonNull<u8>> {
3393        self.alloc_impl(layout, true)
3394    }
3395    #[inline]
3396    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3397    unsafe fn deallocate(self, ptr: NonNull<u8>, layout: Layout) {
3398        if layout.size() != 0 {
3399            // SAFETY:
3400            // * We have checked that `layout` is non-zero in size.
3401            // * The caller is obligated to provide a layout that "fits", and in this case,
3402            //   "fit" always means a layout that is equal to the original, because our
3403            //   `allocate()` and `allocate_zeroed()` implementations never return a larger
3404            //   allocation than requested.
3405            // * Other conditions must be upheld by the caller, as per `Allocator::deallocate()`'s
3406            //   safety documentation.
3407            unsafe { alloc::alloc::dealloc(ptr.as_ptr(), layout) }
3408        }
3409    }
3410}
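// Illustrative sketch (not part of the crate): `Global` returns a dangling, well-aligned
// pointer for zero-sized layouts instead of calling the global allocator, and
// `deallocate` is a matching no-op for them.
#[cfg(test)]
#[test]
fn global_alloc_roundtrip_sketch() {
    let layout = Layout::new::<[u64; 4]>();
    let ptr = Global.allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
    // SAFETY: `ptr` was just allocated with `layout` and has not been deallocated yet.
    unsafe { Global.deallocate(ptr, layout) };
    // Zero-sized layouts never touch the global allocator.
    let zst = Layout::new::<[u64; 0]>();
    let zst_ptr = Global.allocate(zst).unwrap();
    // SAFETY: deallocating a zero-sized layout returned by `allocate` is a no-op.
    unsafe { Global.deallocate(zst_ptr, zst) };
}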