#![allow(clippy::must_use_candidate)]
#![allow(clippy::undocumented_unsafe_blocks)]
use alloc::{
alloc::handle_alloc_error,
borrow::{Cow, ToOwned},
boxed::Box,
};
#[cfg(not(portable_atomic_no_maybe_uninit))]
use alloc::{string::String, vec::Vec};
#[cfg(not(portable_atomic_no_min_const_generics))]
use core::convert::TryFrom;
use core::{
alloc::Layout,
any::Any,
borrow,
cmp::Ordering,
fmt,
hash::{Hash, Hasher},
isize,
marker::PhantomData,
mem::{self, ManuallyDrop},
ops::Deref,
pin::Pin,
ptr::{self, NonNull},
usize,
};
#[cfg(not(portable_atomic_no_maybe_uninit))]
use core::{iter::FromIterator, slice};
#[cfg(portable_atomic_unstable_coerce_unsized)]
use core::{marker::Unsize, ops::CoerceUnsized};
use portable_atomic::{
self as atomic, hint,
Ordering::{Acquire, Relaxed, Release},
};
use crate::utils::ptr as strict;
#[cfg(portable_atomic_no_strict_provenance)]
use crate::utils::ptr::PtrExt as _;
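// A soft limit on the number of references: exceeding it aborts the program.
// Keeping counts at or below `isize::MAX` leaves enough headroom that racing
// increments cannot wrap the counter around before the overflow checks in
// `clone`/`downgrade` fire.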
const MAX_REFCOUNT: usize = isize::MAX as usize;
const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
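// ThreadSanitizer does not understand memory fences, so when building for it
// the acquire fence that pairs with the release decrements is replaced by an
// acquire load of the counter.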
#[cfg(not(portable_atomic_sanitize_thread))]
macro_rules! acquire {
($x:expr) => {
atomic::fence(Acquire)
};
}
#[cfg(portable_atomic_sanitize_thread)]
macro_rules! acquire {
($x:expr) => {
$x.load(Acquire)
};
}
pub struct Arc<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
phantom: PhantomData<ArcInner<T>>,
}
unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
#[cfg(not(portable_atomic_no_core_unwind_safe))]
impl<T: ?Sized + core::panic::RefUnwindSafe> core::panic::UnwindSafe for Arc<T> {}
#[cfg(all(portable_atomic_no_core_unwind_safe, feature = "std"))]
impl<T: ?Sized + std::panic::RefUnwindSafe> std::panic::UnwindSafe for Arc<T> {}
#[cfg(portable_atomic_unstable_coerce_unsized)]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
impl<T: ?Sized> Arc<T> {
#[inline]
fn into_inner_non_null(this: Self) -> NonNull<ArcInner<T>> {
let this = mem::ManuallyDrop::new(this);
this.ptr
}
#[inline]
unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
Self { ptr, phantom: PhantomData }
}
#[inline]
unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
}
}
pub struct Weak<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
}
unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("(Weak)")
}
}
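// The heap allocation shared by all `Arc`s and `Weak`s pointing at one value.
// `repr(C)` pins the field order so the two counters always sit at the start
// of the allocation (which `initialize_arc_inner` relies on), and `align(2)`
// keeps every live allocation off the odd address `usize::MAX`, the sentinel
// used for dangling `Weak`s. Note that `weak` also counts one implicit weak
// reference held collectively by all strong references.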
#[repr(C, align(2))]
struct ArcInner<T: ?Sized> {
strong: atomic::AtomicUsize,
weak: atomic::AtomicUsize,
data: T,
}
fn arc_inner_layout_for_value_layout(layout: Layout) -> Layout {
layout::pad_to_align(layout::extend(Layout::new::<ArcInner<()>>(), layout).unwrap().0)
}
unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
impl<T> Arc<T> {
#[inline]
pub fn new(data: T) -> Self {
let x: Box<_> = Box::new(ArcInner {
strong: atomic::AtomicUsize::new(1),
weak: atomic::AtomicUsize::new(1),
data,
});
unsafe { Self::from_inner(Box::leak(x).into()) }
}
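// `new_cyclic` allocates with `strong == 0` and `weak == 1`, so the `Weak`
// handed to `data_fn` cannot be upgraded while the value is uninitialized;
// only once the value has been written is the strong count released to 1.
//
// Usage sketch (assuming this module's `Arc` and `Weak` are in scope):
//
//     struct Node {
//         parent: Weak<Node>,
//     }
//     let node = Arc::new_cyclic(|me| Node { parent: me.clone() });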
#[inline]
pub fn new_cyclic<F>(data_fn: F) -> Self
where
F: FnOnce(&Weak<T>) -> T,
{
let init_ptr = Weak::new_uninit_ptr();
let weak = Weak { ptr: init_ptr };
let data = data_fn(&weak);
unsafe {
let inner = init_ptr.as_ptr();
ptr::write(data_ptr::<T>(inner, &data), data);
let prev_value = (*inner).strong.fetch_add(1, Release);
debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
mem::forget(weak);
Self::from_inner(init_ptr)
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
#[inline]
#[must_use]
pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
unsafe {
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
|ptr| ptr as *mut _,
))
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
#[inline]
#[must_use]
pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
unsafe {
Arc::from_ptr(Arc::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate_zeroed(layout),
|ptr| ptr as *mut _,
))
}
}
#[must_use]
pub fn pin(data: T) -> Pin<Self> {
unsafe { Pin::new_unchecked(Self::new(data)) }
}
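// `try_unwrap` races the strong count from 1 to 0. On success, the acquire
// fence synchronizes with all other owners' release decrements before the
// value is moved out, and the temporary `Weak` below releases the implicit
// weak reference, freeing the allocation once no `Weak`s remain.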
#[inline]
pub fn try_unwrap(this: Self) -> Result<T, Self> {
if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
return Err(this);
}
acquire!(this.inner().strong);
let this = ManuallyDrop::new(this);
let elem: T = unsafe { ptr::read(&this.ptr.as_ref().data) };
let _weak = Weak { ptr: this.ptr };
Ok(elem)
}
#[inline]
pub fn into_inner(this: Self) -> Option<T> {
let mut this = mem::ManuallyDrop::new(this);
if this.inner().strong.fetch_sub(1, Release) != 1 {
return None;
}
acquire!(this.inner().strong);
let inner = unsafe { ptr::read(Self::get_mut_unchecked(&mut this)) };
drop(Weak { ptr: this.ptr });
Some(inner)
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> Arc<[T]> {
#[inline]
#[must_use]
pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
}
#[inline]
#[must_use]
#[allow(clippy::missing_panics_doc)]
pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
unsafe {
Arc::from_ptr(Arc::allocate_for_layout(
layout::array::<T>(len).unwrap(),
|layout| Global.allocate_zeroed(layout),
|mem| {
let meta: *const _ =
slice::from_raw_parts(mem as *const mem::MaybeUninit<T>, len);
strict::with_metadata_of(mem, meta) as *mut ArcInner<[mem::MaybeUninit<T>]>
},
))
}
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> Arc<mem::MaybeUninit<T>> {
#[inline]
#[must_use = "`self` will be dropped if the result is not used"]
pub unsafe fn assume_init(self) -> Arc<T> {
let ptr = Arc::into_inner_non_null(self);
unsafe { Arc::from_inner(ptr.cast::<ArcInner<T>>()) }
}
}
impl<T: ?Sized + CloneToUninit> Arc<T> {
fn clone_from_ref(value: &T) -> Self {
let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(value);
unsafe {
value.clone_to_uninit(in_progress.data_ptr() as *mut u8);
in_progress.into_arc()
}
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> Arc<[mem::MaybeUninit<T>]> {
#[inline]
#[must_use = "`self` will be dropped if the result is not used"]
pub unsafe fn assume_init(self) -> Arc<[T]> {
let ptr = Arc::into_inner_non_null(self);
unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<[T]>) }
}
}
impl<T: ?Sized> Arc<T> {
#[inline]
pub unsafe fn from_raw(ptr: *const T) -> Self {
unsafe {
let offset = data_offset::<T>(&*ptr);
let arc_ptr = strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T>;
Self::from_ptr(arc_ptr)
}
}
#[must_use = "losing the pointer will leak memory"]
pub fn into_raw(this: Self) -> *const T {
let this = ManuallyDrop::new(this);
Self::as_ptr(&*this)
}
#[inline]
pub unsafe fn increment_strong_count(ptr: *const T) {
let arc = unsafe { mem::ManuallyDrop::new(Self::from_raw(ptr)) };
let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
}
#[inline]
pub unsafe fn decrement_strong_count(ptr: *const T) {
unsafe { drop(Self::from_raw(ptr)) }
}
#[must_use]
pub fn as_ptr(this: &Self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
unsafe { data_ptr::<T>(ptr, &**this) }
}
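// `downgrade` spins while the weak count is `usize::MAX`, the sentinel value
// `is_unique` uses to temporarily "lock" the count (see `is_unique` below).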
#[must_use = "this returns a new `Weak` pointer, \
without modifying the original `Arc`"]
#[allow(clippy::missing_panics_doc)]
pub fn downgrade(this: &Self) -> Weak<T> {
let mut cur = this.inner().weak.load(Relaxed);
loop {
if cur == usize::MAX {
hint::spin_loop();
cur = this.inner().weak.load(Relaxed);
continue;
}
assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
Ok(_) => {
debug_assert!(!is_dangling(this.ptr.as_ptr()));
return Weak { ptr: this.ptr };
}
Err(old) => cur = old,
}
}
}
#[inline]
#[must_use]
pub fn weak_count(this: &Self) -> usize {
let cnt = this.inner().weak.load(Relaxed);
if cnt == usize::MAX { 0 } else { cnt - 1 }
}
#[inline]
#[must_use]
pub fn strong_count(this: &Self) -> usize {
this.inner().strong.load(Relaxed)
}
#[inline]
fn inner(&self) -> &ArcInner<T> {
unsafe { self.ptr.as_ref() }
}
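// Slow path of `drop`, taken when the last strong reference goes away: drop
// the value in place, then let the temporary `Weak` release the implicit
// weak reference, which frees the allocation itself.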
#[inline(never)]
unsafe fn drop_slow(&mut self) {
let _weak = Weak { ptr: self.ptr };
unsafe { ptr::drop_in_place(&mut (*self.ptr.as_ptr()).data) };
}
#[inline]
#[must_use]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
ptr::eq(this.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
}
}
impl<T: ?Sized> Arc<T> {
unsafe fn allocate_for_layout(
value_layout: Layout,
allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let layout = arc_inner_layout_for_value_layout(value_layout);
let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
}
unsafe fn initialize_arc_inner(
ptr: NonNull<u8>,
_layout: Layout,
mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
unsafe {
let strong = inner as *mut atomic::AtomicUsize;
strong.write(atomic::AtomicUsize::new(1));
let weak = strong.add(1);
weak.write(atomic::AtomicUsize::new(1));
}
inner
}
}
impl<T: ?Sized> Arc<T> {
#[inline]
unsafe fn allocate_for_value(value: &T) -> *mut ArcInner<T> {
let ptr: *const T = value;
unsafe {
Self::allocate_for_layout(
Layout::for_value(value),
|layout| Global.allocate(layout),
|mem| strict::with_metadata_of(mem, ptr as *const ArcInner<T>),
)
}
}
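// Moves the boxed value into a fresh `ArcInner` byte-for-byte, then frees
// the box's allocation without running `T`'s destructor (via the
// `ManuallyDrop` cast).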
fn from_box(src: Box<T>) -> Arc<T> {
unsafe {
let value_size = mem::size_of_val(&*src);
let ptr = Self::allocate_for_value(&*src);
ptr::copy_nonoverlapping(
&*src as *const T as *const u8,
data_ptr::<T>(ptr, &*src) as *mut u8,
value_size,
);
let box_ptr = Box::into_raw(src);
let src = Box::from_raw(box_ptr as *mut mem::ManuallyDrop<T>);
drop(src);
Self::from_ptr(ptr)
}
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> Arc<[T]> {
unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[mem::MaybeUninit<T>]> {
unsafe {
Arc::allocate_for_layout(
layout::array::<T>(len).unwrap(),
|layout| Global.allocate(layout),
|mem| {
let meta: *const _ =
slice::from_raw_parts(mem as *const mem::MaybeUninit<T>, len);
strict::with_metadata_of(mem, meta) as *mut ArcInner<[mem::MaybeUninit<T>]>
},
)
}
}
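// Builds an `Arc<[T]>` from an iterator that is trusted to yield exactly
// `len` items; the `Guard` drops any already-written elements and frees the
// allocation if the iterator panics midway.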
unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Self {
struct Guard<T> {
mem: NonNull<u8>,
elems: *mut T,
layout: Layout,
n_elems: usize,
}
impl<T> Drop for Guard<T> {
fn drop(&mut self) {
unsafe {
let slice = slice::from_raw_parts_mut(self.elems, self.n_elems);
ptr::drop_in_place(slice);
Global.deallocate(self.mem, self.layout);
}
}
}
unsafe {
let ptr: *mut ArcInner<[mem::MaybeUninit<T>]> = Arc::allocate_for_slice(len);
let mem = ptr as *mut _ as *mut u8;
let layout = Layout::for_value(&*ptr);
let elems = (*ptr).data.as_mut_ptr() as *mut T;
let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
for (i, item) in iter.enumerate() {
ptr::write(elems.add(i), item);
guard.n_elems += 1;
}
mem::forget(guard);
Arc::from_ptr(ptr).assume_init()
}
}
}
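// A relaxed increment suffices here: a new reference can only be created
// from an existing one, and moving that existing reference across threads
// already provides the required synchronization. The overflow guard aborts
// rather than panicking so an unwinder can never observe a wrapped count.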
impl<T: ?Sized> Clone for Arc<T> {
#[inline]
fn clone(&self) -> Self {
let old_size = self.inner().strong.fetch_add(1, Relaxed);
if old_size > MAX_REFCOUNT {
abort();
}
unsafe { Self::from_inner(self.ptr) }
}
}
impl<T: ?Sized> Deref for Arc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.inner().data
}
}
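// `make_mut` handles three cases: (1) other strong references exist, so the
// value is cloned into a fresh allocation; (2) this is the only strong
// reference but `Weak`s remain, so the value is moved (not cloned) into a
// fresh allocation and the old one is left behind for the weak references;
// (3) the reference is fully unique and can be handed out mutably as-is.
//
// Usage sketch (assuming this module's `Arc` is in scope):
//
//     let mut data = Arc::new(5);
//     *Arc::make_mut(&mut data) += 1; // unique: mutates in place
//     let other = Arc::clone(&data);
//     *Arc::make_mut(&mut data) += 1; // shared: clones, `other` keeps 6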
impl<T: ?Sized + CloneToUninit> Arc<T> {
#[inline]
pub fn make_mut(this: &mut Self) -> &mut T {
let size_of_val = mem::size_of_val::<T>(&**this);
if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
*this = Arc::clone_from_ref(&**this);
} else if this.inner().weak.load(Relaxed) != 1 {
let _weak = Weak { ptr: this.ptr };
let mut in_progress: UniqueArcUninit<T> = UniqueArcUninit::new(&**this);
unsafe {
ptr::copy_nonoverlapping(
&**this as *const T as *const u8,
in_progress.data_ptr() as *mut u8,
size_of_val,
);
ptr::write(this, in_progress.into_arc());
}
} else {
this.inner().strong.store(1, Release);
}
unsafe { Self::get_mut_unchecked(this) }
}
}
impl<T: Clone> Arc<T> {
#[inline]
pub fn unwrap_or_clone(this: Self) -> T {
Self::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
}
}
impl<T: ?Sized> Arc<T> {
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if Self::is_unique(this) {
unsafe { Some(Self::get_mut_unchecked(this)) }
} else {
None
}
}
#[inline]
unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
unsafe { &mut (*this.ptr.as_ptr()).data }
}
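// Checking uniqueness must freeze both counters at once. This briefly
// "locks" the weak count by CAS-ing it from 1 to `usize::MAX`; `downgrade`
// spins while it observes that sentinel, so no new `Weak` can be created
// while the strong count is being inspected.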
#[inline]
fn is_unique(this: &Self) -> bool {
if this.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
let unique = this.inner().strong.load(Acquire) == 1;
this.inner().weak.store(1, Release);
unique
} else {
false
}
}
}
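// The release decrement orders all prior uses of the data before the
// decrement itself, and the acquire fence taken on the final decrement
// orders the destruction of the data after every other owner's decrement.
// Without this pairing, another thread's last write through the `Arc` could
// race with the destructor.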
impl<T: ?Sized> Drop for Arc<T> {
#[inline]
fn drop(&mut self) {
if self.inner().strong.fetch_sub(1, Release) != 1 {
return;
}
acquire!(self.inner().strong);
unsafe {
self.drop_slow();
}
}
}
impl Arc<dyn Any + Send + Sync> {
#[inline]
pub fn downcast<T>(self) -> Result<Arc<T>, Self>
where
T: Any + Send + Sync,
{
if (*self).is::<T>() {
unsafe {
let ptr = Arc::into_inner_non_null(self);
Ok(Arc::from_inner(ptr.cast::<ArcInner<T>>()))
}
} else {
Err(self)
}
}
}
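// `Weak::new` performs no allocation; it stores a dangling pointer with
// address `usize::MAX`, which `is_dangling` recognizes. A real `ArcInner`
// can never live at that address because of its alignment.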
impl<T> Weak<T> {
#[inline]
#[must_use]
pub const fn new() -> Self {
Self {
ptr: unsafe {
NonNull::new_unchecked(strict::without_provenance_mut::<ArcInner<T>>(usize::MAX))
},
}
}
#[inline]
#[must_use]
fn new_uninit_ptr() -> NonNull<ArcInner<T>> {
unsafe {
NonNull::new_unchecked(Self::allocate_for_layout(
Layout::new::<T>(),
|layout| Global.allocate(layout),
|ptr| ptr as *mut _,
))
}
}
}
struct WeakInner<'a> {
weak: &'a atomic::AtomicUsize,
strong: &'a atomic::AtomicUsize,
}
impl<T> Weak<T> {
#[inline]
pub unsafe fn from_raw(ptr: *const T) -> Self {
let ptr = if is_dangling(ptr) {
ptr as *mut ArcInner<T>
} else {
let offset = data_offset_align(mem::align_of::<T>());
unsafe { strict::byte_sub(ptr as *mut T, offset) as *mut ArcInner<T> }
};
Self { ptr: unsafe { NonNull::new_unchecked(ptr) } }
}
#[must_use = "losing the pointer will leak memory"]
pub fn into_raw(self) -> *const T {
ManuallyDrop::new(self).as_ptr()
}
}
impl<T> Weak<T> {
#[must_use]
pub fn as_ptr(&self) -> *const T {
let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
if is_dangling(ptr) {
ptr as *const T
} else {
unsafe {
let offset = data_offset_align(mem::align_of::<T>());
strict::byte_add(ptr, offset) as *const T
}
}
}
}
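// `upgrade` uses a CAS loop (via `fetch_update`) whose closure refuses to
// increment a strong count of zero: once the value has been dropped, the
// allocation cannot be revived.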
impl<T: ?Sized> Weak<T> {
#[must_use = "this returns a new `Arc`, \
without modifying the original weak pointer"]
pub fn upgrade(&self) -> Option<Arc<T>> {
#[inline]
fn checked_increment(n: usize) -> Option<usize> {
if n == 0 {
return None;
}
assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
Some(n + 1)
}
if self.inner()?.strong.fetch_update(Acquire, Relaxed, checked_increment).is_ok() {
unsafe { Some(Arc::from_inner(self.ptr)) }
} else {
None
}
}
#[must_use]
pub fn strong_count(&self) -> usize {
if let Some(inner) = self.inner() { inner.strong.load(Relaxed) } else { 0 }
}
#[must_use]
pub fn weak_count(&self) -> usize {
if let Some(inner) = self.inner() {
let weak = inner.weak.load(Acquire);
let strong = inner.strong.load(Relaxed);
if strong == 0 {
0
} else {
weak - 1
}
} else {
0
}
}
#[inline]
fn inner(&self) -> Option<WeakInner<'_>> {
let ptr = self.ptr.as_ptr();
if is_dangling(ptr) {
None
} else {
Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
}
}
#[inline]
#[must_use]
pub fn ptr_eq(&self, other: &Self) -> bool {
ptr::eq(self.ptr.as_ptr() as *const (), other.ptr.as_ptr() as *const ())
}
}
impl<T: ?Sized> Weak<T> {
unsafe fn allocate_for_layout(
value_layout: Layout,
allocate: impl FnOnce(Layout) -> Option<NonNull<u8>>,
mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let layout = arc_inner_layout_for_value_layout(value_layout);
let ptr = allocate(layout).unwrap_or_else(|| handle_alloc_error(layout));
unsafe { Self::initialize_arc_inner(ptr, layout, mem_to_arc_inner) }
}
unsafe fn initialize_arc_inner(
ptr: NonNull<u8>,
_layout: Layout,
mem_to_arc_inner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
) -> *mut ArcInner<T> {
let inner: *mut ArcInner<T> = mem_to_arc_inner(ptr.as_ptr());
unsafe {
let strong = inner as *mut atomic::AtomicUsize;
strong.write(atomic::AtomicUsize::new(0));
let weak = strong.add(1);
weak.write(atomic::AtomicUsize::new(1));
}
inner
}
}
impl<T: ?Sized> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Self {
if let Some(inner) = self.inner() {
let old_size = inner.weak.fetch_add(1, Relaxed);
if old_size > MAX_REFCOUNT {
abort();
}
}
Self { ptr: self.ptr }
}
}
impl<T> Default for Weak<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: ?Sized> Drop for Weak<T> {
fn drop(&mut self) {
let inner = if let Some(inner) = self.inner() { inner } else { return };
if inner.weak.fetch_sub(1, Release) == 1 {
acquire!(inner.weak);
let ptr = self.ptr.as_ptr() as *mut ArcInner<mem::ManuallyDrop<T>>;
drop(unsafe { Box::from_raw(ptr) });
}
}
}
impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
**self == **other
}
#[allow(clippy::partialeq_ne_impl)]
#[inline]
fn ne(&self, other: &Self) -> bool {
**self != **other
}
}
impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
(**self).partial_cmp(&**other)
}
fn lt(&self, other: &Self) -> bool {
*(*self) < *(*other)
}
fn le(&self, other: &Self) -> bool {
*(*self) <= *(*other)
}
fn gt(&self, other: &Self) -> bool {
*(*self) > *(*other)
}
fn ge(&self, other: &Self) -> bool {
*(*self) >= *(*other)
}
}
impl<T: ?Sized + Ord> Ord for Arc<T> {
fn cmp(&self, other: &Self) -> Ordering {
(**self).cmp(&**other)
}
}
impl<T: ?Sized + Eq> Eq for Arc<T> {}
impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&**self, f)
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<T: ?Sized> fmt::Pointer for Arc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&(&**self as *const T), f)
}
}
impl<T: Default> Default for Arc<T> {
fn default() -> Self {
Self::new(T::default())
}
}
#[cfg(not(portable_atomic_no_min_const_generics))]
impl Default for Arc<str> {
#[inline]
fn default() -> Self {
let arc: Arc<[u8]> = Arc::default();
debug_assert!(core::str::from_utf8(&arc).is_ok());
let ptr = Arc::into_inner_non_null(arc);
unsafe { Arc::from_ptr(ptr.as_ptr() as *mut ArcInner<str>) }
}
}
#[cfg(not(portable_atomic_no_min_const_generics))]
impl<T> Default for Arc<[T]> {
#[inline]
fn default() -> Self {
let arr: [T; 0] = [];
Arc::from(arr)
}
}
impl<T> Default for Pin<Arc<T>>
where
T: ?Sized,
Arc<T>: Default,
{
#[inline]
fn default() -> Self {
unsafe { Pin::new_unchecked(Arc::<T>::default()) }
}
}
impl<T: ?Sized + Hash> Hash for Arc<T> {
fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
}
impl<T> From<T> for Arc<T> {
fn from(t: T) -> Self {
Self::new(t)
}
}
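// `items!` delays parsing of its contents to macro-expansion time, so
// compilers that predate min-const-generics never see the `const N: usize`
// parameters when the cfg below is disabled.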
#[cfg(not(portable_atomic_no_min_const_generics))]
macro_rules! items {
({$($tt:tt)*}) => {
$($tt)*
};
}
#[cfg(not(portable_atomic_no_min_const_generics))]
items!({
impl<T, const N: usize> From<[T; N]> for Arc<[T]> {
#[inline]
fn from(v: [T; N]) -> Self {
let v: Box<[T]> = Box::<[T; N]>::from(v);
v.into()
}
}
});
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T: Clone> From<&[T]> for Arc<[T]> {
#[inline]
fn from(v: &[T]) -> Self {
unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T: Clone> From<&mut [T]> for Arc<[T]> {
#[inline]
fn from(v: &mut [T]) -> Self {
Self::from(&*v)
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl From<&str> for Arc<str> {
#[inline]
fn from(v: &str) -> Self {
let arc = Arc::<[u8]>::from(v.as_bytes());
unsafe { Self::from_raw(Arc::into_raw(arc) as *const str) }
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl From<&mut str> for Arc<str> {
#[inline]
fn from(v: &mut str) -> Self {
Self::from(&*v)
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl From<String> for Arc<str> {
#[inline]
fn from(v: String) -> Self {
Self::from(&v[..])
}
}
impl<T: ?Sized> From<Box<T>> for Arc<T> {
#[inline]
fn from(v: Box<T>) -> Self {
Self::from_box(v)
}
}
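// Moves the elements out of the `Vec` byte-for-byte, then frees the old
// buffer by reconstituting it as a zero-length `Vec`, so elements are
// neither cloned nor dropped.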
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> From<Vec<T>> for Arc<[T]> {
#[inline]
fn from(v: Vec<T>) -> Self {
unsafe {
let len = v.len();
let cap = v.capacity();
let vec_ptr = mem::ManuallyDrop::new(v).as_mut_ptr();
let mut arc = Self::new_uninit_slice(len);
let data = Arc::get_mut_unchecked(&mut arc);
ptr::copy_nonoverlapping(vec_ptr, data.as_mut_ptr() as *mut T, len);
let _ = Vec::from_raw_parts(vec_ptr, 0, cap);
arc.assume_init()
}
}
}
impl<'a, B> From<Cow<'a, B>> for Arc<B>
where
B: ?Sized + ToOwned,
Arc<B>: From<&'a B> + From<B::Owned>,
{
#[inline]
fn from(cow: Cow<'a, B>) -> Self {
match cow {
Cow::Borrowed(s) => Self::from(s),
Cow::Owned(s) => Self::from(s),
}
}
}
impl From<Arc<str>> for Arc<[u8]> {
#[inline]
fn from(rc: Arc<str>) -> Self {
unsafe { Self::from_raw(Arc::into_raw(rc) as *const [u8]) }
}
}
#[cfg(not(portable_atomic_no_min_const_generics))]
items!({
impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
type Error = Arc<[T]>;
fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
if boxed_slice.len() == N {
let ptr = Arc::into_inner_non_null(boxed_slice);
Ok(unsafe { Self::from_inner(ptr.cast::<ArcInner<[T; N]>>()) })
} else {
Err(boxed_slice)
}
}
}
});
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> FromIterator<T> for Arc<[T]> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
iter.into_iter().collect::<Vec<T>>().into()
}
}
impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
fn borrow(&self) -> &T {
self
}
}
impl<T: ?Sized> AsRef<T> for Arc<T> {
fn as_ref(&self) -> &T {
self
}
}
impl<T: ?Sized> Unpin for Arc<T> {}
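// Helpers for locating the `data` field inside an `ArcInner<T>` allocation,
// accounting for the (possibly dynamic) alignment of `T`.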
unsafe fn data_ptr<T: ?Sized>(arc: *mut ArcInner<T>, data: &T) -> *mut T {
unsafe {
let offset = data_offset::<T>(data);
strict::byte_add(arc, offset) as *mut T
}
}
fn data_offset<T: ?Sized>(ptr: &T) -> usize {
data_offset_align(mem::align_of_val::<T>(ptr))
}
#[inline]
fn data_offset_align(align: usize) -> usize {
let layout = Layout::new::<ArcInner<()>>();
layout.size() + layout::padding_needed_for(layout, align)
}
struct UniqueArcUninit<T: ?Sized> {
ptr: NonNull<ArcInner<T>>,
layout_for_value: Layout,
}
impl<T: ?Sized> UniqueArcUninit<T> {
fn new(for_value: &T) -> Self {
let layout = Layout::for_value(for_value);
let ptr = unsafe { Arc::allocate_for_value(for_value) };
Self { ptr: NonNull::new(ptr).unwrap(), layout_for_value: layout }
}
fn data_ptr(&mut self) -> *mut T {
let offset = data_offset_align(self.layout_for_value.align());
unsafe { strict::byte_add(self.ptr.as_ptr(), offset) as *mut T }
}
unsafe fn into_arc(self) -> Arc<T> {
let this = ManuallyDrop::new(self);
let ptr = this.ptr.as_ptr();
unsafe { Arc::from_ptr(ptr) }
}
}
impl<T: ?Sized> Drop for UniqueArcUninit<T> {
fn drop(&mut self) {
unsafe {
Global.deallocate(
self.ptr.cast::<u8>(),
arc_inner_layout_for_value_layout(self.layout_for_value),
);
}
}
}
#[cfg(not(portable_atomic_no_error_in_core))]
use core::error;
#[cfg(all(portable_atomic_no_error_in_core, feature = "std"))]
use std::error;
#[cfg(any(not(portable_atomic_no_error_in_core), feature = "std"))]
impl<T: ?Sized + error::Error> error::Error for Arc<T> {
#[allow(deprecated)]
fn cause(&self) -> Option<&dyn error::Error> {
error::Error::cause(&**self)
}
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
error::Error::source(&**self)
}
}
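// Forwarding impls for std-only traits (raw fd/handle accessors and `File`
// I/O through a shared handle).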
#[cfg(feature = "std")]
mod std_impls {
use std::io;
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(target_os = "trusty")]
use std::os::fd;
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(target_os = "hermit")]
use std::os::hermit::io as fd;
#[cfg(unix)]
use std::os::unix::io as fd;
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(target_os = "wasi")]
use std::os::wasi::prelude as fd;
use super::Arc;
#[cfg(any(
unix,
all(
not(portable_atomic_no_io_safety),
any(target_os = "hermit", target_os = "trusty", target_os = "wasi"),
),
))]
impl<T: fd::AsRawFd> fd::AsRawFd for Arc<T> {
#[inline]
fn as_raw_fd(&self) -> fd::RawFd {
(**self).as_raw_fd()
}
}
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(any(unix, target_os = "hermit", target_os = "trusty", target_os = "wasi"))]
impl<T: ?Sized + fd::AsFd> fd::AsFd for Arc<T> {
#[inline]
fn as_fd(&self) -> fd::BorrowedFd<'_> {
(**self).as_fd()
}
}
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(windows)]
impl<T: ?Sized + std::os::windows::io::AsHandle> std::os::windows::io::AsHandle for Arc<T> {
#[inline]
fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> {
(**self).as_handle()
}
}
#[cfg(not(portable_atomic_no_io_safety))]
#[cfg(windows)]
impl<T: std::os::windows::io::AsSocket> std::os::windows::io::AsSocket for Arc<T> {
#[inline]
fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> {
(**self).as_socket()
}
}
impl io::Read for Arc<std::fs::File> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
(&**self).read(buf)
}
#[cfg(not(portable_atomic_no_io_vec))]
fn read_vectored(&mut self, bufs: &mut [io::IoSliceMut<'_>]) -> io::Result<usize> {
(&**self).read_vectored(bufs)
}
fn read_to_end(&mut self, buf: &mut alloc::vec::Vec<u8>) -> io::Result<usize> {
(&**self).read_to_end(buf)
}
fn read_to_string(&mut self, buf: &mut alloc::string::String) -> io::Result<usize> {
(&**self).read_to_string(buf)
}
}
impl io::Write for Arc<std::fs::File> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
(&**self).write(buf)
}
#[cfg(not(portable_atomic_no_io_vec))]
fn write_vectored(&mut self, bufs: &[io::IoSlice<'_>]) -> io::Result<usize> {
(&**self).write_vectored(bufs)
}
#[inline]
fn flush(&mut self) -> io::Result<()> {
(&**self).flush()
}
}
impl io::Seek for Arc<std::fs::File> {
fn seek(&mut self, pos: io::SeekFrom) -> io::Result<u64> {
(&**self).seek(pos)
}
}
}
use self::clone::CloneToUninit;
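// A local stand-in for the unstable `core::clone::CloneToUninit` trait:
// clone a (possibly unsized) value into uninitialized memory.
// `InitializingSlice` records how many elements were written, so a panicking
// `Clone` impl drops only the initialized prefix.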
mod clone {
use core::ptr;
#[cfg(not(portable_atomic_no_maybe_uninit))]
use core::{
mem::{self, MaybeUninit},
slice,
};
#[cfg(not(portable_atomic_no_maybe_uninit))]
use super::strict;
#[doc(hidden)]
#[allow(unknown_lints, unnameable_types)]
pub unsafe trait CloneToUninit {
unsafe fn clone_to_uninit(&self, dest: *mut u8);
}
unsafe impl<T: Clone> CloneToUninit for T {
#[inline]
unsafe fn clone_to_uninit(&self, dest: *mut u8) {
unsafe { clone_one(self, dest as *mut T) }
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
unsafe impl<T: Clone> CloneToUninit for [T] {
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
unsafe fn clone_to_uninit(&self, dest: *mut u8) {
let dest: *mut [T] = strict::with_metadata_of(dest, self);
unsafe { clone_slice(self, dest) }
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
unsafe impl CloneToUninit for str {
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
unsafe fn clone_to_uninit(&self, dest: *mut u8) {
unsafe { self.as_bytes().clone_to_uninit(dest) }
}
}
#[inline]
unsafe fn clone_one<T: Clone>(src: &T, dst: *mut T) {
unsafe {
ptr::write(dst, src.clone());
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
#[inline]
#[cfg_attr(all(debug_assertions, not(portable_atomic_no_track_caller)), track_caller)]
unsafe fn clone_slice<T: Clone>(src: &[T], dst: *mut [T]) {
let len = src.len();
let uninit_ref = unsafe { &mut *(dst as *mut [MaybeUninit<T>]) };
debug_assert_eq!(
len,
uninit_ref.len(), "clone_to_uninit() source and destination must have equal lengths",
);
let mut initializing = InitializingSlice::from_fully_uninit(uninit_ref);
for element_ref in src {
initializing.push(element_ref.clone());
}
mem::forget(initializing);
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
struct InitializingSlice<'a, T> {
data: &'a mut [MaybeUninit<T>],
initialized_len: usize,
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<'a, T> InitializingSlice<'a, T> {
#[inline]
fn from_fully_uninit(data: &'a mut [MaybeUninit<T>]) -> Self {
Self { data, initialized_len: 0 }
}
#[inline]
fn push(&mut self, value: T) {
self.data[self.initialized_len] = MaybeUninit::new(value);
self.initialized_len += 1;
}
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
impl<T> Drop for InitializingSlice<'_, T> {
#[cold]
fn drop(&mut self) {
let initialized_slice = unsafe {
slice::from_raw_parts_mut(self.data.as_mut_ptr() as *mut T, self.initialized_len)
};
unsafe {
ptr::drop_in_place::<[T]>(initialized_slice);
}
}
}
}
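// Minimal stand-ins for `Layout` methods (`padding_needed_for`,
// `pad_to_align`, `extend`, `array`) that are unstable or newer than the
// toolchains this crate supports.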
mod layout {
#[cfg(not(portable_atomic_no_maybe_uninit))]
use core::isize;
use core::{alloc::Layout, cmp, usize};
#[inline]
#[must_use]
pub(super) fn padding_needed_for(layout: Layout, align: usize) -> usize {
if !align.is_power_of_two() {
return usize::MAX;
}
let len_rounded_up = size_rounded_up_to_custom_align(layout, align);
len_rounded_up.wrapping_sub(layout.size())
}
#[inline]
fn size_rounded_up_to_custom_align(layout: Layout, align: usize) -> usize {
let align_m1 = align.wrapping_sub(1);
layout.size().wrapping_add(align_m1) & !align_m1
}
#[inline]
#[must_use]
pub(super) fn pad_to_align(layout: Layout) -> Layout {
let new_size = size_rounded_up_to_custom_align(layout, layout.align());
unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) }
}
#[inline]
pub(super) fn extend(layout: Layout, next: Layout) -> Option<(Layout, usize)> {
let new_align = cmp::max(layout.align(), next.align());
let offset = size_rounded_up_to_custom_align(layout, next.align());
let new_size = offset.wrapping_add(next.size());
let layout = Layout::from_size_align(new_size, new_align).ok()?;
Some((layout, offset))
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
#[inline]
pub(super) fn array<T>(n: usize) -> Option<Layout> {
#[inline(always)]
const fn max_size_for_align(align: usize) -> usize {
(isize::MAX as usize + 1).wrapping_sub(align)
}
#[inline]
fn inner(element_layout: Layout, n: usize) -> Option<Layout> {
let element_size = element_layout.size();
let align = element_layout.align();
if element_size != 0 && n > max_size_for_align(align) / element_size {
return None;
}
let array_size = element_size.wrapping_mul(n);
unsafe { Some(Layout::from_size_align_unchecked(array_size, align)) }
}
inner(Layout::new::<T>(), n)
}
}
#[cfg(feature = "std")]
use std::process::abort;
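// Without `std` there is no `process::abort`; panicking while a panic is
// already unwinding reliably terminates the process instead, even under
// `panic = "unwind"`.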
#[cfg(not(feature = "std"))]
#[cold]
fn abort() -> ! {
struct Abort;
impl Drop for Abort {
fn drop(&mut self) {
panic!();
}
}
let _abort = Abort;
panic!("abort")
}
fn is_dangling<T: ?Sized>(ptr: *const T) -> bool {
(ptr as *const ()).addr() == usize::MAX
}
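// A stand-in for the unstable allocator API on the global allocator:
// allocation failure surfaces as `None`, and zero-sized layouts yield a
// dangling, suitably aligned pointer without touching the allocator.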
struct Global;
#[allow(clippy::unused_self)]
impl Global {
#[inline]
#[cfg_attr(miri, track_caller)]
fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Option<NonNull<u8>> {
#[inline]
#[must_use]
fn dangling(layout: Layout) -> NonNull<u8> {
unsafe { NonNull::new_unchecked(strict::without_provenance_mut::<u8>(layout.align())) }
}
match layout.size() {
0 => Some(dangling(layout)),
_size => unsafe {
let raw_ptr = if zeroed {
alloc::alloc::alloc_zeroed(layout)
} else {
alloc::alloc::alloc(layout)
};
NonNull::new(raw_ptr)
},
}
}
#[inline]
#[cfg_attr(miri, track_caller)]
fn allocate(self, layout: Layout) -> Option<NonNull<u8>> {
self.alloc_impl(layout, false)
}
#[cfg(not(portable_atomic_no_maybe_uninit))]
#[inline]
#[cfg_attr(miri, track_caller)]
fn allocate_zeroed(self, layout: Layout) -> Option<NonNull<u8>> {
self.alloc_impl(layout, true)
}
#[inline]
#[cfg_attr(miri, track_caller)]
unsafe fn deallocate(self, ptr: NonNull<u8>, layout: Layout) {
if layout.size() != 0 {
unsafe { alloc::alloc::dealloc(ptr.as_ptr(), layout) }
}
}
}