#![cfg_attr(feature = "dyn_unstable", feature(unsize))]
#![cfg_attr(feature = "dyn_unstable", feature(coerce_unsized))]
#![cfg_attr(feature = "dyn_unstable", feature(receiver_trait))]
#![cfg_attr(feature = "dyn_unstable", feature(dispatch_from_dyn))]
#![deny(clippy::all)]
#[cfg(test)]
mod tests;
#[cfg(not(target_has_atomic = "ptr"))]
compile_error!("Cannot use `Trc` on a system without atomics.");
use std::{
alloc::{alloc, handle_alloc_error, Layout},
borrow::Borrow,
cmp,
error::Error,
fmt::{self, Debug, Display, Pointer},
hash::{Hash, Hasher},
mem::{forget, ManuallyDrop, MaybeUninit},
ops::Deref,
panic::UnwindSafe,
pin::Pin,
ptr::{self, addr_of, addr_of_mut, slice_from_raw_parts_mut, write, NonNull},
sync::atomic::{
fence, AtomicUsize,
Ordering::{self, AcqRel, Acquire, Relaxed, Release},
},
};
#[cfg(not(target_os = "windows"))]
use std::os::fd::{AsFd, AsRawFd};
#[cfg(target_os = "windows")]
use std::os::windows::io::{AsHandle, AsRawHandle, AsRawSocket, AsSocket};
#[cfg(feature = "dyn_unstable")]
use std::any::Any;
#[cfg(feature = "dyn_unstable")]
use std::ops;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "stable_deref_trait")]
use stable_deref_trait::{CloneStableDeref, StableDeref};
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
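/// Heap header shared by every handle: the atomic (cross-thread) strong
/// count, the weak count, and the value itself. `repr(C)` keeps the counters
/// at a fixed offset in front of `data`, which the `from_raw` constructors
/// rely on.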
#[repr(C)]
struct SharedTrcInternal<T: ?Sized> {
atomicref: AtomicUsize,
weakcount: AtomicUsize,
data: T,
}
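/// A biased reference-counted pointer. Clones on the owning thread bump a
/// plain `usize` local count; the shared atomic count is touched only when a
/// whole thread acquires or releases the value. A local-cloning sketch
/// (assuming this crate is built under the name `trc`):
///
/// ```
/// use trc::Trc;
///
/// let a = Trc::new(String::from("hello"));
/// let b = a.clone(); // non-atomic: only the local count is incremented
/// assert_eq!(Trc::local_count(&a), 2);
/// assert_eq!(Trc::atomic_count(&a), 1);
/// assert_eq!(*b, "hello");
/// ```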
pub struct Trc<T: ?Sized> {
shared: NonNull<SharedTrcInternal<T>>,
threadref: NonNull<usize>,
}
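/// A thread-safe intermediary between [`Trc`]s. It carries no thread-local
/// count, so it is `Send + Sync` (for suitable `T`) and is the handle used to
/// move data across threads.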
pub struct SharedTrc<T: ?Sized> {
data: NonNull<SharedTrcInternal<T>>,
}
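/// A non-owning handle that does not keep the value alive; it must be
/// upgraded to a [`Trc`] before the data can be accessed.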
pub struct Weak<T: ?Sized> {
data: NonNull<SharedTrcInternal<T>>,
}
impl<T: ?Sized> SharedTrc<T> {
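    /// Convert a [`Trc`] into a [`SharedTrc`] by incrementing the atomic
    /// count, so the data can be handed to another thread. A usage sketch
    /// (assuming this crate is built under the name `trc`):
    ///
    /// ```
    /// use std::thread;
    /// use trc::{SharedTrc, Trc};
    ///
    /// let trc = Trc::new(100);
    /// let shared = SharedTrc::from_trc(&trc);
    /// thread::spawn(move || {
    ///     let trc = SharedTrc::to_trc(shared);
    ///     assert_eq!(*trc, 100);
    /// })
    /// .join()
    /// .unwrap();
    /// ```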
#[inline]
#[must_use]
pub fn from_trc(trc: &Trc<T>) -> Self {
let prev = sum_value(&unsafe { trc.shared.as_ref() }.atomicref, 1, Acquire);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum atomic reference count."
);
Self { data: trc.shared }
}
#[must_use]
pub fn to_trc(this: Self) -> Trc<T> {
let tbx = Box::new(1);
let res = Trc {
threadref: NonNull::from(Box::leak(tbx)),
shared: this.data,
};
forget(this);
res
}
#[inline]
#[must_use]
pub fn atomic_count(this: &Self) -> usize {
return unsafe { this.data.as_ref() }.atomicref.load(Relaxed);
}
}
#[cfg(feature = "dyn_unstable")]
impl SharedTrc<dyn Any + Send + Sync> {
pub fn downcast<T>(self) -> Result<SharedTrc<T>, Self>
where
T: Any + Send + Sync,
{
if (*self).is::<T>() {
let data = self.data.cast::<SharedTrcInternal<T>>();
forget(self);
Ok(SharedTrc { data })
} else {
Err(self)
}
}
}
impl<T: ?Sized> Clone for SharedTrc<T> {
#[inline]
fn clone(&self) -> Self {
let prev = sum_value(&unsafe { self.data.as_ref() }.atomicref, 1, AcqRel);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum atomic reference count."
);
Self { data: self.data }
}
}
impl<T: ?Sized> Drop for SharedTrc<T> {
#[inline]
fn drop(&mut self) {
if sub_value(unsafe { &(*self.data.as_ptr()).atomicref }, 1, Release) != 1 {
return;
}
        fence(Acquire);
        unsafe { ptr::drop_in_place(addr_of_mut!((*self.data.as_ptr()).data)) };
        // Release the implicit weak reference held collectively by the strong
        // handles; the allocation is freed once the weak count reaches zero.
        drop(Weak { data: self.data });
}
}
impl<T: ?Sized> From<SharedTrc<T>> for Trc<T> {
fn from(value: SharedTrc<T>) -> Self {
SharedTrc::to_trc(value)
}
}
impl<T: ?Sized> From<&Trc<T>> for SharedTrc<T> {
fn from(value: &Trc<T>) -> Self {
Self::from_trc(value)
}
}
impl<T: ?Sized> From<Trc<T>> for SharedTrc<T> {
fn from(value: Trc<T>) -> Self {
Self::from_trc(&value)
}
}
impl<T: ?Sized> SharedTrc<T> {
#[inline]
#[must_use]
pub fn weak_count(this: &Self) -> usize {
return unsafe { this.data.as_ref() }.weakcount.load(Relaxed);
}
#[inline]
#[must_use]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.data.as_ptr() == other.data.as_ptr()
}
#[inline]
#[must_use]
pub fn as_ptr(this: &Self) -> *const T {
let sharedptr = NonNull::as_ptr(this.data);
unsafe { addr_of_mut!((*sharedptr).data) }
}
#[must_use]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
forget(this);
ptr
}
}
impl<T> SharedTrc<T> {
#[inline]
pub fn new(value: T) -> Self {
let shareddata = SharedTrcInternal {
atomicref: AtomicUsize::new(1),
weakcount: AtomicUsize::new(1),
data: value,
};
let sharedbx = Box::new(shareddata);
return Self {
data: NonNull::from(Box::leak(sharedbx)),
};
}
#[inline]
#[must_use]
pub fn new_uninit() -> SharedTrc<MaybeUninit<T>> {
let shareddata = SharedTrcInternal {
atomicref: AtomicUsize::new(1),
weakcount: AtomicUsize::new(1),
data: MaybeUninit::<T>::uninit(),
};
let sharedbx = Box::new(shareddata);
return SharedTrc {
data: NonNull::from(Box::leak(sharedbx)),
};
}
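    /// Construct a value that holds a [`Weak`] to its own allocation. The
    /// closure receives the weak handle before the value exists, so upgrading
    /// inside the closure yields `None`. A self-referential sketch (crate
    /// name `trc` assumed):
    ///
    /// ```
    /// use trc::{SharedTrc, Weak};
    ///
    /// struct Node {
    ///     parent: Weak<Node>,
    /// }
    ///
    /// let node = SharedTrc::new_cyclic(|weak| Node {
    ///     parent: weak.clone(),
    /// });
    /// assert!(node.parent.upgrade().is_some());
    /// ```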
#[inline]
pub fn new_cyclic<F>(data_fn: F) -> Self
where
F: FnOnce(&Weak<T>) -> T,
{
let shareddata: NonNull<_> = Box::leak(Box::new(SharedTrcInternal {
atomicref: AtomicUsize::new(0),
weakcount: AtomicUsize::new(1),
data: MaybeUninit::<T>::uninit(),
}))
.into();
let init_ptr: NonNull<SharedTrcInternal<T>> = shareddata.cast();
let weak: Weak<T> = Weak { data: init_ptr };
let data = data_fn(&weak);
forget(weak);
unsafe {
let ptr = init_ptr.as_ptr();
ptr::write(ptr::addr_of_mut!((*ptr).data), data);
let prev = sum_value(&init_ptr.as_ref().atomicref, 1, AcqRel);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum atomic reference count."
);
}
Self { data: init_ptr }
}
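    /// Reconstruct a `SharedTrc` from a pointer previously returned by
    /// [`SharedTrc::into_raw`].
    ///
    /// # Safety
    /// `ptr` must come from `into_raw`, and the reference it represents must
    /// not already have been released. A round-trip sketch (crate name `trc`
    /// assumed):
    ///
    /// ```
    /// use trc::SharedTrc;
    ///
    /// let shared = SharedTrc::new(5);
    /// let ptr = SharedTrc::into_raw(shared);
    /// let shared = unsafe { SharedTrc::from_raw(ptr) };
    /// assert_eq!(*shared, 5);
    /// ```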
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // Recover the container pointer. The offset of `data` is the header
        // layout extended by `T`'s layout (not just the header size), so this
        // also handles `T` with alignment greater than `usize`.
        let offset = Layout::new::<SharedTrcInternal<()>>()
            .extend(Layout::new::<T>())
            .unwrap()
            .1;
        let data_ptr = ptr.cast::<u8>().sub(offset) as *mut SharedTrcInternal<T>;
        Self {
            data: NonNull::new_unchecked(data_ptr),
        }
    }
pub unsafe fn decrement_local_count(ptr: *const T) {
drop(Self::from_raw(ptr));
}
pub unsafe fn increment_local_count(ptr: *const T) {
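        // Reconstruct a handle without taking ownership, then clone it to
        // bump the atomic count; neither handle runs its destructor.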
let trc = ManuallyDrop::new(Self::from_raw(ptr));
let _: ManuallyDrop<_> = trc.clone();
}
}
impl<T> SharedTrc<[T]> {
#[must_use]
pub fn new_uninit_slice(len: usize) -> SharedTrc<[MaybeUninit<T>]> {
let value_layout = Layout::array::<T>(len).unwrap();
let layout = Layout::new::<SharedTrcInternal<()>>()
.extend(value_layout)
.unwrap()
.0
.pad_to_align();
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        let res = slice_from_raw_parts_mut(ptr.cast::<T>(), len)
            as *mut SharedTrcInternal<[MaybeUninit<T>]>;
        unsafe { write(&mut (*res).atomicref, AtomicUsize::new(1)) };
        unsafe { write(&mut (*res).weakcount, AtomicUsize::new(1)) };
        let elems = unsafe { addr_of_mut!((*res).data) }.cast::<MaybeUninit<T>>();
for i in 0..len {
unsafe {
write(elems.add(i), MaybeUninit::<T>::uninit());
}
}
SharedTrc {
data: unsafe { NonNull::new_unchecked(res) },
}
}
}
impl<T> SharedTrc<MaybeUninit<T>> {
#[must_use]
pub unsafe fn assume_init(self) -> SharedTrc<T> {
SharedTrc {
data: NonNull::new_unchecked(ManuallyDrop::new(self).data.as_ptr().cast()),
}
}
}
impl<T> SharedTrc<[MaybeUninit<T>]> {
#[must_use]
pub unsafe fn assume_init(self) -> SharedTrc<[T]> {
SharedTrc {
data: NonNull::new_unchecked(
ManuallyDrop::new(self).data.as_ptr() as *mut SharedTrcInternal<[T]>
),
}
}
}
impl<T: ?Sized> Deref for SharedTrc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
return &unsafe { self.data.as_ref() }.data;
}
}
#[inline(always)]
fn sum_value(value: &AtomicUsize, offset: usize, ordering: Ordering) -> usize {
    // Under the `immortals` cfg, a count of `usize::MAX` marks the value as
    // immortal and is never modified.
    #[cfg(immortals)]
    {
        if value.load(Acquire) == usize::MAX {
            return usize::MAX;
        }
    }
    value.fetch_add(offset, ordering)
}
#[inline(always)]
fn sub_value(value: &AtomicUsize, offset: usize, ordering: Ordering) -> usize {
    // Immortal values (count `usize::MAX` under the `immortals` cfg) are
    // never decremented.
    #[cfg(immortals)]
    {
        if value.load(Acquire) == usize::MAX {
            return usize::MAX;
        }
    }
    value.fetch_sub(offset, ordering)
}
impl<T> Trc<T> {
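    /// Allocate a new `Trc`, with both the local and the atomic count set
    /// to 1.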
#[inline]
pub fn new(value: T) -> Self {
let shareddata = SharedTrcInternal {
atomicref: AtomicUsize::new(1),
weakcount: AtomicUsize::new(1),
data: value,
};
let sharedbx = Box::new(shareddata);
let threadbx = Box::new(1);
return Self {
threadref: NonNull::from(Box::leak(threadbx)),
shared: NonNull::from(Box::leak(sharedbx)),
};
}
#[inline]
#[must_use]
pub fn new_uninit() -> Trc<MaybeUninit<T>> {
let shareddata = SharedTrcInternal {
atomicref: AtomicUsize::new(1),
weakcount: AtomicUsize::new(1),
data: MaybeUninit::<T>::uninit(),
};
let sharedbx = Box::new(shareddata);
let threadbx = Box::new(1);
return Trc {
threadref: NonNull::from(Box::leak(threadbx)),
shared: NonNull::from(Box::leak(sharedbx)),
};
}
#[inline]
pub fn new_cyclic<F>(data_fn: F) -> Self
where
F: FnOnce(&Weak<T>) -> T,
{
let shareddata: NonNull<_> = Box::leak(Box::new(SharedTrcInternal {
atomicref: AtomicUsize::new(0),
weakcount: AtomicUsize::new(1),
data: MaybeUninit::<T>::uninit(),
}))
.into();
let init_ptr: NonNull<SharedTrcInternal<T>> = shareddata.cast();
let weak: Weak<T> = Weak { data: init_ptr };
let data = data_fn(&weak);
forget(weak);
unsafe {
let ptr = init_ptr.as_ptr();
ptr::write(ptr::addr_of_mut!((*ptr).data), data);
let prev = sum_value(&init_ptr.as_ref().atomicref, 1, AcqRel);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum atomic reference count."
)
};
let tbx = Box::new(1);
return Self {
threadref: NonNull::from(Box::leak(tbx)),
shared: init_ptr,
};
}
#[inline]
pub fn pin(data: T) -> Pin<Self> {
unsafe { Pin::new_unchecked(Self::new(data)) }
}
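    /// Return the inner value if this is the only reference (local and
    /// atomic), or the original `Trc` otherwise. A sketch (crate name `trc`
    /// assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let x = Trc::new(3);
    /// assert_eq!(Trc::try_unwrap(x), Ok(3));
    ///
    /// let x = Trc::new(4);
    /// let _y = Trc::clone(&x);
    /// assert_eq!(*Trc::try_unwrap(x).unwrap_err(), 4);
    /// ```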
#[inline]
    pub fn try_unwrap(this: Self) -> Result<T, Self> {
        if *unsafe { this.threadref.as_ref() } != 1 {
            return Err(this);
        }
        // Race the atomic count from 1 to 0 so that a concurrent
        // `Weak::upgrade` cannot succeed while the data is moved out.
        if unsafe { this.shared.as_ref() }
            .atomicref
            .compare_exchange(1, 0, Relaxed, Relaxed)
            .is_err()
        {
            return Err(this);
        }
        fence(Acquire);
        unsafe {
            let elem = ptr::read(&this.shared.as_ref().data);
            drop(Box::from_raw(this.threadref.as_ptr()));
            drop(Weak { data: this.shared });
            forget(this);
            Ok(elem)
        }
    }
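    /// Consume this handle, returning the inner value if it was the last
    /// strong reference overall and `None` otherwise. A sketch (crate name
    /// `trc` assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let x = Trc::new(7);
    /// assert_eq!(Trc::into_inner(x), Some(7));
    /// ```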
#[inline]
#[must_use]
    pub fn into_inner(this: Self) -> Option<T> {
        let this = ManuallyDrop::new(this);
        if *unsafe { this.threadref.as_ref() } != 1 {
            // Other `Trc`s on this thread still share the local count and the
            // `threadref` allocation; just decrement the local count.
            unsafe { *this.threadref.as_ptr() -= 1 };
            return None;
        }
        drop(unsafe { Box::from_raw(this.threadref.as_ptr()) });
        if sub_value(&unsafe { this.shared.as_ref() }.atomicref, 1, Release) != 1 {
            return None;
        }
        fence(Acquire);
        let elem = unsafe { ptr::read(addr_of_mut!((*this.shared.as_ptr()).data)) };
        drop(Weak { data: this.shared });
        Some(elem)
    }
}
impl<T> Trc<[T]> {
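    /// Allocate a slice of uninitialized values; every element must be
    /// written before `assume_init` is called. An initialization sketch
    /// (crate name `trc` assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let mut trc = Trc::<[u8]>::new_uninit_slice(3);
    /// for (i, elem) in Trc::get_mut(&mut trc).unwrap().iter_mut().enumerate() {
    ///     elem.write(i as u8);
    /// }
    /// let trc = unsafe { trc.assume_init() };
    /// assert_eq!(*trc, [0, 1, 2]);
    /// ```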
#[must_use]
pub fn new_uninit_slice(len: usize) -> Trc<[MaybeUninit<T>]> {
let value_layout = Layout::array::<T>(len).unwrap();
let layout = Layout::new::<SharedTrcInternal<()>>()
.extend(value_layout)
.unwrap()
.0
.pad_to_align();
        let ptr = unsafe { alloc(layout) };
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        let res = slice_from_raw_parts_mut(ptr.cast::<T>(), len)
            as *mut SharedTrcInternal<[MaybeUninit<T>]>;
        unsafe { write(&mut (*res).atomicref, AtomicUsize::new(1)) };
        unsafe { write(&mut (*res).weakcount, AtomicUsize::new(1)) };
        let elems = unsafe { addr_of_mut!((*res).data) }.cast::<MaybeUninit<T>>();
for i in 0..len {
unsafe {
write(elems.add(i), MaybeUninit::<T>::uninit());
}
}
let tbx = Box::new(1);
return Trc {
threadref: NonNull::from(Box::leak(tbx)),
shared: unsafe { NonNull::new_unchecked(res) },
};
}
}
impl<T> Trc<MaybeUninit<T>> {
#[must_use]
pub unsafe fn assume_init(self) -> Trc<T> {
let threadref = self.threadref;
Trc {
shared: NonNull::new_unchecked(ManuallyDrop::new(self).shared.as_ptr().cast()),
threadref,
}
}
}
impl<T> Trc<[MaybeUninit<T>]> {
#[must_use]
pub unsafe fn assume_init(self) -> Trc<[T]> {
let threadref = self.threadref;
Trc {
shared: NonNull::new_unchecked(
ManuallyDrop::new(self).shared.as_ptr() as *mut SharedTrcInternal<[T]>
),
threadref,
}
}
}
impl<T: ?Sized> Trc<T> {
#[inline]
#[must_use]
pub fn local_count(this: &Self) -> usize {
return *unsafe { this.threadref.as_ref() };
}
#[inline]
#[must_use]
pub fn atomic_count(this: &Self) -> usize {
return unsafe { this.shared.as_ref() }.atomicref.load(Relaxed);
}
#[inline]
#[must_use]
pub fn weak_count(this: &Self) -> usize {
return unsafe { this.shared.as_ref() }.weakcount.load(Relaxed);
}
#[inline]
#[must_use]
pub fn ptr_eq(this: &Self, other: &Self) -> bool {
this.shared.as_ptr() == other.shared.as_ptr()
}
#[inline]
#[must_use]
pub fn as_ptr(this: &Self) -> *const T {
let sharedptr = NonNull::as_ptr(this.shared);
unsafe { addr_of_mut!((*sharedptr).data) }
}
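    /// Return a mutable reference to the data if no other `Trc`, `SharedTrc`,
    /// or `Weak` shares the allocation. A sketch (crate name `trc` assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let mut x = Trc::new(3);
    /// *Trc::get_mut(&mut x).unwrap() = 4;
    /// assert_eq!(*x, 4);
    /// ```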
#[inline]
pub fn get_mut(this: &mut Self) -> Option<&mut T> {
if unsafe { this.shared.as_ref() }
.weakcount
.compare_exchange(1, usize::MAX, Acquire, Relaxed)
.is_ok()
{
let unique = unsafe { this.shared.as_ref() }.atomicref.load(Acquire) == 1;
unsafe { this.shared.as_ref() }.weakcount.store(1, Release);
if unique && *unsafe { this.threadref.as_ref() } == 1 {
Some(unsafe { &mut (*this.shared.as_ptr()).data })
} else {
None
}
} else {
None
}
}
}
impl<T: Clone> Trc<T> {
#[inline]
#[must_use]
pub fn unwrap_or_clone(this: Self) -> T {
Self::try_unwrap(this).unwrap_or_else(|trc| (*trc).clone())
}
}
#[cfg(feature = "dyn_unstable")]
impl Trc<dyn Any + Send + Sync> {
pub fn downcast<T>(self) -> Result<Trc<T>, Self>
where
T: Any + Send + Sync,
{
if (*self).is::<T>() {
let shared = self.shared.cast::<SharedTrcInternal<T>>();
let threadref = self.threadref;
forget(self);
Ok(Trc { shared, threadref })
} else {
Err(self)
}
}
}
impl<T: ?Sized> Trc<T> {
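    /// Create a [`Weak`] to this allocation, incrementing only the weak
    /// count. A downgrade/upgrade sketch (crate name `trc` assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let strong = Trc::new(5);
    /// let weak = Trc::downgrade(&strong);
    /// assert_eq!(*weak.upgrade().unwrap(), 5);
    /// drop(strong);
    /// assert!(weak.upgrade().is_none());
    /// ```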
#[inline]
#[must_use]
pub fn downgrade(trc: &Self) -> Weak<T> {
let prev = sum_value(&unsafe { trc.shared.as_ref() }.weakcount, 1, Acquire);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum weak reference count."
);
Weak { data: trc.shared }
}
}
impl<T: ?Sized> Deref for Trc<T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
return &unsafe { self.shared.as_ref() }.data;
}
}
impl<T: ?Sized> Drop for Trc<T> {
#[cfg(immortals)]
#[inline]
fn drop(&mut self) {
if unsafe { self.shared.as_ref() }.atomicref.load(Acquire) != usize::MAX {
*unsafe { self.threadref.as_mut() } -= 1;
if *unsafe { self.threadref.as_ref() } == 0 {
drop(unsafe { Box::from_raw(self.threadref.as_ptr()) });
if sub_value(&unsafe { self.shared.as_ref() }.atomicref, 1, Release) != 1 {
return;
}
                fence(Acquire);
                unsafe { ptr::drop_in_place(addr_of_mut!((*self.shared.as_ptr()).data)) };
                drop(Weak { data: self.shared });
}
}
}
#[cfg(not(immortals))]
#[inline]
fn drop(&mut self) {
*unsafe { self.threadref.as_mut() } -= 1;
if *unsafe { self.threadref.as_ref() } == 0 {
drop(unsafe { Box::from_raw(self.threadref.as_ptr()) });
if sub_value(&unsafe { self.shared.as_ref() }.atomicref, 1, Release) != 1 {
return;
}
fence(Acquire);
unsafe { ptr::drop_in_place(addr_of_mut!((*self.shared.as_ptr()).data)) };
            drop(Weak { data: self.shared });
}
}
}
impl<T: ?Sized> Clone for Trc<T> {
#[inline(always)]
fn clone(&self) -> Self {
        #[cfg(immortals)]
        {
            // Immortal values never change their counts; just copy the handle.
            if unsafe { self.shared.as_ref() }.atomicref.load(Acquire) == usize::MAX {
                return Self {
                    shared: self.shared,
                    threadref: self.threadref,
                };
            }
        }
        unsafe { *self.threadref.as_ptr() += 1 };
        assert!(
            unsafe { *self.threadref.as_ptr() } <= MAX_REFCOUNT,
            "Overflow of maximum local reference count."
        );
Self {
shared: self.shared,
threadref: self.threadref,
}
}
}
impl<T: ?Sized> AsRef<T> for Trc<T> {
fn as_ref(&self) -> &T {
return Self::deref(self);
}
}
impl<T: ?Sized> AsRef<T> for SharedTrc<T> {
fn as_ref(&self) -> &T {
return Self::deref(self);
}
}
impl<T: ?Sized> Borrow<T> for Trc<T> {
fn borrow(&self) -> &T {
return self.as_ref();
}
}
impl<T: ?Sized> Borrow<T> for SharedTrc<T> {
fn borrow(&self) -> &T {
return self.as_ref();
}
}
impl<T: Default> Default for Trc<T> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T: Default> Default for SharedTrc<T> {
fn default() -> Self {
Self::from_trc(&Trc::new(Default::default()))
}
}
impl<T: Display> Display for Trc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&*(*self), f)
}
}
impl<T: Display> Display for SharedTrc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&*(*self), f)
}
}
impl<T: Debug> Debug for Trc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(*self), f)
}
}
impl<T: Debug> Debug for SharedTrc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&*(*self), f)
}
}
impl<T: ?Sized> Pointer for Trc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&addr_of!(unsafe { self.shared.as_ref() }.data), f)
}
}
impl<T: ?Sized> Pointer for SharedTrc<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&addr_of!(unsafe { self.data.as_ref() }.data), f)
}
}
impl<T> From<T> for Trc<T> {
fn from(value: T) -> Self {
Self::new(value)
}
}
impl<T: Hash> Hash for Trc<T> {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.deref().hash(state);
}
}
impl<T: Hash> Hash for SharedTrc<T> {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.deref().hash(state);
}
}
impl<T: PartialOrd> PartialOrd for Trc<T> {
#[inline]
fn ge(&self, other: &Self) -> bool {
return self.deref().ge(&**other);
}
#[inline]
fn le(&self, other: &Self) -> bool {
        return self.deref().le(&**other);
}
#[inline]
fn gt(&self, other: &Self) -> bool {
return self.deref().gt(&**other);
}
#[inline]
fn lt(&self, other: &Self) -> bool {
return self.deref().lt(&**other);
}
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
return self.deref().partial_cmp(&**other);
}
}
impl<T: PartialOrd> PartialOrd for SharedTrc<T> {
#[inline]
fn ge(&self, other: &Self) -> bool {
return self.deref().ge(&**other);
}
#[inline]
fn le(&self, other: &Self) -> bool {
        return self.deref().le(&**other);
}
#[inline]
fn gt(&self, other: &Self) -> bool {
return self.deref().gt(&**other);
}
#[inline]
fn lt(&self, other: &Self) -> bool {
return self.deref().lt(&**other);
}
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
return self.deref().partial_cmp(&**other);
}
}
impl<T: Ord> Ord for Trc<T> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
return self.deref().cmp(&**other);
}
}
impl<T: Ord> Ord for SharedTrc<T> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
return self.deref().cmp(&**other);
}
}
impl<T: Eq> Eq for Trc<T> {}
impl<T: Eq> Eq for SharedTrc<T> {}
impl<T: PartialEq> PartialEq for Trc<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
return self.deref().eq(&**other);
}
#[allow(clippy::partialeq_ne_impl)]
#[inline]
fn ne(&self, other: &Self) -> bool {
return self.deref().ne(&**other);
}
}
impl<T: PartialEq> PartialEq for SharedTrc<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
return self.deref().eq(&**other);
}
#[allow(clippy::partialeq_ne_impl)]
#[inline]
fn ne(&self, other: &Self) -> bool {
return self.deref().ne(&**other);
}
}
#[cfg(not(target_os = "windows"))]
impl<T: AsFd> AsFd for Trc<T> {
fn as_fd(&self) -> std::os::fd::BorrowedFd<'_> {
return (**self).as_fd();
}
}
#[cfg(not(target_os = "windows"))]
impl<T: AsFd> AsFd for SharedTrc<T> {
fn as_fd(&self) -> std::os::fd::BorrowedFd<'_> {
return (**self).as_fd();
}
}
#[cfg(target_os = "windows")]
impl<T: AsRawHandle> AsRawHandle for Trc<T> {
fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
(**self).as_raw_handle()
}
}
#[cfg(target_os = "windows")]
impl<T: AsRawHandle> AsRawHandle for SharedTrc<T> {
fn as_raw_handle(&self) -> std::os::windows::io::RawHandle {
(**self).as_raw_handle()
}
}
#[cfg(target_os = "windows")]
impl<T: AsHandle> AsHandle for Trc<T> {
fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> {
(**self).as_handle()
}
}
#[cfg(target_os = "windows")]
impl<T: AsHandle> AsHandle for SharedTrc<T> {
fn as_handle(&self) -> std::os::windows::io::BorrowedHandle<'_> {
(**self).as_handle()
}
}
#[cfg(not(target_os = "windows"))]
impl<T: AsRawFd> AsRawFd for Trc<T> {
fn as_raw_fd(&self) -> std::os::fd::RawFd {
(**self).as_raw_fd()
}
}
#[cfg(not(target_os = "windows"))]
impl<T: AsRawFd> AsRawFd for SharedTrc<T> {
fn as_raw_fd(&self) -> std::os::fd::RawFd {
(**self).as_raw_fd()
}
}
#[cfg(target_os = "windows")]
impl<T: AsRawSocket> AsRawSocket for Trc<T> {
fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
(**self).as_raw_socket()
}
}
#[cfg(target_os = "windows")]
impl<T: AsRawSocket> AsRawSocket for SharedTrc<T> {
fn as_raw_socket(&self) -> std::os::windows::io::RawSocket {
(**self).as_raw_socket()
}
}
#[cfg(target_os = "windows")]
impl<T: AsSocket> AsSocket for Trc<T> {
fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> {
(**self).as_socket()
}
}
#[cfg(target_os = "windows")]
impl<T: AsSocket> AsSocket for SharedTrc<T> {
fn as_socket(&self) -> std::os::windows::io::BorrowedSocket<'_> {
(**self).as_socket()
}
}
#[allow(deprecated)]
impl<T: Error> Error for Trc<T> {
fn cause(&self) -> Option<&dyn Error> {
return (**self).cause();
}
fn description(&self) -> &str {
return (**self).description();
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
return (**self).source();
}
}
#[allow(deprecated)]
impl<T: Error> Error for SharedTrc<T> {
fn cause(&self) -> Option<&dyn Error> {
return (**self).cause();
}
fn description(&self) -> &str {
return (**self).description();
}
fn source(&self) -> Option<&(dyn Error + 'static)> {
return (**self).source();
}
}
#[cfg(feature = "serde")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for Trc<T> {
fn deserialize<D>(deserializer: D) -> Result<Trc<T>, D::Error>
where
D: ::serde::de::Deserializer<'de>,
{
T::deserialize(deserializer).map(Trc::new)
}
}
#[cfg(feature = "serde")]
impl<T: Serialize> Serialize for Trc<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> StableDeref for Trc<T> {}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> CloneStableDeref for Trc<T> {}
#[cfg(feature = "serde")]
impl<'de, T: Deserialize<'de>> Deserialize<'de> for SharedTrc<T> {
fn deserialize<D>(deserializer: D) -> Result<SharedTrc<T>, D::Error>
where
D: ::serde::de::Deserializer<'de>,
{
T::deserialize(deserializer).map(SharedTrc::new)
}
}
#[cfg(feature = "serde")]
impl<T: Serialize> Serialize for SharedTrc<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: ::serde::ser::Serializer,
{
(**self).serialize(serializer)
}
}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> StableDeref for SharedTrc<T> {}
#[cfg(feature = "stable_deref_trait")]
unsafe impl<T: ?Sized> CloneStableDeref for SharedTrc<T> {}
impl<T: ?Sized> Unpin for Trc<T> {}
impl<T: ?Sized> UnwindSafe for Trc<T> {}
impl<T: ?Sized> Unpin for SharedTrc<T> {}
impl<T: ?Sized> UnwindSafe for SharedTrc<T> {}
unsafe impl<T: Sync + Send> Send for SharedTrc<T> {}
unsafe impl<T: Sync + Send> Sync for SharedTrc<T> {}
unsafe impl<T: Sync + Send> Send for Weak<T> {}
unsafe impl<T: Sync + Send> Sync for Weak<T> {}
fn create_from_iterator_exact<T>(
iterator: impl Iterator<Item = T> + ExactSizeIterator,
) -> *mut SharedTrcInternal<[T]> {
let value_layout = Layout::array::<T>(iterator.len()).unwrap();
let layout = Layout::new::<SharedTrcInternal<()>>()
.extend(value_layout)
.unwrap()
.0
.pad_to_align();
    let ptr = unsafe { alloc(layout) };
    if ptr.is_null() {
        handle_alloc_error(layout);
    }
    // Build a fat pointer carrying the slice length, then fill in the header.
    let res =
        slice_from_raw_parts_mut(ptr.cast::<T>(), iterator.len()) as *mut SharedTrcInternal<[T]>;
    unsafe { write(&mut (*res).atomicref, AtomicUsize::new(1)) };
    unsafe { write(&mut (*res).weakcount, AtomicUsize::new(1)) };
let elems = unsafe { addr_of_mut!((*res).data) }.cast::<T>();
for (n, i) in iterator.enumerate() {
unsafe {
write(elems.add(n), i);
}
}
res
}
trait TrcFromIter<T> {
fn from_iter(slice: impl Iterator<Item = T> + ExactSizeIterator) -> Self;
}
impl<T: Clone> TrcFromIter<T> for Trc<[T]> {
fn from_iter(slice: impl Iterator<Item = T> + ExactSizeIterator) -> Self {
let shared = create_from_iterator_exact(slice);
let tbx = Box::new(1);
return Self {
threadref: NonNull::from(Box::leak(tbx)),
shared: unsafe { NonNull::new_unchecked(shared) },
};
}
}
impl<T: Clone> From<&[T]> for Trc<[T]> {
fn from(value: &[T]) -> Self {
return <Self as TrcFromIter<T>>::from_iter(value.iter().cloned());
}
}
impl<T: Clone> FromIterator<T> for Trc<[T]> {
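    /// Collect an iterator into a `Trc<[T]>`; the items are buffered in a
    /// `Vec` first. A sketch (crate name `trc` assumed):
    ///
    /// ```
    /// use trc::Trc;
    ///
    /// let trc: Trc<[i32]> = (0..3).collect();
    /// assert_eq!(*trc, [0, 1, 2]);
    /// ```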
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
Self::from(&*iter.into_iter().collect::<Vec<_>>())
}
}
#[cfg(feature = "dyn_unstable")]
impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized> std::ops::CoerceUnsized<Trc<U>> for Trc<T> {}
#[cfg(feature = "dyn_unstable")]
impl<T: ?Sized> std::ops::Receiver for Trc<T> {}
#[cfg(feature = "dyn_unstable")]
impl<T: ?Sized + std::marker::Unsize<U>, U: ?Sized> std::ops::CoerceUnsized<SharedTrc<U>>
for SharedTrc<T>
{
}
#[cfg(feature = "dyn_unstable")]
impl<T: ?Sized> std::ops::Receiver for SharedTrc<T> {}
#[cfg(feature = "dyn_unstable")]
impl<T: ?Sized, U: ?Sized> ops::DispatchFromDyn<SharedTrc<U>> for SharedTrc<T> where
T: std::marker::Unsize<U>
{
}
impl<T: ?Sized> Drop for Weak<T> {
#[inline]
fn drop(&mut self) {
if sub_value(unsafe { &(*self.data.as_ptr()).weakcount }, 1, Release) != 1 {
return;
}
fence(Acquire);
let layout = Layout::for_value(unsafe { &*self.data.as_ptr() });
unsafe {
std::alloc::dealloc(self.data.as_ptr().cast(), layout);
}
}
}
impl<T: ?Sized> Weak<T> {
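    /// Attempt to obtain a [`Trc`], returning `None` once the value has been
    /// dropped. The atomic count is raised with a CAS loop, so an upgrade can
    /// never resurrect a dead value.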
#[inline]
#[must_use]
pub fn upgrade(&self) -> Option<Trc<T>> {
        #[cfg(immortals)]
        {
            // Immortal values can always be upgraded without touching counts.
            if unsafe { self.data.as_ref() }.atomicref.load(Acquire) == usize::MAX {
                let tbx = Box::new(1);
                return Some(Trc {
                    threadref: NonNull::from(Box::leak(tbx)),
                    shared: self.data,
                });
            }
        }
unsafe { self.data.as_ref() }
.atomicref
.fetch_update(Acquire, Relaxed, |n| {
if n == 0 {
return None;
}
assert!(
n <= MAX_REFCOUNT,
"Overflow of maximum atomic reference count."
);
Some(n + 1)
})
.ok()
.map(|_| {
let tbx = Box::new(1);
return Trc {
threadref: NonNull::from(Box::leak(tbx)),
shared: self.data,
};
})
}
#[inline]
#[must_use]
pub fn as_ptr(this: &Self) -> *const T {
let sharedptr = NonNull::as_ptr(this.data);
unsafe { addr_of_mut!((*sharedptr).data) }
}
#[must_use]
pub fn into_raw(this: Self) -> *const T {
let ptr = Self::as_ptr(&this);
forget(this);
ptr
}
}
impl<T> Weak<T> {
    pub unsafe fn from_raw(ptr: *const T) -> Self {
        // Recover the container pointer. As in `SharedTrc::from_raw`, the
        // offset of `data` is the header layout extended by `T`'s layout, so
        // `T` with alignment greater than `usize` is handled correctly.
        let offset = Layout::new::<SharedTrcInternal<()>>()
            .extend(Layout::new::<T>())
            .unwrap()
            .1;
        let data_ptr = ptr.cast::<u8>().sub(offset) as *mut SharedTrcInternal<T>;
        Self {
            data: NonNull::new_unchecked(data_ptr),
        }
    }
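    /// Create a `Weak` with no value: the atomic count starts at 0, so
    /// `upgrade` always fails. A sketch (crate name `trc` assumed):
    ///
    /// ```
    /// use trc::Weak;
    ///
    /// let weak = Weak::<u32>::new();
    /// assert!(weak.upgrade().is_none());
    /// ```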
#[must_use]
pub fn new() -> Weak<MaybeUninit<T>> {
let data = MaybeUninit::<T>::uninit();
let shareddata = SharedTrcInternal {
atomicref: AtomicUsize::new(0),
weakcount: AtomicUsize::new(1),
data,
};
let sbx = Box::new(shareddata);
return Weak {
data: NonNull::from(Box::leak(sbx)),
};
}
#[inline]
#[must_use]
pub fn atomic_count(this: &Self) -> usize {
return unsafe { this.data.as_ref() }.atomicref.load(Relaxed);
}
#[inline]
#[must_use]
pub fn weak_count(this: &Self) -> usize {
return unsafe { this.data.as_ref() }.weakcount.load(Relaxed);
}
}
impl<T: ?Sized> Clone for Weak<T> {
#[inline]
fn clone(&self) -> Self {
let prev = sum_value(&unsafe { self.data.as_ref() }.weakcount, 1, Relaxed);
assert!(
prev <= MAX_REFCOUNT,
"Overflow of maximum weak reference count."
);
Self { data: self.data }
}
}