#![no_std]
#![cfg_attr(feature = "nightly", feature(ptr_metadata))]
#![deny(warnings)]
#![forbid(clippy::undocumented_unsafe_blocks)]
#![deny(missing_docs)]
#[cfg(feature = "std")]
extern crate std;
extern crate alloc;
use alloc::boxed::Box;
use core::alloc::Layout;
use crate::deferred_panics_helper::DropHandler;
use crate::deferred_panics_helper::IDropHandler;
use core::cell::UnsafeCell;
use core::fmt::{Debug, Formatter};
use core::marker::PhantomData;
#[allow(unused)]
use core::mem::align_of_val;
use core::mem::size_of;
#[allow(unused)]
use core::mem::size_of_val;
use core::mem::{transmute, ManuallyDrop};
use core::panic::UnwindSafe;
use core::ptr::{addr_of_mut, null, null_mut, NonNull};
use core::sync::atomic::Ordering;
mod deferred_panics_helper;
// Synchronization-primitive facade. Exactly one of these three `atomic`
// modules is compiled, selected by cfg:
//  - default: real core/std atomics,
//  - feature "shuttle": randomized concurrency testing,
//  - cfg(loom): exhaustive concurrency model checking.
#[cfg(all(not(loom), not(feature = "shuttle")))]
mod atomic {
    pub use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
    #[allow(unused)]
    #[cfg(feature = "std")]
    pub use std::sync::{Arc, Mutex};
    #[allow(unused)]
    #[cfg(feature = "std")]
    pub use std::thread;
}
// Shuttle-backed variant of the atomics facade (randomized scheduling tests).
#[cfg(feature = "shuttle")]
mod atomic {
    #[allow(unused)]
    pub use shuttle::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
    #[allow(unused)]
    pub use shuttle::sync::{Arc, Mutex};
    #[allow(unused)]
    pub use shuttle::thread;
}
// Loom-backed variant of the atomics facade (model-checked tests).
#[cfg(loom)]
mod atomic {
    pub use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
    #[allow(unused)]
    pub use loom::sync::{Arc, Mutex};
    #[allow(unused)]
    pub use loom::thread;
}
// Internal debug-logging macro, three mutually exclusive definitions:
// with std+debug (non-loom) each line is prefixed with the thread id;
// under loom it prints plainly; otherwise it expands to nothing.
#[doc(hidden)]
#[cfg(all(feature = "std", feature = "debug", not(loom)))]
#[macro_export]
macro_rules! debug_println {
    ($($x:tt)*) => {
        std::println!("{:?}: {}", $crate::atomic::thread::current().id(), std::format!($($x)*))
    }
}
#[doc(hidden)]
#[cfg(all(feature = "std", feature = "debug", loom))]
#[macro_export]
macro_rules! debug_println {
    ($($x:tt)*) => { std::println!($($x)*) }
}
// No-op fallback when logging is disabled (no std or no debug feature).
#[doc(hidden)]
#[cfg(any(not(feature = "std"), not(feature = "debug")))]
#[macro_export]
macro_rules! debug_println {
    ($($x:tt)*) => {{}};
}
/// A strong smart-pointer handle to a value `T` whose payload can be
/// replaced ("shifted") while other handles exist; handles later advance
/// to the newest value in the node chain.
pub struct ArcShift<T: ?Sized> {
    // Thin pointer to this handle's current node in the holder chain.
    // Metadata for unsized T is stored inside the allocation itself.
    item: NonNull<ItemHolderDummy<T>>,
    // Raw-pointer PhantomData suppresses auto Send/Sync; thread-safety is
    // opted back in by the manual unsafe impls further down.
    pd: PhantomData<*mut T>,
}
// NOTE(review): this impl covers only sized T, unlike most impls in this
// file which are `T: ?Sized` — confirm whether that is intentional.
impl<T> UnwindSafe for ArcShift<T> {}
impl<T: ?Sized> Debug for ArcShift<T>
where
    T: Debug,
{
    /// Formats as `ArcShift(<payload>)`, reading this handle's current
    /// value without advancing to a newer node.
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "ArcShift({:?})", self.shared_non_reloading_get())
    }
}
impl<T: Default> Default for ArcShift<T> {
fn default() -> Self {
ArcShift::new(Default::default())
}
}
impl<T: ?Sized> Debug for ArcShiftWeak<T>
where
    T: Debug,
{
    /// A weak handle cannot safely read the payload, so only a fixed
    /// placeholder is printed.
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        f.write_str("ArcShiftWeak(..)")
    }
}
/// A weak handle to an [`ArcShift`] chain: keeps node allocations alive
/// but not the payload itself; it must be upgraded to a strong handle
/// before the value can be read.
pub struct ArcShiftWeak<T: ?Sized> {
    // Thin pointer to a node in the holder chain (see `ArcShift::item`).
    item: NonNull<ItemHolderDummy<T>>,
}
/// Cell-style convenience wrapper around `ArcShift` (see module contents).
pub mod cell;
/// Compile-time check: does `T` have a statically known size?
///
/// A reference to a `Sized` type is a thin pointer (same size as `&()`),
/// while references to unsized types are fat pointers and therefore wider.
#[inline(always)]
const fn is_sized<T: ?Sized>() -> bool {
    let thin_ref_size = size_of::<&()>();
    size_of::<&T>() == thin_ref_size
}
// SAFETY: ArcShift hands out shared references to T across threads and may
// drop T on an arbitrary thread, so T: Send + Sync is required before the
// handle itself may be shared. NOTE(review): the soundness argument relies
// on the node-chain protocol implemented throughout this file.
unsafe impl<T: Sync + Send + ?Sized> Sync for ArcShift<T> {}
// SAFETY: see the Sync impl above; the same bounds are required to move a
// handle to another thread.
unsafe impl<T: Sync + Send + ?Sized> Send for ArcShift<T> {}
// SAFETY: weak handles share the same node allocations across threads and
// can be upgraded to strong handles, so they need the same bounds.
unsafe impl<T: Sync + Send + ?Sized> Sync for ArcShiftWeak<T> {}
// SAFETY: see the Sync impl above.
unsafe impl<T: Sync + Send + ?Sized> Send for ArcShiftWeak<T> {}
/// Fat-pointer metadata for `T`, stored inside the holder allocation when
/// `T` is unsized. With the `nightly` feature this is the real
/// `Pointee::Metadata`; otherwise it is one raw word (slice length or
/// vtable pointer) obtained through the polyfill below.
#[repr(transparent)]
struct UnsizedMetadata<T: ?Sized> {
    #[cfg(feature = "nightly")]
    meta: <T as std::ptr::Pointee>::Metadata,
    #[cfg(not(feature = "nightly"))]
    meta: *const (),
    phantom: PhantomData<T>,
}
impl<T: ?Sized> Clone for UnsizedMetadata<T> {
    // Plain bitwise copy; the type is Copy.
    fn clone(&self) -> Self {
        *self
    }
}
impl<T: ?Sized> Copy for UnsizedMetadata<T> {}
impl<T: ?Sized> core::fmt::Debug for UnsizedMetadata<T> {
    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
        write!(f, "Metadata({:?})", self.meta)
    }
}
/// Manual fat-pointer layout used by the non-nightly polyfill:
/// (data pointer, metadata word).
/// NOTE(review): relies on rustc's current fat-pointer representation,
/// which is not a documented guarantee — confirm for targeted toolchains.
#[allow(dead_code)]
#[repr(C)]
struct FatPtr<T: ?Sized> {
    ptr: *mut u8,
    meta: UnsizedMetadata<T>,
}
/// Reassembles a (possibly fat) `*mut ItemHolder` from a thin data pointer
/// plus previously captured metadata.
#[inline]
fn arc_from_raw_parts_mut<T: ?Sized, M: IMetadata>(
    data_ptr: *mut u8,
    metadata: UnsizedMetadata<T>,
) -> *mut ItemHolder<T, M> {
    #[cfg(not(feature = "nightly"))]
    // SAFETY: reinterprets the two-word FatPtr as a fat raw pointer;
    // assumes the (data, meta) order matches rustc's fat-pointer layout.
    unsafe {
        core::mem::transmute_copy(&FatPtr {
            ptr: data_ptr,
            meta: metadata,
        })
    }
    #[cfg(feature = "nightly")]
    {
        std::ptr::from_raw_parts_mut(data_ptr, metadata.meta)
    }
}
/// As `arc_from_raw_parts_mut`, but producing a payload pointer `*mut T`.
/// Only compiled for no_std builds without unchecked panics.
#[inline]
#[cfg(not(any(feature = "std", feature = "nostd_unchecked_panics")))]
#[cfg_attr(test, mutants::skip)]
pub(crate) fn ptr_from_raw_parts_mut<T: ?Sized>(
    data_ptr: *mut u8,
    metadata: UnsizedMetadata<T>,
) -> *mut T {
    #[cfg(not(feature = "nightly"))]
    // SAFETY: same fat-pointer layout assumption as above.
    unsafe {
        core::mem::transmute_copy(&FatPtr {
            ptr: data_ptr,
            meta: metadata,
        })
    }
    #[cfg(feature = "nightly")]
    {
        std::ptr::from_raw_parts_mut(data_ptr, metadata.meta)
    }
}
impl<T: ?Sized> UnsizedMetadata<T> {
    /// Extracts the metadata word of a fat `*const T` without nightly's
    /// `ptr_metadata` feature, by reading the second word of the pointer.
    #[inline]
    #[cfg(not(feature = "nightly"))]
    fn polyfill_metadata(cur_ptr: *const T) -> *const () {
        if is_sized::<T>() {
            // Sized types have no metadata; callers must never ask for it.
            unreachable!()
        } else {
            // View the fat pointer as two thin words: (data, metadata).
            let ptrptr = &cur_ptr as *const *const T as *const *const ();
            // SAFETY: T is unsized here, so `cur_ptr` occupies two words on
            // the stack; word 1 is the metadata.
            // NOTE(review): depends on rustc's fat-pointer layout.
            unsafe { *ptrptr.wrapping_offset(1) }
        }
    }
    /// Captures the metadata of `cur_ptr` for later pointer reconstruction.
    #[inline]
    pub fn new(cur_ptr: *const T) -> UnsizedMetadata<T> {
        UnsizedMetadata {
            #[cfg(feature = "nightly")]
            meta: std::ptr::metadata(cur_ptr),
            #[cfg(not(feature = "nightly"))]
            meta: Self::polyfill_metadata(cur_ptr),
            phantom: PhantomData,
        }
    }
}
/// Computes the allocation layout of an `ItemHolder` for the value behind
/// `ptr`: the fixed header (with a leading metadata word when `T` is
/// unsized) extended by the payload's own layout.
fn get_holder_layout<T: ?Sized>(ptr: *const T) -> Layout {
    // SAFETY: caller must pass a valid, dereferenceable pointer —
    // NOTE(review): this is an implicit contract of this private helper.
    let payload_layout = Layout::for_value(unsafe { &*ptr });
    if is_sized::<T>() {
        let layout = get_holder_base_layout::<ItemHolder<(), SizedMetadata>>();
        let (layout, _) = layout.extend(payload_layout).unwrap();
        layout.pad_to_align()
    } else {
        // Unsized holders start with the metadata word, then the header,
        // then the payload bytes.
        let layout = Layout::new::<UnsizedMetadata<T>>();
        let (layout, _) = layout
            .extend(get_holder_base_layout::<ItemHolder<(), UnsizedMetadata<T>>>())
            .unwrap();
        let (layout, _) = layout.extend(payload_layout).unwrap();
        layout.pad_to_align()
    }
}
/// Erases a full `ItemHolder` pointer to the thin "dummy" representation
/// used in all atomic next/prev links.
#[inline(always)]
fn to_dummy<T: ?Sized, M: IMetadata>(ptr: *const ItemHolder<T, M>) -> *const ItemHolderDummy<T> {
    ptr as *mut ItemHolderDummy<T>
}
/// Recovers the full (possibly fat) holder pointer from a thin dummy
/// pointer, re-reading stored metadata when `T` is unsized.
#[inline(always)]
fn from_dummy<T: ?Sized, M: IMetadata>(ptr: *const ItemHolderDummy<T>) -> *const ItemHolder<T, M> {
    get_full_ptr_raw::<T, M>(ptr)
}
/// Runs `$f` with `$item` bound to the full `ItemHolder` pointer behind
/// the dummy pointer `$p`, selecting the sized or unsized metadata flavor
/// of the holder type based on `$t`.
macro_rules! with_holder {
    ($p: expr, $t: ty, $item: ident, $f:expr) => {
        if is_sized::<$t>() {
            let $item = from_dummy::<$t, SizedMetadata>($p.as_ptr());
            $f
        } else {
            let $item = from_dummy::<$t, UnsizedMetadata<$t>>($p.as_ptr());
            $f
        }
    };
}
/// Allocates a new holder node for the boxed payload, dispatching on
/// whether `T` is statically sized (the holder representation differs:
/// unsized payloads carry an inline metadata word).
fn make_sized_or_unsized_holder_from_box<T: ?Sized>(
    item: Box<T>,
    prev: *mut ItemHolderDummy<T>,
) -> *const ItemHolderDummy<T> {
    match is_sized::<T>() {
        true => to_dummy(make_sized_holder_from_box(item, prev)),
        false => to_dummy(make_unsized_holder_from_box(item, prev)),
    }
}
/// Extracts the numeric weak count from a raw `weak_count` word by masking
/// off the two flag bits (`WEAK_HAVE_NEXT`, `WEAK_HAVE_PREV`).
#[allow(clippy::let_and_return)]
fn get_weak_count(count: usize) -> usize {
    // The low (usize::BITS - 2) bits hold the actual count.
    let masked = count & ((1 << (usize::BITS - 2)) - 1);
    #[cfg(feature = "validate")]
    assert!(masked < MAX_REF_COUNT / 2);
    masked
}
/// Flag bit in `weak_count`: set when the node has a `next` successor.
const WEAK_HAVE_NEXT: usize = 1 << (usize::BITS - 1);
/// Flag bit in `weak_count`: set when the node has a `prev` predecessor.
const WEAK_HAVE_PREV: usize = 1 << (usize::BITS - 2);
/// Upper bound for reference counts, leaving headroom below the flag bits.
const MAX_REF_COUNT: usize = usize::MAX / 8;
/// True if the `WEAK_HAVE_PREV` flag is set in the raw count word.
fn get_weak_prev(count: usize) -> bool {
    count & WEAK_HAVE_PREV == WEAK_HAVE_PREV
}
/// True if the `WEAK_HAVE_NEXT` flag is set in the raw count word.
#[allow(unused)]
fn get_weak_next(count: usize) -> bool {
    count & WEAK_HAVE_NEXT == WEAK_HAVE_NEXT
}
/// The weak-count word a freshly allocated node starts with: one weak
/// reference (held on behalf of the strong handle being created), plus the
/// `WEAK_HAVE_PREV` flag when the node is appended after an existing one.
#[cfg_attr(test, mutants::skip)]
fn initial_weak_count<T: ?Sized>(prev: *const ItemHolderDummy<T>) -> usize {
    if prev.is_null() {
        1
    } else {
        1 | WEAK_HAVE_PREV
    }
}
/// Debug helper: renders a raw weak-count word as its numeric count plus
/// the prev/next flag bits. Only built for std+debug logging.
#[allow(unused)]
#[cfg(all(feature = "std", feature = "debug"))]
#[cfg_attr(test, mutants::skip)]
fn format_weak(weak: usize) -> std::string::String {
    let count = weak & ((1 << (usize::BITS - 2)) - 1);
    let have_next = (weak & WEAK_HAVE_NEXT) != 0;
    let have_prev = (weak & WEAK_HAVE_PREV) != 0;
    std::format!("(weak: {count} prev: {have_prev} next: {have_next})")
}
/// Moves an unsized payload out of `item` into a freshly allocated holder
/// node whose `prev` link is `prev`, returning the new node. The box's
/// allocation is released without running `T`'s destructor, since the
/// payload has been bitwise-moved into the holder.
#[allow(unused)]
fn make_unsized_holder_from_box<T: ?Sized>(
    item: Box<T>,
    prev: *mut ItemHolderDummy<T>,
) -> *const ItemHolder<T, UnsizedMetadata<T>> {
    let cur_ptr = Box::into_raw(item);
    debug_println!("thesize: {:?}", cur_ptr);
    let item_holder_ptr: *mut ItemHolder<T, UnsizedMetadata<T>>;
    // SAFETY: `cur_ptr` comes from Box::into_raw and is valid.
    let payload_layout = Layout::for_value(unsafe { &*cur_ptr });
    let the_size = payload_layout.size();
    if is_sized::<T>() {
        // This function is only ever invoked for unsized T.
        unreachable!()
    } else {
        let layout = get_holder_layout::<T>(cur_ptr);
        let metadata = UnsizedMetadata::new(cur_ptr);
        debug_println!("Layout: {:?}, meta: {:?}", layout, metadata);
        // SAFETY: `layout` is the holder layout for this payload.
        // NOTE(review): a null return from `alloc` (OOM) is not checked.
        item_holder_ptr =
            unsafe { arc_from_raw_parts_mut(alloc::alloc::alloc(layout) as *mut _, metadata) };
        debug_println!("Sized result ptr: {:?}", item_holder_ptr);
        // Field-by-field initialization of the raw allocation follows.
        unsafe {
            addr_of_mut!((*item_holder_ptr).the_meta).write(metadata);
        }
    }
    // Bitwise-copy the payload bytes into the holder's payload slot.
    unsafe {
        (addr_of_mut!((*item_holder_ptr).payload) as *mut u8)
            .copy_from(cur_ptr as *mut u8, the_size);
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).prev).write(atomic::AtomicPtr::new(prev as *mut _));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).next).write(atomic::AtomicPtr::default());
    }
    // New node starts with one strong owner (the caller's handle).
    unsafe {
        addr_of_mut!((*item_holder_ptr).strong_count).write(atomic::AtomicUsize::new(1));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).weak_count)
            .write(atomic::AtomicUsize::new(initial_weak_count(prev)));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).advance_count).write(atomic::AtomicUsize::new(0));
    }
    #[cfg(feature = "validate")]
    unsafe {
        addr_of_mut!((*item_holder_ptr).magic1)
            .write(std::sync::atomic::AtomicUsize::new(0xbeef8111));
    }
    #[cfg(feature = "validate")]
    unsafe {
        addr_of_mut!((*item_holder_ptr).magic2)
            .write(std::sync::atomic::AtomicUsize::new(0x12348111));
    }
    // Free the original box allocation without dropping the payload
    // (ManuallyDrop); the payload now lives inside the holder.
    let _t: Box<ManuallyDrop<T>> = unsafe { Box::from_raw(cur_ptr as *mut ManuallyDrop<T>) };
    item_holder_ptr
}
/// Allocates a new holder node containing `payload` (sized `T` only), with
/// its `prev` link set and counts initialized for one strong owner.
#[allow(unused)]
fn make_sized_holder<T>(
    payload: T,
    prev: *const ItemHolderDummy<T>,
) -> *mut ItemHolder<T, SizedMetadata> {
    let holder = ItemHolder {
        the_meta: SizedMetadata,
        #[cfg(feature = "validate")]
        magic1: std::sync::atomic::AtomicUsize::new(0xbeef8111),
        next: Default::default(),
        prev: atomic::AtomicPtr::new(prev as *mut _),
        strong_count: atomic::AtomicUsize::new(1),
        weak_count: atomic::AtomicUsize::new(initial_weak_count::<T>(prev)),
        advance_count: atomic::AtomicUsize::new(0),
        #[cfg(feature = "validate")]
        magic2: std::sync::atomic::AtomicUsize::new(0x12348111),
        payload: UnsafeCell::new(ManuallyDrop::new(payload)),
    };
    Box::into_raw(Box::new(holder))
}
/// Moves the payload out of `item` into a freshly allocated sized-flavor
/// holder node. The box's allocation is released without running `T`'s
/// destructor, since the payload was bitwise-moved into the holder.
#[allow(unused)]
fn make_sized_holder_from_box<T: ?Sized>(
    item: Box<T>,
    prev: *mut ItemHolderDummy<T>,
) -> *mut ItemHolder<T, SizedMetadata> {
    let cur_ptr = Box::into_raw(item);
    debug_println!("thesize: {:?}", cur_ptr);
    // SAFETY: `cur_ptr` comes from Box::into_raw and is valid.
    let payload_layout = Layout::for_value(unsafe { &*cur_ptr });
    let the_size = payload_layout.size();
    let layout = get_holder_layout::<T>(cur_ptr);
    // SAFETY: thin allocation pointer reinterpreted as the holder pointer.
    // NOTE(review): a null return from `alloc` (OOM) is not checked here.
    let item_holder_ptr: *mut ItemHolder<T, SizedMetadata> =
        unsafe { core::mem::transmute_copy(&alloc::alloc::alloc(layout)) };
    // Bitwise-copy the payload and initialize every header field in place.
    unsafe {
        (addr_of_mut!((*item_holder_ptr).payload) as *mut u8)
            .copy_from(cur_ptr as *mut u8, the_size);
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).prev).write(atomic::AtomicPtr::new(prev as *mut _));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).next).write(atomic::AtomicPtr::default());
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).strong_count).write(atomic::AtomicUsize::new(1));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).weak_count)
            .write(atomic::AtomicUsize::new(initial_weak_count(prev)));
    }
    unsafe {
        addr_of_mut!((*item_holder_ptr).advance_count).write(atomic::AtomicUsize::new(0));
    }
    #[cfg(feature = "validate")]
    unsafe {
        addr_of_mut!((*item_holder_ptr).magic1)
            .write(std::sync::atomic::AtomicUsize::new(0xbeef8111));
    }
    #[cfg(feature = "validate")]
    unsafe {
        addr_of_mut!((*item_holder_ptr).magic2)
            .write(std::sync::atomic::AtomicUsize::new(0x12348111));
    }
    // Free the box allocation without dropping the payload (ManuallyDrop).
    let _t: Box<ManuallyDrop<T>> = unsafe { Box::from_raw(cur_ptr as *mut ManuallyDrop<T>) };
    item_holder_ptr
}
/// Converts a thin dummy pointer back into the full holder pointer.
/// For sized `T` this is a plain pointer cast; for unsized `T` the
/// metadata word stored at the start of the allocation is read back and
/// used to rebuild the fat pointer.
fn get_full_ptr_raw<T: ?Sized, M: IMetadata>(
    dummy: *const ItemHolderDummy<T>,
) -> *const ItemHolder<T, M> {
    if is_sized::<T>() {
        // Both pointers are thin; sizes must match for the reinterpretation.
        assert_eq!(
            size_of::<*const ItemHolder<T, M>>(),
            size_of::<*const ItemHolderDummy<T>>()
        );
        // SAFETY: same-size pointer-to-pointer reinterpretation.
        unsafe { core::mem::transmute_copy(&dummy) }
    } else {
        assert_eq!(size_of::<M>(), size_of::<usize>());
        let ptr_data = dummy as *mut _;
        debug_println!("Dummy data: {:?}", ptr_data);
        // The metadata word is the first field of the unsized holder layout.
        let metadata_ptr = dummy as *const UnsizedMetadata<T>;
        debug_println!(
            "Unsized, meta: {:?}, val: {:?}",
            metadata_ptr,
            unsafe { *metadata_ptr }
        );
        // SAFETY: `dummy` must point at a live unsized holder whose first
        // word is its metadata — NOTE(review): implicit caller contract.
        let metadata = unsafe { *metadata_ptr };
        arc_from_raw_parts_mut(ptr_data, metadata)
    }
}
/// Zero-sized marker used as the metadata type for sized payloads.
pub(crate) struct SizedMetadata;
/// Marker trait over the two metadata flavors (sized / unsized).
trait IMetadata {}
impl IMetadata for SizedMetadata {}
impl<T: ?Sized> IMetadata for UnsizedMetadata<T> {}
/// Thin, type-erased stand-in for `ItemHolder<T, _>` used in all atomic
/// links, so links stay one word wide even for unsized `T`.
#[doc(hidden)]
#[repr(transparent)]
pub struct ItemHolderDummy<T: ?Sized> {
    _dummy: u8,
    _phantom_data: PhantomData<T>,
}
/// One node in the ArcShift chain: a header of counters and links followed
/// by the payload. `repr(C)` keeps the field order predictable (the
/// metadata word must come first; see `get_full_ptr_raw`), and `align(8)`
/// leaves the low 3 bits of node pointers free to carry `ItemStateEnum`
/// decorations.
#[repr(align(8))]
#[repr(C)]
struct ItemHolder<T: ?Sized, M: IMetadata> {
    // Fat-pointer metadata (ZST for sized payloads).
    the_meta: M,
    #[cfg(feature = "validate")]
    magic1: std::sync::atomic::AtomicUsize,
    // Number of in-flight advance traversals currently pinned on this node.
    advance_count: atomic::AtomicUsize,
    // Link to the previous (older) node, or null.
    prev: atomic::AtomicPtr<ItemHolderDummy<T>>,
    strong_count: atomic::AtomicUsize,
    // Weak count plus the WEAK_HAVE_PREV / WEAK_HAVE_NEXT flag bits.
    weak_count: atomic::AtomicUsize,
    #[cfg(feature = "validate")]
    magic2: std::sync::atomic::AtomicUsize,
    // Link to the next (newer) node; low 3 bits carry the decoration.
    next: atomic::AtomicPtr<ItemHolderDummy<T>>,
    pub(crate) payload: UnsafeCell<ManuallyDrop<T>>,
}
/// Computes, at compile time, the layout of the holder header — everything
/// except the metadata prefix and the payload: 3 counter words plus 2
/// pointer words (plus 2 magic words in `validate` builds). Panics during
/// const evaluation on platforms where atomics do not have plain
/// word size and alignment, since the hand-computed layout would be wrong.
const fn get_holder_base_layout<T: Sized>() -> Layout {
    if core::mem::size_of::<atomic::AtomicUsize>() % (core::mem::size_of::<usize>()) != 0 {
        panic!("platform had unsupported size of atomic usize")
    }
    if core::mem::size_of::<atomic::AtomicPtr<ItemHolderDummy<T>>>()
        % (core::mem::size_of::<usize>())
        != 0
    {
        panic!("platform had unsupported size of atomic pointers")
    }
    if core::mem::align_of::<atomic::AtomicUsize>() != core::mem::size_of::<usize>() {
        panic!("platform had unsupported size of atomic usize")
    }
    if core::mem::align_of::<atomic::AtomicPtr<ItemHolderDummy<T>>>()
        != core::mem::size_of::<usize>()
    {
        panic!("platform had unsupported size of atomic pointers")
    }
    // advance_count + strong_count + weak_count, prev + next.
    let base_size = 3 * size_of::<atomic::AtomicUsize>()
        + 2 * size_of::<atomic::AtomicPtr<ItemHolderDummy<T>>>();
    // magic1 + magic2 exist only in validate builds.
    #[cfg(not(feature = "validate"))]
    const EXTRA_WORDS: usize = 0;
    #[cfg(feature = "validate")]
    const EXTRA_WORDS: usize = 2;
    // SAFETY: size is a whole number of words and the alignment is a valid
    // power of two (align_of::<usize>()).
    unsafe {
        Layout::from_size_align_unchecked(
            EXTRA_WORDS * size_of::<usize>() + base_size,
            core::mem::align_of::<usize>(),
        )
    }
}
impl<T: ?Sized, M: IMetadata> PartialEq for ItemHolder<T, M> {
    /// Identity comparison: two holders are equal only when they are the
    /// very same node, compared by address (fat-pointer metadata ignored).
    #[allow(clippy::ptr_eq)]
    fn eq(&self, other: &ItemHolder<T, M>) -> bool {
        let lhs = self as *const ItemHolder<T, M> as *const u8;
        let rhs = other as *const ItemHolder<T, M> as *const u8;
        lhs == rhs
    }
}
impl<T: ?Sized, M: IMetadata> ItemHolder<T, M> {
    /// Atomically stores `undecorated_new_next` as this node's `next`
    /// pointer, preserving whatever decoration bits the link currently
    /// carries (CAS retry loop).
    fn set_next(&self, undecorated_new_next: *const ItemHolderDummy<T>) {
        #[cfg(feature = "validate")]
        assert_eq!(
            get_decoration(undecorated_new_next),
            ItemStateEnum::UndisturbedUndecorated
        );
        let mut prior_next = self.next.load(atomic::Ordering::Relaxed);
        loop {
            // Re-apply the currently observed decoration to the new target.
            let new_next = decorate(undecorated_new_next, get_decoration(prior_next));
            match self.next.compare_exchange(
                prior_next,
                new_next as *mut _,
                atomic::Ordering::SeqCst,
                atomic::Ordering::SeqCst,
            ) {
                Ok(_) => {
                    debug_println!(
                        "set {:x?}.next = {:x?}",
                        to_dummy(self as *const _),
                        new_next
                    );
                    return;
                }
                Err(err) => {
                    // Decoration changed concurrently; retry with new value.
                    prior_next = err;
                }
            }
        }
    }
    /// Test-only helper: collects this node and all its predecessors
    /// (following `prev` links) into a Vec, leftmost last.
    #[allow(unused)]
    #[cfg(test)]
    #[cfg(feature = "std")]
    fn debug_all_to_left<'a>(&self) -> std::vec::Vec<&'a ItemHolder<T, M>> {
        let mut ret = alloc::vec::Vec::new();
        let mut item_ptr = self as *const ItemHolder<T, M>;
        loop {
            ret.push(unsafe { &*item_ptr });
            let prev = from_dummy::<T, M>(unsafe { (*item_ptr).prev.load(Ordering::Relaxed) });
            if prev.is_null() {
                break;
            }
            item_ptr = prev;
        }
        ret
    }
    /// Returns a reference to the payload with an unchecked, caller-chosen
    /// lifetime.
    ///
    /// # Safety
    /// Caller must guarantee the payload is alive (not dropped) for the
    /// whole of `'a`; the lifetime extension is entirely unchecked here.
    #[inline(always)]
    unsafe fn payload<'a>(&self) -> &'a T {
        unsafe { transmute::<&T, &'a T>(&*(self.payload.get())) }
    }
    /// Moves the payload out of this node into a fresh `Box<T>` by raw
    /// byte copy (no_std-without-unchecked-panics builds only).
    ///
    /// # Safety
    /// Caller must have exclusive ownership of the payload and must not
    /// use or drop the in-node payload afterwards.
    #[cfg(not(any(feature = "std", feature = "nostd_unchecked_panics")))]
    unsafe fn take_boxed_payload(&self) -> Box<T> {
        let payload = &self.payload;
        debug_println!("boxing payload");
        let payload_val: &mut T = unsafe { &mut **(&mut *payload.get()) };
        let size = size_of_val(payload_val);
        let align = align_of_val(payload_val);
        let layout = Layout::from_size_align(size, align).unwrap();
        // Zero-sized values use a dangling non-null pointer, as Box allows.
        let thin_dest_ptr = if size == 0 {
            1 as *mut u8
        } else {
            alloc::alloc::alloc(layout)
        };
        let fat_dest_ptr = if is_sized::<T>() {
            core::ptr::copy_nonoverlapping(payload_val as *mut _ as *mut u8, thin_dest_ptr, size);
            core::mem::transmute_copy(&thin_dest_ptr)
        } else {
            core::ptr::copy_nonoverlapping(payload_val as *mut _ as *mut u8, thin_dest_ptr, size);
            // Rebuild a fat pointer for the unsized copy.
            let meta = UnsizedMetadata::new(payload_val);
            ptr_from_raw_parts_mut(thin_dest_ptr, meta)
        };
        Box::from_raw(fat_dest_ptr)
    }
    /// Tries to take the GC lock on this node by setting the lock bit in
    /// its `next` decoration. Returns true on success. On failure (someone
    /// else holds the lock) the disturbed flag is set instead, so the
    /// current lock holder knows to re-run, and false is returned.
    fn lock_node_for_gc(&self) -> bool {
        loop {
            let cur_next = self.next.load(atomic::Ordering::SeqCst);
            let decoration = get_decoration(cur_next);
            debug_println!(
                "loaded {:x?}.next, got {:?} (decoration: {:?})",
                self as *const ItemHolder<T, M>,
                cur_next,
                decoration
            );
            if decoration.is_unlocked() {
                let decorated = decorate(undecorate(cur_next), decoration.with_gc());
                let success = self
                    .next
                    .compare_exchange(
                        cur_next,
                        decorated as *mut _,
                        Ordering::SeqCst,
                        atomic::Ordering::SeqCst,
                    )
                    .is_ok();
                debug_println!(
                    "Locking node {:x?}, result: {} (prior decoration: {:?}, new {:?})",
                    self as *const ItemHolder<T, M>,
                    success,
                    decoration,
                    get_decoration(decorated)
                );
                if !success {
                    // Raced with another decoration change; retry.
                    continue;
                }
                return true;
            } else {
                debug_println!(
                    "Locking node {:x?} failed, already decorated: {:?}",
                    self as *const ItemHolder<T, M>,
                    decoration
                );
                // Already locked: mark it disturbed so the holder re-runs.
                if !decoration.is_disturbed() {
                    let decorated = decorate(undecorate(cur_next), decoration.with_disturbed());
                    match self.next.compare_exchange(
                        cur_next,
                        decorated as *mut _,
                        Ordering::SeqCst,
                        atomic::Ordering::SeqCst,
                    ) {
                        Ok(_) => {
                            debug_println!("Locking node {:x?} failed, set disturbed flag, new value: {:x?} (={:?})", self as * const ItemHolder<T, M>, decorated, get_decoration(decorated));
                        }
                        Err(prior) => {
                            // Retry unless someone else already disturbed it.
                            if !get_decoration(prior).is_disturbed() {
                                continue;
                            }
                        }
                    }
                }
                return false;
            }
        }
    }
    /// Releases the GC lock, clearing both the lock and disturbed flags.
    /// Returns true if the node was disturbed while locked (i.e. some
    /// other thread wanted the lock and the caller should re-run).
    #[must_use]
    fn unlock_node(&self) -> bool {
        debug_println!("Unlocking node {:x?}", self as *const ItemHolder<T, M>);
        loop {
            let cur_next = self.next.load(atomic::Ordering::Relaxed);
            let decoration = get_decoration(cur_next);
            let undecorated = decorate(undecorate(cur_next), decoration.without_gc_and_disturbed());
            #[cfg(feature = "validate")]
            assert!(
                decoration.is_locked(),
                "node {:x?} was not actually locked, decoration: {:?}",
                self as *const _,
                decoration
            );
            debug_println!("Unlocked dec {:x?}: {:x?}", self as *const Self, decoration);
            if self
                .next
                .compare_exchange(
                    cur_next,
                    undecorated as *mut _,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                return decoration.is_disturbed();
            }
        }
    }
}
// Rolling counter used under loom/shuttle validation to stamp both magic
// words with the same value, letting `verify` cross-check them.
#[cfg(all(feature = "std", any(loom, feature = "shuttle"), feature = "validate"))]
static MAGIC: std::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new(0);
impl<T: ?Sized, M: IMetadata> ItemHolder<T, M> {
    /// Validate-only integrity check: aborts (or panics under
    /// loom/shuttle) when the node's magic words are corrupted, which
    /// typically indicates use-after-free or a stray write.
    #[cfg_attr(test, mutants::skip)]
    #[cfg(feature = "validate")]
    fn verify(ptr2: *const ItemHolder<T, M>) {
        {
            // The magic fields live in the common header, so the payload
            // type can be erased to () for this check.
            let ptr = ptr2 as *const ItemHolder<(), M>;
            let atomic_magic1 = unsafe { &*std::ptr::addr_of!((*ptr).magic1) };
            let atomic_magic2 = unsafe { &*std::ptr::addr_of!((*ptr).magic2) };
            let magic1 = atomic_magic1.load(Ordering::Relaxed);
            let magic2 = atomic_magic2.load(Ordering::Relaxed);
            // Upper halves must keep their initialization signatures.
            if magic1 >> 16 != 0xbeef {
                debug_println!(
                    "Internal error - bad magic1 in {:?}: {} ({:x})",
                    ptr,
                    magic1,
                    magic1
                );
                debug_println!("Backtrace: {}", std::backtrace::Backtrace::capture());
                std::process::abort();
            }
            if magic2 >> 16 != 0x1234 {
                debug_println!(
                    "Internal error - bad magic2 in {:?}: {} ({:x})",
                    ptr,
                    magic2,
                    magic2
                );
                debug_println!("Backtrace: {}", std::backtrace::Backtrace::capture());
                std::process::abort();
            }
            #[cfg(not(any(loom, feature = "shuttle")))]
            {
                // Outside loom/shuttle the low halves keep their initial
                // constant values.
                let m1 = magic1 & 0xffff;
                let m2 = magic2 & 0xffff;
                if m1 != 0x8111 || m2 != 0x8111 {
                    debug_println!("Internal error - bad magic in {:?} {:x} {:x}", ptr, m1, m2);
                }
            }
            #[cfg(any(loom, feature = "shuttle"))]
            {
                // Under loom/shuttle the low halves are restamped with a
                // shared counter; both halves must always agree.
                let diff = (magic1 & 0xffff) as isize - (magic2 & 0xffff) as isize;
                if diff != 0 {
                    debug_println!(
                        "Internal error - bad magics in {:?}: {} ({:x}) and {} ({:x})",
                        ptr,
                        magic1,
                        magic1,
                        magic2,
                        magic2
                    );
                    debug_println!("Backtrace: {}", std::backtrace::Backtrace::capture());
                    panic!();
                }
                let magic = MAGIC.fetch_add(1, Ordering::Relaxed);
                let magic = magic as isize as usize;
                atomic_magic1.fetch_and(0xffff_0000, Ordering::Relaxed);
                atomic_magic2.fetch_and(0xffff_0000, Ordering::Relaxed);
                atomic_magic1.fetch_or(magic, Ordering::Relaxed);
                atomic_magic2.fetch_or(magic, Ordering::Relaxed);
            }
        }
    }
}
/// Validation hook, monomorphized per metadata flavor: checks that `_ptr`
/// is undecorated and that the holder's magic words are intact. Compiles
/// to nothing without the `validate` feature.
#[inline]
#[allow(unused)]
#[cfg_attr(test, mutants::skip)]
fn verify_item_impl<T: ?Sized, M: IMetadata>(_ptr: *const ItemHolderDummy<T>) {
    #[cfg(feature = "validate")]
    {
        assert_is_undecorated::<T, M>(_ptr);
        let ptr = _ptr;
        let x = undecorate(ptr);
        if x != ptr {
            panic!("Internal error in ArcShift: Pointer given to verify was decorated, it shouldn't have been! {:?}", ptr);
        }
        if x.is_null() {
            return;
        }
        ItemHolder::<T, M>::verify(from_dummy::<T, M>(x))
    }
}
/// Dispatches `verify_item_impl` with the right metadata flavor for `T`.
#[inline]
#[cfg_attr(test, mutants::skip)]
fn verify_item<T: ?Sized>(_ptr: *const ItemHolderDummy<T>) {
    #[cfg(feature = "validate")]
    {
        if is_sized::<T>() {
            verify_item_impl::<T, SizedMetadata>(_ptr)
        } else {
            verify_item_impl::<T, UnsizedMetadata<T>>(_ptr)
        }
    }
}
// Decoration bit flags stored in the low 3 bits of `next` pointers.
const ITEM_STATE_LOCKED_FLAG: u8 = 1;
const ITEM_STATE_DROPPED_FLAG: u8 = 2;
const ITEM_STATE_DISTURBED_FLAG: u8 = 4;
/// The eight possible decorations of a node's `next` pointer — every
/// combination of the locked (GC), payload-dropped and disturbed flags.
/// Discriminants must stay within 0..=7 so they fit in the low bits of an
/// 8-aligned pointer (see `decorate` / `get_decoration`).
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[allow(unused)]
enum ItemStateEnum {
    UndisturbedUndecorated = 0,
    UndisturbedGcIsActive = 1,
    UndisturbedPayloadDropped = 2,
    UndisturbedPayloadDroppedAndGcActive = 3,
    DisturbedUndecorated = 4,
    DisturbedGcIsActive = 5,
    DisturbedPayloadDropped = 6,
    DisturbedPayloadDroppedAndGcActive = 7,
}
impl ItemStateEnum {
    /// True if the GC/lock flag is set.
    #[allow(unused)]
    fn is_locked(self) -> bool {
        (self as u8 & ITEM_STATE_LOCKED_FLAG) != 0
    }
    /// True if the disturbed flag is set.
    fn is_disturbed(self) -> bool {
        (self as u8 & ITEM_STATE_DISTURBED_FLAG) != 0
    }
    /// True if the GC/lock flag is clear.
    fn is_unlocked(self) -> bool {
        (self as u8 & ITEM_STATE_LOCKED_FLAG) == 0
    }
    /// Returns this state with the payload-dropped flag set.
    fn dropped(self) -> ItemStateEnum {
        // SAFETY: every u8 value in 0..=7 is a declared variant, and OR-ing
        // a flag bit keeps the value within that range.
        unsafe { core::mem::transmute::<u8, ItemStateEnum>(self as u8 | ITEM_STATE_DROPPED_FLAG) }
    }
    /// Returns this state with the GC/lock flag set.
    fn with_gc(self) -> ItemStateEnum {
        // SAFETY: result stays within 0..=7; all values are valid variants.
        unsafe { core::mem::transmute::<u8, ItemStateEnum>(self as u8 | ITEM_STATE_LOCKED_FLAG) }
    }
    /// Returns this state with the disturbed flag set.
    fn with_disturbed(self) -> ItemStateEnum {
        // SAFETY: result stays within 0..=7; all values are valid variants.
        unsafe { core::mem::transmute::<u8, ItemStateEnum>(self as u8 | ITEM_STATE_DISTURBED_FLAG) }
    }
    /// Returns this state with both the GC and disturbed flags cleared.
    fn without_gc_and_disturbed(self) -> ItemStateEnum {
        // SAFETY: result stays within 0..=7; all values are valid variants.
        unsafe {
            core::mem::transmute::<u8, ItemStateEnum>(
                self as u8 & (!ITEM_STATE_LOCKED_FLAG) & (!ITEM_STATE_DISTURBED_FLAG),
            )
        }
    }
    /// True if the payload-dropped flag is set.
    fn is_dropped(self) -> bool {
        (self as u8 & ITEM_STATE_DROPPED_FLAG) != 0
    }
}
/// Replaces the low 3 decoration bits of `ptr` with state `e`. Implemented
/// with byte-offset arithmetic via `wrapping_offset`, which preserves the
/// pointer's provenance (unlike an int round-trip).
fn decorate<T: ?Sized>(
    ptr: *const ItemHolderDummy<T>,
    e: ItemStateEnum,
) -> *const ItemHolderDummy<T> {
    let curdecoration = (ptr as usize) & 7;
    ((ptr as *const u8).wrapping_offset((e as isize) - (curdecoration as isize)))
        as *const ItemHolderDummy<T>
}
/// Reads the decoration from the low 3 bits of `ptr`; a null pointer is
/// reported as `UndisturbedUndecorated`.
fn get_decoration<T: ?Sized>(ptr: *const ItemHolderDummy<T>) -> ItemStateEnum {
    if ptr.is_null() {
        return ItemStateEnum::UndisturbedUndecorated;
    }
    let raw = ((ptr as usize) & 7) as u8;
    // SAFETY: every value in 0..=7 is a declared ItemStateEnum variant.
    unsafe { core::mem::transmute::<u8, ItemStateEnum>(raw) }
}
/// Validate-only check that `_ptr` carries no decoration bits.
#[allow(unused)]
#[cfg_attr(test, mutants::skip)]
#[inline]
fn assert_is_undecorated<T: ?Sized, M: IMetadata>(_ptr: *const ItemHolderDummy<T>) {
    #[cfg(feature = "validate")]
    {
        let raw = _ptr as usize & 7;
        if raw != 0 {
            panic!("Internal error in ArcShift - unexpected decorated pointer");
        }
    }
}
/// Clears the low 3 decoration bits, recovering the plain node pointer.
#[inline(always)]
fn undecorate<T: ?Sized>(cand: *const ItemHolderDummy<T>) -> *const ItemHolderDummy<T> {
    let raw = cand as usize & 7;
    (cand as *const u8).wrapping_offset(-(raw as isize)) as *const ItemHolderDummy<T>
}
impl<T: ?Sized> Clone for ArcShift<T> {
    /// Clones the strong handle: bumps the strong count, then advances the
    /// new handle to the newest node in the chain. Destructor panics from
    /// superseded payloads are deferred through a DropHandler and resumed
    /// only after bookkeeping completes.
    fn clone(&self) -> Self {
        let t = with_holder!(self.item, T, item, {
            let mut drop_jobs = DropHandler::default();
            let result = do_clone_strong(item, &mut drop_jobs);
            drop_jobs.resume_any_panics();
            result
        });
        ArcShift {
            // SAFETY: do_clone_strong returns a node pointer derived from a
            // live chain — NOTE(review): non-null invariant is established
            // elsewhere in the crate; confirm it covers all paths.
            item: unsafe { NonNull::new_unchecked(t as *mut _) },
            pd: PhantomData,
        }
    }
}
impl<T: ?Sized> Clone for ArcShiftWeak<T> {
    /// Clones the weak handle by bumping the weak count of its node.
    fn clone(&self) -> Self {
        let t = with_holder!(self.item, T, item, do_clone_weak(item));
        ArcShiftWeak {
            // SAFETY: do_clone_weak returns the same (non-null) node
            // pointer that was passed in.
            item: unsafe { NonNull::new_unchecked(t as *mut _) },
        }
    }
}
/// Increments the strong count of `item_ptr`'s node (panicking if the
/// count would exceed `MAX_REF_COUNT`), then advances the reference to the
/// newest node in the chain, returning the advanced node.
fn do_clone_strong<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolder<T, M>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) -> *const ItemHolderDummy<T> {
    let item = unsafe { &*item_ptr };
    debug_println!(
        "Strong clone, about to access strong count of {:x?}",
        item_ptr
    );
    let strong_count = item.strong_count.fetch_add(1, Ordering::Relaxed);
    if strong_count > MAX_REF_COUNT {
        // Back out the increment before panicking so counts stay balanced.
        item.strong_count.fetch_sub(1, Ordering::Relaxed);
        panic!("strong ref count max limit exceeded")
    }
    debug_println!(
        "strong-clone, new strong count: {:x?} is {}",
        item_ptr,
        strong_count + 1
    );
    #[cfg(feature = "validate")]
    assert!(strong_count > 0);
    do_advance_strong::<T, M>(to_dummy::<T, M>(item_ptr), drop_job_queue)
}
/// Increments the weak count of `item_ptr`'s node (panicking near the
/// overflow limit) and returns the same node as a dummy pointer.
fn do_clone_weak<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolder<T, M>,
) -> *const ItemHolderDummy<T> {
    let item = unsafe { &*item_ptr };
    let weak_count = item.weak_count.fetch_add(1, Ordering::Relaxed);
    if get_weak_count(weak_count) > MAX_REF_COUNT {
        // Back out the increment before panicking so counts stay balanced.
        item.weak_count.fetch_sub(1, Ordering::Relaxed);
        panic!("weak ref count max limit exceeded")
    }
    debug_println!(
        "==> weak-clone, fetch_add, new weak count: {:x?} is {}",
        item_ptr,
        format_weak(weak_count + 1)
    );
    #[cfg(feature = "validate")]
    assert!(weak_count > 0);
    to_dummy(item_ptr)
}
/// Attempts to upgrade a weak reference at `item_ptr` to a strong one.
/// Takes a temporary extra weak reference, advances to the newest node,
/// and CAS-increments that node's strong count. Returns the upgraded node,
/// or None if the newest payload has already been dropped (in which case
/// the weak reference is released via `do_drop_weak`).
fn do_upgrade_weak<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolder<T, M>,
    jobq: &mut DropHandler<T>,
) -> Option<*const ItemHolderDummy<T>> {
    debug_println!("executing do_upgrade_weak");
    let start_item = unsafe { &*(item_ptr) };
    {
        // Hold an extra weak reference for the duration of the traversal.
        let weak_count = start_item.weak_count.fetch_add(1, Ordering::Relaxed);
        if get_weak_count(weak_count) > MAX_REF_COUNT {
            start_item.weak_count.fetch_sub(1, Ordering::Relaxed);
            panic!("weak ref count max limit exceeded")
        }
        debug_println!(
            "==> do_upgrade_weak {:x?} incr weak count to {}",
            item_ptr,
            format_weak(weak_count + 1)
        );
    }
    let mut item_ptr = to_dummy(item_ptr);
    loop {
        item_ptr = do_advance_weak::<T, M>(item_ptr);
        let item = unsafe { &*from_dummy::<T, M>(item_ptr) };
        let prior_strong_count = item.strong_count.load(Ordering::SeqCst);
        let item_next = item.next.load(Ordering::SeqCst);
        if !undecorate(item_next).is_null() {
            // A newer node appeared; re-advance.
            continue;
        }
        if !get_decoration(item_next).is_dropped() {
            if prior_strong_count == 0 {
                // Going 0 -> 1 strong requires the strong side's implicit
                // weak reference to exist; add it before the CAS.
                let _prior_weak_count = item.weak_count.fetch_add(1, Ordering::SeqCst);
                debug_println!(
                    "==> pre-upgrade strong=0 {:x?}, increase weak to {}",
                    item_ptr,
                    get_weak_count(_prior_weak_count) + 1
                );
            }
            if item
                .strong_count
                .compare_exchange(
                    prior_strong_count,
                    prior_strong_count + 1,
                    Ordering::SeqCst,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                let item = unsafe { &*from_dummy::<T, M>(item_ptr) };
                // Re-check: a newer node may have appeared concurrently.
                let item_next = item.next.load(Ordering::SeqCst);
                if !undecorate(item_next).is_null() {
                    // Roll the strong increment back and retry.
                    let new_prior_strong = item.strong_count.fetch_sub(1, Ordering::SeqCst);
                    if new_prior_strong == 1 {
                        let _prior_weak_count = item.weak_count.fetch_sub(1, Ordering::SeqCst);
                        #[cfg(feature = "validate")]
                        assert!(get_weak_count(_prior_weak_count) > 1);
                        debug_println!(
                            "==> {:x?} reduced weak to {}",
                            item_ptr,
                            get_weak_count(_prior_weak_count - 1)
                        );
                    }
                    continue;
                }
                // Success: release the temporary weak reference.
                let _reduce_weak = item.weak_count.fetch_sub(1, Ordering::SeqCst);
                debug_println!(
                    "==> upgrade success, new strong_count: {}, new weak: {}",
                    prior_strong_count + 1,
                    format_weak(_reduce_weak - 1)
                );
                #[cfg(feature = "validate")]
                assert!(
                    get_weak_count(_reduce_weak) > 1,
                    "assertion failed: in do_upgrade_weak(1), reduced weak {item_ptr:x?} to 0"
                );
                return Some(item_ptr);
            } else {
                if prior_strong_count == 0 {
                    // CAS lost: undo the weak reference added above.
                    let _prior_weak_count = item.weak_count.fetch_sub(1, Ordering::SeqCst);
                    debug_println!(
                        "==> pre-upgrade strong=0 race2 {:x?}, decrease weak to {}",
                        item_ptr,
                        get_weak_count(_prior_weak_count) - 1
                    );
                    #[cfg(feature = "validate")]
                    assert!(get_weak_count(_prior_weak_count) > 1);
                }
                debug_println!("Race on strong_count _increase_ - loop.");
                continue;
            }
        } else {
            // Newest payload is gone; upgrade impossible. Release our ref.
            do_drop_weak::<T, M>(item_ptr, jobq);
            return None;
        }
    }
}
/// Core traversal: walks from `item_ptr` along `next` links to the newest
/// node, carrying a temporary weak reference from hop to hop (guarded by
/// `advance_count` so janitor tasks don't free the link mid-hop). When the
/// rightmost node is reached, `update(start, rightmost)` transfers the
/// caller's reference flavor; if it fails (e.g. payload gone), the loop
/// retries. Returns the final node pointer.
fn do_advance_impl<T: ?Sized, M: IMetadata>(
    mut item_ptr: *const ItemHolderDummy<T>,
    mut update: impl FnMut(*const ItemHolder<T, M>, *const ItemHolder<T, M>) -> bool,
) -> *const ItemHolderDummy<T> {
    let start_ptr = item_ptr;
    verify_item(item_ptr);
    debug_println!("advancing from {:x?}", item_ptr);
    loop {
        debug_println!("In advance-loop, item_ptr: {:x?}", item_ptr);
        let item: &ItemHolder<T, M> = unsafe { &*from_dummy(item_ptr as *mut _) };
        let next_ptr = undecorate(item.next.load(Ordering::SeqCst));
        debug_println!("advancing from {:x?}, next_ptr = {:x?}", item_ptr, next_ptr);
        if next_ptr.is_null() {
            // Rightmost node reached.
            #[allow(clippy::collapsible_if)]
            if item_ptr != start_ptr {
                if !update(from_dummy(start_ptr), from_dummy(item_ptr)) {
                    debug_println!("upgrade failed, probably no payload");
                    continue;
                }
            }
            return item_ptr;
        }
        // Pin this node so its next link stays traversable while we take a
        // weak reference on the successor.
        let _advanced = item.advance_count.fetch_add(1, Ordering::SeqCst);
        debug_println!(
            "advance: Increasing {:x?}.advance_count to {}",
            item_ptr,
            _advanced + 1
        );
        atomic::fence(Ordering::SeqCst);
        // Re-read next after the pin; it may only go from null to non-null.
        let next_ptr = undecorate(item.next.load(Ordering::SeqCst));
        #[cfg(feature = "validate")]
        assert_ne!(next_ptr, item_ptr);
        let next: &ItemHolder<T, M> = unsafe { &*from_dummy(next_ptr) };
        debug_println!("advance: Increasing next(={:x?}).weak_count", next_ptr);
        let _res = next.weak_count.fetch_add(1, Ordering::SeqCst);
        debug_println!(
            "==> do_advance_impl: increment weak of {:?}, was {}, now {}",
            next_ptr,
            format_weak(_res),
            format_weak(_res + 1),
        );
        // Unpin the current node.
        let _advanced = item.advance_count.fetch_sub(1, Ordering::SeqCst);
        debug_println!(
            "advance: Decreasing {:x?}.advance_count to {}",
            item_ptr,
            _advanced - 1
        );
        if item_ptr != start_ptr {
            // Drop the temporary weak reference held on intermediate nodes
            // (the start node's reference belongs to the caller).
            debug_println!("do_advance_impl: decrease weak from {:x?}", item_ptr);
            let _res = item.weak_count.fetch_sub(1, Ordering::SeqCst);
            debug_println!(
                "==> do_advance_impl: decrease weak from {:x?} - decreased to {}",
                item_ptr,
                format_weak(_res - 1)
            );
            #[cfg(feature = "validate")]
            assert!(
                get_weak_count(_res) > 1,
                "non rightmost node {:x?} can't be left with 0 weak count, but weak became {}",
                item_ptr,
                get_weak_count(_res) - 1
            );
        }
        item_ptr = next_ptr;
    }
}
/// Advances a weak reference to the newest node. The `update` callback
/// simply releases the caller's weak reference on the start node; the
/// weak reference on the rightmost node was already taken during the
/// traversal in `do_advance_impl`.
fn do_advance_weak<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolderDummy<T>,
) -> *const ItemHolderDummy<T> {
    do_advance_impl(
        item_ptr,
        |a: *const ItemHolder<T, M>, _b: *const ItemHolder<T, M>| {
            let _a_weak = unsafe { (*a).weak_count.fetch_sub(1, Ordering::SeqCst) };
            debug_println!(
                "==> weak advance {:x?}, decremented weak counts to a:{:x?}={}",
                a,
                a,
                format_weak(_a_weak.wrapping_sub(1)),
            );
            #[cfg(feature = "validate")]
            assert!(get_weak_count(_a_weak) > 1);
            // Weak transfers always succeed.
            true
        },
    )
}
/// Advances a strong reference from node `a` (start) to node `b` (newest).
/// The update callback CAS-increments `b`'s strong count (adding the
/// strong side's implicit weak reference first when going 0 -> 1), then
/// releases the strong reference on `a`, possibly dropping `a`'s payload.
/// Returns false (retry) if a newer node appears mid-transfer.
#[inline(never)]
fn do_advance_strong<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolderDummy<T>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) -> *const ItemHolderDummy<T> {
    do_advance_impl::<_, M>(item_ptr, move |a, b| {
        let mut b_strong = unsafe { (*b).strong_count.load(Ordering::SeqCst) };
        loop {
            debug_println!(
                "do_advance_strong {:x?} -> {:x?} (b-count = {})",
                a,
                b,
                b_strong
            );
            if b_strong == 0 {
                // Resurrecting strong from 0 needs the strong side's
                // implicit weak reference restored first.
                let _prior_weak = unsafe { (*b).weak_count.fetch_add(1, Ordering::SeqCst) };
                debug_println!(
                    "==> Prior to strong_count increment {:x?} from 0, added weak count. Weak now: {}",
                    b,
                    format_weak(_prior_weak + 1)
                );
            }
            match unsafe {
                (*b).strong_count.compare_exchange(
                    b_strong,
                    b_strong + 1,
                    Ordering::SeqCst,
                    atomic::Ordering::SeqCst,
                )
            } {
                Ok(_) => {
                    debug_println!(
                        "b-strong increased {:x?}: {} -> {}",
                        b,
                        b_strong,
                        b_strong + 1
                    );
                    // Re-check: if b gained a successor, it is no longer
                    // the newest node; roll back and signal retry.
                    let next_ptr = unsafe { (*b).next.load(Ordering::SeqCst) };
                    if !undecorate(next_ptr).is_null() {
                        let prior_strong =
                            unsafe { (*b).strong_count.fetch_sub(1, Ordering::SeqCst) };
                        debug_println!("do_advance_strong:rare race, new _next_ appeared when increasing strong count: {:?} (decr strong back to {}) <|||||||||||||||||||||||||||||||||||||||||||||||||||||||>", b, prior_strong -1);
                        #[cfg(feature = "validate")]
                        assert!(
                            prior_strong > 0,
                            "strong count {b:x?} must be >0, but was 0"
                        );
                        if prior_strong == 1 {
                            // Strong went back to 0: remove its implicit
                            // weak reference too.
                            let _b_weak_count =
                                unsafe { (*b).weak_count.fetch_sub(1, Ordering::SeqCst) };
                            debug_println!("==> do_advance_strong:rare2 {:x?} reduced strong count back to 0, reduced weak to: {}", b, get_weak_count(_b_weak_count-1));
                            #[cfg(feature = "validate")]
                            assert!(get_weak_count(_b_weak_count) > 1);
                        }
                        debug_println!("race, other thread added new node");
                        return false;
                    }
                    #[cfg(feature = "validate")]
                    assert!(!get_decoration(next_ptr).is_dropped());
                    // Release the traversal's temporary weak ref on b.
                    let _b_weak_count = unsafe { (*b).weak_count.fetch_sub(1, Ordering::SeqCst) };
                    debug_println!(
                        "==> strong advance {:x?}, reducing free weak b-count to {:?}",
                        b,
                        format_weak(_b_weak_count - 1)
                    );
                    #[cfg(feature = "validate")]
                    assert!(get_weak_count(_b_weak_count) > 1);
                    // Release the caller's strong reference on the start
                    // node a.
                    let a_strong = unsafe { (*a).strong_count.fetch_sub(1, Ordering::SeqCst) };
                    #[cfg(feature = "validate")]
                    assert_ne!(a_strong, 0);
                    debug_println!(
                        "a-strong {:x?} decreased {} -> {}",
                        a,
                        a_strong,
                        a_strong - 1,
                    );
                    if a_strong == 1 {
                        // Last strong ref on a is gone: drop its payload
                        // (if safe) and its implicit weak reference.
                        do_drop_payload_if_possible(a, false, drop_job_queue);
                        let _a_weak = unsafe { (*a).weak_count.fetch_sub(1, Ordering::SeqCst) };
                        debug_println!("==> do_advance_strong:maybe dropping payload of a {:x?} (because strong-count is now 0). Adjusted weak to {}", a, format_weak(_a_weak.wrapping_sub(1)));
                        #[cfg(feature = "validate")]
                        assert!(get_weak_count(_a_weak) > 0);
                    }
                    return true;
                }
                Err(err_value) => {
                    if b_strong == 0 {
                        // CAS lost: undo the weak reference added above.
                        let _prior_weak = unsafe { (*b).weak_count.fetch_sub(1, Ordering::SeqCst) };
                        debug_println!("==> do_advance_strong: race on strong_count for {:x?} (restore-decr weak to {})", b, get_weak_count(_prior_weak - 1));
                        #[cfg(feature = "validate")]
                        assert!(get_weak_count(_prior_weak) > 1);
                    }
                    b_strong = err_value;
                }
            }
        }
    })
}
/// Final teardown of a single node: drops the payload (unless it was already
/// dropped, per the decoration on `next`) and then frees the allocation.
/// The caller must guarantee no other thread can still reach `item_ptr`.
fn raw_deallocate_node<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolder<T, M>,
    jobq: &mut impl IDropHandler<T, M>,
) {
    verify_item(to_dummy(item_ptr));
    raw_do_unconditional_drop_payload_if_not_dropped(item_ptr as *mut ItemHolder<T, M>, jobq);
    #[cfg(feature = "validate")]
    assert_eq!(
        // SAFETY: caller guarantees exclusive access to a still-live node.
        unsafe { (*item_ptr).strong_count.load(Ordering::SeqCst) },
        0,
        "{item_ptr:x?} strong count must be 0 when deallocating"
    );
    // Poison the magic markers so any use-after-free is caught by
    // verify_item in validate builds.
    #[cfg(feature = "validate")]
    {
        // SAFETY: caller guarantees exclusive access to a still-live node.
        let item_ref = unsafe { &*(item_ptr as *mut ItemHolder<T, M>) };
        item_ref.magic1.store(0xdeaddead, Ordering::Relaxed);
        item_ref.magic2.store(0xdeaddea2, Ordering::Relaxed);
    }
    do_dealloc(item_ptr);
}
/// Result of the janitor's scan over the nodes to the left of its start node,
/// describing whether any strong references were observed during the scan.
#[derive(PartialEq)]
enum NodeStrongStatus {
    /// At least one scanned node had a nonzero strong count.
    StrongRefsFound,
    /// The scan completed all the way to the leftmost node without seeing
    /// any strong references.
    NoStrongRefsExist,
    /// The scan did not complete (e.g. could not lock all nodes), so nothing
    /// can be concluded.
    Indeterminate,
}
/// Garbage-collection pass: starting at `start_ptr`, walks left along the
/// `prev` chain, locking each node, repointing left nodes' `next` at the start
/// node, and deallocating nodes that nothing references anymore.
///
/// Returns `(need_rerun, strong_status)`: `need_rerun` means the pass was
/// disturbed and should be re-attempted; `strong_status` reports whether any
/// strong references were seen among the scanned left-hand nodes.
fn do_janitor_task<T: ?Sized, M: IMetadata>(
    start_ptr: *const ItemHolder<T, M>,
    jobq: &mut impl IDropHandler<T, M>,
) -> (bool, NodeStrongStatus) {
    verify_item(to_dummy(start_ptr));
    let mut have_seen_left_strong_refs = false;
    // Maps the scan outcome to a status: only a scan that actually reached the
    // leftmost node is allowed to claim NoStrongRefsExist.
    fn get_strong_status(strong: bool, leftmost: bool) -> NodeStrongStatus {
        if strong {
            NodeStrongStatus::StrongRefsFound
        } else if leftmost {
            NodeStrongStatus::NoStrongRefsExist
        } else {
            NodeStrongStatus::Indeterminate
        }
    }
    debug_println!("Janitor task for {:x?}", start_ptr);
    // SAFETY: caller guarantees start_ptr points to a live ItemHolder.
    let start = unsafe { &*start_ptr };
    let start_ptr = to_dummy(start_ptr);
    if !start.lock_node_for_gc() {
        // Another thread is already garbage-collecting from this node.
        debug_println!("Janitor task for {:x?} - gc already active!", start_ptr);
        return (false, get_strong_status(have_seen_left_strong_refs, false));
    }
    debug_println!("Loading prev of {:x?}", start_ptr);
    let mut cur_ptr: *const _ = start.prev.load(Ordering::SeqCst);
    debug_println!("Loaded prev of {:x?}, got: {:x?}", start_ptr, cur_ptr);
    let rightmost_candidate_ptr = cur_ptr;
    if cur_ptr.is_null() {
        // No nodes to the left: nothing to collect.
        debug_println!("Janitor task for {:x?} - no earlier nodes exist", start_ptr);
        let need_rerun = start.unlock_node();
        return (
            need_rerun,
            get_strong_status(have_seen_left_strong_refs, true),
        );
    }
    let mut rightmost_deletable = null();
    // Phase 1: try to gc-lock every node to the left of the start node.
    // If any lock fails, unlock everything we did lock and give up this pass.
    {
        let mut any_failed_locks = false;
        let cur_lock_start = cur_ptr;
        let mut cur_ptr = cur_ptr;
        debug_println!("Lock sweep start at {:x?}", cur_lock_start);
        let mut failed_lock = null();
        loop {
            // SAFETY: nodes reachable through the prev-chain are kept alive
            // by the chain's weak references.
            let cur: &ItemHolder<T, M> = unsafe { &*from_dummy(cur_ptr) };
            if !cur.lock_node_for_gc() {
                failed_lock = cur_ptr;
                any_failed_locks = true;
                debug_println!(
                    "Janitor task for {:x?} - found node already being gced: {:x?}",
                    start_ptr,
                    cur_ptr
                );
                break;
            }
            let prev_ptr: *const _ = cur.prev.load(Ordering::SeqCst);
            debug_println!("Janitor loop considering {:x?} -> {:x?}", cur_ptr, prev_ptr);
            if prev_ptr.is_null() {
                debug_println!(
                    "Janitor task for {:x?} - found leftmost node: {:x?}",
                    start_ptr,
                    cur_ptr
                );
                break;
            }
            cur_ptr = prev_ptr;
        }
        if any_failed_locks {
            // Roll back: unlock every node locked so far (right-to-left up to
            // the node whose lock failed), then bail out of this pass.
            let mut cur_ptr = cur_lock_start;
            debug_println!("Cleanup sweep start at {:x?}", cur_lock_start);
            let mut anyrerun = false;
            loop {
                debug_println!("Cleanup sweep at {:x?}", cur_ptr);
                if cur_ptr.is_null() {
                    anyrerun = true;
                    break;
                }
                if cur_ptr == failed_lock {
                    debug_println!("Cleanup sweep is at end {:x?}", cur_ptr);
                    break;
                }
                // SAFETY: node is still on the chain and kept alive by it.
                let cur: &ItemHolder<T, M> = unsafe { &*from_dummy(cur_ptr) };
                let prev_ptr: *const _ = cur.prev.load(Ordering::SeqCst);
                anyrerun |= cur.unlock_node();
                debug_println!("Cleanup sweep going to {:x?}", prev_ptr);
                cur_ptr = prev_ptr;
            }
            anyrerun |= start.unlock_node();
            debug_println!("Janitor failed to grab locks. Rerun: {:?}", anyrerun);
            return (
                anyrerun,
                get_strong_status(have_seen_left_strong_refs, false),
            );
        }
    }
    // Phase 2: with all left nodes locked, point each node's `next` at the
    // start node, and note strong refs and in-progress advances.
    loop {
        debug_println!("Accessing {:x?} in first janitor loop", cur_ptr);
        // SAFETY: the node was gc-locked in phase 1 and is kept alive by the
        // chain's weak references.
        let cur: &ItemHolder<T, M> = unsafe { &*from_dummy(cur_ptr) };
        debug_println!("Setting next of {:x?} to {:x?}", cur_ptr, start_ptr,);
        #[cfg(feature = "validate")]
        assert_ne!(cur_ptr, start_ptr);
        if cur.strong_count.load(Ordering::SeqCst) > 0 {
            have_seen_left_strong_refs = true;
        }
        let prev_ptr: *const _ = cur.prev.load(Ordering::SeqCst);
        debug_println!("Janitor loop considering {:x?} -> {:x?}", cur_ptr, prev_ptr);
        if prev_ptr.is_null() {
            debug_println!(
                "Janitor task for {:x?} - found leftmost node: {:x?}",
                start_ptr,
                cur_ptr
            );
            break;
        }
        // SAFETY: prev_ptr is non-null (checked above) and on the locked chain.
        let prev: &ItemHolder<T, M> = unsafe { &*from_dummy(prev_ptr) };
        #[cfg(feature = "validate")]
        assert_ne!(prev_ptr, start_ptr);
        prev.set_next(start_ptr);
        atomic::fence(Ordering::SeqCst);
        let adv_count = prev.advance_count.load(Ordering::SeqCst);
        debug_println!("advance_count of {:x?} is {}", prev_ptr, adv_count);
        if adv_count > 0 {
            // Some thread is advancing through `prev`; nodes to its right
            // (from `cur` onwards) must not be deleted in this pass.
            debug_println!(
                "Janitor task for {:x?} - node {:x?} has advance in progress",
                start_ptr,
                prev_ptr
            );
            rightmost_deletable = cur_ptr;
        }
        cur_ptr = prev_ptr;
    }
    // Deletes a maximal run of nodes (walking left) whose weak count has
    // dropped to 1 (only the chain itself references them). Returns
    // Some(first surviving ptr, possibly null) iff anything was deleted.
    fn delete_nodes_that_can_be_deleted<T: ?Sized, M: IMetadata>(
        mut item_ptr: *const ItemHolderDummy<T>,
        jobq: &mut impl IDropHandler<T, M>,
    ) -> Option<*const ItemHolderDummy<T>> {
        let mut deleted_count = 0;
        loop {
            if item_ptr.is_null() {
                debug_println!(
                    "Find non-deleted {:x?}, count = {}, item_ptr = {:x?}",
                    item_ptr,
                    deleted_count,
                    item_ptr,
                );
                return (deleted_count > 0).then_some(item_ptr);
            }
            // SAFETY: item_ptr is non-null and the node is gc-locked by the
            // enclosing janitor pass.
            let item: &ItemHolder<T, M> = unsafe { &*from_dummy(item_ptr as *mut _) };
            let item_weak_count = item.weak_count.load(Ordering::SeqCst);
            if get_weak_count(item_weak_count) > 1 {
                // Something else still references this node; stop the run here.
                debug_println!(
                    "==> Find non-deleted {:x?}, count = {}, found node with weak count > 1 ({})",
                    item_ptr,
                    deleted_count,
                    format_weak(item_weak_count)
                );
                return (deleted_count > 0).then_some(item_ptr);
            }
            debug_println!(
                "==> Deallocating node in janitor task: {:x?}, dec: ?, weak count: {}",
                item_ptr,
                format_weak(item_weak_count),
            );
            #[cfg(feature = "validate")]
            {
                let prior_item_weak_count = item.weak_count.load(Ordering::SeqCst);
                #[cfg(feature = "validate")]
                assert_eq!(
                    get_weak_count(prior_item_weak_count),
                    1,
                    "{item_ptr:x?} weak_count should still be 1, and we decrement it to 0"
                );
            }
            // Read `prev` before deallocating; the node's memory is gone after.
            let prev_item_ptr = item.prev.load(Ordering::SeqCst);
            raw_deallocate_node(from_dummy::<T, M>(item_ptr as *mut _), jobq);
            deleted_count += 1;
            item_ptr = prev_item_ptr;
        }
    }
    // Phase 3: walk left once more, deleting deletable runs and splicing the
    // chain past them, unlocking nodes as we go.
    let mut cur_ptr = rightmost_candidate_ptr;
    let mut right_ptr = start_ptr;
    let mut must_see_before_deletes_can_be_made = rightmost_deletable;
    debug_println!("Starting final janitor loop");
    let mut need_rerun = false;
    // Unlocking is deferred by one step (a 'carry'): a node is only unlocked
    // after we have finished touching its right-hand neighbour.
    let mut unlock_carry: Option<*const ItemHolder<T, M>> = None;
    let mut do_unlock = |node: Option<*const ItemHolder<T, M>>| {
        if let Some(unlock) = unlock_carry.take() {
            // SAFETY: the carried pointer was a valid, gc-locked node, and
            // has not been deallocated (only nodes further left are deleted).
            need_rerun |= unsafe { (*unlock).unlock_node() };
        }
        unlock_carry = node;
    };
    #[cfg(feature = "validate")]
    assert!(!cur_ptr.is_null());
    while !cur_ptr.is_null() {
        // Deletion is only allowed after passing the rightmost node that had
        // an advance in progress (see phase 2).
        let new_predecessor = must_see_before_deletes_can_be_made
            .is_null()
            .then(|| delete_nodes_that_can_be_deleted::<T, M>(cur_ptr, jobq))
            .flatten();
        if cur_ptr == must_see_before_deletes_can_be_made {
            debug_println!("Found must_see node: {:x?}", cur_ptr);
            must_see_before_deletes_can_be_made = null_mut();
        }
        if let Some(new_predecessor_ptr) = new_predecessor {
            // Nodes were deleted: splice right_ptr.prev to the survivor.
            // SAFETY: right_ptr is a live node (the start node or one kept).
            let right: &ItemHolder<T, M> = unsafe { &*from_dummy(right_ptr) };
            debug_println!(
                "After janitor delete, setting {:x?}.prev = {:x?}",
                right_ptr,
                new_predecessor_ptr
            );
            if new_predecessor_ptr.is_null() {
                // No nodes remain to the left; clear the 'have prev' flag bit.
                right
                    .weak_count
                    .fetch_and(!WEAK_HAVE_PREV, Ordering::SeqCst);
            }
            right
                .prev
                .store(new_predecessor_ptr as *mut _, Ordering::SeqCst);
            if new_predecessor_ptr.is_null() {
                debug_println!("new_predecessor is null");
                break;
            }
            debug_println!("found new_predecessor: {:x?}", new_predecessor_ptr);
            right_ptr = new_predecessor_ptr;
            // SAFETY: new_predecessor_ptr is non-null (checked above) and is
            // a surviving, still-locked node.
            let new_predecessor = unsafe { &*from_dummy::<T, M>(new_predecessor_ptr) };
            cur_ptr = new_predecessor.prev.load(Ordering::SeqCst);
            #[cfg(feature = "validate")]
            assert_ne!(
                new_predecessor_ptr as *const _,
                null(),
                "assert failed, {new_predecessor_ptr:?} was endptr"
            );
            do_unlock(Some(new_predecessor));
            debug_println!("New candidate advanced to {:x?}", cur_ptr);
        } else {
            debug_println!("Advancing to left, no node to delete found {:x?}", cur_ptr);
            // SAFETY: cur_ptr is non-null (loop condition) and the node lives.
            let cur: &ItemHolder<T, M> = unsafe { &*from_dummy(cur_ptr) };
            right_ptr = cur_ptr;
            cur_ptr = cur.prev.load(Ordering::SeqCst);
            debug_println!("Advancing to left, advanced to {:x?}", cur_ptr);
            do_unlock(Some(cur));
        }
    }
    // Flush the final carried unlock, then release the start node itself.
    do_unlock(None);
    debug_println!("Unlock start-node {:x?}", start_ptr);
    need_rerun |= start.unlock_node();
    debug_println!("start-node unlocked: {:x?}", start_ptr);
    (
        need_rerun,
        get_strong_status(have_seen_left_strong_refs, true),
    )
}
/// Releases one weak reference. Advances the reference to the rightmost node,
/// runs the janitor, and deallocates the node outright when this turns out to
/// be the very last reference keeping it alive.
fn do_drop_weak<T: ?Sized, M: IMetadata>(
    original_item_ptr: *const ItemHolderDummy<T>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) {
    debug_println!("drop weak {:x?}", original_item_ptr);
    let mut item_ptr = original_item_ptr;
    loop {
        debug_println!("drop weak loop {:?}", item_ptr);
        // Move our weak reference to the rightmost node first.
        item_ptr = do_advance_weak::<T, M>(item_ptr);
        let (need_rerun, strong_refs) =
            do_janitor_task(from_dummy::<T, M>(item_ptr), drop_job_queue);
        if need_rerun {
            debug_println!("Janitor {:?} was disturbed. Need rerun", item_ptr);
            continue;
        }
        debug_println!("Accessing {:?}", item_ptr);
        // SAFETY: we still hold a weak reference on item_ptr, keeping it alive.
        let item: &ItemHolder<T, M> = unsafe { &*from_dummy(item_ptr) };
        let prior_weak = item.weak_count.load(Ordering::SeqCst);
        #[cfg(feature = "validate")]
        assert!(get_weak_count(prior_weak) > 0);
        let next_ptr = item.next.load(Ordering::SeqCst);
        let have_next = !undecorate(next_ptr).is_null();
        if have_next {
            // A newer node appeared while we were working; start over.
            debug_println!(
                "add race {:x?}, prior_weak: {}",
                item_ptr,
                format_weak(prior_weak)
            );
            continue;
        }
        #[cfg(feature = "debug")]
        if get_weak_next(prior_weak) {
            debug_println!("raced in drop weak - {:x?} was previously advanced to rightmost, but now it has a 'next' again (next is ?)", item_ptr);
        }
        debug_println!("No add race, prior_weak: {}", format_weak(prior_weak));
        {
            // SAFETY: our weak reference keeps item_ptr alive (as above).
            let o_item = unsafe { &*from_dummy::<T, M>(item_ptr) };
            let o_strong = o_item.strong_count.load(Ordering::SeqCst);
            if o_strong == 0 {
                // The rightmost payload is normally kept alive, but when the
                // janitor proved no strong refs exist anywhere, nobody can
                // ever observe it again, so it may be dropped now.
                let can_drop_now_despite_next_check = if strong_refs
                    == NodeStrongStatus::NoStrongRefsExist
                {
                    debug_println!("final drop analysis {:x?}: Can drop this payload, because no strong refs exists anywhere", item_ptr);
                    true
                } else {
                    debug_println!("final drop analysis {:x?}: no exemption condition found, can't drop this payload", item_ptr);
                    false
                };
                if can_drop_now_despite_next_check {
                    do_drop_payload_if_possible(
                        o_item,
                        can_drop_now_despite_next_check,
                        drop_job_queue,
                    );
                }
            }
        }
        // Decrement the weak count; any concurrent change retries the loop.
        match item.weak_count.compare_exchange(
            prior_weak,
            prior_weak - 1,
            Ordering::SeqCst,
            Ordering::SeqCst,
        ) {
            Ok(_) => {
                debug_println!(
                    "==> drop weak {:x?}, did reduce weak to {}",
                    item_ptr,
                    format_weak(prior_weak - 1)
                );
            }
            Err(_err_weak) => {
                debug_println!(
                    "drop weak {:x?}, raced on count decrement (was {}, expected {})",
                    item_ptr,
                    format_weak(_err_weak),
                    format_weak(prior_weak)
                );
                continue;
            }
        }
        debug_println!(
            "Prior weak of {:x?} is {}",
            item_ptr,
            format_weak(prior_weak)
        );
        if get_weak_count(prior_weak) == 1 {
            // Ours was the last weak reference to this node.
            #[cfg(feature = "validate")]
            if get_weak_next(prior_weak) {
                std::eprintln!("This case should not be possible, since it implies the 'next' object didn't increase the refcount");
                std::process::abort();
            }
            if !get_weak_prev(prior_weak) {
                // No left-hand neighbours either: free the node outright.
                debug_println!("drop weak {:x?}, prior count = 1, raw deallocate", item_ptr);
                drop_job_queue.report_sole_user();
                raw_deallocate_node(from_dummy::<T, M>(item_ptr), drop_job_queue);
            } else {
                debug_println!(
                    "drop weak {:x?}, couldn't drop node, because there are nodes to the left",
                    item_ptr
                );
            }
        }
        return;
    }
}
/// Core update loop: advances to the rightmost node, asks `val_dummy_factory`
/// for a replacement node, and CAS-links it in as the new rightmost node.
/// A factory returning `None` cancels the update (used by `rcu_maybe`).
/// Returns the node the caller's strong reference designates afterwards.
fn do_update<T: ?Sized, M: IMetadata>(
    initial_item_ptr: *const ItemHolder<T, M>,
    mut val_dummy_factory: impl FnMut(&ItemHolder<T, M>) -> Option<*const ItemHolderDummy<T>>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) -> *const ItemHolderDummy<T> {
    let mut item_ptr = to_dummy::<T, M>(initial_item_ptr);
    verify_item(item_ptr);
    loop {
        item_ptr = do_advance_strong::<T, M>(item_ptr, drop_job_queue);
        // SAFETY: do_advance_strong leaves us holding a strong ref on item_ptr.
        let item = unsafe { &*from_dummy::<T, M>(item_ptr) };
        debug_println!(
            "Updating {:x?} , loading next of {:x?}",
            initial_item_ptr,
            item_ptr
        );
        let cur_next = item.next.load(Ordering::SeqCst);
        if !undecorate(cur_next).is_null() {
            // No longer rightmost; re-advance and try again.
            continue;
        }
        let Some(val_dummy) = val_dummy_factory(item) else {
            // Update cancelled by the factory.
            return item_ptr;
        };
        // Preserve the decoration bits currently carried by `next`.
        let new_next = decorate(val_dummy, get_decoration(cur_next));
        debug_println!("Updating {:x?} to {:x?}", item_ptr, val_dummy);
        let val = from_dummy::<T, M>(val_dummy) as *mut ItemHolder<T, M>;
        // SAFETY: `val` is not yet published to other threads, so plainly
        // overwriting its prev field is race-free.
        unsafe { (*val).prev = atomic::AtomicPtr::new(item_ptr as *mut _) };
        // The new node's prev link counts as one weak reference on `item`.
        let _weak_was = item.weak_count.fetch_add(1, Ordering::SeqCst);
        #[cfg(feature = "validate")]
        assert!(_weak_was > 0);
        debug_println!(
            "do_update: increment weak of {:?} (weak now: {})",
            item_ptr,
            format_weak(_weak_was + 1)
        );
        debug_println!("bef item.next.compare_exchange");
        match item.next.compare_exchange(
            cur_next,
            new_next as *mut _,
            Ordering::SeqCst,
            Ordering::SeqCst,
        ) {
            Ok(_) => {
                item.weak_count.fetch_or(WEAK_HAVE_NEXT, Ordering::SeqCst);
                debug_println!("aft1 item.next.compare_exchange");
            }
            Err(_) => {
                // Another thread linked a node first; undo the weak count we
                // added and retry against the (new) rightmost node.
                debug_println!("aft2 item.next.compare_exchange");
                debug_println!("race, update of {:x?} to {:x?} failed", item_ptr, new_next);
                let _res = item.weak_count.fetch_sub(1, Ordering::SeqCst);
                debug_println!(
                    "==> race, decreasing {:x?} weak to {}",
                    item_ptr,
                    format_weak(_res - 1)
                );
                #[cfg(feature = "validate")]
                assert!(get_weak_count(_res) > 1);
                continue;
            }
        }
        // Our strong reference moves from `item` to the new node.
        let strong_count = item.strong_count.fetch_sub(1, Ordering::SeqCst);
        debug_println!(
            "do_update: strong count {:x?} is now decremented to {}",
            item_ptr,
            strong_count - 1,
        );
        if strong_count == 1 {
            // We held the last strong ref on the old node: drop its payload
            // (if allowed) and the weak ref that the strong count represented.
            do_drop_payload_if_possible(from_dummy::<T, M>(item_ptr), false, drop_job_queue);
            let _weak_count = item.weak_count.fetch_sub(1, Ordering::SeqCst);
            debug_println!(
                "==> do_update: decrement weak of {:?}, new weak: {}",
                item_ptr,
                format_weak(_weak_count.saturating_sub(1))
            );
            #[cfg(feature = "validate")]
            assert!(get_weak_count(_weak_count) > 1);
        }
        item_ptr = val_dummy;
        if strong_count == 1 {
            // Run the janitor to reclaim any now-unreachable left-hand nodes.
            loop {
                let (need_rerun, _strong_refs) =
                    do_janitor_task(from_dummy::<T, M>(item_ptr), drop_job_queue);
                if need_rerun {
                    item_ptr = do_advance_strong::<T, M>(item_ptr, drop_job_queue);
                    debug_println!("Janitor {:?} was disturbed. Need rerun", item_ptr);
                    continue;
                }
                break;
            }
        }
        return item_ptr;
    }
}
/// Marks `item_ptr`'s payload as dropped (via the decoration bits on its
/// `next` pointer) and queues the actual drop — unless the node is the
/// rightmost one (holding the current value) and `override_rightmost_check`
/// is false, or the payload has already been dropped.
fn do_drop_payload_if_possible<T: ?Sized, M: IMetadata>(
    item_ptr: *const ItemHolder<T, M>,
    override_rightmost_check: bool,
    drop_queue: &mut impl IDropHandler<T, M>,
) {
    // SAFETY: caller guarantees item_ptr points to a live ItemHolder.
    let item = unsafe { &*(item_ptr) };
    loop {
        let next_ptr = item.next.load(Ordering::Relaxed);
        let decoration = get_decoration(next_ptr);
        // Rightmost node's payload is the current value and must stay alive,
        // unless the caller has proven nobody can ever observe it.
        if decoration.is_dropped() || (undecorate(next_ptr).is_null() && !override_rightmost_check)
        {
            debug_println!(
                "Not dropping {:x?} payload, because it's rightmost",
                item_ptr
            );
            return;
        }
        debug_println!(
            "Drop if possible, {:x?}, cur dec: {:?}, dropped dec {:?}",
            item_ptr,
            decoration,
            decoration.dropped()
        );
        // CAS the 'dropped' decoration in; whoever wins the race becomes
        // responsible for actually dropping the payload.
        match item.next.compare_exchange(
            next_ptr,
            decorate(undecorate(next_ptr), decoration.dropped()) as *mut _,
            Ordering::SeqCst,
            Ordering::SeqCst,
        ) {
            Ok(_) => {
                break;
            }
            Err(err_ptr) => {
                if !get_decoration(err_ptr).is_dropped() {
                    // next changed for another reason; retry the CAS.
                    debug_println!("Raced, new attempt to drop {:x?} payload", item_ptr);
                    continue;
                }
                // Someone else marked it dropped first; nothing left to do.
                debug_println!("Raced, {:x?} now already dropped", item_ptr);
                return;
            }
        }
    }
    debug_println!("Dropping payload {:x?} (dec: ?)", item_ptr,);
    drop_queue.do_drop(item_ptr as *mut _);
}
/// Drops the payload of `item_ptr` unless the decoration on its `next`
/// pointer already marks the payload as dropped.
fn raw_do_unconditional_drop_payload_if_not_dropped<T: ?Sized, M: IMetadata>(
    item_ptr: *mut ItemHolder<T, M>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) {
    debug_println!("Unconditional drop of payload {:x?}", item_ptr);
    // SAFETY: caller guarantees item_ptr points to a live ItemHolder.
    let holder = unsafe { &*item_ptr };
    let current_next = holder.next.load(Ordering::SeqCst);
    if get_decoration(current_next).is_dropped() {
        // Payload was already dropped earlier; nothing to do.
        return;
    }
    debug_println!(
        "Actual drop of payload {:x?}, it wasn't already dropped",
        item_ptr
    );
    drop_job_queue.do_drop(item_ptr);
}
/// Releases the heap allocation backing `item_ptr`. The payload must already
/// have been dropped (or stolen); only the memory itself is freed here.
fn do_dealloc<T: ?Sized, M: IMetadata>(item_ptr: *const ItemHolder<T, M>) {
    // SAFETY: caller guarantees item_ptr points to a live, exclusively
    // accessible holder that is about to be freed.
    let holder = unsafe { &*item_ptr };
    let holder_layout = get_holder_layout(&holder.payload);
    debug_println!("Calling dealloc {:x?}", item_ptr);
    // Poison the magic markers so stale pointers are detected in validate builds.
    #[cfg(feature = "validate")]
    {
        holder.magic1.store(0xdeaddead, Ordering::Relaxed);
        holder.magic2.store(0xdeaddea2, Ordering::Relaxed);
    }
    // SAFETY: the node was allocated with this same layout, and no references
    // to it outlive this call.
    unsafe { alloc::alloc::dealloc(item_ptr as *mut _, holder_layout) }
}
/// Releases one strong reference, first advancing it to the rightmost node.
/// If it was the last strong reference, the node's implicit weak reference
/// is released too (which may deallocate the chain).
fn do_drop_strong<T: ?Sized, M: IMetadata>(
    full_item_ptr: *const ItemHolder<T, M>,
    drop_job_queue: &mut impl IDropHandler<T, M>,
) {
    debug_println!("drop strong of {:x?}", full_item_ptr,);
    // Strong references always live on the rightmost node; move there first.
    let advanced_ptr = do_advance_strong::<T, M>(to_dummy(full_item_ptr), drop_job_queue);
    // SAFETY: do_advance_strong returns a live node on which we hold a
    // strong reference.
    let holder: &ItemHolder<T, M> = unsafe { &*from_dummy(advanced_ptr) };
    let prior_strong = holder.strong_count.fetch_sub(1, Ordering::SeqCst);
    debug_println!(
        "drop strong of {:x?}, prev count: {}, now {}",
        advanced_ptr,
        prior_strong,
        prior_strong - 1,
    );
    // Last strong reference gone: also release its backing weak reference.
    if prior_strong == 1 {
        do_drop_weak::<T, M>(advanced_ptr, drop_job_queue);
    }
}
impl<T: ?Sized> Drop for ArcShift<T> {
    fn drop(&mut self) {
        verify_item(self.item.as_ptr());
        debug_println!("executing ArcShift::drop({:x?})", self.item.as_ptr());
        with_holder!(self.item, T, holder, {
            let mut drop_queue = DropHandler::default();
            do_drop_strong(holder, &mut drop_queue);
            // Re-raise any panic captured while running payload destructors.
            drop_queue.resume_any_panics();
        })
    }
}
impl<T: ?Sized> Drop for ArcShiftWeak<T> {
    fn drop(&mut self) {
        verify_item(self.item.as_ptr());
        // Helper so the metadata type M can be named as a type parameter.
        fn run_weak_drop<T: ?Sized, M: IMetadata>(holder: *const ItemHolder<T, M>) {
            let mut drop_queue = DropHandler::default();
            do_drop_weak::<T, M>(to_dummy(holder), &mut drop_queue);
            // Re-raise any panic captured while running payload destructors.
            drop_queue.resume_any_panics();
        }
        with_holder!(self.item, T, holder, run_weak_drop(holder))
    }
}
/// Marker type used as the parameter of removed (deprecated) APIs. No value
/// of this type can be constructed outside this crate, which makes those
/// methods impossible to actually call.
pub struct NoLongerAvailableMarker {
    // Private field prevents construction by downstream crates.
    _priv: (),
}
impl<T: ?Sized> ArcShiftWeak<T> {
    /// Attempts to upgrade this weak handle to a strong [`ArcShift`] handle.
    ///
    /// Returns `None` when the upgrade is not possible (no payload is
    /// available any longer, per `do_upgrade_weak`).
    pub fn upgrade(&self) -> Option<ArcShift<T>> {
        let t = with_holder!(self.item, T, item, {
            let mut drop_handler = DropHandler::default();
            let temp = do_upgrade_weak(item, &mut drop_handler);
            // Re-raise any panic captured from payload destructors.
            drop_handler.resume_any_panics();
            temp
        });
        Some(ArcShift {
            // SAFETY: on success, do_upgrade_weak yields a non-null pointer
            // on which a strong reference is held.
            item: unsafe { NonNull::new_unchecked(t? as *mut _) },
            pd: PhantomData,
        })
    }
}
impl<T> ArcShift<T> {
    /// Creates a new `ArcShift` holding `val`.
    pub fn new(val: T) -> ArcShift<T> {
        let holder = make_sized_holder(val, null_mut());
        ArcShift {
            // SAFETY: make_sized_holder returns a fresh, non-null allocation.
            item: unsafe { NonNull::new_unchecked(to_dummy(holder) as *mut _) },
            pd: PhantomData,
        }
    }
    /// Consumes this handle, returning the payload if this was the sole user,
    /// or `None` otherwise.
    pub fn try_into_inner(self) -> Option<T> {
        // This handler steals (rather than drops) the payload when we turn
        // out to be the sole user during the drop-strong sequence.
        let mut drop_handler =
            deferred_panics_helper::stealing_drop::StealingDropHandler::default();
        verify_item(self.item.as_ptr());
        debug_println!("executing ArcShift::into_inner({:x?})", self.item.as_ptr());
        do_drop_strong(
            from_dummy::<T, SizedMetadata>(self.item.as_ptr()),
            &mut drop_handler,
        );
        // The reference was released above; don't run Drop a second time.
        core::mem::forget(self);
        drop_handler.take_stolen()
    }
    /// Replaces the payload with `val`, making it the value that all handles
    /// will observe once they reload.
    pub fn update(&mut self, val: T) {
        let holder = make_sized_holder(val, self.item.as_ptr() as *const ItemHolderDummy<T>);
        with_holder!(self.item, T, item, {
            let mut jobq = DropHandler::default();
            let new_item = unsafe {
                // SAFETY: do_update never returns null.
                NonNull::new_unchecked(
                    do_update(item, |_| Some(holder as *const _), &mut jobq) as *mut _
                )
            };
            jobq.resume_any_panics();
            self.item = new_item;
        });
    }
    /// Read-copy-update: computes a new value from the current one, installs
    /// it, and returns a reference to the handle's (new) current value.
    pub fn rcu(&mut self, mut update: impl FnMut(&T) -> T) -> &T {
        self.rcu_maybe(|x| Some(update(x)));
        (*self).shared_non_reloading_get()
    }
    /// Like [`ArcShift::rcu`], but `update` may return `None` to cancel the
    /// update. Returns `true` if an update was installed.
    ///
    /// `update` may run multiple times if other threads update concurrently;
    /// the candidate node allocated on the first attempt is reused on retries.
    pub fn rcu_maybe(&mut self, mut update: impl FnMut(&T) -> Option<T>) -> bool {
        // Candidate node allocated on a previous attempt, reused on retry.
        let mut holder: Option<*const ItemHolderDummy<T>> = None;
        let mut cancelled = false;
        let mut jobq = DropHandler::default();
        let new_item_ptr = do_update(
            from_dummy::<T, SizedMetadata>(self.item.as_ptr()),
            |prev: &ItemHolder<T, SizedMetadata>| -> Option<*const ItemHolderDummy<T>> {
                // SAFETY: `prev` is the rightmost node handed to us by
                // do_update while it holds a strong reference; its payload
                // is valid for the duration of this callback.
                let prev_payload: &ManuallyDrop<T> = unsafe { &*prev.payload.get() };
                let prev_payload: &T = prev_payload;
                let new_candidate: Option<T> = update(prev_payload);
                let Some(new_candidate) = new_candidate else {
                    // Cancelled: free any candidate node allocated earlier.
                    if let Some(holder) = holder {
                        let mut jobq = DropHandler::default();
                        let full_ptr = from_dummy::<T, SizedMetadata>(holder);
                        raw_do_unconditional_drop_payload_if_not_dropped(
                            full_ptr as *mut ItemHolder<T, SizedMetadata>,
                            &mut jobq,
                        );
                        do_dealloc(full_ptr);
                        jobq.resume_any_panics();
                    }
                    cancelled = true;
                    return None;
                };
                if let Some(holder) = holder {
                    // Retry: reuse the candidate node, replacing its payload.
                    // SAFETY: the candidate node is not yet published, so we
                    // have exclusive access to it.
                    let holder_mut: &mut ItemHolder<T, SizedMetadata> =
                        unsafe { &mut *(from_dummy::<T, SizedMetadata>(holder) as *mut _) };
                    // SAFETY: the old candidate payload is initialized and is
                    // dropped exactly once here before being overwritten.
                    unsafe {
                        ManuallyDrop::drop(&mut *holder_mut.payload.get());
                    }
                    holder_mut.payload = UnsafeCell::new(ManuallyDrop::new(new_candidate));
                    Some(holder)
                } else {
                    // First attempt: allocate a fresh candidate node.
                    let new_holder = to_dummy(make_sized_holder(
                        new_candidate,
                        self.item.as_ptr() as *const ItemHolderDummy<T>,
                    ));
                    holder = Some(new_holder);
                    Some(new_holder)
                }
            },
            &mut jobq,
        );
        // SAFETY: do_update never returns null.
        let new_unique_ptr = unsafe { NonNull::new_unchecked(new_item_ptr as *mut _) };
        jobq.resume_any_panics();
        self.item = new_unique_ptr;
        !cancelled
    }
}
// Stub retained only so code referencing the removed ArcShiftLight gets a
// clear deprecation message instead of a 'not found' error.
#[doc(hidden)]
#[deprecated = "ArcShiftLight has been removed. Please consider using ArcShiftWeak. It is similar, though not a direct replacement."]
pub struct ArcShiftLight {}
/// Guard returned by [`ArcShift::shared_get`]; dereferences to the payload.
pub enum SharedGetGuard<'a, T: ?Sized> {
    /// Direct borrow of the payload; nothing to release on drop.
    Raw(&'a T),
    /// Holds a temporary strong reference on `next`, released when the guard
    /// is dropped.
    LightCloned {
        #[doc(hidden)]
        next: *const ItemHolderDummy<T>,
    },
    /// Fallback: a full `ArcShift` clone (taken when the light path raced).
    FullClone {
        #[doc(hidden)]
        cloned: ArcShift<T>,
    },
}
impl<T: ?Sized> Drop for SharedGetGuard<'_, T> {
    #[inline(always)]
    fn drop(&mut self) {
        // Only LightCloned holds a strong reference that must be released
        // here; Raw borrows nothing, and FullClone's ArcShift drops itself.
        let SharedGetGuard::LightCloned { next } = self else {
            return;
        };
        if is_sized::<T>() {
            let holder_ptr = from_dummy::<_, SizedMetadata>(*next);
            // SAFETY: the guard's strong reference keeps the node alive.
            let holder = unsafe { &*holder_ptr };
            let mut drop_queue = DropHandler::default();
            do_drop_strong(holder, &mut drop_queue);
            drop_queue.resume_any_panics();
        } else {
            let holder_ptr = from_dummy::<_, UnsizedMetadata<T>>(*next);
            // SAFETY: the guard's strong reference keeps the node alive.
            let holder = unsafe { &*holder_ptr };
            let mut drop_queue = DropHandler::default();
            do_drop_strong(holder, &mut drop_queue);
            drop_queue.resume_any_panics();
        }
    }
}
impl<T: ?Sized> core::ops::Deref for SharedGetGuard<'_, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        match self {
            SharedGetGuard::Raw(payload) => payload,
            SharedGetGuard::FullClone { cloned } => cloned.shared_non_reloading_get(),
            SharedGetGuard::LightCloned { next } => {
                if is_sized::<T>() {
                    // SAFETY: the guard's strong reference keeps the node and
                    // its payload alive for the guard's lifetime.
                    unsafe { (*from_dummy::<_, SizedMetadata>(*next)).payload() }
                } else {
                    // SAFETY: as above, for the unsized representation.
                    unsafe { (*from_dummy::<_, UnsizedMetadata<T>>(*next)).payload() }
                }
            }
        }
    }
}
/// Slow path of `shared_get`: the node has a successor, so try to take a
/// temporary strong reference on that successor and return a `LightCloned`
/// guard for it. Returns `None` if a race makes that impossible; the caller
/// then falls back to a full clone.
fn slow_shared_get<T: ?Sized, M: IMetadata>(
    item: &ItemHolder<T, M>,
) -> Option<SharedGetGuard<'_, T>> {
    debug_println!("slow_shared_get: {:?} (advancing count)", item as *const _);
    // Announce an in-progress advance so the janitor won't delete `next`.
    item.advance_count.fetch_add(1, Ordering::SeqCst);
    #[cfg(loom)]
    atomic::fence(Ordering::SeqCst);
    debug_println!("advanced count");
    let next = item.next.load(Ordering::SeqCst);
    // The caller observed a non-null `next`, and `next` only ever goes from
    // null to non-null, so it must still be non-null here.
    assert!(!undecorate(next).is_null());
    debug_println!("slow_shared_get: {:?}, next = {:?}", item as *const _, next);
    // Fix: use this function's metadata parameter `M` instead of hardcoding
    // SizedMetadata — shared_get also calls this with M = UnsizedMetadata<T>,
    // for which the successor node is an ItemHolder<T, M>, not
    // ItemHolder<T, SizedMetadata>.
    let next_val = from_dummy::<_, M>(undecorate(next));
    // SAFETY: our advance_count keeps `next` from being deallocated.
    let sc = unsafe { (*next_val).strong_count.load(Ordering::Relaxed) };
    if sc == 0 {
        debug_println!("slow_shared sc == 0");
        item.advance_count.fetch_sub(1, Ordering::SeqCst);
        return None;
    }
    debug_println!("slow_shared sc #1 for {:?}", next_val);
    // SAFETY: our advance_count keeps `next` from being deallocated.
    let exc = unsafe {
        (*next_val)
            .strong_count
            .compare_exchange(sc, sc + 1, Ordering::SeqCst, Ordering::SeqCst)
    };
    debug_println!("slow_shared sc #1.5");
    if exc.is_err() {
        // Strong count changed under us; let the caller take the full path.
        debug_println!("slow_shared sc race");
        debug_println!("slow_shared_get: {:?}, sc == 0", item as *const _);
        item.advance_count.fetch_sub(1, Ordering::SeqCst);
        return None;
    }
    debug_println!("slow_shared sc #2");
    // SAFETY: we now hold a strong reference on next_val.
    let next_next = unsafe { (*next_val).next.load(Ordering::SeqCst) };
    if !undecorate(next_next).is_null() {
        // `next` was itself superseded meanwhile; release the strong ref we
        // just took and bail out.
        debug_println!("slow_shared_get: {:?}, was dropped", item as *const _);
        let mut dropq = DropHandler::default();
        do_drop_strong(next_val, &mut dropq);
        item.advance_count.fetch_sub(1, Ordering::SeqCst);
        dropq.resume_any_panics();
        return None;
    }
    debug_println!("slow_shared sc #3");
    item.advance_count.fetch_sub(1, Ordering::SeqCst);
    debug_println!(
        "slow_shared_get: {:?}, advanced to {:?}",
        item as *const _,
        next
    );
    Some(SharedGetGuard::LightCloned {
        next: undecorate(next),
    })
}
impl<T: ?Sized> ArcShift<T> {
    // The following stubs exist only so old callers get a clear deprecation
    // message. Each takes a NoLongerAvailableMarker, which cannot be
    // constructed, so none of them can actually be called.
    #[doc(hidden)]
    #[cfg_attr(test, mutants::skip)]
    #[deprecated = "rcu_project was completely removed in ArcShift 0.2. Please use method 'rcu' instead, and just manually access the desired field."]
    pub fn rcu_project(&mut self, _marker: NoLongerAvailableMarker) {
        unreachable!("method cannot be called")
    }
    #[doc(hidden)]
    #[cfg_attr(test, mutants::skip)]
    #[deprecated = "rcu_maybe2 was completely removed in ArcShift 0.2. Please use 'rcu_maybe' instead. It has the same features."]
    pub fn rcu_maybe2(&mut self, _marker: NoLongerAvailableMarker) {
        unreachable!("method cannot be called")
    }
    #[doc(hidden)]
    #[cfg_attr(test, mutants::skip)]
    #[deprecated = "update_shared_box was completely removed in ArcShift 0.2. Shared references can no longer be updated. Please file an issue if this is a blocker!"]
    pub fn update_shared_box(&mut self, _marker: NoLongerAvailableMarker) {
        unreachable!("method cannot be called")
    }
    #[doc(hidden)]
    #[cfg_attr(test, mutants::skip)]
    #[deprecated = "update_shared was completely removed in ArcShift 0.2. Shared references can no longer be updated. Please file an issue if this is a blocker!"]
    pub fn update_shared(&mut self, _marker: NoLongerAvailableMarker) {
        unreachable!("method cannot be called")
    }
    #[doc(hidden)]
    #[cfg_attr(test, mutants::skip)]
    #[deprecated = "make_light was completely removed in ArcShift 0.2. Please use ArcShift::downgrade(&item) instead."]
    pub fn make_light(&mut self, _marker: NoLongerAvailableMarker) {
        unreachable!("method cannot be called")
    }
    /// Creates a new `ArcShift` from a `Box`. Also works for unsized `T`
    /// (this impl is `T: ?Sized`), e.g. slices or trait objects.
    pub fn from_box(input: Box<T>) -> ArcShift<T> {
        let holder = make_sized_or_unsized_holder_from_box(input, null_mut());
        ArcShift {
            // SAFETY: make_sized_or_unsized_holder_from_box returns a fresh,
            // non-null allocation.
            item: unsafe { NonNull::new_unchecked(holder as *mut _) },
            pd: PhantomData,
        }
    }
    /// Returns a mutable reference to the payload if this handle is the only
    /// reference to it (weak count 1, no left neighbours, strong count 1),
    /// otherwise `None`.
    pub fn try_get_mut(&mut self) -> Option<&mut T> {
        // Make sure we're on the rightmost node first.
        self.reload();
        with_holder!(self.item, T, item_ptr, {
            // SAFETY: our handle's strong reference keeps the node alive.
            let item = unsafe { &*item_ptr };
            let weak_count = item.weak_count.load(Ordering::SeqCst);
            if get_weak_count(weak_count) == 1
                && !get_weak_prev(weak_count)
                && item.strong_count.load(Ordering::SeqCst) == 1
            {
                // Re-read the weak count after the strong-count check, to
                // catch a reference appearing between the two loads.
                let weak_count = item.weak_count.load(Ordering::SeqCst);
                if get_weak_count(weak_count) == 1 {
                    // SAFETY: sole ownership established above; no other
                    // handle can reach this payload.
                    let temp: &mut ManuallyDrop<T> = unsafe { &mut *item.payload.get() };
                    Some(&mut **temp)
                } else {
                    None
                }
            } else {
                None
            }
        })
    }
    /// Replaces the payload with the boxed value (works for unsized `T`),
    /// making it the value all handles observe once they reload.
    pub fn update_box(&mut self, new_payload: Box<T>) {
        let holder = make_sized_or_unsized_holder_from_box(new_payload, self.item.as_ptr());
        with_holder!(self.item, T, item, {
            let mut jobq = DropHandler::default();
            let new_item = unsafe {
                // SAFETY: do_update never returns null.
                NonNull::new_unchecked(do_update(item, |_| Some(holder), &mut jobq) as *mut _)
            };
            jobq.resume_any_panics();
            self.item = new_item;
        });
    }
    /// Returns a reference to the newest value, first advancing this handle
    /// if a newer node exists.
    #[inline]
    pub fn get(&mut self) -> &T {
        // Sized and unsized T use different metadata representations, so the
        // two cases are monomorphized separately.
        if is_sized::<T>() {
            let ptr = from_dummy::<T, SizedMetadata>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            if !undecorate(next).is_null() {
                // A newer node exists: advance (running the janitor too).
                return Self::advance_strong_helper2::<SizedMetadata>(&mut self.item, true);
            }
            // SAFETY: no successor exists, so this node holds the current,
            // live payload and our strong reference keeps it valid.
            unsafe { &*(item.payload.get() as *const T) }
        } else {
            let ptr = from_dummy::<T, UnsizedMetadata<T>>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            if !undecorate(next).is_null() {
                return Self::advance_strong_helper2::<UnsizedMetadata<T>>(&mut self.item, true);
            }
            // SAFETY: as above; rightmost payload is valid.
            unsafe { item.payload() }
        }
    }
    /// Returns a guard dereferencing to the current value, without requiring
    /// `&mut self`. The fast path borrows the payload directly; if a newer
    /// node exists, a temporary strong reference on it is taken instead, and
    /// on contention a full clone is used as fallback.
    #[inline(always)]
    pub fn shared_get(&self) -> SharedGetGuard<'_, T> {
        if is_sized::<T>() {
            let ptr = from_dummy::<T, SizedMetadata>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            if undecorate(next).is_null() {
                // Fast path: already at the rightmost node.
                // SAFETY: rightmost payload is valid while we hold our ref.
                return SharedGetGuard::Raw(unsafe { item.payload() });
            }
            slow_shared_get(item).unwrap_or_else(|| SharedGetGuard::FullClone {
                cloned: self.clone(),
            })
        } else {
            let ptr = from_dummy::<T, UnsizedMetadata<T>>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            if undecorate(next).is_null() {
                // SAFETY: rightmost payload is valid while we hold our ref.
                return SharedGetGuard::Raw(unsafe { item.payload() });
            }
            slow_shared_get(item).unwrap_or_else(|| SharedGetGuard::FullClone {
                cloned: self.clone(),
            })
        }
    }
    /// Returns the value this handle currently points at, without attempting
    /// to advance to any newer value.
    #[inline(always)]
    pub fn shared_non_reloading_get(&self) -> &T {
        with_holder!(self.item, T, item, {
            // SAFETY: our handle's reference keeps this node's payload alive.
            unsafe { (*item).payload() }
        })
    }
    /// Creates an [`ArcShiftWeak`] handle referencing the same chain.
    #[must_use = "this returns a new `ArcShiftWeak` pointer, \
    without modifying the original `ArcShift`"]
    pub fn downgrade(this: &ArcShift<T>) -> ArcShiftWeak<T> {
        let t = with_holder!(this.item, T, item, do_clone_weak(item));
        ArcShiftWeak {
            // SAFETY: do_clone_weak returns a non-null pointer on which a
            // weak reference is now held.
            item: unsafe { NonNull::new_unchecked(t as *mut _) },
        }
    }
    /// Test/diagnostic helper: the weak count of the node this handle points
    /// at (flag bits masked off via `get_weak_count`).
    #[allow(unused)]
    pub(crate) fn weak_count(&self) -> usize {
        with_holder!(self.item, T, item, {
            // SAFETY: our handle keeps this node alive.
            unsafe { get_weak_count((*item).weak_count.load(Ordering::SeqCst)) }
        })
    }
    /// Test/diagnostic helper: the strong count of the node this handle
    /// points at.
    #[allow(unused)]
    pub(crate) fn strong_count(&self) -> usize {
        with_holder!(self.item, T, item, {
            // SAFETY: our handle keeps this node alive.
            unsafe { (*item).strong_count.load(Ordering::SeqCst) }
        })
    }
    /// Slow path shared by `get`/`reload`: advances `old_item_ptr` to the
    /// rightmost node (optionally running the janitor until undisturbed),
    /// stores the new pointer back, and returns the payload.
    fn advance_strong_helper2<M: IMetadata>(
        old_item_ptr: &mut NonNull<ItemHolderDummy<T>>,
        gc: bool,
    ) -> &T {
        let mut jobq = DropHandler::default();
        let mut item_ptr = old_item_ptr.as_ptr() as *const _;
        loop {
            item_ptr = do_advance_strong::<T, M>(item_ptr, &mut jobq);
            if gc {
                let (need_rerun, _strong_refs) =
                    do_janitor_task(from_dummy::<T, M>(item_ptr), &mut jobq);
                if need_rerun {
                    debug_println!("Janitor {:?} was disturbed. Need rerun", item_ptr);
                    continue;
                }
            }
            jobq.resume_any_panics();
            // SAFETY: do_advance_strong returns a non-null pointer.
            *old_item_ptr = unsafe { NonNull::new_unchecked(item_ptr as *mut _) };
            // SAFETY: after advancing, we hold a strong reference on item_ptr.
            let item = unsafe { &*from_dummy::<T, M>(item_ptr) };
            // SAFETY: the strong reference keeps the payload alive.
            return unsafe { item.payload() };
        }
    }
    /// Advances this handle to the most recent value, if it has been updated.
    #[inline(always)]
    pub fn reload(&mut self) {
        if is_sized::<T>() {
            let ptr = from_dummy::<T, SizedMetadata>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            // NOTE(review): this checks the raw decorated pointer, unlike
            // `get` which checks `undecorate(next)`. A decorated null still
            // takes the slow path here — conservative but harmless.
            if next.is_null() {
                return;
            }
            Self::advance_strong_helper2::<SizedMetadata>(&mut self.item, true);
        } else {
            let ptr = from_dummy::<T, UnsizedMetadata<T>>(self.item.as_ptr());
            // SAFETY: our strong reference keeps this node alive.
            let item = unsafe { &*ptr };
            let next = item.next.load(Ordering::Relaxed);
            if next.is_null() {
                return;
            }
            Self::advance_strong_helper2::<UnsizedMetadata<T>>(&mut self.item, true);
        }
    }
#[allow(unused)]
#[cfg(test)]
#[cfg(feature = "std")]
pub(crate) unsafe fn debug_validate(
strong_handles: &[&Self],
weak_handles: &[&ArcShiftWeak<T>],
) {
let first = if !strong_handles.is_empty() {
&strong_handles[0].item
} else {
&weak_handles[0].item
};
with_holder!(first, T, item, {
Self::debug_validate_impl(strong_handles, weak_handles, item);
});
}
#[allow(unused)]
#[cfg(test)]
#[cfg(feature = "std")]
// Test-only consistency checker. Walks the entire chain reachable from
// `prototype`, rebuilds the expected strong/weak refcounts from the
// supplied handles and the inter-node `prev` links, and asserts that the
// stored atomic counters and drop-decorations agree.
// NOTE(review): assumes the chain is at rest (no concurrent mutation) —
// the SeqCst loads alone would not make a racing traversal sound; confirm
// against the callers in the test suite.
fn debug_validate_impl<M: IMetadata>(
strong_handles: &[&ArcShift<T>],
weak_handles: &[&ArcShiftWeak<T>],
prototype: *const ItemHolder<T, M>,
) {
// Phase 1: walk right (following `next`) to find the rightmost node.
let mut last = prototype;
debug_println!("Start traverse right at {:?}", last);
loop {
debug_println!("loop traverse right at {:?}", last);
// SAFETY: `last` starts as the caller-supplied live `prototype` and is
// only ever advanced to non-null `next` pointers of chain nodes, which
// remain allocated while the chain is at rest.
let next = unsafe { undecorate((*last).next.load(Ordering::SeqCst)) };
debug_println!("Traversed to {:?}", next);
if next.is_null() {
break;
}
last = from_dummy::<T, M>(next);
}
// Expected refcounts keyed by node address, recomputed from first
// principles below and compared against the stored counters at the end.
let mut true_weak_refs =
std::collections::HashMap::<*const ItemHolderDummy<T>, usize>::new();
let mut true_strong_refs =
std::collections::HashMap::<*const ItemHolderDummy<T>, usize>::new();
// SAFETY: `last` is a live chain node (established by the traversal above).
let all_nodes = unsafe { (*last).debug_all_to_left() };
let strong_refs_exist = all_nodes
.iter()
.any(|x| x.strong_count.load(Ordering::SeqCst) > 0);
if strong_refs_exist {
debug_println!("Reading {:?}.next", to_dummy(last));
// SAFETY: `last` is a live chain node (established by the traversal above).
let last_next = unsafe { (*last).next.load(Ordering::SeqCst) };
debug_println!("Reading {:?}.next gave {:?}", to_dummy(last), last_next);
assert!(!get_decoration(last_next).is_dropped(), "the rightmost node must never be dropped (at least at rest, but regardless of refcount)");
}
for _node in all_nodes.iter() {
debug_println!("Node: {:?}", *_node as *const ItemHolder<T, _>);
}
// Phase 2, pass 1 over all nodes: every `next`/`prev` edge must stay
// inside the chain, each `prev` edge contributes one expected weak
// reference on its target, and `advance_count` must be back at zero.
for node in all_nodes.iter().copied() {
let next = undecorate(node.next.load(Ordering::SeqCst));
if !next.is_null() {
// SAFETY: non-null `next` pointers of chain nodes point at live nodes
// of the same chain while the chain is at rest.
assert!(all_nodes.contains(unsafe { &&*from_dummy::<T, M>(next) }));
}
let prev: *const _ = node.prev.load(Ordering::SeqCst);
if !prev.is_null() {
// A `prev` link counts as one weak reference held on its target.
*true_weak_refs.entry(prev).or_default() += 1;
// SAFETY: as for `next` above; `prev` edges stay within the chain.
assert!(all_nodes.contains(unsafe { &&*from_dummy::<T, M>(prev) }));
}
let advance_count = node.advance_count.load(Ordering::SeqCst);
assert_eq!(advance_count, 0, "advance_count must always be 0 at rest");
}
// Each weak handle must point into the chain and adds one expected weak ref.
for handle in weak_handles.iter() {
// SAFETY: a live weak handle keeps its node allocated.
assert!(all_nodes.contains(unsafe { &&*from_dummy::<T, M>(handle.item.as_ptr()) }));
let true_weak_count = true_weak_refs.entry(handle.item.as_ptr()).or_default();
*true_weak_count += 1;
}
// Each strong handle must point into the chain and its node must not be
// dropped. The first strong handle seen on a node also adds one expected
// weak ref — presumably the strong refs on a node collectively hold a
// single weak reference; TODO confirm against the refcounting scheme.
for handle in strong_handles.iter() {
// SAFETY: a live strong handle keeps its node allocated.
assert!(all_nodes.contains(unsafe { &&*from_dummy::<T, M>(handle.item.as_ptr()) }));
// SAFETY: dereferences the handle's own node, kept alive by the handle.
let handle_next: *const _ = unsafe {
(*from_dummy::<T, M>(handle.item.as_ptr()))
.next
.load(Ordering::SeqCst)
};
assert!(
!get_decoration(handle_next).is_dropped(),
"nodes referenced by strong handles must not be dropped, but {:?} was",
handle.item.as_ptr()
);
let true_strong_count = true_strong_refs.entry(handle.item.as_ptr()).or_default();
if *true_strong_count == 0 {
*true_weak_refs.entry(handle.item.as_ptr()).or_default() += 1;
}
*true_strong_count += 1;
}
// Phase 2, pass 2: compare the stored counters against the expected
// values, and check that no node with outstanding strong refs is dropped.
for node in all_nodes.iter().copied() {
let node_dummy = to_dummy(node);
let strong_count = node.strong_count.load(Ordering::SeqCst);
let weak_count = get_weak_count(node.weak_count.load(Ordering::SeqCst));
let expected_strong_count = true_strong_refs.get(&node_dummy).unwrap_or(&0);
let expected_weak_count = true_weak_refs.get(&node_dummy).unwrap_or(&0);
assert_eq!(
strong_count, *expected_strong_count,
"strong count of {:x?} should be {}",
node as *const _, *expected_strong_count
);
assert_eq!(
weak_count, *expected_weak_count,
"weak count of {:x?} should be {}",
node as *const _, *expected_weak_count
);
let next = node.next.load(Ordering::SeqCst);
if strong_count > 0 {
assert!(
!get_decoration(next).is_dropped(),
"as strong count is >0, node {:x?} should not be dropped",
node as *const _
);
}
}
}
}
#[cfg(test)]
#[cfg(not(any(loom, feature = "shuttle")))]
pub(crate) mod no_std_tests;
#[cfg(all(test, feature = "std"))]
pub(crate) mod tests;