use crate::retired::{INVPTR, REFC_PROTECT, RetiredNode, rnode_mark};
use crate::slot::{self, ASMRState, EPOCH_FREQ, HR_NUM, RETIRE_FREQ};
use alloc::boxed::Box;
use core::cell::Cell;
use core::marker::PhantomData as marker;
use core::sync::atomic::fence;
use core::sync::atomic::{AtomicUsize, Ordering};
/// RAII pin guard returned by [`pin`].
///
/// While a `Guard` is alive the calling thread's pin count is non-zero and its
/// epoch reservation is held; dropping the guard decrements the thread-local
/// pin count (see the `Drop` impl). `#[must_use]` is essential for a guard
/// type: `pin();` without binding the result would unpin immediately and
/// silently remove all protection.
#[must_use = "the thread is unpinned as soon as the guard is dropped"]
pub struct Guard {
    // Prevents construction outside this module.
    _private: (),
    // `PhantomData<*mut ()>` makes the guard neither `Send` nor `Sync`,
    // tying it to the thread whose pin count it tracks.
    marker: marker<*mut ()>,
}
impl Drop for Guard {
#[inline]
fn drop(&mut self) {
// Unpin: decrement the thread-local pin counter. `saturating_sub` guards
// against underflow if a guard outlives an unbalanced counter (e.g. a
// guard obtained from the non-nightly fallback path in `pin()`).
#[cfg(feature = "nightly")]
{
let count = HANDLE.pin_count.get();
HANDLE.pin_count.set(count.saturating_sub(1));
}
#[cfg(not(feature = "nightly"))]
{
// `try_with` is a no-op if the thread-local has already been destroyed
// (guard dropped during thread teardown), hence the ignored result.
let _ = HANDLE.try_with(|handle| {
let count = handle.pin_count.get();
handle.pin_count.set(count.saturating_sub(1));
});
}
}
}
/// Per-thread reclamation state. All fields are `Cell`s: a `Handle` lives in
/// thread-local storage and is only ever accessed by its owning thread.
struct Handle {
// Cached reference to the process-wide state; lazily filled by `global()`.
global: Cell<Option<&'static ASMRState>>,
// Thread slot index allocated from the global state; `None` until first use.
tid: Cell<Option<usize>>,
// Nesting depth of `pin()`; only the outermost pin publishes an epoch.
pin_count: Cell<usize>,
// Head of the current batch of retired-but-unpublished nodes.
batch_first: Cell<*mut RetiredNode>,
// Tail of the current batch; carries the batch's refcount and min birth epoch.
batch_last: Cell<*mut RetiredNode>,
// Number of nodes in the current batch (triggers retirement at RETIRE_FREQ).
batch_count: Cell<usize>,
// Counts retirements; every EPOCH_FREQ it triggers helping + epoch advance.
alloc_counter: Cell<usize>,
// Local list of reclaimed/handed-off nodes awaiting `free_batch_list`.
free_list: Cell<*mut RetiredNode>,
// Bookkeeping counter paired with `free_list` (maintained by traverse_cache).
list_count: Cell<usize>,
// Last global epoch this thread published in its reservation slot.
cached_epoch: Cell<u64>,
// Re-entrancy flag: set while reclaiming so nested retire/flush calls do not
// recurse into reclamation.
in_reclaim: Cell<bool>,
}
impl Handle {
const fn new() -> Self {
Self {
global: Cell::new(None),
tid: Cell::new(None),
pin_count: Cell::new(0),
batch_first: Cell::new(core::ptr::null_mut()),
batch_last: Cell::new(core::ptr::null_mut()),
batch_count: Cell::new(0),
alloc_counter: Cell::new(0),
free_list: Cell::new(core::ptr::null_mut()),
list_count: Cell::new(0),
cached_epoch: Cell::new(0),
in_reclaim: Cell::new(false),
}
}
#[inline]
/// Returns the process-wide ASMR state, memoizing the reference in
/// `self.global` on first access.
fn global(&self) -> &'static ASMRState {
    if let Some(cached) = self.global.get() {
        return cached;
    }
    let fresh = slot::global();
    self.global.set(Some(fresh));
    fresh
}
#[inline]
/// Returns this thread's slot index, registering the thread with the global
/// state on first call.
fn tid(&self) -> usize {
    if let Some(existing) = self.tid.get() {
        return existing;
    }
    let fresh = self.global().alloc_tid();
    self.tid.set(Some(fresh));
    // Touch the cleanup sentinel so its destructor (which runs `cleanup`)
    // is registered for this thread on the nightly path.
    #[cfg(feature = "nightly")]
    {
        HANDLE_CLEANUP_SENTINEL.with(|_| {});
    }
    fresh
}
/// Loads `data` while keeping this thread's epoch reservation current.
///
/// If the global epoch advanced since the last published reservation, the new
/// epoch is published in slot 0 first and `data` is re-read, so the returned
/// value is the one observed under the fresh reservation.
#[inline]
fn protect_load(&self, data: &AtomicUsize, order: Ordering) -> usize {
let ptr = data.load(order);
let global = self.global();
let curr_epoch = global.get_epoch();
let prev_epoch = self.cached_epoch.get();
if curr_epoch != prev_epoch {
let tid = self.tid();
let slots = global.thread_slots(tid);
// Publish the new epoch (SeqCst) before trusting a re-load of `data`.
slots.epoch[0].store_lo(curr_epoch, Ordering::SeqCst);
self.cached_epoch.set(curr_epoch);
return data.load(order);
}
ptr
}
/// Cold path of epoch publication: before moving `slots.epoch[index]` to a
/// new epoch, takes over any retired-node list another thread handed to
/// `slots.first[index]`, caches it in the local free list, then publishes
/// the (re-read) epoch and returns it.
#[cold]
fn do_update(&self, curr_epoch: u64, index: usize, tid: usize) -> u64 {
let global = self.global();
let slots = global.thread_slots(tid);
let mut curr_epoch = curr_epoch;
let list_lo = slots.first[index].load_lo();
if list_lo != 0 {
// Claim the handed-off list (if any) before republishing the epoch.
let first = slots.first[index].exchange_lo(0, Ordering::AcqRel);
// 0 means nothing to take; INVPTR marks an invalidated slot.
if first != 0 && first != INVPTR as u64 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
self.free_list.set(core::ptr::null_mut());
self.list_count.set(0);
// Guard against re-entrant reclamation while traversing.
let was_reclaiming = self.in_reclaim.get();
self.in_reclaim.set(true);
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
first as *mut RetiredNode,
);
}
self.in_reclaim.set(was_reclaiming);
self.free_list.set(free_list);
self.list_count.set(list_count);
}
// The epoch may have advanced during the traversal; publish the latest.
curr_epoch = global.get_epoch();
slots.epoch[index].store_lo(curr_epoch, Ordering::SeqCst);
} else {
slots.epoch[index].store_lo(curr_epoch, Ordering::SeqCst);
}
self.cached_epoch.set(curr_epoch);
curr_epoch
}
/// Pins the calling thread and returns a `Guard`.
///
/// Nested pins only increment `pin_count`; the outermost pin publishes an
/// epoch reservation in slot 0, retrying `do_update` up to 16 times waiting
/// for the global epoch to stabilize before falling back to `slow_path`.
fn pin(&self) -> Guard {
let count = self.pin_count.get();
self.pin_count.set(count + 1);
if count > 0 {
// Already pinned: the existing reservation covers this guard too.
return Guard {
_private: (),
marker,
};
}
let tid = self.tid();
let global = self.global();
let index = 0;
let mut prev_epoch = global.thread_slots(tid).epoch[index].load_lo();
let mut attempts = 16usize;
loop {
let curr_epoch = global.get_epoch();
if curr_epoch == prev_epoch {
// Published reservation matches the current epoch: we are pinned.
return Guard {
_private: (),
marker,
};
}
prev_epoch = self.do_update(curr_epoch, index, tid);
attempts -= 1;
if attempts == 0 {
break;
}
}
// The epoch kept moving under us; request help via the slow path.
self.slow_path(index, tid);
Guard {
_private: (),
marker,
}
}
/// Bounded slow path of `pin`: publishes a help request in `state[index]`
/// (result word set to the INVPTR marker, tagged with the slot's sequence
/// number) so that other threads can complete the reservation on our behalf,
/// then races them to complete it ourselves.
#[cold]
fn slow_path(&self, index: usize, tid: usize) {
let global = self.global();
let slots = global.thread_slots(tid);
self.in_reclaim.set(true);
let mut prev_epoch = slots.epoch[index].load_lo();
// Make helpers aware that a slow-path request exists.
global.inc_slow();
slots.state[index].pointer.store(0, Ordering::Release);
slots.state[index].parent.store(0, Ordering::Release);
slots.state[index].epoch.store(0, Ordering::Release);
let seqno = slots.epoch[index].load_hi();
// Announce the request: INVPTR in the result word + current seqno.
slots.state[index]
.result
.store(INVPTR as u64, seqno, Ordering::Release);
#[allow(unused_assignments)]
let mut first: *mut RetiredNode = core::ptr::null_mut();
loop {
let curr_epoch = global.get_epoch();
if curr_epoch == prev_epoch {
// Epoch is stable: try to withdraw our own request before a helper
// completes it.
if slots.state[index]
.result
.compare_exchange(INVPTR as u64, seqno, 0, 0)
.is_ok()
{
// Withdrawn: bump both seqnos past the (skipped) odd state and exit.
slots.epoch[index].store_hi(seqno + 2, Ordering::Release);
slots.first[index].store_hi(seqno + 2, Ordering::Release);
global.dec_slow();
self.in_reclaim.set(false);
return;
}
}
// Consume any retired-node list handed to this slot while we wait.
let list_lo = slots.first[index].load_lo();
if list_lo != 0 && list_lo != INVPTR as u64 {
let exchanged = slots.first[index].exchange_lo(0, Ordering::AcqRel);
if slots.first[index].load_hi() != seqno {
// Seqno moved: a helper completed the request; keep the list for
// the post-loop handling below.
first = exchanged as *mut RetiredNode;
break; }
if exchanged != INVPTR as u64 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
exchanged as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
let _ = global.get_epoch(); }
first = core::ptr::null_mut();
// Advance our published epoch (hi word pinned to seqno) and re-check
// whether a helper has already delivered a result.
let _ = slots.epoch[index].compare_exchange(prev_epoch, seqno, curr_epoch, seqno);
prev_epoch = curr_epoch;
let result_ptr = slots.state[index].result.load_lo();
if result_ptr != INVPTR as u64 {
break; }
}
// Close this seqno generation: mark the epoch word odd (seqno + 1).
let _ = slots.epoch[index].compare_exchange(prev_epoch, seqno, prev_epoch, seqno + 1);
{
// Claim any list still published under the old seqno.
let (mut old_lo, mut old_hi) = slots.first[index].load();
while old_hi == seqno {
match slots.first[index].compare_exchange_weak(old_lo, old_hi, 0, seqno + 1) {
Ok(_) => {
if old_lo != INVPTR as u64 {
first = old_lo as *mut RetiredNode;
}
break;
}
Err((lo, hi)) => {
old_lo = lo;
old_hi = hi;
}
}
}
}
// Re-open with the next even generation and install the helper-provided
// epoch as our reservation.
let seqno = seqno + 1;
slots.epoch[index].store_hi(seqno + 1, Ordering::Release);
let result_epoch = slots.state[index].result.load_hi();
slots.epoch[index].store_lo(result_epoch, Ordering::Release);
slots.first[index].store_hi(seqno + 1, Ordering::Release);
// Low 2 bits of the result are tag bits; mask to recover the node pointer.
let result_ptr = slots.state[index].result.load_lo() & 0xFFFFFFFFFFFFFFFC;
if result_ptr != 0 {
let ptr_node = result_ptr as *mut RetiredNode;
let batch_link = unsafe { (*ptr_node).batch_link.load(Ordering::Acquire) };
if !batch_link.is_null() {
// A helper handed us a retired node: take a reference on its batch
// and publish the (marked) refs node in our own slot.
let refs = unsafe { crate::reclaim::get_refs_node(ptr_node) };
unsafe {
(*refs).refs_or_next.fetch_add(1, Ordering::AcqRel);
}
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
if first as u64 != INVPTR as u64 && !first.is_null() {
unsafe {
crate::reclaim::traverse_cache(&mut free_list, &mut list_count, first);
}
}
let rnode = rnode_mark(refs);
let old_first = slots.first[index].exchange_lo(rnode as u64, Ordering::AcqRel);
if old_first != INVPTR as u64 && old_first != 0 {
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
old_first as *mut RetiredNode,
);
}
}
self.free_list.set(free_list);
self.list_count.set(list_count);
global.dec_slow();
self.drain_free_list();
self.in_reclaim.set(false);
return;
} else {
// No batch attached: just move first[] to the new generation.
let _ = slots.first[index].compare_exchange(0, seqno, 0, seqno + 1);
}
}
global.dec_slow();
// Free the list we claimed under the old seqno, if any.
if !first.is_null() && first as u64 != INVPTR as u64 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
unsafe {
crate::reclaim::traverse_cache(&mut free_list, &mut list_count, first);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
self.drain_free_list();
self.in_reclaim.set(false);
}
#[cold]
/// Scans every thread's reservation slots and helps any thread whose slot
/// advertises a pending slow-path request (INVPTR in the result word).
fn help_read(&self, mytid: usize) {
    let global = self.global();
    // Fast exit: nobody is parked in the slow path right now.
    if global.slow_counter() == 0 {
        return;
    }
    let hr_num = global.hr_num();
    for helpee in 0..global.max_threads() {
        let slots = global.thread_slots(helpee);
        for slot_idx in 0..hr_num {
            if slots.state[slot_idx].result.load_lo() == INVPTR as u64 {
                self.help_thread(helpee, slot_idx, mytid);
            }
        }
    }
}
/// Helps the thread `helpee_tid` complete the slow-path request pending in
/// its slot `index`. Uses this thread's auxiliary slots `hr_num` and
/// `hr_num + 1` to protect the helpee's announced birth epoch / parent node
/// while helping, mirroring the completion steps of `slow_path`.
#[cold]
fn help_thread(&self, helpee_tid: usize, index: usize, mytid: usize) {
let global = self.global();
let hr_num = global.hr_num();
let (last_result_lo, last_result_hi) =
global.thread_slots(helpee_tid).state[index].result.load();
if last_result_lo != INVPTR as u64 {
// Request already completed (by the helpee or another helper).
return;
}
let birth_epoch = global.thread_slots(helpee_tid).state[index]
.epoch
.load(Ordering::Acquire);
let parent = global.thread_slots(helpee_tid).state[index]
.parent
.load(Ordering::Acquire);
if parent != 0 {
// Protect the announced birth epoch in our auxiliary slot `hr_num`.
global.thread_slots(mytid).epoch[hr_num].store_lo(birth_epoch, Ordering::SeqCst);
global.thread_slots(mytid).first[hr_num].store_lo(0, Ordering::SeqCst);
}
// Publish the parent we are holding on behalf of the helpee.
global.thread_slots(mytid).state[hr_num]
.parent
.store(parent, Ordering::SeqCst);
// Touch the announced pointer (value itself unused here).
let _obj = global.thread_slots(helpee_tid).state[index]
.pointer
.load(Ordering::Acquire);
let seqno = global.thread_slots(helpee_tid).epoch[index].load_hi();
if last_result_hi == seqno {
// The request is still current for this seqno generation: try to
// complete it under a stable epoch.
let mut prev_epoch = global.get_epoch();
let mut last_result_lo = last_result_lo;
let mut last_result_hi = last_result_hi;
loop {
// Keep our auxiliary slot `hr_num + 1` published at the latest epoch.
prev_epoch = self.do_update(prev_epoch, hr_num + 1, mytid);
let curr_epoch = global.get_epoch();
if curr_epoch == prev_epoch {
// Epoch stable: deliver the result (epoch in the hi word).
if global.thread_slots(helpee_tid).state[index]
.result
.compare_exchange(
last_result_lo,
last_result_hi,
0,
curr_epoch, )
.is_ok()
{
// Close the helpee's seqno generation, mirroring `slow_path`.
let _ = global.thread_slots(helpee_tid).epoch[index].compare_exchange(
prev_epoch,
seqno,
prev_epoch,
seqno + 1,
);
// Claim any list published under the old seqno and cache it
// in our own free list.
let (mut old_lo, mut old_hi) =
global.thread_slots(helpee_tid).first[index].load();
while old_hi == seqno {
match global.thread_slots(helpee_tid).first[index]
.compare_exchange_weak(old_lo, old_hi, 0, seqno + 1)
{
Ok(_) => {
if old_lo != INVPTR as u64 && old_lo != 0 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
old_lo as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
break;
}
Err((lo, hi)) => {
old_lo = lo;
old_hi = hi;
}
}
}
// Re-open the helpee's slot at the next even generation with
// the delivered epoch.
let seqno = seqno + 1;
let (mut old_lo, mut old_hi) =
global.thread_slots(helpee_tid).epoch[index].load();
while old_hi == seqno {
match global.thread_slots(helpee_tid).epoch[index]
.compare_exchange_weak(old_lo, old_hi, curr_epoch, seqno + 1)
{
Ok(_) => break,
Err((lo, hi)) => {
old_lo = lo;
old_hi = hi;
}
}
}
let _ = global.thread_slots(helpee_tid).first[index].compare_exchange(
0,
seqno,
0,
seqno + 1,
);
}
break;
}
prev_epoch = curr_epoch;
// Someone else may have completed/changed the request; re-read it.
let (lo, hi) = global.thread_slots(helpee_tid).state[index].result.load();
last_result_lo = lo;
last_result_hi = hi;
if last_result_lo != INVPTR as u64 {
break;
}
}
// Release auxiliary slot `hr_num + 1` and reclaim anything handed to it.
let epoch_lo =
global.thread_slots(mytid).epoch[hr_num + 1].exchange_lo(0, Ordering::SeqCst);
if epoch_lo != 0 {
let first = global.thread_slots(mytid).first[hr_num + 1]
.exchange_lo(INVPTR as u64, Ordering::AcqRel);
if first != INVPTR as u64 && first != 0 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
first as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
}
}
// Withdraw our published parent. NOTE(review): when the swapped-out value
// differs from `parent`, a reference on `parent`'s batch is dropped here
// (freeing the refs node at zero) — presumably the reference was
// transferred while we were helping; verify against the ownership
// protocol in `crate::reclaim`.
let old_parent = global.thread_slots(mytid).state[hr_num]
.parent
.swap(0, Ordering::SeqCst);
if old_parent != parent {
let refs = unsafe { crate::reclaim::get_refs_node(parent as *mut RetiredNode) };
let old = unsafe { (*refs).refs_or_next.fetch_sub(1, Ordering::AcqRel) };
if old == 1 {
let mut free_list = self.free_list.get();
unsafe {
(*refs).next.store(free_list, Ordering::Relaxed);
}
free_list = refs;
self.free_list.set(free_list);
}
}
// Release auxiliary slot `hr_num` and reclaim anything handed to it.
let epoch_lo = global.thread_slots(mytid).epoch[hr_num].exchange_lo(0, Ordering::SeqCst);
if epoch_lo != 0 {
let first = global.thread_slots(mytid).first[hr_num]
.exchange_lo(INVPTR as u64, Ordering::AcqRel);
if first != INVPTR as u64 && first != 0 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
first as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
}
self.drain_free_list();
}
/// Appends `node_ptr` to the thread-local retirement batch and periodically
/// triggers helping/epoch advancement (every `EPOCH_FREQ` retirements) and
/// batch publication (every `RETIRE_FREQ` nodes in the batch).
///
/// # Safety
/// `node_ptr` must point to a valid, exclusively-owned `RetiredNode` that is
/// no longer reachable by new readers.
unsafe fn enqueue_node(&self, node_ptr: *mut RetiredNode) {
unsafe {
(*node_ptr)
.batch_link
.store(core::ptr::null_mut(), Ordering::Relaxed);
}
let first = self.batch_first.get();
if first.is_null() {
// First node of a new batch becomes the tail; it carries the batch's
// reference count, pre-biased with REFC_PROTECT until `try_retire`.
self.batch_last.set(node_ptr);
unsafe {
(*node_ptr)
.refs_or_next
.store(REFC_PROTECT, Ordering::Relaxed);
}
} else {
let last = self.batch_last.get();
// Keep the minimum birth epoch of the whole batch on the tail node.
let birth_epoch = unsafe { (*node_ptr).birth_epoch() };
if unsafe { (*last).birth_epoch() } > birth_epoch {
unsafe { (*last).set_birth_epoch(birth_epoch) };
}
unsafe {
// Non-tail nodes point at the tail and chain to the previous head.
(*node_ptr).batch_link.store(last, Ordering::SeqCst);
(*node_ptr).set_batch_next(first);
}
}
self.batch_first.set(node_ptr);
let count = self.batch_count.get() + 1;
self.batch_count.set(count);
let alloc_count = self.alloc_counter.get() + 1;
self.alloc_counter.set(alloc_count);
if alloc_count.is_multiple_of(EPOCH_FREQ) {
// Periodically help stuck slow-path threads and advance the epoch,
// guarding against re-entrant reclamation while doing so.
let tid = self.tid();
let was_reclaiming = self.in_reclaim.get();
self.in_reclaim.set(true);
self.help_read(tid);
self.in_reclaim.set(was_reclaiming);
self.global().advance_epoch();
}
if count.is_multiple_of(RETIRE_FREQ) {
// Seal the batch (tail's batch_link = marked head) and publish it to
// active reservations; then start a fresh batch.
let last = self.batch_last.get();
let first = self.batch_first.get();
unsafe {
(*last)
.batch_link
.store(rnode_mark(first), Ordering::SeqCst);
}
let was_reclaiming = self.in_reclaim.get();
self.in_reclaim.set(true);
self.try_retire();
self.in_reclaim.set(was_reclaiming);
self.batch_first.set(core::ptr::null_mut());
self.batch_last.set(core::ptr::null_mut());
self.batch_count.set(0);
}
}
/// Retires `ptr` for deferred destruction; the recorded destructor reclaims
/// it with `Box::from_raw` once no reservation can still reach it.
///
/// # Safety
/// `ptr` must originate from `Box::<T>::into_raw` and be unreachable to new
/// readers. The pointer is reinterpreted as `*mut RetiredNode` by a raw
/// cast, so the allocation is assumed to embed a `RetiredNode` header at
/// offset 0 — TODO(review): confirm against `RetiredNode`'s layout contract.
unsafe fn retire<T>(&self, ptr: *mut T)
where
T: 'static,
{
let node_ptr = ptr as *mut RetiredNode;
// Type-erased destructor invoked at reclamation time.
unsafe fn destructor<T>(ptr: *mut RetiredNode) {
let typed_ptr = ptr as *mut T;
unsafe {
drop(Box::from_raw(typed_ptr));
}
}
unsafe {
(*node_ptr).set_destructor(Some(destructor::<T>));
}
unsafe { self.enqueue_node(node_ptr) };
}
/// Retires a node whose destructor the caller has already configured.
///
/// # Safety
/// Same requirements as `enqueue_node`: `node_ptr` must be a valid,
/// exclusively-owned `RetiredNode` unreachable to new readers.
unsafe fn retire_raw(&self, node_ptr: *mut RetiredNode) {
unsafe { self.enqueue_node(node_ptr) };
}
/// Publishes the sealed batch (head `batch_first`, tail `batch_last`) to
/// every reservation slot that might still protect one of its nodes, then
/// cancels the tail's REFC_PROTECT bias; if no slot ends up holding the
/// batch, it is freed immediately.
fn try_retire(&self) {
let global = self.global();
let max_threads = global.max_threads();
let hr_num = global.hr_num();
let mut curr = self.batch_first.get();
let refs = self.batch_last.get();
// The tail carries the minimum birth epoch of the whole batch.
let min_epoch = unsafe { (*refs).birth_epoch() };
fence(Ordering::SeqCst);
// Pass 1: scan all slots and tag successive batch nodes with the
// (thread, slot) of every reservation that could protect this batch.
let mut last = curr;
for i in 0..max_threads {
let slots = global.thread_slots(i);
let mut j = 0;
// Primary reservation slots: skip invalidated slots (INVPTR), slots
// whose seqno is odd (mid-transition in the slow-path protocol), and
// epochs older than the batch's minimum birth epoch.
while j < hr_num {
let first_lo = slots.first[j].load_lo();
if first_lo == INVPTR as u64 {
j += 1;
continue;
}
if slots.first[j].load_hi() & 1 != 0 {
j += 1;
continue;
}
let epoch = slots.epoch[j].load_lo();
if epoch < min_epoch {
j += 1;
continue;
}
if slots.epoch[j].load_hi() & 1 != 0 {
j += 1;
continue;
}
if last == refs {
// Fewer batch nodes than active reservations: cannot retire yet.
return; }
unsafe {
(*last).set_slot_info(i, j);
}
last = unsafe { (*last).batch_next() };
j += 1;
}
// Auxiliary helper slots (hr_num, hr_num + 1): no seqno parity checks.
while j < hr_num + 2 {
let first_lo = slots.first[j].load_lo();
if first_lo == INVPTR as u64 {
j += 1;
continue;
}
let epoch = slots.epoch[j].load_lo();
if epoch < min_epoch {
j += 1;
continue;
}
if last == refs {
return;
}
unsafe {
(*last).set_slot_info(i, j);
}
last = unsafe { (*last).batch_next() };
j += 1;
}
}
// Pass 2: hand one tagged node to each recorded slot. `adjs` starts at
// -REFC_PROTECT (cancelling the tail's bias) and counts each successful
// insertion, so the final fetch_add settles the batch refcount.
let mut adjs: usize = REFC_PROTECT.wrapping_neg();
while curr != last {
let (slot_tid, slot_idx) = unsafe { (*curr).get_slot_info() };
let slot_first_ref = &global.thread_slots(slot_tid).first[slot_idx];
let slot_epoch = &global.thread_slots(slot_tid).epoch[slot_idx];
unsafe {
(*curr).next.store(core::ptr::null_mut(), Ordering::Relaxed);
}
// Slot invalidated or epoch retreated below min since pass 1: skip.
if slot_first_ref.load_lo() == INVPTR as u64 {
curr = unsafe { (*curr).batch_next() };
continue;
}
let epoch = slot_epoch.load_lo();
if epoch < min_epoch {
curr = unsafe { (*curr).batch_next() };
continue;
}
// Push our node as the slot's list head.
let prev = slot_first_ref.exchange_lo(curr as u64, Ordering::AcqRel);
if prev != 0 {
if prev == INVPTR as u64 {
// Slot got invalidated concurrently: try to take our node back;
// if it is already gone, the insertion counts.
let exp = curr as u64;
let (lo, hi) = slot_first_ref.load();
if lo == exp && slot_first_ref.compare_exchange(exp, hi, 0, hi).is_ok() {
curr = unsafe { (*curr).batch_next() };
continue;
}
adjs = adjs.wrapping_add(1);
curr = unsafe { (*curr).batch_next() };
continue;
} else {
// Chain the displaced list behind our node; if `next` was already
// claimed, traverse and free the displaced list ourselves.
let prev_ptr = prev as *mut RetiredNode;
if unsafe {
(*curr).next.compare_exchange(
core::ptr::null_mut(),
prev_ptr,
Ordering::AcqRel,
Ordering::Relaxed,
)
}
.is_err()
{
let mut free_list: *mut RetiredNode = core::ptr::null_mut();
unsafe {
crate::reclaim::traverse(&mut free_list, prev_ptr);
crate::reclaim::free_batch_list(free_list);
}
}
}
}
adjs = adjs.wrapping_add(1);
curr = unsafe { (*curr).batch_next() };
}
// Settle the refcount; if it lands on zero the batch is already dead.
let old = unsafe { (*refs).refs_or_next.fetch_add(adjs, Ordering::AcqRel) };
if old == adjs.wrapping_neg() {
unsafe {
(*refs).next.store(core::ptr::null_mut(), Ordering::Relaxed);
crate::reclaim::free_batch_list(refs);
}
}
}
/// Frees every batch currently cached in the local free list, looping in
/// case freeing repopulates it. The `in_reclaim` flag is raised for the
/// duration and restored afterwards so nested calls do not recurse.
fn drain_free_list(&self) {
    let previously_reclaiming = self.in_reclaim.replace(true);
    while !self.free_list.get().is_null() {
        let head = self.free_list.replace(core::ptr::null_mut());
        self.list_count.set(0);
        unsafe {
            crate::reclaim::free_batch_list(head);
        }
    }
    self.in_reclaim.set(previously_reclaiming);
}
/// Forces retirement of the current (partial) batch and reclamation of any
/// lists handed to this thread's slots. No-op if the thread never
/// registered or a reclamation is already in progress.
fn flush(&self) {
if self.tid.get().is_none() {
return; }
if self.in_reclaim.get() {
return;
}
self.in_reclaim.set(true);
// Bump pin_count so any nested pin() during reclamation takes the
// count-only fast path.
let saved_pin = self.pin_count.get();
self.pin_count.set(saved_pin + 1);
let tid = self.tid();
let global = self.global();
let count = self.batch_count.get();
if count > 0 {
// Seal and publish the batch even though it is below RETIRE_FREQ.
let last = self.batch_last.get();
let first = self.batch_first.get();
unsafe {
(*last)
.batch_link
.store(rnode_mark(first), Ordering::SeqCst);
}
self.try_retire();
self.batch_first.set(core::ptr::null_mut());
self.batch_last.set(core::ptr::null_mut());
self.batch_count.set(0);
}
// Push the global epoch forward enough times for reservations to lapse.
let max = global.max_threads() + 2;
for _ in 0..max {
global.advance_epoch();
}
// Take over and reclaim any lists handed to our reservation slots.
let hr_num = global.hr_num();
for i in 0..hr_num {
let first = global.thread_slots(tid).first[i].exchange_lo(0, Ordering::AcqRel);
if first != 0 && first != INVPTR as u64 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
self.free_list.set(core::ptr::null_mut());
self.list_count.set(0);
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
first as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
}
self.drain_free_list();
self.pin_count.set(saved_pin);
self.in_reclaim.set(false);
}
}
impl Handle {
/// Thread-exit teardown: invalidates this thread's reservation slots with
/// INVPTR, reclaims any handed-off lists, retires the pending batch, and
/// returns the tid to the global allocator. Safe to call repeatedly (the
/// tid is cleared, so later calls are no-ops).
fn cleanup(&self) {
if let Some(tid) = self.tid.get() {
self.in_reclaim.set(true);
// Keep nested pin() calls on the count-only fast path during teardown.
let saved_pin = self.pin_count.get();
self.pin_count.set(saved_pin + 1);
let global = self.global();
// NOTE(review): iterates the compile-time HR_NUM here, while `flush`
// uses the runtime `global.hr_num()` — verify these always agree.
for i in 0..HR_NUM {
// INVPTR marks the slot invalid so other threads stop handing us lists.
let first =
global.thread_slots(tid).first[i].exchange_lo(INVPTR as u64, Ordering::AcqRel);
if first != INVPTR as u64 && first != 0 {
let mut free_list = self.free_list.get();
let mut list_count = self.list_count.get();
self.free_list.set(core::ptr::null_mut());
self.list_count.set(0);
unsafe {
crate::reclaim::traverse_cache(
&mut free_list,
&mut list_count,
first as *mut RetiredNode,
);
}
self.free_list.set(free_list);
self.list_count.set(list_count);
}
}
// Seal and publish whatever partial batch remains.
let count = self.batch_count.get();
if count > 0 {
let last = self.batch_last.get();
let first = self.batch_first.get();
unsafe {
(*last)
.batch_link
.store(rnode_mark(first), Ordering::SeqCst);
}
self.try_retire();
}
self.batch_first.set(core::ptr::null_mut());
self.batch_last.set(core::ptr::null_mut());
self.batch_count.set(0);
self.drain_free_list();
self.pin_count.set(saved_pin);
self.in_reclaim.set(false);
// Release the slot index for reuse by future threads.
self.tid.set(None);
global.free_tid(tid);
}
}
}
impl Drop for Handle {
fn drop(&mut self) {
// Runs when the `thread_local!`-managed handle is destroyed at thread
// exit (the `#[thread_local]` static on nightly is never dropped; the
// sentinel below covers that case).
self.cleanup();
}
}
// Thread-local reclamation handle. With the `nightly` feature a bare
// `#[thread_local]` static avoids `LocalKey` access overhead; since such
// statics have no destructor, a separate `thread_local!` sentinel supplies
// the thread-exit cleanup.
#[cfg(feature = "nightly")]
#[thread_local]
static HANDLE: Handle = Handle::new();
// Zero-sized marker whose drop runs `HANDLE.cleanup()` at thread exit.
#[cfg(feature = "nightly")]
struct HandleCleanupSentinel;
#[cfg(feature = "nightly")]
impl Drop for HandleCleanupSentinel {
fn drop(&mut self) {
HANDLE.cleanup();
}
}
// Registered on first `tid()` allocation (see `Handle::tid`).
#[cfg(feature = "nightly")]
thread_local! {
static HANDLE_CLEANUP_SENTINEL: HandleCleanupSentinel = const { HandleCleanupSentinel };
}
// Without the nightly feature the handle itself lives in a `thread_local!`,
// whose destructor (`Drop for Handle`) performs cleanup.
#[cfg(not(feature = "nightly"))]
thread_local! {
static HANDLE: Handle = const { Handle::new() };
}
/// Loads `data` under the calling thread's epoch protection.
///
/// Falls back to a plain (unprotected) load if the thread-local handle is
/// unavailable, e.g. during thread teardown.
#[inline]
pub(crate) fn protect_load(data: &AtomicUsize, order: Ordering) -> usize {
#[cfg(feature = "nightly")]
{
HANDLE.protect_load(data, order)
}
#[cfg(not(feature = "nightly"))]
{
HANDLE
.try_with(|handle| handle.protect_load(data, order))
.unwrap_or_else(|_| data.load(order))
}
}
/// Pins the current thread and returns a [`Guard`] that unpins on drop.
///
/// If the thread-local handle is already destroyed (thread teardown), a
/// dummy guard is returned without publishing any reservation.
#[inline]
pub fn pin() -> Guard {
#[cfg(feature = "nightly")]
{
HANDLE.pin()
}
#[cfg(not(feature = "nightly"))]
{
HANDLE.try_with(|handle| handle.pin()).unwrap_or(Guard {
_private: (),
marker,
})
}
}
/// Retires `ptr` for deferred destruction via the thread-local handle.
///
/// # Safety
/// Same contract as `Handle::retire`: `ptr` must come from
/// `Box::<T>::into_raw`, be unreachable to new readers, and its allocation
/// must be layout-compatible with `RetiredNode`.
///
/// On the non-nightly path the call is silently dropped if the thread-local
/// is already destroyed.
#[inline]
pub unsafe fn retire<T: 'static>(ptr: *mut T) {
#[cfg(feature = "nightly")]
{
unsafe { HANDLE.retire(ptr) }
}
#[cfg(not(feature = "nightly"))]
{
let _ = HANDLE.try_with(|handle| unsafe { handle.retire(ptr) });
}
}
/// Eagerly retires the current thread's pending batch and reclaims whatever
/// has become safe to free (see `Handle::flush`). Best-effort no-op if the
/// thread-local handle is unavailable.
pub fn flush() {
#[cfg(feature = "nightly")]
{
HANDLE.flush()
}
#[cfg(not(feature = "nightly"))]
{
let _ = HANDLE.try_with(|handle| handle.flush());
}
}
/// Retires a pre-built `RetiredNode` (destructor already set by the caller).
///
/// # Safety
/// Same contract as `Handle::retire_raw`. On the non-nightly path the call
/// is silently dropped if the thread-local is already destroyed.
#[inline]
pub(crate) unsafe fn retire_raw(node_ptr: *mut RetiredNode) {
#[cfg(feature = "nightly")]
{
unsafe { HANDLE.retire_raw(node_ptr) }
}
#[cfg(not(feature = "nightly"))]
{
let _ = HANDLE.try_with(|handle| unsafe { handle.retire_raw(node_ptr) });
}
}