use crate::atomic::{Ordering, PyAtomic, Radium};
// Number of flag bits reserved at the top of the packed word; everything
// below them is split between the strong and weak counters.
const FLAG_BITS: u32 = 3;
// Highest bit: set once the object has been destructed.
const DESTRUCTED: usize = 1 << (usize::BITS - 1);
// Lowest of the three flag bits: the object is intentionally leaked and must
// never be destructed. NOTE(review): the middle flag bit (usize::BITS - 2)
// is not referenced anywhere in this file — presumably reserved; confirm.
const LEAKED: usize = 1 << (usize::BITS - 3);
// Bits remaining for the two counters once the flags are carved off.
const TOTAL_COUNT_WIDTH: u32 = usize::BITS - FLAG_BITS;
// Counter bits are split between the weak count (upper field) ...
const WEAK_WIDTH: u32 = TOTAL_COUNT_WIDTH / 2;
// ... and the strong count (lower field, which gets the odd bit).
const STRONG_WIDTH: u32 = TOTAL_COUNT_WIDTH - WEAK_WIDTH;
// Mask selecting the strong-count field; also its maximum value.
const STRONG: usize = (1 << STRONG_WIDTH) - 1;
// One unit of strong count (the strong field occupies the lowest bits).
const COUNT: usize = 1;
// One unit of weak count: the first bit above the strong field.
const WEAK_COUNT: usize = 1 << STRONG_WIDTH;
/// Terminates on reference-count overflow (or a bump of an already
/// destructed object). The situation is unrecoverable, so the whole process
/// is aborted when `std` is available; without `std` we can only panic.
/// Kept out-of-line and `#[cold]` so the hot inc/dec paths stay small.
#[cold]
#[inline(never)]
fn refcount_overflow() -> ! {
    #[cfg(not(feature = "std"))]
    core::panic!("refcount overflow");

    #[cfg(feature = "std")]
    std::process::abort();
}
/// Immutable snapshot of the packed refcount word; interpreted through the
/// bit-layout constants above (strong count in the low bits, weak count
/// above it, flag bits at the top).
#[derive(Clone, Copy)]
struct State {
    // Raw packed word as loaded from `RefCount::state`.
    inner: usize,
}
impl State {
    /// Wraps a raw packed word.
    #[inline]
    fn from_raw(inner: usize) -> Self {
        Self { inner }
    }

    /// Unwraps back into the raw packed word.
    #[inline]
    fn as_raw(self) -> usize {
        self.inner
    }

    /// Strong count held in the low bits of the word.
    #[inline]
    fn strong(self) -> u32 {
        let units = self.inner & STRONG;
        (units / COUNT) as u32
    }

    /// Whether the destructed flag bit is set.
    #[inline]
    fn destructed(self) -> bool {
        (self.inner & DESTRUCTED) != 0
    }

    /// Whether the leaked flag bit is set.
    #[inline]
    fn leaked(self) -> bool {
        (self.inner & LEAKED) != 0
    }

    /// Copy of this state with the strong count raised by `val` units.
    #[inline]
    fn add_strong(self, val: u32) -> Self {
        let bump = (val as usize) * COUNT;
        Self::from_raw(self.inner + bump)
    }

    /// Copy of this state with the leaked flag forced to `leaked`.
    #[inline]
    fn with_leaked(self, leaked: bool) -> Self {
        let cleared = self.inner & !LEAKED;
        if leaked {
            Self::from_raw(cleared | LEAKED)
        } else {
            Self::from_raw(cleared)
        }
    }
}
/// Atomic reference count packing the strong count, weak count and status
/// flags into a single word so they can be updated together.
pub struct RefCount {
    // Packed word; decoded via `State` and the layout constants above.
    state: PyAtomic<usize>,
}
impl Default for RefCount {
fn default() -> Self {
Self::new()
}
}
impl RefCount {
    /// Creates a fresh count whose packed word starts at `COUNT + WEAK_COUNT`:
    /// strong count 1, one unit in the weak field, no flag bits set.
    /// NOTE(review): the initial weak unit is presumably the collective weak
    /// reference held on behalf of all strong references — confirm.
    pub fn new() -> Self {
        Self {
            state: Radium::new(COUNT + WEAK_COUNT),
        }
    }

    /// Returns the current strong count (flags and weak field masked off).
    #[inline]
    pub fn get(&self) -> usize {
        State::from_raw(self.state.load(Ordering::Relaxed)).strong() as usize
    }

    /// Increments the strong count, diverging via `refcount_overflow` if the
    /// object was already destructed or the strong field is saturated.
    #[inline]
    pub fn inc(&self) {
        // Optimistic bump; `val` is the state *before* the increment.
        let val = State::from_raw(self.state.fetch_add(COUNT, Ordering::Relaxed));
        if val.destructed() || (val.strong() as usize) > STRONG - 1 {
            refcount_overflow();
        }
        // NOTE(review): when the strong count was 0 before the bump a second
        // unit is added, so the count becomes 2 rather than 1 — presumably a
        // deliberate guard for reviving an object whose count already hit
        // zero; confirm the intent against the callers.
        if val.strong() == 0 {
            self.state.fetch_add(COUNT, Ordering::Relaxed);
        }
    }

    /// Increments the strong count by `n` in one atomic add; same overflow
    /// policy as [`RefCount::inc`].
    #[inline]
    pub fn inc_by(&self, n: usize) {
        debug_assert!(n <= STRONG);
        let val = State::from_raw(self.state.fetch_add(n * COUNT, Ordering::Relaxed));
        // A pre-add count above STRONG - n means the add spilled the strong
        // field into the weak field.
        if val.destructed() || (val.strong() as usize) > STRONG - n {
            refcount_overflow();
        }
    }

    /// Attempts to increment the strong count; returns `false` (without
    /// incrementing) when the object is destructed or its strong count has
    /// already dropped to 0.
    #[inline]
    #[must_use]
    pub fn safe_inc(&self) -> bool {
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        // CAS loop: the failure conditions are re-checked against every
        // fresh snapshot before the increment is retried.
        loop {
            if old.destructed() || old.strong() == 0 {
                return false;
            }
            if (old.strong() as usize) >= STRONG {
                refcount_overflow();
            }
            let new_state = old.add_strong(1);
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Decrements the strong count. Returns `true` exactly when this call
    /// took the count from 1 to 0 and the caller should destruct the object.
    /// A leaked object never returns `true`, although its count is still
    /// decremented.
    #[inline]
    #[must_use]
    pub fn dec(&self) -> bool {
        let old = State::from_raw(self.state.fetch_sub(COUNT, Ordering::Release));
        if old.leaked() {
            return false;
        }
        if old.strong() == 1 {
            // The Acquire fence pairs with the Release decrements above so
            // the destructing thread observes all writes made while other
            // references were alive.
            core::sync::atomic::fence(Ordering::Acquire);
            return true;
        }
        false
    }

    /// Marks the object as leaked; from then on [`RefCount::dec`] never
    /// reports it as ready for destruction.
    pub fn leak(&self) {
        debug_assert!(!self.is_leaked());
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        // CAS loop setting only the LEAKED bit; both counters are preserved
        // across concurrent inc/dec traffic.
        loop {
            let new_state = old.with_leaked(true);
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Whether the leaked flag is currently set.
    pub fn is_leaked(&self) -> bool {
        State::from_raw(self.state.load(Ordering::Acquire)).leaked()
    }
}
#[cfg(feature = "std")]
use core::cell::{Cell, RefCell};
#[cfg(feature = "std")]
thread_local! {
    // True while this thread is inside a `with_deferred_drops` scope;
    // `try_defer_drop` queues instead of running immediately when set.
    static IN_DEFERRED_CONTEXT: Cell<bool> = const { Cell::new(false) };
    // Per-thread queue of operations postponed while the flag above was
    // set; drained by `flush_deferred_drops`.
    static DEFERRED_QUEUE: RefCell<Vec<Box<dyn FnOnce()>>> = const { RefCell::new(Vec::new()) };
}
/// Scope guard created by `with_deferred_drops`; its `Drop` impl restores
/// the thread-local nesting flag and flushes the queue when appropriate.
#[cfg(feature = "std")]
struct DeferredDropGuard {
    // Value of IN_DEFERRED_CONTEXT before this scope was entered; restored
    // when the guard drops so nested scopes unwind correctly.
    was_in_context: bool,
}
#[cfg(feature = "std")]
impl Drop for DeferredDropGuard {
    fn drop(&mut self) {
        // We are the outermost scope iff the flag was clear when we entered.
        let outermost = !self.was_in_context;
        // Restore the previous nesting state before running anything else.
        IN_DEFERRED_CONTEXT.with(|in_ctx| in_ctx.set(self.was_in_context));
        // Only the outermost scope flushes, and never while unwinding —
        // running arbitrary queued closures during a panic risks a
        // double-panic (abort).
        if outermost && !std::thread::panicking() {
            flush_deferred_drops();
        }
    }
}
/// Runs `f` inside a deferred-drop scope: while `f` executes, operations
/// handed to `try_defer_drop` on this thread are queued instead of run.
/// When the outermost scope exits, the queue is flushed (unless unwinding).
#[cfg(feature = "std")]
#[inline]
pub fn with_deferred_drops<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Mark the thread as "in a deferred scope", remembering the prior value
    // so nested scopes compose. The guard restores the flag — and flushes if
    // we are outermost — on drop, including when `f` unwinds.
    let _guard = IN_DEFERRED_CONTEXT.with(|in_ctx| DeferredDropGuard {
        was_in_context: in_ctx.replace(true),
    });
    f()
}
/// Runs `f` now, or queues it for later if this thread is currently inside
/// a `with_deferred_drops` scope.
#[cfg(feature = "std")]
#[inline]
pub fn try_defer_drop<F>(f: F)
where
    F: FnOnce() + 'static,
{
    if IN_DEFERRED_CONTEXT.with(Cell::get) {
        // Deferred scope active: box the closure and queue it for the flush.
        DEFERRED_QUEUE.with(|queue| queue.borrow_mut().push(Box::new(f)));
    } else {
        // No scope active: execute immediately.
        f();
    }
}
/// Runs every queued deferred operation on the current thread.
///
/// Each batch is drained out of the `RefCell` *before* any closure runs, so
/// the queue is unborrowed while user code executes and an operation may
/// itself call `try_defer_drop` safely. The loop repeats until the queue is
/// observed empty: previously a single drain was performed, so operations
/// enqueued during the flush itself (possible when this is called while
/// `IN_DEFERRED_CONTEXT` is still set) were silently left behind until some
/// later flush. In the common case the queue is empty after one pass and the
/// behavior is unchanged.
#[cfg(feature = "std")]
#[inline]
pub fn flush_deferred_drops() {
    loop {
        // Take the current batch, releasing the borrow immediately.
        let ops: Vec<Box<dyn FnOnce()>> =
            DEFERRED_QUEUE.with(|q| q.borrow_mut().drain(..).collect());
        if ops.is_empty() {
            return;
        }
        for op in ops {
            op();
        }
    }
}