//! rustpython-common 0.5.0
//!
//! General python functions and algorithms for use in RustPython.
use crate::atomic::{Ordering, PyAtomic, Radium};

// State layout (usize):
//   [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [N bits: weak_count] [M bits: strong_count]
// 64-bit: N=30, M=31.  32-bit: N=14, M=15.
// Number of flag bits reserved at the top of the word.
const FLAG_BITS: u32 = 3;
// Top bit: object has been destructed.
const DESTRUCTED: usize = 1 << (usize::BITS - 1);
// Third-highest bit: object is leaked/interned and never deallocated.
// (The second-highest bit is the reserved flag, currently unused.)
const LEAKED: usize = 1 << (usize::BITS - 3);
// Bits left for the two counters after the flags are taken off the top.
const TOTAL_COUNT_WIDTH: u32 = usize::BITS - FLAG_BITS;
// Weak counter takes the upper half (rounded down), strong takes the rest.
const WEAK_WIDTH: u32 = TOTAL_COUNT_WIDTH / 2;
const STRONG_WIDTH: u32 = TOTAL_COUNT_WIDTH - WEAK_WIDTH;
// Mask selecting the strong count in the low bits.
const STRONG: usize = (1 << STRONG_WIDTH) - 1;
// Increment unit for the strong count (strong field starts at bit 0).
const COUNT: usize = 1;
// Increment unit for the weak count (weak field starts just above strong).
const WEAK_COUNT: usize = 1 << STRONG_WIDTH;

/// Terminate on reference-count overflow: abort the process when `std` is
/// available, otherwise panic. Kept out-of-line and cold so the hot
/// increment paths stay small.
#[inline(never)]
#[cold]
fn refcount_overflow() -> ! {
    #[cfg(not(feature = "std"))]
    core::panic!("refcount overflow");
    #[cfg(feature = "std")]
    std::process::abort();
}

/// Decoded view of the packed refcount word: three flag bits on top,
/// weak and strong counters below (see the layout constants).
#[derive(Clone, Copy)]
struct State {
    bits: usize,
}

impl State {
    /// Wrap a raw word loaded from the atomic.
    #[inline]
    fn from_raw(bits: usize) -> Self {
        Self { bits }
    }

    /// Unwrap back into the raw word for storing / CAS.
    #[inline]
    fn as_raw(self) -> usize {
        self.bits
    }

    /// Extract the strong reference count from the low bits.
    #[inline]
    fn strong(self) -> u32 {
        let count = (self.bits & STRONG) / COUNT;
        count as u32
    }

    /// True when the destructed flag (top bit) is set.
    #[inline]
    fn destructed(self) -> bool {
        self.bits & DESTRUCTED != 0
    }

    /// True when the leaked (interned) flag is set.
    #[inline]
    fn leaked(self) -> bool {
        self.bits & LEAKED != 0
    }

    /// Copy of this state with the strong count raised by `val`.
    #[inline]
    fn add_strong(self, val: u32) -> Self {
        Self::from_raw(self.bits + val as usize * COUNT)
    }

    /// Copy of this state with the leaked flag forced to `leaked`.
    #[inline]
    fn with_leaked(self, leaked: bool) -> Self {
        let cleared = self.bits & !LEAKED;
        Self::from_raw(if leaked { cleared | LEAKED } else { cleared })
    }
}

/// Reference count using state layout with LEAKED support.
///
/// State layout (usize):
/// 64-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [30 bits: weak_count] [31 bits: strong_count]
/// 32-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [14 bits: weak_count] [15 bits: strong_count]
pub struct RefCount {
    // The entire packed state word; always accessed atomically and decoded
    // through the private `State` helper.
    state: PyAtomic<usize>,
}

impl Default for RefCount {
    fn default() -> Self {
        Self::new()
    }
}

impl RefCount {
    /// Create a new RefCount with strong count = 1
    pub fn new() -> Self {
        // Initial state: strong=1, weak=1 (implicit weak for strong refs)
        Self {
            state: Radium::new(COUNT + WEAK_COUNT),
        }
    }

    /// Get current strong count
    ///
    /// Relaxed load: the result is a snapshot and may already be stale when
    /// the caller inspects it.
    #[inline]
    pub fn get(&self) -> usize {
        State::from_raw(self.state.load(Ordering::Relaxed)).strong() as usize
    }

    /// Increment strong count
    ///
    /// Calls `refcount_overflow` (abort/panic) if the object was already
    /// destructed or the strong field would overflow into the weak field.
    #[inline]
    pub fn inc(&self) {
        // `val` is the state observed *before* the increment.
        let val = State::from_raw(self.state.fetch_add(COUNT, Ordering::Relaxed));
        if val.destructed() || (val.strong() as usize) > STRONG - 1 {
            refcount_overflow();
        }
        if val.strong() == 0 {
            // The previous fetch_add created a permission to run decrement again
            // NOTE(review): this adds a second count when reviving from
            // strong==0 — confirm it matches the destruction protocol in `dec`.
            self.state.fetch_add(COUNT, Ordering::Relaxed);
        }
    }

    /// Bulk increment: add `n` strong references at once.
    #[inline]
    pub fn inc_by(&self, n: usize) {
        debug_assert!(n <= STRONG);
        let val = State::from_raw(self.state.fetch_add(n * COUNT, Ordering::Relaxed));
        // Abort if destructed, or if `n` more references overflow the field.
        if val.destructed() || (val.strong() as usize) > STRONG - n {
            refcount_overflow();
        }
    }

    /// Returns true if successful
    ///
    /// CAS-loop increment that refuses (returns false) when the object is
    /// destructed or its strong count is already 0 (destruction in progress).
    #[inline]
    #[must_use]
    pub fn safe_inc(&self) -> bool {
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        loop {
            if old.destructed() || old.strong() == 0 {
                return false;
            }
            if (old.strong() as usize) >= STRONG {
                refcount_overflow();
            }
            let new_state = old.add_strong(1);
            // compare_exchange_weak may fail spuriously; retry with the
            // freshly observed state on failure.
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Decrement strong count. Returns true when count drops to 0.
    #[inline]
    #[must_use]
    pub fn dec(&self) -> bool {
        // Release: publish all prior writes to the object before dropping the
        // count, so whichever thread reaches 0 sees them.
        let old = State::from_raw(self.state.fetch_sub(COUNT, Ordering::Release));

        // LEAKED objects never reach 0
        if old.leaked() {
            return false;
        }

        if old.strong() == 1 {
            // Acquire fence pairs with the Release above: the caller is about
            // to destroy the object and must observe every preceding write.
            core::sync::atomic::fence(Ordering::Acquire);
            return true;
        }
        false
    }

    /// Mark this object as leaked (interned). It will never be deallocated.
    pub fn leak(&self) {
        debug_assert!(!self.is_leaked());
        // CAS loop: set the LEAKED flag without disturbing the counters,
        // retrying if another thread changes the state concurrently.
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        loop {
            let new_state = old.with_leaked(true);
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Check if this object is leaked (interned).
    pub fn is_leaked(&self) -> bool {
        State::from_raw(self.state.load(Ordering::Acquire)).leaked()
    }
}

// Deferred Drop Infrastructure
//
// This mechanism allows untrack_object() calls to be deferred until after
// the GC collection phase completes, preventing deadlocks that occur when
// clear (pop_edges) triggers object destruction while holding the tracked_objects lock.

#[cfg(feature = "std")]
use core::cell::{Cell, RefCell};

#[cfg(feature = "std")]
thread_local! {
    /// Flag indicating if we're inside a deferred drop context.
    /// When true, drop operations should defer untrack calls.
    /// Saved and restored by `DeferredDropGuard`, so contexts can nest.
    static IN_DEFERRED_CONTEXT: Cell<bool> = const { Cell::new(false) };

    /// Queue of deferred untrack operations.
    /// No Send bound needed - this is thread-local and only accessed from the same thread.
    /// Drained by `flush_deferred_drops` when the outermost context exits.
    static DEFERRED_QUEUE: RefCell<Vec<Box<dyn FnOnce()>>> = const { RefCell::new(Vec::new()) };
}

/// Guard that restores the previous `IN_DEFERRED_CONTEXT` value on drop.
#[cfg(feature = "std")]
struct DeferredDropGuard {
    // Flag value before this guard was created; `false` means this guard is
    // the outermost context and is responsible for flushing the queue.
    was_in_context: bool,
}

#[cfg(feature = "std")]
impl Drop for DeferredDropGuard {
    fn drop(&mut self) {
        let outermost = !self.was_in_context;
        // Restore the nesting flag captured when the guard was created.
        IN_DEFERRED_CONTEXT.with(|flag| flag.set(self.was_in_context));
        // Only the outermost guard flushes, and never while unwinding:
        // a panic inside a queued op would double-panic → process abort.
        if outermost && !std::thread::panicking() {
            flush_deferred_drops();
        }
    }
}

/// Execute a function within a deferred drop context.
/// Any calls to `try_defer_drop` within this context will be queued
/// and executed when the outermost context exits normally.
///
/// Note: if the context unwinds from a panic, the flush is deliberately
/// skipped (see `DeferredDropGuard::drop`); queued operations remain in
/// the thread-local queue until the next flush.
#[cfg(feature = "std")]
#[inline]
pub fn with_deferred_drops<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Save the previous flag so nested contexts restore correctly; the
    // guard's Drop restores it and, for the outermost context, flushes.
    let _guard = IN_DEFERRED_CONTEXT.with(|in_ctx| {
        let was_in_context = in_ctx.replace(true);
        DeferredDropGuard { was_in_context }
    });
    f()
}

/// Try to defer a drop-related operation.
/// If inside a deferred context, the operation is queued.
/// Otherwise, it executes immediately.
#[cfg(feature = "std")]
#[inline]
pub fn try_defer_drop<F>(f: F)
where
    F: FnOnce() + 'static,
{
    if IN_DEFERRED_CONTEXT.with(|flag| flag.get()) {
        // Inside a context: box the closure and queue it for the flush.
        DEFERRED_QUEUE.with(|queue| queue.borrow_mut().push(Box::new(f)));
    } else {
        // No context active: run the operation right away.
        f();
    }
}

/// Flush all deferred drop operations.
/// This is automatically called when exiting a deferred context.
#[cfg(feature = "std")]
#[inline]
pub fn flush_deferred_drops() {
    // Swap the queue out first so the RefCell borrow is released before any
    // operation runs; operations may themselves call `try_defer_drop`.
    let ops = DEFERRED_QUEUE.with(|queue| std::mem::take(&mut *queue.borrow_mut()));
    for op in ops {
        op();
    }
}