//! Reference counting primitives for `rustpython_common` (refcount.rs).
1use crate::atomic::{Ordering, PyAtomic, Radium};
2
// State layout (usize):
//   [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [N bits: weak_count] [M bits: strong_count]
// 64-bit: N=30, M=31.  32-bit: N=14, M=15.
const FLAG_BITS: u32 = 3;
const DESTRUCTED: usize = 1 << (usize::BITS - 1);
const LEAKED: usize = 1 << (usize::BITS - 3);
const TOTAL_COUNT_WIDTH: u32 = usize::BITS - FLAG_BITS;
const WEAK_WIDTH: u32 = TOTAL_COUNT_WIDTH / 2;
const STRONG_WIDTH: u32 = TOTAL_COUNT_WIDTH - WEAK_WIDTH;
const STRONG: usize = (1 << STRONG_WIDTH) - 1;
const COUNT: usize = 1;
const WEAK_COUNT: usize = 1 << STRONG_WIDTH;

/// Abort (with `std`) or panic (without `std`) on refcount overflow or
/// increment of a destructed object.
#[inline(never)]
#[cold]
fn refcount_overflow() -> ! {
    #[cfg(feature = "std")]
    {
        std::process::abort();
    }
    #[cfg(not(feature = "std"))]
    {
        core::panic!("refcount overflow");
    }
}

/// `State` packs the reference counts and flag bits into a single word
/// (platform `usize`) so they can live in one atomic.
#[derive(Clone, Copy)]
struct State {
    inner: usize,
}

impl State {
    /// Wrap a raw state word.
    #[inline]
    fn from_raw(inner: usize) -> Self {
        State { inner }
    }

    /// Unwrap back into the raw state word.
    #[inline]
    fn as_raw(self) -> usize {
        self.inner
    }

    /// Strong reference count stored in the low `STRONG_WIDTH` bits.
    #[inline]
    fn strong(self) -> u32 {
        let units = self.inner & STRONG;
        (units / COUNT) as u32
    }

    /// Whether the destructed flag (top bit) is set.
    #[inline]
    fn destructed(self) -> bool {
        self.inner & DESTRUCTED != 0
    }

    /// Whether the leaked flag is set.
    #[inline]
    fn leaked(self) -> bool {
        self.inner & LEAKED != 0
    }

    /// Copy of this state with the strong count raised by `val`.
    #[inline]
    fn add_strong(self, val: u32) -> Self {
        State::from_raw(self.inner + val as usize * COUNT)
    }

    /// Copy of this state with the leaked flag forced to `leaked`.
    #[inline]
    fn with_leaked(self, leaked: bool) -> Self {
        let raw = if leaked {
            self.inner | LEAKED
        } else {
            self.inner & !LEAKED
        };
        State::from_raw(raw)
    }
}
67
/// Reference count using state layout with LEAKED support.
///
/// State layout (usize):
/// 64-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [30 bits: weak_count] [31 bits: strong_count]
/// 32-bit: [1 bit: destructed] [1 bit: reserved] [1 bit: leaked] [14 bits: weak_count] [15 bits: strong_count]
pub struct RefCount {
    // Packed counts + flags in a single atomic word; decoded via the private `State` helper.
    state: PyAtomic<usize>,
}
76
77impl Default for RefCount {
78    fn default() -> Self {
79        Self::new()
80    }
81}
82
83impl RefCount {
84    /// Create a new RefCount with strong count = 1
85    pub fn new() -> Self {
86        // Initial state: strong=1, weak=1 (implicit weak for strong refs)
87        Self {
88            state: Radium::new(COUNT + WEAK_COUNT),
89        }
90    }
91
92    /// Get current strong count
93    #[inline]
94    pub fn get(&self) -> usize {
95        State::from_raw(self.state.load(Ordering::Relaxed)).strong() as usize
96    }
97
98    /// Increment strong count
99    #[inline]
100    pub fn inc(&self) {
101        let val = State::from_raw(self.state.fetch_add(COUNT, Ordering::Relaxed));
102        if val.destructed() || (val.strong() as usize) > STRONG - 1 {
103            refcount_overflow();
104        }
105        if val.strong() == 0 {
106            // The previous fetch_add created a permission to run decrement again
107            self.state.fetch_add(COUNT, Ordering::Relaxed);
108        }
109    }
110
111    #[inline]
112    pub fn inc_by(&self, n: usize) {
113        debug_assert!(n <= STRONG);
114        let val = State::from_raw(self.state.fetch_add(n * COUNT, Ordering::Relaxed));
115        if val.destructed() || (val.strong() as usize) > STRONG - n {
116            refcount_overflow();
117        }
118    }
119
120    /// Returns true if successful
121    #[inline]
122    #[must_use]
123    pub fn safe_inc(&self) -> bool {
124        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
125        loop {
126            if old.destructed() || old.strong() == 0 {
127                return false;
128            }
129            if (old.strong() as usize) >= STRONG {
130                refcount_overflow();
131            }
132            let new_state = old.add_strong(1);
133            match self.state.compare_exchange_weak(
134                old.as_raw(),
135                new_state.as_raw(),
136                Ordering::Relaxed,
137                Ordering::Relaxed,
138            ) {
139                Ok(_) => return true,
140                Err(curr) => old = State::from_raw(curr),
141            }
142        }
143    }
144
145    /// Decrement strong count. Returns true when count drops to 0.
146    #[inline]
147    #[must_use]
148    pub fn dec(&self) -> bool {
149        let old = State::from_raw(self.state.fetch_sub(COUNT, Ordering::Release));
150
151        // LEAKED objects never reach 0
152        if old.leaked() {
153            return false;
154        }
155
156        if old.strong() == 1 {
157            core::sync::atomic::fence(Ordering::Acquire);
158            return true;
159        }
160        false
161    }
162
163    /// Mark this object as leaked (interned). It will never be deallocated.
164    pub fn leak(&self) {
165        debug_assert!(!self.is_leaked());
166        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
167        loop {
168            let new_state = old.with_leaked(true);
169            match self.state.compare_exchange_weak(
170                old.as_raw(),
171                new_state.as_raw(),
172                Ordering::AcqRel,
173                Ordering::Relaxed,
174            ) {
175                Ok(_) => return,
176                Err(curr) => old = State::from_raw(curr),
177            }
178        }
179    }
180
181    /// Check if this object is leaked (interned).
182    pub fn is_leaked(&self) -> bool {
183        State::from_raw(self.state.load(Ordering::Acquire)).leaked()
184    }
185}
186
187// Deferred Drop Infrastructure
188//
189// This mechanism allows untrack_object() calls to be deferred until after
190// the GC collection phase completes, preventing deadlocks that occur when
191// clear (pop_edges) triggers object destruction while holding the tracked_objects lock.
192
193#[cfg(feature = "std")]
194use core::cell::{Cell, RefCell};
195
#[cfg(feature = "std")]
thread_local! {
    /// Flag indicating if we're inside a deferred drop context.
    /// When true, drop operations should defer untrack calls.
    /// Saved and restored by `DeferredDropGuard`, so contexts can nest.
    static IN_DEFERRED_CONTEXT: Cell<bool> = const { Cell::new(false) };

    /// Queue of deferred untrack operations.
    /// No Send bound needed - this is thread-local and only accessed from the same thread.
    /// Drained by `flush_deferred_drops` when the outermost context exits.
    static DEFERRED_QUEUE: RefCell<Vec<Box<dyn FnOnce()>>> = const { RefCell::new(Vec::new()) };
}
206
#[cfg(feature = "std")]
struct DeferredDropGuard {
    /// Whether the thread was already inside a deferred-drop context when
    /// this guard was created (supports nested contexts).
    was_in_context: bool,
}

#[cfg(feature = "std")]
impl Drop for DeferredDropGuard {
    fn drop(&mut self) {
        let outermost = !self.was_in_context;
        // Restore the saved nesting flag before flushing, so drops triggered
        // by the flush itself are no longer deferred.
        IN_DEFERRED_CONTEXT.with(|flag| flag.set(self.was_in_context));
        // Only flush if we're the outermost context and not already panicking
        // (flushing during unwinding risks double-panic → process abort).
        if outermost && !std::thread::panicking() {
            flush_deferred_drops();
        }
    }
}
225
/// Execute a function within a deferred drop context.
/// Any calls to `try_defer_drop` within this context will be queued
/// and executed when the context exits (even on panic).
#[cfg(feature = "std")]
#[inline]
pub fn with_deferred_drops<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Set the flag and remember its previous value; the guard restores it
    // (and flushes, if outermost) when dropped — including during unwinding.
    let _guard = IN_DEFERRED_CONTEXT.with(|flag| DeferredDropGuard {
        was_in_context: flag.replace(true),
    });
    f()
}
242
/// Try to defer a drop-related operation.
/// If inside a deferred context, the operation is queued.
/// Otherwise, it executes immediately.
#[cfg(feature = "std")]
#[inline]
pub fn try_defer_drop<F>(f: F)
where
    F: FnOnce() + 'static,
{
    if IN_DEFERRED_CONTEXT.with(|flag| flag.get()) {
        // Inside a deferred context: queue for execution at context exit.
        DEFERRED_QUEUE.with(|queue| queue.borrow_mut().push(Box::new(f)));
    } else {
        // Not deferring: run the operation right away.
        f();
    }
}
262
/// Flush all deferred drop operations.
/// This is automatically called when exiting a deferred context.
///
/// Loops until the queue is empty: an operation may itself enqueue further
/// deferred operations (e.g. if it runs while the deferred-context flag is
/// still set), and a single drain would silently leave those queued.
#[cfg(feature = "std")]
#[inline]
pub fn flush_deferred_drops() {
    loop {
        // Take the whole queue while holding the borrow, then release it
        // before running the operations, since an operation may call
        // `try_defer_drop` and re-borrow the queue.
        let ops = DEFERRED_QUEUE.with(|q| core::mem::take(&mut *q.borrow_mut()));
        if ops.is_empty() {
            break;
        }
        for op in ops {
            op();
        }
    }
}