// rustpython_common/refcount.rs
use crate::atomic::{Ordering, PyAtomic, Radium};
2
// Layout of the packed refcount word (one `usize`), high bits to low:
//   [ flags (FLAG_BITS) | weak count (WEAK_WIDTH) | strong count (STRONG_WIDTH) ]
// Number of high bits reserved for flags.
const FLAG_BITS: u32 = 3;
// Flag: the value's destructor has already run; a later `inc` aborts.
const DESTRUCTED: usize = 1 << (usize::BITS - 1);
// Flag: the value was intentionally leaked; `dec` never reports it droppable.
// NOTE(review): bit `usize::BITS - 2` lies inside the FLAG_BITS region but
// has no named constant in this chunk — presumably defined elsewhere; confirm.
const LEAKED: usize = 1 << (usize::BITS - 3);
// Bits left over for the two counters once the flags are taken out.
const TOTAL_COUNT_WIDTH: u32 = usize::BITS - FLAG_BITS;
// The counter bits are split between the weak and strong counters.
const WEAK_WIDTH: u32 = TOTAL_COUNT_WIDTH / 2;
const STRONG_WIDTH: u32 = TOTAL_COUNT_WIDTH - WEAK_WIDTH;
// Mask selecting the strong-count field (the low STRONG_WIDTH bits).
const STRONG: usize = (1 << STRONG_WIDTH) - 1;
// One strong reference, in state-word units.
const COUNT: usize = 1;
// One weak reference, in state-word units (stored just above the strong field).
const WEAK_COUNT: usize = 1 << STRONG_WIDTH;
15
/// Out-of-line cold path taken when the strong count would overflow or an
/// already-destructed value is incremented again. Never returns.
///
/// Kept `#[inline(never)]` + `#[cold]` so the hot increment paths stay small
/// and the branch predictor treats this as the unlikely arm.
#[inline(never)]
#[cold]
fn refcount_overflow() -> ! {
    // Without `std` the best we can do is panic; with `std` we abort the
    // whole process, since a corrupted refcount is unrecoverable.
    #[cfg(not(feature = "std"))]
    core::panic!("refcount overflow");
    #[cfg(feature = "std")]
    std::process::abort();
}
24
/// Decoded snapshot of the packed refcount word; a pure value type used to
/// interpret words loaded from `RefCount::state`.
#[derive(Clone, Copy)]
struct State {
    /// The raw packed word: flag bits, weak count, strong count.
    inner: usize,
}
30
31impl State {
32 #[inline]
33 fn from_raw(inner: usize) -> Self {
34 Self { inner }
35 }
36
37 #[inline]
38 fn as_raw(self) -> usize {
39 self.inner
40 }
41
42 #[inline]
43 fn strong(self) -> u32 {
44 ((self.inner & STRONG) / COUNT) as u32
45 }
46
47 #[inline]
48 fn destructed(self) -> bool {
49 (self.inner & DESTRUCTED) != 0
50 }
51
52 #[inline]
53 fn leaked(self) -> bool {
54 (self.inner & LEAKED) != 0
55 }
56
57 #[inline]
58 fn add_strong(self, val: u32) -> Self {
59 Self::from_raw(self.inner + (val as usize) * COUNT)
60 }
61
62 #[inline]
63 fn with_leaked(self, leaked: bool) -> Self {
64 Self::from_raw((self.inner & !LEAKED) | if leaked { LEAKED } else { 0 })
65 }
66}
67
/// Atomic strong/weak reference counter packed into a single `usize`
/// (flag bits, weak count, strong count — see the layout constants above).
pub struct RefCount {
    /// The packed state word, updated with atomic RMW operations.
    state: PyAtomic<usize>,
}
76
77impl Default for RefCount {
78 fn default() -> Self {
79 Self::new()
80 }
81}
82
impl RefCount {
    /// New counter: one strong reference plus one weak reference.
    ///
    /// NOTE(review): the initial `WEAK_COUNT` is presumably the collective
    /// weak reference held on behalf of all strong references (as in
    /// `std::sync::Arc`) — confirm against the weak-reference code.
    pub fn new() -> Self {
        Self {
            state: Radium::new(COUNT + WEAK_COUNT),
        }
    }

    /// Current strong count (flags and weak count masked off).
    #[inline]
    pub fn get(&self) -> usize {
        State::from_raw(self.state.load(Ordering::Relaxed)).strong() as usize
    }

    /// Increment the strong count.
    ///
    /// Aborts via `refcount_overflow` if the destructor has already run or
    /// if the strong field would overflow into the weak field.
    #[inline]
    pub fn inc(&self) {
        let val = State::from_raw(self.state.fetch_add(COUNT, Ordering::Relaxed));
        // `val` is the pre-increment state; old count == STRONG means the
        // add above already spilled, so abort before anyone observes it.
        if val.destructed() || (val.strong() as usize) > STRONG - 1 {
            refcount_overflow();
        }
        // NOTE(review): when the previous strong count was 0, a second COUNT
        // is added, leaving the count at 2 rather than 1 — presumably to
        // compensate for a concurrent `dec` that already observed the drop
        // to zero. Confirm the intended resurrection semantics.
        if val.strong() == 0 {
            self.state.fetch_add(COUNT, Ordering::Relaxed);
        }
    }

    /// Increment the strong count by `n` with a single atomic add.
    ///
    /// Aborts if the value is destructed or the field would overflow.
    #[inline]
    pub fn inc_by(&self, n: usize) {
        debug_assert!(n <= STRONG);
        let val = State::from_raw(self.state.fetch_add(n * COUNT, Ordering::Relaxed));
        if val.destructed() || (val.strong() as usize) > STRONG - n {
            refcount_overflow();
        }
    }

    /// Try to increment the strong count without resurrecting a dying value.
    ///
    /// Returns `false` when the destructor has run or the count is already
    /// 0 (presumably the weak-to-strong upgrade path — confirm callers).
    /// Uses a CAS loop so the zero/destructed check and the increment are
    /// one atomic step, unlike `inc`.
    #[inline]
    #[must_use]
    pub fn safe_inc(&self) -> bool {
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        loop {
            if old.destructed() || old.strong() == 0 {
                return false;
            }
            if (old.strong() as usize) >= STRONG {
                refcount_overflow();
            }
            let new_state = old.add_strong(1);
            // Weak CAS is fine: spurious failure just re-runs the loop.
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::Relaxed,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Decrement the strong count; returns `true` iff this was the last
    /// strong reference, i.e. the caller must run the destructor.
    ///
    /// Arc-style synchronization: the Release decrement orders this
    /// thread's prior writes, and the Acquire fence on the last-reference
    /// path makes all other threads' writes visible before destruction.
    #[inline]
    #[must_use]
    pub fn dec(&self) -> bool {
        let old = State::from_raw(self.state.fetch_sub(COUNT, Ordering::Release));

        // Leaked values are never reported as droppable.
        if old.leaked() {
            return false;
        }

        if old.strong() == 1 {
            core::sync::atomic::fence(Ordering::Acquire);
            return true;
        }
        false
    }

    /// Mark the value as leaked so `dec` never reports it droppable.
    ///
    /// CAS loop rather than a plain `fetch_or` — NOTE(review): presumably
    /// so the counter fields in the published word stay exactly what was
    /// observed; confirm whether `fetch_or(LEAKED, AcqRel)` would suffice.
    pub fn leak(&self) {
        debug_assert!(!self.is_leaked());
        let mut old = State::from_raw(self.state.load(Ordering::Relaxed));
        loop {
            let new_state = old.with_leaked(true);
            match self.state.compare_exchange_weak(
                old.as_raw(),
                new_state.as_raw(),
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return,
                Err(curr) => old = State::from_raw(curr),
            }
        }
    }

    /// Whether the leaked flag has been set (pairs with `leak`'s AcqRel CAS).
    pub fn is_leaked(&self) -> bool {
        State::from_raw(self.state.load(Ordering::Acquire)).leaked()
    }
}
186
187#[cfg(feature = "std")]
194use core::cell::{Cell, RefCell};
195
#[cfg(feature = "std")]
thread_local! {
    // True while this thread is inside a `with_deferred_drops` scope;
    // when set, `try_defer_drop` queues the operation instead of running it.
    static IN_DEFERRED_CONTEXT: Cell<bool> = const { Cell::new(false) };

    // Operations queued while deferral was active; drained and executed by
    // `flush_deferred_drops` when the outermost scope exits.
    static DEFERRED_QUEUE: RefCell<Vec<Box<dyn FnOnce()>>> = const { RefCell::new(Vec::new()) };
}
206
// Scope guard created by `with_deferred_drops`: on drop it restores the
// saved `IN_DEFERRED_CONTEXT` flag and, for the outermost guard only,
// flushes the queued drop operations.
#[cfg(feature = "std")]
struct DeferredDropGuard {
    // Flag value observed when the scope was entered; restored on drop.
    // `false` identifies the outermost guard.
    was_in_context: bool,
}
211
#[cfg(feature = "std")]
impl Drop for DeferredDropGuard {
    fn drop(&mut self) {
        // Put the deferral flag back to what it was before this scope.
        IN_DEFERRED_CONTEXT.with(|flag| flag.set(self.was_in_context));
        // Only the outermost guard drains the queue, and never while the
        // thread is unwinding from a panic.
        let outermost = !self.was_in_context;
        if outermost && !std::thread::panicking() {
            flush_deferred_drops();
        }
    }
}
225
/// Run `f` with drop-deferral enabled on this thread.
///
/// While `f` runs, `try_defer_drop` queues operations instead of executing
/// them; the queue is flushed when the outermost such scope exits (see
/// `DeferredDropGuard`). Nesting is supported — inner scopes restore the
/// previous flag value and do not flush themselves.
#[cfg(feature = "std")]
#[inline]
pub fn with_deferred_drops<F, R>(f: F) -> R
where
    F: FnOnce() -> R,
{
    // Flip the flag, remembering its previous value; the guard undoes this
    // (and possibly flushes) even if `f` panics.
    let _guard = IN_DEFERRED_CONTEXT.with(|flag| DeferredDropGuard {
        was_in_context: flag.replace(true),
    });
    f()
}
242
/// Run `f` now, or queue it if this thread is inside a deferred-drops scope.
///
/// Queued closures execute when the outermost `with_deferred_drops` scope
/// on this thread exits.
#[cfg(feature = "std")]
#[inline]
pub fn try_defer_drop<F>(f: F)
where
    F: FnOnce() + 'static,
{
    if IN_DEFERRED_CONTEXT.with(Cell::get) {
        // Deferral active: stash the op for the outermost guard to run.
        DEFERRED_QUEUE.with(|queue| queue.borrow_mut().push(Box::new(f)));
    } else {
        // No active scope: nothing to defer against, run immediately.
        f();
    }
}
262
/// Execute and clear every queued deferred-drop operation on this thread.
#[cfg(feature = "std")]
#[inline]
pub fn flush_deferred_drops() {
    // Move the ops out of the thread-local before running them, so an op
    // that queues more work cannot alias the RefCell borrow.
    let pending: Vec<Box<dyn FnOnce()>> =
        DEFERRED_QUEUE.with(|queue| queue.borrow_mut().drain(..).collect());
    for op in pending {
        op();
    }
}