use crate::atomic::{Ordering::*, PyAtomic, Radium};

/// A soft limit on the number of strong references, matching the cap that
/// `alloc::sync::Arc` enforces before aborting.
const MAX_REFCOUNT: usize = isize::MAX as usize;

/// An atomic strong-reference counter.
pub struct RefCount {
    strong: PyAtomic<usize>,
}
impl Default for RefCount {
    fn default() -> Self {
        Self::new()
    }
}
impl RefCount {
    const MASK: usize = MAX_REFCOUNT;

    /// Creates a counter holding a single strong reference.
    pub fn new() -> Self {
        RefCount {
            strong: Radium::new(1),
        }
    }

    #[inline]
    pub fn get(&self) -> usize {
        self.strong.load(SeqCst)
    }

    /// Increments the count, aborting the process on overflow, as `Arc` does.
    #[inline]
    pub fn inc(&self) {
        let old_size = self.strong.fetch_add(1, Relaxed);
        if old_size & Self::MASK == Self::MASK {
            std::process::abort();
        }
    }

    /// Increments the count only if it is nonzero; returns `true` on success.
    #[inline]
    pub fn safe_inc(&self) -> bool {
        self.strong
            .fetch_update(AcqRel, Acquire, |prev| (prev != 0).then_some(prev + 1))
            .is_ok()
    }

    /// Decrements the count; returns `true` if this was the last reference.
    #[inline]
    pub fn dec(&self) -> bool {
        if self.strong.fetch_sub(1, Release) != 1 {
            return false;
        }
        // Synchronize with the `Release` decrements of other references
        // before the caller frees the object.
        PyAtomic::<usize>::fence(Acquire);
        true
    }
}
impl RefCount {
    /// Marks the object as leaked by setting the sign bit of the count, so it
    /// is never deallocated.
    pub fn leak(&self) {
        debug_assert!(!self.is_leaked());
        const BIT_MARKER: usize = (isize::MAX as usize) + 1;
        debug_assert_eq!(BIT_MARKER.count_ones(), 1);
        debug_assert_eq!(BIT_MARKER.leading_zeros(), 0);
        self.strong.fetch_add(BIT_MARKER, Relaxed);
    }

    pub fn is_leaked(&self) -> bool {
        (self.strong.load(Acquire) as isize) < 0
    }
}
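
// A minimal usage sketch, not part of the original source: the module and
// test names below are hypothetical, but the tests exercise only the public
// API defined above.
#[cfg(test)]
mod refcount_sketch_tests {
    use super::RefCount;

    #[test]
    fn inc_dec_roundtrip() {
        let rc = RefCount::new();
        assert_eq!(rc.get(), 1); // a fresh counter holds one strong reference

        rc.inc();
        assert_eq!(rc.get(), 2);

        assert!(!rc.dec()); // one reference remains, so not the last
        assert!(rc.dec()); // count reached zero: the caller may free the object

        // once the count is zero, safe_inc refuses to revive the object
        assert!(!rc.safe_inc());
    }

    #[test]
    fn leaked_counter_reads_negative() {
        let rc = RefCount::new();
        rc.leak();
        assert!(rc.is_leaked()); // the sign bit marks the count as leaked
    }
}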