// compact_rc/refcount.rs
1use std::cell::Cell;
2use std::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering};
3
4/// Trait for refcount
5///
6/// Some method names explain required memory ordering in multi-threaded context.
/// Trait for refcount.
///
/// Abstracts over the counter behind a reference-counted pointer so the
/// same pointer code can use either a single-threaded counter (`Cell<uN>`)
/// or a multi-threaded one (`AtomicUN`). Some method names explain the
/// memory ordering required of atomic implementors in a multi-threaded
/// context; single-threaded implementors may ignore the ordering.
pub trait RefCount {
    /// Type of count (the plain integer behind the counter, e.g. `u8`, `usize`).
    type Value;

    /// Creates a new RefCount object.
    ///
    /// The object is initialized as "one" (a single owner).
    fn one() -> Self;

    /// Checks whether a value equals to one.
    ///
    /// Takes a `Value` (not `&self`) so callers can test a count they have
    /// already loaded, without a second read of the counter.
    fn is_one(val: &Self::Value) -> bool;

    /// Gets a current value.
    ///
    /// # Memory ordering
    /// [Acquire](std::sync::atomic::Ordering::Acquire) or stronger ordering is required if
    /// implementors are atomic types.
    fn load_acquire(&self) -> Self::Value;

    /// Memory fence.
    ///
    /// This method is needed only for multi-threaded refcount.
    /// In single-threaded implementors, this can be NO-OP.
    ///
    /// # Memory ordering
    /// [Acquire](std::sync::atomic::Ordering::Acquire) or stronger ordering is required if
    /// implementors are atomic types.
    fn fence_acquire(&self);

    /// Increments its value and returns previous value.
    ///
    /// # Memory ordering
    /// [Relaxed](std::sync::atomic::Ordering::Relaxed) ordering is allowed if implementors are
    /// atomic types.
    fn fetch_inc_relaxed(&self) -> Self::Value;

    /// Decrements its value and returns previous value.
    ///
    /// # Memory ordering
    /// [Release](std::sync::atomic::Ordering::Release) or stronger ordering is required if
    /// implementors are atomic types.
    fn fetch_dec_release(&self) -> Self::Value;
}
50
51/// Implements RefCount for Cell<$type>.
52/// Its implementors are used as single-threaded refcount.
/// Implements [`RefCount`] for `Cell<$type>`.
///
/// These implementors serve as single-threaded refcounts: no real memory
/// ordering is needed, so the "acquire"/"release" methods are plain reads
/// and writes through the `Cell`.
macro_rules! impl_cell_refcount {
    ($type:ty) => {
        impl RefCount for Cell<$type> {
            type Value = $type;

            #[inline(always)]
            fn one() -> Self {
                Self::new(1)
            }

            #[inline(always)]
            fn is_one(val: &Self::Value) -> bool {
                1 == *val
            }

            #[inline(always)]
            fn load_acquire(&self) -> Self::Value {
                self.get()
            }

            #[inline(always)]
            fn fence_acquire(&self) {
                // Single-threaded counter: nothing to synchronize, so NO-OP.
            }

            #[inline(always)]
            fn fetch_inc_relaxed(&self) -> Self::Value {
                let prev = self.get();
                // A live refcount is never zero while someone holds a reference.
                assume!(prev != 0);
                if let Some(next) = prev.checked_add(1) {
                    self.set(next);
                    prev
                } else {
                    // Counter overflow: aborting is the only safe response,
                    // since wrapping would corrupt the ownership count.
                    std::process::abort()
                }
            }

            #[inline(always)]
            fn fetch_dec_release(&self) -> Self::Value {
                let prev = self.get();
                // Decrement is only reached by an existing owner, so the
                // count must be positive and the subtraction cannot wrap.
                assume!(0 < prev);
                self.set(prev - 1);
                prev
            }
        }
    };
}
101
// Single-threaded refcount implementations for every supported counter width.
impl_cell_refcount!(u8);
impl_cell_refcount!(u16);
impl_cell_refcount!(u32);
impl_cell_refcount!(u64);
impl_cell_refcount!(usize);
107
108/// Implements RefCount for atomic $types.
109/// Its implementors are used as multi-threaded refcount.
/// Implements [`RefCount`] for the atomic `$type` whose value type is
/// `$value_type`.
///
/// These implementors serve as multi-threaded refcounts; each method uses
/// the memory ordering its trait documentation demands.
macro_rules! impl_atomic_refcount {
    ($type:ty, $value_type:ty) => {
        impl RefCount for $type {
            type Value = $value_type;

            #[inline(always)]
            fn one() -> Self {
                Self::new(1)
            }

            #[inline(always)]
            fn is_one(val: &Self::Value) -> bool {
                1 == *val
            }

            #[inline(always)]
            fn load_acquire(&self) -> Self::Value {
                self.load(Ordering::Acquire)
            }

            #[inline(always)]
            fn fence_acquire(&self) {
                // A Load-Acquire is used instead of a standalone fence for
                // performance; see
                // https://developer.arm.com/documentation/102336/0100/Load-Acquire-and-Store-Release-instructions
                let _ = self.load(Ordering::Acquire);
            }

            #[inline(always)]
            fn fetch_inc_relaxed(&self) -> Self::Value {
                let prev = self.fetch_add(1, Ordering::Relaxed);
                if prev == <$value_type>::MAX {
                    // The counter just wrapped past its maximum: abort
                    // rather than continue with a corrupted ownership count.
                    // NOTE(review): with narrow counters (u8/u16), enough
                    // concurrently racing increments could in principle act
                    // on the wrapped value before this abort fires — confirm
                    // this matches the crate's threat model.
                    std::process::abort();
                }
                prev
            }

            #[inline(always)]
            fn fetch_dec_release(&self) -> Self::Value {
                let prev = self.fetch_sub(1, Ordering::Release);
                // Only an existing owner can decrement, so the previous
                // value must have been positive.
                assume!(0 < prev);
                prev
            }
        }
    };
}
156
// Multi-threaded (atomic) refcount implementations for every supported width.
impl_atomic_refcount!(AtomicU8, u8);
impl_atomic_refcount!(AtomicU16, u16);
impl_atomic_refcount!(AtomicU32, u32);
impl_atomic_refcount!(AtomicU64, u64);
impl_atomic_refcount!(AtomicUsize, usize);