1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::ptr::{NonNull, null};
use super::Epoch;
use super::collector::Collector;
use super::link::DeferredClosure;
use super::link::Link;
use super::private_collector::SharedGarbageBag;
/// [`Guard`] allows the user to read [`AtomicOwned`](super::AtomicOwned),
/// [`AtomicShared`](super::AtomicShared), and [`AtomicRaw`](super::AtomicRaw) while keeping the
/// underlying instance pinned in the current thread.
///
/// [`Guard`] internally prevents the global epoch from advancing past the value announced by
/// the current thread, thereby preventing reachable instances in the thread from being garbage
/// collected.
#[derive(Debug)]
pub struct Guard {
    // Pointer to the `Collector` this guard is registered with, obtained from
    // `Collector::current()` in `Guard::new`. `NonNull` guarantees it is never
    // null (and lets `Option<Guard>`-style wrappers stay pointer-sized).
    collector_ptr: NonNull<Collector>,
}
impl Guard {
    /// Creates a new [`Guard`].
    ///
    /// # Panics
    ///
    /// Panics if the maximum number of [`Guard`] instances in a thread, `u32::MAX`, is exceeded.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::Guard;
    ///
    /// let guard = Guard::new();
    /// ```
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        // Locate the current thread's collector, then register this guard with
        // it; per the `# Panics` section above, registration panics once the
        // per-thread guard count would exceed `u32::MAX`.
        let collector_ptr = Collector::current();
        Collector::new_guard(collector_ptr);
        Self { collector_ptr }
    }
    /// Returns the epoch currently witnessed by the thread.
    ///
    /// This method can be used to determine whether a retired memory region is potentially
    /// reachable. A memory region retired in a witnessed [`Epoch`] can be deallocated only after
    /// the thread has observed three subsequent epochs. For instance, if the witnessed epoch
    /// value is `1` while the global epoch is `2`, and an instance is retired in the same thread,
    /// that instance can be dropped when the thread witnesses epoch `4`, which is three epochs
    /// away from `1`.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::{Guard, Owned};
    /// use std::sync::atomic::AtomicBool;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// static DROPPED: AtomicBool = AtomicBool::new(false);
    ///
    /// struct D(&'static AtomicBool);
    ///
    /// impl Drop for D {
    ///     fn drop(&mut self) {
    ///         self.0.store(true, Relaxed);
    ///     }
    /// }
    ///
    /// let owned = Owned::new(D(&DROPPED));
    ///
    /// let epoch_before = Guard::new().epoch();
    ///
    /// drop(owned);
    /// assert!(!DROPPED.load(Relaxed));
    ///
    /// while Guard::new().epoch() == epoch_before {
    ///     assert!(!DROPPED.load(Relaxed));
    /// }
    ///
    /// while Guard::new().epoch() == epoch_before.next() {
    ///     assert!(!DROPPED.load(Relaxed));
    /// }
    ///
    /// while Guard::new().epoch() == epoch_before.next().next() {
    ///     assert!(!DROPPED.load(Relaxed));
    /// }
    ///
    /// assert!(DROPPED.load(Relaxed));
    /// assert_eq!(Guard::new().epoch(), epoch_before.next().next().next());
    /// ```
    #[inline]
    #[must_use]
    pub fn epoch(&self) -> Epoch {
        // `self` is not dereferenced here: the epoch is read via the
        // collector's thread-level accessor, and holding the guard is what
        // keeps the witnessed epoch meaningful.
        Collector::current_epoch()
    }
    /// Returns `true` if the thread-local garbage collector may contain garbage.
    ///
    /// This method may return `true` even when no garbage exists if
    /// [`set_has_garbage`](Self::set_has_garbage) was recently called.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::{Guard, Shared};
    ///
    /// let guard = Guard::new();
    ///
    /// assert!(!guard.has_garbage());
    ///
    /// drop(Shared::new(1_usize));
    /// assert!(guard.has_garbage());
    /// ```
    #[inline]
    #[must_use]
    pub fn has_garbage(&self) -> bool {
        Collector::has_garbage(self.collector_ptr)
    }
    /// Sets the garbage flag to allow the thread to advance the global epoch.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::Guard;
    ///
    /// let guard = Guard::new();
    ///
    /// assert!(!guard.has_garbage());
    /// guard.set_has_garbage();
    /// assert!(guard.has_garbage());
    /// ```
    // NOTE(review): `const fn` here requires `Collector::set_has_garbage` to be
    // callable in const contexts — confirm against the `Collector` definition.
    #[inline]
    pub const fn set_has_garbage(&self) {
        Collector::set_has_garbage(self.collector_ptr);
    }
    /// Signals to the [`Guard`] that it should try to advance to a new epoch when dropped.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::Guard;
    ///
    /// let guard = Guard::new();
    ///
    /// let epoch = guard.epoch();
    /// guard.accelerate();
    ///
    /// drop(guard);
    ///
    /// assert_ne!(epoch, Guard::new().epoch());
    /// ```
    #[inline]
    pub const fn accelerate(&self) {
        Collector::accelerate(self.collector_ptr);
    }
    /// Executes the supplied closure at a later point in time.
    ///
    /// The closure is guaranteed to execute after all [`Guard`] instances present when the method
    /// was invoked have been dropped, though the exact timing is non-deterministic.
    ///
    /// # Examples
    ///
    /// ```
    /// use sdd::Guard;
    ///
    /// let guard = Guard::new();
    /// guard.defer_execute(|| println!("deferred"));
    /// ```
    #[inline]
    pub fn defer_execute<F: 'static + FnOnce()>(&self, f: F) {
        // The closure is boxed into a `DeferredClosure` link and handed to the
        // collector; `null()` means no shared garbage bag is involved, so the
        // deferred work goes through the thread-local path.
        Collector::collect(self.collector_ptr, DeferredClosure::alloc(f), null());
    }
    /// Collects a memory chunk into the thread-local garbage collector.
    //
    // `shared_garbage_bag` may be null (see `defer_execute`); presumably a null
    // bag routes the chunk to the thread-local bag only — TODO confirm against
    // `Collector::collect`.
    pub(super) fn collect(&self, ptr: *mut Link, shared_garbage_bag: *const SharedGarbageBag) {
        Collector::collect(self.collector_ptr, ptr, shared_garbage_bag);
    }
    /// Purges all memory regions from the thread-local private garbage collector.
    pub(super) fn purge(&self, shared_garbage_bag: *const SharedGarbageBag) {
        Collector::purge(self.collector_ptr, shared_garbage_bag);
    }
}
impl Default for Guard {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
        // Deregister this guard from the collector. Once the thread holds no
        // guards, it no longer pins the announced epoch (see the type-level
        // docs), so previously retired instances become reclaimable.
        Collector::end_guard(self.collector_ptr);
    }
}
// Explicitly opt `Guard` into unwind safety so it can cross
// `std::panic::catch_unwind` boundaries without `AssertUnwindSafe` wrappers.
// NOTE(review): these impls assert that a panic while a `Guard` is live cannot
// leave observable collector state logically corrupted — confirm against the
// `Collector` implementation.
impl RefUnwindSafe for Guard {}
impl UnwindSafe for Guard {}