1use parking_lot::{Condvar, Mutex};
2use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc};
/// A simple safepoint barrier: `guard` publishes an active safepoint id,
/// `wait` blocks threads while that id is active, and `resume` releases them.
pub struct Barrier {
    // Id of the currently active safepoint; 0 means "no safepoint active".
    active: Mutex<usize>,
    // Signalled by `resume` to wake threads blocked in `wait`.
    done: Condvar,
}
7
8impl Barrier {
9 pub fn new() -> Barrier {
10 Barrier {
11 active: Mutex::new(0),
12 done: Condvar::new(),
13 }
14 }
15
16 pub fn guard(&self, safepoint_id: usize) {
17 let mut active = self.active.lock();
18 assert_eq!(*active, 0);
19 assert_ne!(safepoint_id, 0);
20 *active = safepoint_id;
21 }
22
23 pub fn resume(&self, safepoint_id: usize) {
24 let mut active = self.active.lock();
25 assert_eq!(*active, safepoint_id);
26 assert_ne!(safepoint_id, 0);
27 *active = 0;
28 self.done.notify_all();
29 }
30
31 pub fn wait(&self, safepoint_id: usize) {
32 let mut active = self.active.lock();
33 assert_ne!(safepoint_id, 0);
34
35 while *active == safepoint_id {
36 self.done.wait(&mut active);
37 }
38 }
39}
40
/// Tracks a mutator thread's state together with the safepoint id it is
/// blocked on (the id is only meaningful while the state is `Blocked`).
pub struct StateManager {
    // (current state, safepoint id — 0 unless state is Blocked)
    mtx: Mutex<(ThreadState, usize)>,
}
44
45impl StateManager {
46 fn new() -> StateManager {
47 StateManager {
48 mtx: Mutex::new((ThreadState::Running, 0)),
49 }
50 }
51
52 fn state(&self) -> ThreadState {
53 let mtx = self.mtx.lock();
54 mtx.0
55 }
56
57 fn park(&self) {
58 let mut mtx = self.mtx.lock();
59 assert!(mtx.0.is_running());
60 mtx.0 = ThreadState::Parked;
61 }
62
63 fn unpark(&self) {
64 let mut mtx = self.mtx.lock();
65 assert!(mtx.0.is_parked());
66 mtx.0 = ThreadState::Running;
67 }
68
69 fn block(&self, safepoint_id: usize) {
70 let mut mtx = self.mtx.lock();
71 assert!(mtx.0.is_running());
72 mtx.0 = ThreadState::Blocked;
73 mtx.1 = safepoint_id;
74 }
75
76 fn unblock(&self) {
77 let mut mtx = self.mtx.lock();
78 assert!(mtx.0.is_blocked());
79 mtx.0 = ThreadState::Running;
80 mtx.1 = 0;
81 }
82
83 fn in_safepoint(&self, safepoint_id: usize) -> bool {
84 assert_ne!(safepoint_id, 0);
85 let mtx = self.mtx.lock();
86
87 match mtx.0 {
88 ThreadState::Running => false,
89 ThreadState::Blocked => mtx.1 == safepoint_id,
90 ThreadState::Parked => true,
91 }
92 }
93}
94
/// State of a mutator thread as seen by the safepoint machinery.
/// The explicit discriminants are the wire format used by `From<usize>`
/// and `to_usize`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ThreadState {
    Running = 0,
    Parked = 1,
    Blocked = 2,
}

impl From<usize> for ThreadState {
    /// Inverse of `to_usize`. Panics on any value outside 0..=2 — callers
    /// are expected to only round-trip values produced by `to_usize`.
    fn from(value: usize) -> ThreadState {
        match value {
            0 => ThreadState::Running,
            1 => ThreadState::Parked,
            2 => ThreadState::Blocked,
            _ => unreachable!(),
        }
    }
}

impl ThreadState {
    /// True iff the state is `Running`.
    pub fn is_running(&self) -> bool {
        // matches! over a match-to-bool arm (clippy::match_like_matches_macro)
        matches!(*self, ThreadState::Running)
    }

    /// True iff the state is `Parked`.
    pub fn is_parked(&self) -> bool {
        matches!(*self, ThreadState::Parked)
    }

    /// True iff the state is `Blocked`.
    pub fn is_blocked(&self) -> bool {
        matches!(*self, ThreadState::Blocked)
    }

    /// Numeric encoding; round-trips through `From<usize>`.
    pub fn to_usize(&self) -> usize {
        *self as usize
    }
}

impl Default for ThreadState {
    /// Threads start out running.
    fn default() -> ThreadState {
        ThreadState::Running
    }
}
145
/// Per-thread GC bookkeeping: the thread's safepoint state plus its root set.
pub struct MutatorThread {
    pub state: StateManager,
    // Raw pointers to heap-boxed `RootedInner` values owned by this thread;
    // pushed by `mt_alloc` / `mt_root`. RefCell: mutated only from the owning
    // thread, despite the unsafe Send/Sync impls below.
    pub rootset: std::cell::RefCell<Vec<*mut dyn super::api::RootedTrait>>,
}
150
impl MutatorThread {
    /// Fresh thread record: running, with an empty root set.
    pub fn new() -> Self {
        Self {
            state: StateManager::new(),
            rootset: std::cell::RefCell::new(vec![]),
        }
    }

    /// Current safepoint state of this thread.
    pub fn state(&self) -> ThreadState {
        self.state.state()
    }

    /// Marks the thread parked (e.g. before blocking on non-GC work),
    /// so safepoints do not have to wait for it.
    pub fn park(&self) {
        self.state.park();
    }

    /// Returns the thread to `Running`. If a safepoint is in progress,
    /// first joins it via `safepoint::block` so the thread cannot resume
    /// mutation mid-safepoint.
    // NOTE(review): this path uses `super::heap::HEAP` while the extern
    // wrappers below use `crate::heap::HEAP` — presumably the same item when
    // this module sits at the crate root; confirm the paths agree.
    pub fn unpark(&self) {
        if super::heap::HEAP.threads.safepoint_id() != 0 {
            crate::safepoint::block(self);
        }

        self.state.unpark();
    }

    /// Marks the thread blocked on `safepoint_id`.
    pub fn block(&self, safepoint_id: usize) {
        self.state.block(safepoint_id);
    }

    /// Clears the blocked state set by `block`.
    pub fn unblock(&self) {
        self.state.unblock();
    }

    /// Whether this thread already counts as stopped for `safepoint_id`
    /// (parked, or blocked on exactly that safepoint).
    pub fn in_safepoint(&self, safepoint_id: usize) -> bool {
        self.state.in_safepoint(safepoint_id)
    }
}
186
// SAFETY(review): `rootset` is a RefCell, which is not Sync; these impls
// assert that `rootset` is only ever touched from the owning thread while
// other threads go through `state` (internally Mutex-protected). TODO:
// confirm no cross-thread access to `rootset` exists — e.g. during GC scans.
unsafe impl Send for MutatorThread {}
unsafe impl Sync for MutatorThread {}
/// Global registry of mutator threads plus safepoint coordination state.
pub struct Threads {
    // All currently attached mutator threads.
    pub threads: Mutex<Vec<Arc<MutatorThread>>>,
    // Signalled whenever a thread detaches; `join_all` waits on it.
    pub cond_join: Condvar,

    // Monotonic source of thread ids, starting at 1.
    pub next_id: AtomicUsize,
    // (active safepoint id or 0, next safepoint id to hand out)
    pub safepoint: Mutex<(usize, usize)>,

    pub barrier: Barrier,
}
198
199impl Threads {
200 pub fn new() -> Threads {
201 Threads {
202 threads: Mutex::new(Vec::new()),
203 cond_join: Condvar::new(),
204 next_id: AtomicUsize::new(1),
205 safepoint: Mutex::new((0, 1)),
206 barrier: Barrier::new(),
207 }
208 }
209
210 pub fn attach_current_thread(&self) {
211 THREAD.with(|thread| {
212 let mut threads = self.threads.lock();
213 threads.push(thread.borrow().clone());
214 });
215 }
216
217 pub fn attach_thread(&self, thread: Arc<MutatorThread>) {
218 let mut threads = self.threads.lock();
219 threads.push(thread);
220 }
221
222 pub fn next_id(&self) -> usize {
223 self.next_id.fetch_add(1, Ordering::SeqCst)
224 }
225
226 pub fn safepoint_id(&self) -> usize {
227 let safepoint = self.safepoint.lock();
228 safepoint.0
229 }
230
231 pub fn safepoint_requested(&self) -> bool {
232 let safepoint = self.safepoint.lock();
233 safepoint.0 != 0
234 }
235
236 pub fn request_safepoint(&self) -> usize {
237 let mut safepoint = self.safepoint.lock();
238 assert_eq!(safepoint.0, 0);
239 safepoint.0 = safepoint.1;
240 safepoint.1 += 1;
241
242 safepoint.0
243 }
244
245 pub fn clear_safepoint_request(&self) {
246 let mut safepoint = self.safepoint.lock();
247 assert_ne!(safepoint.0, 0);
248 safepoint.0 = 0;
249 }
250
251 pub fn detach_current_thread(&self) {
252 THREAD.with(|thread| {
253 thread.borrow().park();
254 let mut threads = self.threads.lock();
255 threads.retain(|elem| !Arc::ptr_eq(elem, &*thread.borrow()));
256 self.cond_join.notify_all();
257 });
258 }
259
260 pub fn join_all(&self) {
261 let mut threads = self.threads.lock();
262
263 while threads.len() > 0 {
264 self.cond_join.wait(&mut threads);
265 }
266 }
267
268 pub fn each<F>(&self, mut f: F)
269 where
270 F: FnMut(&Arc<MutatorThread>),
271 {
272 let threads = self.threads.lock();
273
274 for thread in threads.iter() {
275 f(thread)
276 }
277 }
278}
279
thread_local! {
    // Each OS thread's own MutatorThread record, created lazily on first
    // access. The Arc is what gets cloned into `Threads::threads` on attach.
    pub static THREAD: std::cell::RefCell<Arc<MutatorThread>> = std::cell::RefCell::new(Arc::new(MutatorThread::new()));
}
283
/// C-callable wrapper: registers the calling thread with the global heap.
pub extern "C" fn attach_current_thread() {
    crate::heap::HEAP.threads.attach_current_thread();
}
287
288pub extern "C" fn detach_current_thread() {
289 crate::heap::HEAP.threads.attach_current_thread();
290}
291use crate::api::*;
292pub fn mt_alloc<T: Trace + Sized + 'static>(value: T, finalize: bool) -> Rooted<T> {
296 let mem = crate::heap::HEAP.allocate(value, finalize);
297 let rooted = Box::into_raw(Box::new(RootedInner {
298 rooted: true,
299 inner: mem,
300 }));
301
302 THREAD.with(|th| th.borrow().rootset.borrow_mut().push(rooted));
303
304 Rooted { inner: rooted }
305}
306
307pub fn mt_root<T: Trace + 'static + Sized>(handle: Handle<T>) -> Rooted<T> {
308 let rooted = Box::into_raw(Box::new(RootedInner {
309 rooted: true,
310 inner: handle.inner,
311 }));
312 THREAD.with(|th| th.borrow().rootset.borrow_mut().push(rooted));
313 Rooted { inner: rooted }
314}