miden_utils_sync/rw_lock.rs
#[cfg(not(loom))]
use core::{
    hint,
    sync::atomic::{AtomicUsize, Ordering},
};

use lock_api::RawRwLock;
#[cfg(loom)]
use loom::{
    hint,
    sync::atomic::{AtomicUsize, Ordering},
};

/// An implementation of a reader-writer lock, based on a spinlock primitive, no-std compatible
///
/// See [lock_api::RwLock] for usage.
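///
/// # Example
///
/// A minimal usage sketch; the `miden_utils_sync` crate path is assumed from this file's
/// location:
///
/// ```
/// use miden_utils_sync::RwLock;
///
/// let lock = RwLock::new(0usize);
///
/// // Any number of read guards may be held at the same time
/// {
///     let a = lock.read();
///     let b = lock.read();
///     assert_eq!(*a + *b, 0);
/// }
///
/// // A write guard grants exclusive, mutable access
/// *lock.write() += 1;
/// assert_eq!(*lock.read(), 1);
/// ```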
pub type RwLock<T> = lock_api::RwLock<Spinlock, T>;

/// See [lock_api::RwLockReadGuard] for usage.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, Spinlock, T>;

/// See [lock_api::RwLockWriteGuard] for usage.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, Spinlock, T>;

/// The underlying raw reader-writer primitive that implements [lock_api::RawRwLock]
///
/// This is fundamentally a spinlock, in that blocking operations on the lock will spin until
/// they succeed in acquiring/releasing the lock.
///
/// To allow the underlying data to be shared by multiple readers, or held exclusively by a
/// single writer, the lock state is based on a "locked" count: each shared acquisition
/// increments the count by two, while acquiring exclusive access sets the lowest order bit to
/// stop further shared acquisition and to indicate that the lock is exclusively held (the
/// difference between the two is irrelevant from the perspective of a thread attempting to
/// acquire the lock, but internally the state uses `usize::MAX` as the "exclusively locked"
/// sentinel).
///
/// This mechanism gets us the following:
///
/// * Whether the lock has been acquired (shared or exclusive)
/// * Whether the lock is being exclusively acquired
/// * How many times the lock has been acquired
/// * Whether the acquisition(s) are exclusive or shared
///
/// Further implementation details, such as how we manage draining readers once an attempt to
/// exclusively acquire the lock occurs, are described below.
///
/// NOTE: This is a simple implementation, meant for use in no-std environments; there are much
/// more robust/performant implementations available when OS primitives can be used.
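///
/// # Example
///
/// An illustrative sketch of the raw locking protocol; in practice, prefer the [RwLock]
/// wrapper. The `miden_utils_sync` crate path is assumed from this file's location:
///
/// ```
/// use lock_api::RawRwLock as _;
/// use miden_utils_sync::Spinlock;
///
/// let raw = Spinlock::new();
///
/// // Any number of shared acquisitions may be held at once...
/// raw.lock_shared();
/// raw.lock_shared();
///
/// // ...but exclusive access is refused while any shared locks remain
/// assert!(!raw.try_lock_exclusive());
///
/// // SAFETY: we hold the shared locks we are releasing
/// unsafe {
///     raw.unlock_shared();
///     raw.unlock_shared();
/// }
///
/// // Once all shared locks are released, exclusive acquisition succeeds
/// assert!(raw.try_lock_exclusive());
/// // SAFETY: we hold the exclusive lock we are releasing
/// unsafe { raw.unlock_exclusive() };
/// ```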
pub struct Spinlock {
    /// The state of the lock, primarily representing the acquisition count, but relying on
    /// the distinction between even and odd values to indicate whether or not exclusive access
    /// is being acquired.
    state: AtomicUsize,
    /// A counter used to wake a parked writer once the last shared lock is released during
    /// acquisition of an exclusive lock. The count itself is not actually important (it simply
    /// wraps around on overflow); what matters is that when the value changes, the writer will
    /// wake and resume attempting to acquire the exclusive lock.
    writer_wake_counter: AtomicUsize,
}

impl Default for Spinlock {
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}

impl Spinlock {
    #[cfg(not(loom))]
    pub const fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
            writer_wake_counter: AtomicUsize::new(0),
        }
    }

    #[cfg(loom)]
    pub fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
            writer_wake_counter: AtomicUsize::new(0),
        }
    }
}

unsafe impl RawRwLock for Spinlock {
    #[cfg(loom)]
    const INIT: Spinlock = unimplemented!();

    #[cfg(not(loom))]
    // This is intentional on the part of the [RawRwLock] API, basically a hack to provide
    // initial values as static items.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Spinlock = Spinlock::new();

    type GuardMarker = lock_api::GuardSend;

    /// The operation invoked when calling `RwLock::read`; blocks the caller until acquired
    fn lock_shared(&self) {
        let mut s = self.state.load(Ordering::Relaxed);
        loop {
            // If the exclusive bit is unset, attempt to acquire a read lock
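            // (each shared acquisition adds two to the count, keeping the low "exclusive" bit
            // clear)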
            if s & 1 == 0 {
                match self.state.compare_exchange_weak(
                    s,
                    s + 2,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    // Someone else beat us to the punch and acquired a lock
                    Err(e) => s = e,
                }
            }
            // If an exclusive lock is held/being acquired, loop until the lock state changes,
            // at which point we try to acquire the lock again
            if s & 1 == 1 {
                loop {
                    let next = self.state.load(Ordering::Relaxed);
                    if s == next {
                        hint::spin_loop();
                        continue;
                    } else {
                        s = next;
                        break;
                    }
                }
            }
        }
    }

    /// The operation invoked when calling `RwLock::try_read`; returns whether or not the
    /// lock was acquired
    fn try_lock_shared(&self) -> bool {
        let s = self.state.load(Ordering::Relaxed);
        if s & 1 == 0 {
            self.state
                .compare_exchange_weak(s, s + 2, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        } else {
            false
        }
    }

    /// The operation invoked when dropping a `RwLockReadGuard`
    unsafe fn unlock_shared(&self) {
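        // `fetch_sub` returns the previous state; a previous state of 3 decomposes as one
        // remaining shared lock (2) plus the exclusive bit (1) set by a waiting writer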
        if self.state.fetch_sub(2, Ordering::Release) == 3 {
            // The lock is being exclusively acquired, and we're the last shared acquisition
            // to be released, so wake the writer by incrementing the wake counter
            self.writer_wake_counter.fetch_add(1, Ordering::Release);
        }
    }

    /// The operation invoked when calling `RwLock::write`; blocks the caller until acquired
    fn lock_exclusive(&self) {
        let mut s = self.state.load(Ordering::Relaxed);
        loop {
            // Attempt to acquire the lock immediately, or complete acquisition of the lock
            // if we're continuing the loop after acquiring the exclusive bit. If another
            // thread acquired it first, we busy-loop here, racing to be the first to acquire
            // it once it is released.
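            // (a state of 0 means unlocked; a state of 1 means only the exclusive bit is set
            // and no shared locks remain, so the `usize::MAX` sentinel can be swapped in)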
            if s <= 1 {
                match self.state.compare_exchange(
                    s,
                    usize::MAX,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(e) => {
                        s = e;
                        hint::spin_loop();
                        continue;
                    },
                }
            }

            // Only shared locks have been acquired, so attempt to acquire the exclusive bit,
            // which will prevent further shared locks from being acquired. It does not
            // in and of itself grant us exclusive access, however.
            if s & 1 == 0 {
                if let Err(e) =
                    self.state.compare_exchange(s, s + 1, Ordering::Relaxed, Ordering::Relaxed)
                {
                    // The lock state changed before we could acquire the exclusive bit, so
                    // update our view of the lock state and try again
                    s = e;
                    continue;
                }
            }

            // The exclusive bit has been set (by us, or by another writer we will race
            // against); now we busy-wait until all shared acquisitions are released.
            let w = self.writer_wake_counter.load(Ordering::Acquire);
            s = self.state.load(Ordering::Relaxed);

            // "Park" the thread here (by busy looping) until the release of the last shared
            // lock, which is communicated to us by an increment of the wake counter.
            if s >= 2 {
                while self.writer_wake_counter.load(Ordering::Acquire) == w {
                    hint::spin_loop();
                }
                s = self.state.load(Ordering::Relaxed);
            }

            // All shared locks have been released, so go back to the top and try to complete
            // acquisition of exclusive access.
        }
    }

    /// The operation invoked when calling `RwLock::try_write`; returns whether or not the
    /// lock was acquired
    fn try_lock_exclusive(&self) -> bool {
        let s = self.state.load(Ordering::Relaxed);
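        // Exclusive access can only be claimed when no shared locks are held: a state of 0
        // (unlocked) or 1 (only the exclusive bit set)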
        if s <= 1 {
            self.state
                .compare_exchange(s, usize::MAX, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        } else {
            false
        }
    }

    /// The operation invoked when dropping a `RwLockWriteGuard`
    unsafe fn unlock_exclusive(&self) {
        // Infallible, as we hold the exclusive lock.
        //
        // Note the use of `Release` ordering here, which ensures that all writes made while
        // the lock was held are visible to threads that subsequently acquire the lock.
        self.state.store(0, Ordering::Release);
        // Bump the wake counter so that a writer parked in `lock_exclusive` (spinning until
        // this counter changes) wakes up and re-checks the lock state. The `Release` store
        // pairs with the `Acquire` loads of this field, so such a writer is also guaranteed
        // to observe the unlocked state stored above.
        self.writer_wake_counter.fetch_add(1, Ordering::Release);
    }
}

#[cfg(all(loom, test))]
mod test {
    use alloc::vec::Vec;

    use loom::{model::Builder, sync::Arc};

    use super::{RwLock, Spinlock};

    #[test]
    fn test_rwlock_loom() {
        let mut builder = Builder::default();
        builder.max_duration = Some(std::time::Duration::from_secs(60));
        builder.log = true;
        builder.check(|| {
            let raw_rwlock = Spinlock::new();
            let n = Arc::new(RwLock::from_raw(raw_rwlock, 0usize));
            let mut readers = Vec::new();
            let mut writers = Vec::new();

            let num_readers = 2;
            let num_writers = 2;
            let num_iterations = 2;

            // Readers should never observe a non-zero value
            for _ in 0..num_readers {
                let n0 = n.clone();
                let t = loom::thread::spawn(move || {
                    for _ in 0..num_iterations {
                        let guard = n0.read();
                        assert_eq!(*guard, 0);
                    }
                });

                readers.push(t);
            }

            // Writers should never observe a non-zero value once they've
            // acquired the lock, and should never observe a value > 1
            // while holding the lock
            for _ in 0..num_writers {
                let n0 = n.clone();
                let t = loom::thread::spawn(move || {
                    for _ in 0..num_iterations {
                        let mut guard = n0.write();
                        assert_eq!(*guard, 0);
                        *guard += 1;
                        assert_eq!(*guard, 1);
                        *guard -= 1;
                        assert_eq!(*guard, 0);
                    }
                });

                writers.push(t);
            }

            for t in readers {
                t.join().unwrap();
            }

            for t in writers {
                t.join().unwrap();
            }
        })
    }
}