miden_utils_sync/
rw_lock.rs

#[cfg(not(loom))]
use core::{
    hint,
    sync::atomic::{AtomicUsize, Ordering},
};

use lock_api::RawRwLock;
#[cfg(loom)]
use loom::{
    hint,
    sync::atomic::{AtomicUsize, Ordering},
};

/// An implementation of a reader-writer lock based on a spinlock primitive; no-std compatible.
///
/// See [lock_api::RwLock] for usage.
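///
/// # Example
///
/// A minimal usage sketch (illustrative only; it assumes this crate is imported as
/// `miden_utils_sync`, so the doctest is marked `ignore`):
///
/// ```ignore
/// use miden_utils_sync::RwLock;
///
/// let lock = RwLock::new(0usize);
/// {
///     // Any number of readers may hold the lock concurrently.
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1 + *r2, 0);
/// } // Read guards are released on drop.
///
/// // A writer takes exclusive access until its guard is dropped.
/// *lock.write() += 1;
/// assert_eq!(*lock.read(), 1);
/// ```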
pub type RwLock<T> = lock_api::RwLock<Spinlock, T>;

/// See [lock_api::RwLockReadGuard] for usage.
pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, Spinlock, T>;

/// See [lock_api::RwLockWriteGuard] for usage.
pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, Spinlock, T>;

/// The underlying raw reader-writer primitive that implements [lock_api::RawRwLock]
///
/// This is fundamentally a spinlock, in that blocking operations on the lock will spin until
/// they succeed in acquiring it.
///
/// To allow the underlying data to be shared by multiple readers, or held exclusively by a
/// single writer, the lock state is based on a "locked" count: each shared acquisition
/// increments the count by two, keeping the lowest-order bit clear, while acquiring exclusive
/// access sets the lowest-order bit, both to stop further shared acquisition and to indicate
/// that the lock is exclusively held (the difference between "being acquired" and "held" is
/// irrelevant from the perspective of a thread attempting to acquire the lock, but internally
/// the state uses `usize::MAX` as the "exclusively locked" sentinel).
///
/// This mechanism gets us the following:
///
/// * Whether the lock has been acquired (shared or exclusive)
/// * Whether the lock is being exclusively acquired
/// * How many times the lock has been acquired
/// * Whether the acquisition(s) are exclusive or shared
///
/// Further implementation details, such as how we manage draining readers once an attempt to
/// exclusively acquire the lock occurs, are described below.
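///
/// For illustration, here is one possible interleaving and the state values it produces (an
/// informal sketch of the scheme described above; `MAX` denotes `usize::MAX`):
///
/// ```text
/// state = 0            // unlocked
/// reader A acquires    // state = 2
/// reader B acquires    // state = 4
/// writer sets low bit  // state = 5 (new readers are now refused)
/// reader A releases    // state = 3
/// reader B releases    // state = 1 (the last reader wakes the writer)
/// writer completes     // state = MAX (exclusively held)
/// writer releases      // state = 0
/// ```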
///
/// NOTE: This is a simple implementation, meant for use in no-std environments; there are much
/// more robust/performant implementations available when OS primitives can be used.
pub struct Spinlock {
    /// The state of the lock, primarily representing the acquisition count, but relying on
    /// the distinction between even and odd values to indicate whether or not exclusive access
    /// is being acquired.
    state: AtomicUsize,
    /// A counter used to wake a parked writer once the last shared lock is released during
    /// acquisition of an exclusive lock. The actual value of the counter is unimportant, and
    /// it simply wraps around on overflow; what matters is that when the value changes, the
    /// writer will wake and resume attempting to acquire the exclusive lock.
    writer_wake_counter: AtomicUsize,
}

impl Default for Spinlock {
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}

impl Spinlock {
    #[cfg(not(loom))]
    pub const fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
            writer_wake_counter: AtomicUsize::new(0),
        }
    }

    #[cfg(loom)]
    pub fn new() -> Self {
        Self {
            state: AtomicUsize::new(0),
            writer_wake_counter: AtomicUsize::new(0),
        }
    }
}

unsafe impl RawRwLock for Spinlock {
    #[cfg(loom)]
    const INIT: Spinlock = unimplemented!();

    #[cfg(not(loom))]
    // This is intentional on the part of the [RawRwLock] API, basically a hack to provide
    // initial values as static items.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Spinlock = Spinlock::new();
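
    // As an illustrative sketch (not code from this crate): `INIT` is what lets `lock_api`'s
    // constructor build a lock in a `static` without runtime initialization, on versions of
    // `lock_api` where `RwLock::new` is `const`:
    //
    //     static GLOBAL: RwLock<usize> = RwLock::new(0);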

    type GuardMarker = lock_api::GuardSend;

    /// The operation invoked when calling `RwLock::read`; blocks the caller until the lock is
    /// acquired
    fn lock_shared(&self) {
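        // Readers add 2 to the state, leaving the low bit untouched; shared acquisition can
        // only succeed while the low bit is clear, i.e. while no writer has claimed the lock.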
        let mut s = self.state.load(Ordering::Relaxed);
        loop {
            // If the exclusive bit is unset, attempt to acquire a read lock
            if s & 1 == 0 {
                match self.state.compare_exchange_weak(
                    s,
                    s + 2,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    // The state changed under us (or the weak CAS failed spuriously);
                    // retry with the updated value
                    Err(e) => s = e,
                }
            }
            // If an exclusive lock is held/being acquired, loop until the lock state changes,
            // at which point, try to acquire the lock again
            if s & 1 == 1 {
                loop {
                    let next = self.state.load(Ordering::Relaxed);
                    if s == next {
                        hint::spin_loop();
                        continue;
                    } else {
                        s = next;
                        break;
                    }
                }
            }
        }
    }

    /// The operation invoked when calling `RwLock::try_read`; returns whether or not the
    /// lock was acquired
    fn try_lock_shared(&self) -> bool {
        let s = self.state.load(Ordering::Relaxed);
        if s & 1 == 0 {
            self.state
                .compare_exchange_weak(s, s + 2, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        } else {
            false
        }
    }

    /// The operation invoked when dropping a `RwLockReadGuard`
    unsafe fn unlock_shared(&self) {
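        // `fetch_sub` returns the previous value: 3 means we held the last read lock (a count
        // of 2) while a writer had set the exclusive bit (1).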
        if self.state.fetch_sub(2, Ordering::Release) == 3 {
            // The lock is being exclusively acquired, and we're the last shared acquisition
            // to be released, so wake the writer by incrementing the wake counter
            self.writer_wake_counter.fetch_add(1, Ordering::Release);
        }
    }

    /// The operation invoked when calling `RwLock::write`; blocks the caller until the lock is
    /// acquired
    fn lock_exclusive(&self) {
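        // Overview of the loop below: (1) if the state is 0 (unlocked) or 1 (the exclusive
        // bit is set and all readers have drained), try to swap the state to `usize::MAX`;
        // (2) otherwise, set the low bit so no new readers can enter; (3) spin ("park") until
        // the last reader bumps `writer_wake_counter`, then retry from step 1.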
        let mut s = self.state.load(Ordering::Relaxed);
        loop {
            // Attempt to acquire the lock immediately, or complete acquisition of the lock
            // if we're continuing the loop after acquiring the exclusive bit. If another
            // thread acquired it first, we race to be the first thread to acquire it once
            // released, by busy looping here.
            if s <= 1 {
                match self.state.compare_exchange(
                    s,
                    usize::MAX,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(e) => {
                        s = e;
                        hint::spin_loop();
                        continue;
                    },
                }
            }

            // Only shared locks have been acquired; attempt to acquire the exclusive bit,
            // which will prevent further shared locks from being acquired. It does not
            // in and of itself grant us exclusive access, however.
            if s & 1 == 0
                && let Err(e) =
                    self.state.compare_exchange(s, s + 1, Ordering::Relaxed, Ordering::Relaxed)
            {
                // The lock state changed before we could acquire the exclusive bit;
                // update our view of the lock state and try again
                s = e;
                continue;
            }

            // We've acquired the exclusive bit, now we need to busy wait until all shared
            // acquisitions are released.
            let w = self.writer_wake_counter.load(Ordering::Acquire);
            s = self.state.load(Ordering::Relaxed);

            // "Park" the thread here (by busy looping), until the release of the last shared
            // lock, which is communicated to us by it incrementing the wake counter.
            if s >= 2 {
                while self.writer_wake_counter.load(Ordering::Acquire) == w {
                    hint::spin_loop();
                }
                s = self.state.load(Ordering::Relaxed);
            }

            // All shared locks have been released, go back to the top and try to complete
            // acquisition of exclusive access.
        }
    }

    /// The operation invoked when calling `RwLock::try_write`; returns whether or not the
    /// lock was acquired
    fn try_lock_exclusive(&self) -> bool {
        let s = self.state.load(Ordering::Relaxed);
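        // `s <= 1` covers two cases: 0 (unlocked) and 1 (another writer has set the exclusive
        // bit and all readers have drained); in the latter case we race that writer for the
        // lock, mirroring `lock_exclusive`.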
        if s <= 1 {
            self.state
                .compare_exchange(s, usize::MAX, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        } else {
            false
        }
    }

    /// The operation invoked when dropping a `RwLockWriteGuard`
    unsafe fn unlock_exclusive(&self) {
        // Infallible, as we hold an exclusive lock.
        //
        // Note the use of `Release` ordering here, which ensures that all writes made while
        // the lock was held become visible to the thread whose `Acquire` operation next
        // observes this store.
        self.state.store(0, Ordering::Release);
        // This increment wakes any writer that parked while we held the lock: such a writer
        // observed `state == usize::MAX` and is spinning until this counter changes. The
        // `Release` ordering pairs with the `Acquire` load in that writer's wait loop,
        // ensuring that once it wakes, it also observes the state reset above.
        self.writer_wake_counter.fetch_add(1, Ordering::Release);
    }
}

#[cfg(all(loom, test))]
mod test {
    use alloc::vec::Vec;

    use loom::{model::Builder, sync::Arc};

    use super::{RwLock, Spinlock};

    #[test]
    fn test_rwlock_loom() {
        let mut builder = Builder::default();
        builder.max_duration = Some(std::time::Duration::from_secs(60));
        builder.log = true;
        builder.check(|| {
            let raw_rwlock = Spinlock::new();
            let n = Arc::new(RwLock::from_raw(raw_rwlock, 0usize));
            let mut readers = Vec::new();
            let mut writers = Vec::new();

            let num_readers = 2;
            let num_writers = 2;
            let num_iterations = 2;

            // Readers should never observe a non-zero value
            for _ in 0..num_readers {
                let n0 = n.clone();
                let t = loom::thread::spawn(move || {
                    for _ in 0..num_iterations {
                        let guard = n0.read();
                        assert_eq!(*guard, 0);
                    }
                });

                readers.push(t);
            }

            // Writers should never observe a non-zero value once they've
            // acquired the lock, and should never observe a value > 1
            // while holding the lock
            for _ in 0..num_writers {
                let n0 = n.clone();
                let t = loom::thread::spawn(move || {
                    for _ in 0..num_iterations {
                        let mut guard = n0.write();
                        assert_eq!(*guard, 0);
                        *guard += 1;
                        assert_eq!(*guard, 1);
                        *guard -= 1;
                        assert_eq!(*guard, 0);
                    }
                });

                writers.push(t);
            }

            for t in readers {
                t.join().unwrap();
            }

            for t in writers {
                t.join().unwrap();
            }
        })
    }
}