// pflock/lib.rs

//! This library provides a phase-fair reader-writer lock, as described in the
//! paper ["Reader-Writer Synchronization for Shared-Memory Multiprocessor
//! Real-Time Systems"](https://www.cs.unc.edu/~anderson/papers/ecrts09b.pdf)
//! by Brandenburg et al.
//!
//! > Reader preference, writer preference, and task-fair reader-writer locks are
//! > shown to cause undue blocking in multiprocessor real-time systems. A new
//! > phase-fair reader-writer lock is proposed as an alternative that
//! > significantly reduces worst-case blocking for readers.
//!
//! # Example
//!
//! ```
//! use pflock::PFLock;
//!
//! let lock = PFLock::new(5);
//!
//! // many reader locks can be held at once
//! {
//!     let r1 = lock.read();
//!     let r2 = lock.read();
//!     assert_eq!(*r1, 5);
//!     assert_eq!(*r2, 5);
//! } // read locks are dropped at this point
//!
//! // only one write lock may be held, however
//! {
//!     let mut w = lock.write();
//!     *w += 1;
//!     assert_eq!(*w, 6);
//! } // write lock is dropped here
//! ```
//!
//! # Spin vs. suspend
//!
//! `PFLock` is a spinlock specifically targeted at **short critical sections** and
//! does not suspend threads while blocking. Section 3 of the paper addresses this:
//!
//! > The terms “short” and “long” arise because (intuitively) spinning is
//! > appropriate only for short critical sections, since spinning wastes processor
//! > time. However, two recent studies have shown that, in terms of
//! > schedulability, spinning is usually preferable to suspending when overheads
//! > are considered [11, 15]. Based on these trends (and due to space
//! > constraints), we restrict our focus to short resources in this paper and
//! > delegate RW synchronization of long resources to future work.

#![no_std]

use core::hint::spin_loop;
use core::sync::atomic::{AtomicUsize, Ordering};
use lock_api::{GuardSend, RawRwLock, RwLock};

/// Raw state of the phase-fair reader-writer lock.
///
/// Readers are counted in the upper bits of `rin`/`rout` (in steps of
/// `RINC`, leaving the low byte free); the low byte of `rin` carries the
/// writer-presence (`PRES`) and phase (`PHID`) bits. Writers queue FIFO
/// through the `win`/`wout` ticket pair.
pub struct RawPFLock {
    // Reader entry count (upper bits) plus the writer PRES/PHID bits
    // (low byte, cleared by the writer on release).
    rin: AtomicUsize,
    // Reader exit count; a writer may proceed once rout catches up to the
    // reader count it snapshotted from rin.
    rout: AtomicUsize,
    // Next writer ticket to hand out (incremented on write-lock entry).
    win: AtomicUsize,
    // Ticket of the writer currently allowed in (incremented on release).
    wout: AtomicUsize,
}

/// Amount one reader adds to `rin`/`rout`: the reader count lives above
/// the low byte, which is reserved for the writer bits below.
const RINC: usize = 1 << 8;
/// Mask selecting the writer bits within `rin`.
const WBITS: usize = PRES | PHID;
/// Set in `rin` while a writer is present (holding or acquiring).
const PRES: usize = 0b10;
/// Write-phase parity bit; flips between successive write phases.
const PHID: usize = 0b01;

/// AND-mask that clears the entire writer byte of `rin`.
const ZERO_MASK: usize = !(RINC - 1);

67unsafe impl RawRwLock for RawPFLock {
68    const INIT: RawPFLock = RawPFLock {
69        rin: AtomicUsize::new(0),
70        rout: AtomicUsize::new(0),
71        win: AtomicUsize::new(0),
72        wout: AtomicUsize::new(0),
73    };
74
75    type GuardMarker = GuardSend;
76
77    fn lock_shared(&self) {
78        // Increment the rin count and read the writer bits
79        let w = self.rin.fetch_add(RINC, Ordering::Relaxed) & WBITS;
80
81        // Spin (wait) if there is a writer present (w != 0), until either PRES
82        // and/or PHID flips
83        while (w != 0) && (w == (self.rin.load(Ordering::Relaxed) & WBITS)) {
84            spin_loop();
85        }
86    }
87
88    unsafe fn unlock_shared(&self) {
89        // Increment rout to mark the read-lock returned
90        self.rout.fetch_add(RINC, Ordering::Relaxed);
91    }
92
93    fn try_lock_shared(&self) -> bool {
94        let w = self.rin.fetch_add(RINC, Ordering::Relaxed) & WBITS;
95
96        if w == 0 || w != (self.rin.load(Ordering::Relaxed) & WBITS) {
97            true
98        } else {
99            self.rout.fetch_add(RINC, Ordering::Relaxed);
100            false
101        }
102    }
103
104    fn lock_exclusive(&self) {
105        // Wait until it is my turn to write-lock the resource
106        let wticket = self.win.fetch_add(1, Ordering::Relaxed);
107        while wticket != self.wout.load(Ordering::Relaxed) {
108            spin_loop();
109        }
110
111        // Set the write-bits of rin to indicate this writer is here
112        let w = PRES | (wticket & PHID);
113        let rticket = self.rin.fetch_add(w, Ordering::Relaxed);
114
115        // Wait until all current readers have finished (i.e. rout catches up)
116        while rticket != self.rout.load(Ordering::Relaxed) {
117            spin_loop();
118        }
119    }
120
121    unsafe fn unlock_exclusive(&self) {
122        // Clear the least-significant byte of rin
123        self.rin.fetch_and(ZERO_MASK, Ordering::Relaxed);
124
125        // Increment wout to indicate this write has released the lock
126        // Only one writer should ever be here
127        self.wout.fetch_add(1, Ordering::Relaxed);
128    }
129
130    fn try_lock_exclusive(&self) -> bool {
131        let wticket = self.win.fetch_add(1, Ordering::Relaxed);
132        if wticket != self.wout.load(Ordering::Relaxed) {
133            self.wout.fetch_add(1, Ordering::Relaxed);
134            return false;
135        }
136        let w = PRES | (wticket & PHID);
137        let rticket = self.rin.fetch_add(w, Ordering::Relaxed);
138
139        if rticket != self.rout.load(Ordering::Relaxed) {
140            self.rin.fetch_and(ZERO_MASK, Ordering::Relaxed);
141            self.wout.fetch_add(1, Ordering::Relaxed);
142            return false;
143        }
144
145        true
146    }
147}
148
149/// A phase-fair reader-writer lock.
150pub type PFLock<T> = RwLock<RawPFLock, T>;
151pub type PFLockGuard<'a, T> = lock_api::MutexGuard<'a, RawPFLock, T>;