// miden_utils_sync/racy_lock.rs
1use alloc::boxed::Box;
2use core::{
3 fmt,
4 ops::Deref,
5 ptr,
6 sync::atomic::{AtomicPtr, Ordering},
7};
8
/// Thread-safe, non-blocking, lazily evaluated lock with the same interface
/// as [`std::sync::LazyLock`].
///
/// Concurrent threads will race to set the value atomically, and memory allocated by losing threads
/// will be dropped immediately after they fail to set the pointer.
///
/// The underlying implementation is based on `once_cell::race::OnceBox` which relies on
/// [`core::sync::atomic::AtomicPtr`] to ensure that the data race results in a single successful
/// write to the relevant pointer, namely the first write.
/// See <https://github.com/matklad/once_cell/blob/v1.19.0/src/race.rs#L294>.
///
/// Performs lazy evaluation and can be used for statics.
pub struct RacyLock<T, F = fn() -> T>
where
    F: Fn() -> T,
{
    // Atomically-published pointer to the heap-allocated value; null until initialized.
    inner: AtomicPtr<T>,
    // Initialization function, run by every thread that observes a null pointer.
    f: F,
}

// Explicit `Send`/`Sync` impls are required for soundness: `AtomicPtr<T>` is
// unconditionally `Send + Sync`, so the auto-derived impls would depend only on
// `F` and ignore `T` entirely — allowing e.g. a shared `RacyLock<Cell<i32>>`
// to hand out `&Cell<i32>` on multiple threads.

// SAFETY: moving the lock to another thread moves the closure (`F: Send`) and
// transfers ownership of the value, which `drop` will destroy there (`T: Send`).
unsafe impl<T, F> Send for RacyLock<T, F>
where
    T: Send,
    F: Send + Fn() -> T,
{
}

// SAFETY: sharing the lock lets any thread call `f` through a shared reference
// (`F: Sync`) and observe `&T` concurrently (`T: Sync`); additionally, the value
// may be created on one thread and later dropped by the owner on another
// (`T: Send`).
unsafe impl<T, F> Sync for RacyLock<T, F>
where
    T: Send + Sync,
    F: Sync + Fn() -> T,
{
}
28
29impl<T, F> RacyLock<T, F>
30where
31 F: Fn() -> T,
32{
33 /// Creates a new lazy, racy value with the given initializing function.
34 pub const fn new(f: F) -> Self {
35 Self {
36 inner: AtomicPtr::new(ptr::null_mut()),
37 f,
38 }
39 }
40
41 /// Forces the evaluation of the locked value and returns a reference to
42 /// the result. This is equivalent to the [`Self::deref`].
43 ///
44 /// There is no blocking involved in this operation. Instead, concurrent
45 /// threads will race to set the underlying pointer. Memory allocated by
46 /// losing threads will be dropped immediately after they fail to set the pointer.
47 ///
48 /// This function's interface is designed around [`std::sync::LazyLock::force`] but
49 /// the implementation is derived from `once_cell::race::OnceBox::get_or_try_init`.
50 pub fn force(this: &RacyLock<T, F>) -> &T {
51 let mut ptr = this.inner.load(Ordering::Acquire);
52
53 // Pointer is not yet set, attempt to set it ourselves.
54 if ptr.is_null() {
55 // Execute the initialization function and allocate.
56 let val = (this.f)();
57 ptr = Box::into_raw(Box::new(val));
58
59 // Attempt atomic store.
60 let exchange = this.inner.compare_exchange(
61 ptr::null_mut(),
62 ptr,
63 Ordering::AcqRel,
64 Ordering::Acquire,
65 );
66
67 // Pointer already set, load.
68 if let Err(old) = exchange {
69 drop(unsafe { Box::from_raw(ptr) });
70 ptr = old;
71 }
72 }
73
74 unsafe { &*ptr }
75 }
76}
77
78impl<T: Default> Default for RacyLock<T> {
79 /// Creates a new lock that will evaluate the underlying value based on `T::default`.
80 #[inline]
81 fn default() -> RacyLock<T> {
82 RacyLock::new(T::default)
83 }
84}
85
86impl<T, F> fmt::Debug for RacyLock<T, F>
87where
88 T: fmt::Debug,
89 F: Fn() -> T,
90{
91 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
92 write!(f, "RacyLock({:?})", self.inner.load(Ordering::Relaxed))
93 }
94}
95
96impl<T, F> Deref for RacyLock<T, F>
97where
98 F: Fn() -> T,
99{
100 type Target = T;
101
102 /// Either sets or retrieves the value, and dereferences it.
103 ///
104 /// See [`Self::force`] for more details.
105 #[inline]
106 fn deref(&self) -> &T {
107 RacyLock::force(self)
108 }
109}
110
111impl<T, F> Drop for RacyLock<T, F>
112where
113 F: Fn() -> T,
114{
115 /// Drops the underlying pointer.
116 fn drop(&mut self) {
117 let ptr = *self.inner.get_mut();
118 if !ptr.is_null() {
119 // SAFETY: for any given value of `ptr`, we are guaranteed to have at most a single
120 // instance of `RacyLock` holding that value. Hence, synchronizing threads
121 // in `drop()` is not necessary, and we are guaranteed never to double-free.
122 // In short, since `RacyLock` doesn't implement `Clone`, the only scenario
123 // where there can be multiple instances of `RacyLock` across multiple threads
124 // referring to the same `ptr` value is when `RacyLock` is used in a static variable.
125 drop(unsafe { Box::from_raw(ptr) });
126 }
127 }
128}
129
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use super::*;

    #[test]
    fn deref_default() {
        // A defaulted lock lazily yields `T::default()` for a `Copy` type.
        let lock: RacyLock<i32> = RacyLock::default();
        assert_eq!(*lock, 0);
    }

    #[test]
    fn deref_copy() {
        // The closure's `Copy` result is read back through `Deref`.
        let lock = RacyLock::new(|| 42);
        assert_eq!(*lock, 42);
    }

    #[test]
    fn deref_clone() {
        // Lock a non-`Copy` type; `.clone()` auto-derefs to the inner `Vec`.
        let lock = RacyLock::new(|| Vec::from([1, 2, 3]));

        // Mutating the clone proves we got an independent owned value.
        let mut cloned = lock.clone();
        cloned.push(4);

        assert_eq!(cloned, Vec::from([1, 2, 3, 4]));
    }

    #[test]
    fn deref_static() {
        // A lock used as a static must initialize exactly once.
        static VEC: RacyLock<Vec<i32>> = RacyLock::new(|| Vec::from([1, 2, 3]));

        // Repeated derefs must observe the same value at the same address.
        let first_addr = &*VEC as *const Vec<i32>;
        for _ in 0..5 {
            assert_eq!(*VEC, [1, 2, 3]);
            assert_eq!(first_addr, &(*VEC) as *const Vec<i32>)
        }
    }

    #[test]
    fn type_inference() {
        // `T` is inferred from the closure's return type.
        let _ = RacyLock::new(|| ());
    }

    #[test]
    fn is_sync_send() {
        fn assert_traits<T: Send + Sync>() {}
        assert_traits::<RacyLock<Vec<i32>>>();
    }
}
187}