// os_dev_toolkit/sync.rs
//! Minimal synchronization primitives for `no_std` kernels.
//!
//! These primitives are intentionally simple and are meant as building blocks.
//! They are *not* a full-featured synchronization library.
//!
//! ## Important caveats
//!
//! - **No fairness guarantees**: spin locks may starve.
//! - **Not preemption-safe by themselves**: if your kernel is preemptive or can be interrupted,
//!   you may need to disable interrupts or otherwise ensure lock-holding sections are safe.
//! - **Single-core vs multi-core**: on SMP systems, these use atomics and are safe provided your
//!   platform's memory model matches Rust's atomic semantics.

14use core::cell::UnsafeCell;
15use core::mem::MaybeUninit;
16use core::ops::{Deref, DerefMut};
17use core::sync::atomic::{AtomicBool, AtomicU8, Ordering};
18
/// A simple spin lock.
///
/// The lock is acquired via a compare-exchange loop and released on guard drop.
///
/// ## Safety
///
/// This type is `Send`/`Sync` when `T: Send` and relies on correct atomic behavior.
/// As with any spin lock, make sure lock hold times are short.
pub struct SpinLock<T> {
    /// `true` while a guard is alive; cleared with `Release` when the guard drops.
    locked: AtomicBool,
    /// The protected data; access is mediated exclusively through the guard.
    value: UnsafeCell<T>,
}

// SAFETY: access to the inner `T` is handed out only through the guard, which
// holds the lock, so references are never aliased mutably across threads.
// Moving the whole `SpinLock<T>` just moves the `T`, so `T: Send` suffices for
// both impls (same bounds `std::sync::Mutex` uses).
unsafe impl<T: Send> Send for SpinLock<T> {}
unsafe impl<T: Send> Sync for SpinLock<T> {}

35impl<T> SpinLock<T> {
36    /// Creates a new spin lock protecting `value`.
37    pub const fn new(value: T) -> Self {
38        Self {
39            locked: AtomicBool::new(false),
40            value: UnsafeCell::new(value),
41        }
42    }
43
44    /// Acquires the lock and returns a guard.
45    ///
46    /// This spins until the lock becomes available.
47    pub fn lock(&self) -> SpinLockGuard<'_, T> {
48        while self
49            .locked
50            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
51            .is_err()
52        {
53            core::hint::spin_loop();
54        }
55
56        SpinLockGuard { lock: self }
57    }
58}
59
60/// RAII guard returned by [`SpinLock::lock`].
61///
62/// Releases the lock when dropped.
63pub struct SpinLockGuard<'a, T> {
64    lock: &'a SpinLock<T>,
65}
66
impl<T> Deref for SpinLockGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: a guard exists only while the lock is held, so no other
        // thread can obtain a guard that would alias this value mutably.
        unsafe { &*self.lock.value.get() }
    }
}

impl<T> DerefMut for SpinLockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: holding the guard grants exclusive access to the value, and
        // `&mut self` rules out concurrent `deref` through this same guard.
        unsafe { &mut *self.lock.value.get() }
    }
}

impl<T> Drop for SpinLockGuard<'_, T> {
    fn drop(&mut self) {
        // `Release` publishes every write made while the lock was held to the
        // next thread that acquires the flag with `Acquire`.
        self.lock.locked.store(false, Ordering::Release);
    }
}

/// A minimal, spin-based `Once` initializer.
///
/// This type is useful for global singletons in `no_std` environments.
/// It supports one-time initialization with `call_once`.
///
/// ## State machine
///
/// - `0`: uninitialized
/// - `1`: initialization in progress
/// - `2`: initialized
pub struct Once<T> {
    /// Current state (`0`/`1`/`2` as documented above).
    state: AtomicU8,
    /// Storage for the value; only valid once `state` is `2`.
    value: UnsafeCell<MaybeUninit<T>>,
}

impl<T> Drop for Once<T> {
    /// Runs the destructor of the stored value, if one was ever written.
    fn drop(&mut self) {
        // Fix: without this impl an initialized `T` was leaked, because
        // `MaybeUninit` never drops its contents on its own.
        if *self.state.get_mut() == 2 {
            // SAFETY: state `2` is only ever set after the value has been
            // fully written, and `&mut self` guarantees exclusive access.
            unsafe { self.value.get_mut().assume_init_drop() };
        }
    }
}

impl<T> Default for Once<T> {
    /// Equivalent to [`Once::new`]: an uninitialized cell.
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: `call_once` publishes the value with a `Release` store that readers
// observe with `Acquire`; afterwards only shared `&T` access is handed out,
// so sharing `&Once<T>` requires `T: Sync`, and `T: Send` because the thread
// that created the value may not be the one that ultimately owns/drops it.
unsafe impl<T: Send + Sync> Sync for Once<T> {}
// SAFETY: moving the `Once` moves the (possibly initialized) `T` with it.
unsafe impl<T: Send> Send for Once<T> {}

111impl<T> Once<T> {
112    /// Creates a new uninitialized cell.
113    pub const fn new() -> Self {
114        Self {
115            state: AtomicU8::new(0),
116            value: UnsafeCell::new(MaybeUninit::uninit()),
117        }
118    }
119
120    /// Returns `true` if the value has been initialized.
121    pub fn is_initialized(&self) -> bool {
122        self.state.load(Ordering::Acquire) == 2
123    }
124
125    /// Returns a reference to the value if initialized.
126    pub fn get(&self) -> Option<&T> {
127        if self.is_initialized() {
128            Some(unsafe { self.get_unchecked() })
129        } else {
130            None
131        }
132    }
133
134    /// Initializes the cell with `init` at most once and returns a reference to the stored value.
135    ///
136    /// If another core is currently initializing, this method will spin until initialization
137    /// completes.
138    pub fn call_once(&self, init: impl FnOnce() -> T) -> &T {
139        if self.is_initialized() {
140            return unsafe { self.get_unchecked() };
141        }
142
143        if self
144            .state
145            .compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire)
146            .is_ok()
147        {
148            let v = init();
149            unsafe {
150                (*self.value.get()).write(v);
151            }
152            self.state.store(2, Ordering::Release);
153            unsafe { self.get_unchecked() }
154        } else {
155            while !self.is_initialized() {
156                core::hint::spin_loop();
157            }
158            unsafe { self.get_unchecked() }
159        }
160    }
161
162    /// Returns a reference to the stored value without checking initialization.
163    ///
164    /// # Safety
165    ///
166    /// The caller must guarantee that initialization has completed.
167    unsafe fn get_unchecked(&self) -> &T {
168        unsafe { &*(*self.value.get()).as_ptr() }
169    }
170}