scoped_mutex/raw_impls.rs
1//! Mutex primitives.
2//!
3//! This module provides impls of the [`ScopedRawMutex`] trait
4//!
5//! [`ScopedRawMutex`]: crate::ScopedRawMutex
6#![allow(clippy::new_without_default)]
7#![allow(clippy::declare_interior_mutable_const)]
8
9use core::marker::PhantomData;
10use core::sync::atomic::{AtomicBool, Ordering};
11
12use scoped_mutex_traits::{ConstInit, ScopedRawMutex};
13
14#[cfg(feature = "impl-critical-section")]
15pub mod cs {
16 //! Critical Section based implementation
17
18 use super::*;
19
20 /// A mutex that allows borrowing data across executors and interrupts.
21 ///
22 /// # Safety
23 ///
24 /// This mutex is safe to share between different executors and interrupts.
25 pub struct CriticalSectionRawMutex {
26 taken: AtomicBool,
27 }
28 unsafe impl Send for CriticalSectionRawMutex {}
29 unsafe impl Sync for CriticalSectionRawMutex {}
30
31 impl CriticalSectionRawMutex {
32 /// Create a new `CriticalSectionRawMutex`.
33 pub const fn new() -> Self {
34 Self {
35 taken: AtomicBool::new(false),
36 }
37 }
38 }
39
40 impl ConstInit for CriticalSectionRawMutex {
41 const INIT: Self = Self::new();
42 }
43
44 unsafe impl ScopedRawMutex for CriticalSectionRawMutex {
45 #[inline]
46 #[must_use]
47 fn try_with_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
48 critical_section::with(|_| {
49 // NOTE: separated load/stores are acceptable as we are in
50 // a critical section
51 if self.taken.load(Ordering::Relaxed) {
52 return None;
53 }
54 self.taken.store(true, Ordering::Relaxed);
55 let ret = f();
56 self.taken.store(false, Ordering::Relaxed);
57 Some(ret)
58 })
59 }
60
61 #[inline]
62 fn with_lock<R>(&self, f: impl FnOnce() -> R) -> R {
63 // In a critical section, it is not possible for another holder
64 // of this mutex to release, which means we have certainly
65 // reached deadlock if the lock was already locked.
66 self.try_with_lock(f).expect("Deadlocked")
67 }
68
69 fn is_locked(&self) -> bool {
70 self.taken.load(Ordering::Relaxed)
71 }
72 }
73}
74
75// ================
76
77pub mod local {
78 //! Locally usable based implementation
79 use super::*;
80
81 /// A mutex that allows borrowing data in local context.
82 ///
83 /// This acts similar to a RefCell, with scoped access patterns, though
84 /// without being able to borrow the data twice.
85 pub struct LocalRawMutex {
86 taken: AtomicBool,
87 /// Prevent this from being sync or send
88 _phantom: PhantomData<*mut ()>,
89 }
90
91 impl LocalRawMutex {
92 /// Create a new `LocalRawMutex`.
93 pub const fn new() -> Self {
94 Self {
95 taken: AtomicBool::new(false),
96 _phantom: PhantomData,
97 }
98 }
99 }
100
101 unsafe impl Send for LocalRawMutex {}
102
103 impl ConstInit for LocalRawMutex {
104 const INIT: Self = Self::new();
105 }
106
107 unsafe impl ScopedRawMutex for LocalRawMutex {
108 #[inline]
109 #[must_use]
110 fn try_with_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
111 // NOTE: separated load/stores are acceptable as we are !Send and !Sync,
112 // meaning that we can only be accessed within a single thread
113 if self.taken.load(Ordering::Relaxed) {
114 return None;
115 }
116 self.taken.store(true, Ordering::Relaxed);
117 let ret = f();
118 self.taken.store(false, Ordering::Relaxed);
119 Some(ret)
120 }
121
122 #[inline]
123 fn with_lock<R>(&self, f: impl FnOnce() -> R) -> R {
124 // In a local-only mutex, it is not possible for another holder
125 // of this mutex to release, which means we have certainly
126 // reached deadlock if the lock was already locked.
127 self.try_with_lock(f).expect("Deadlocked")
128 }
129
130 fn is_locked(&self) -> bool {
131 self.taken.load(Ordering::Relaxed)
132 }
133 }
134}
135
136// ================
137
138#[cfg(all(feature = "impl-unsafe-cortex-m-single-core", cortex_m))]
139pub mod single_core_thread_mode {
140 //! A single-core safe implementation that does not require a critical section
141
142 use super::*;
143
144 /// A "mutex" that only allows borrowing from thread mode.
145 ///
146 /// # Safety
147 ///
148 /// **This Mutex is only safe on single-core systems.**
149 ///
150 /// On multi-core systems, a `ThreadModeRawMutex` **is not sufficient** to ensure exclusive access.
151 pub struct ThreadModeRawMutex {
152 taken: AtomicBool,
153 }
154
155 unsafe impl Send for ThreadModeRawMutex {}
156 unsafe impl Sync for ThreadModeRawMutex {}
157
158 impl ThreadModeRawMutex {
159 /// Create a new `ThreadModeRawMutex`.
160 pub const fn new() -> Self {
161 Self {
162 taken: AtomicBool::new(false),
163 }
164 }
165 }
166
167 impl ConstInit for ThreadModeRawMutex {
168 const INIT: Self = Self::new();
169 }
170
171 unsafe impl ScopedRawMutex for ThreadModeRawMutex {
172 #[inline]
173 #[must_use]
174 fn try_lock<R>(&self, f: impl FnOnce() -> R) -> Option<R> {
175 if !in_thread_mode() {
176 return None;
177 }
178 // NOTE: separated load/stores are acceptable as we checked we are only
179 // accessed from a single thread (checked above)
180 assert!(self.taken.load(Ordering::Relaxed));
181 self.taken.store(true, Ordering::Relaxed);
182 let ret = f();
183 self.taken.store(false, Ordering::Relaxed);
184 Some(ret)
185 }
186
187 #[inline]
188 fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
189 // In a thread-mode only mutex, it is not possible for another holder
190 // of this mutex to release, which means we have certainly
191 // reached deadlock if the lock was already locked.
192 self.try_lock(f)
193 .expect("Deadlocked or attempted to access outside of thread mode")
194 }
195
196 fn is_locked(&self) -> bool {
197 self.taken.load(Ordering::Relaxed)
198 }
199 }
200
201 impl Drop for ThreadModeRawMutex {
202 fn drop(&mut self) {
203 // Only allow dropping from thread mode. Dropping calls drop on the inner `T`, so
204 // `drop` needs the same guarantees as `lock`. `ThreadModeMutex<T>` is Send even if
205 // T isn't, so without this check a user could create a ThreadModeMutex in thread mode,
206 // send it to interrupt context and drop it there, which would "send" a T even if T is not Send.
207 assert!(
208 in_thread_mode(),
209 "ThreadModeMutex can only be dropped from thread mode."
210 );
211
212 // Drop of the inner `T` happens after this.
213 }
214 }
215
216 pub fn in_thread_mode() -> bool {
217 // ICSR.VECTACTIVE == 0
218 return unsafe { (0xE000ED04 as *const u32).read_volatile() } & 0x1FF == 0;
219 }
220}
221