atomic_ref/lib.rs

//! Atomic References
//!
//! These types act similarly to the atomic types from `std::sync::atomic`,
//! except that instead of containing an integer type or a pointer, they
//! contain an `Option<&'a T>` value.
//!
//! Like the other atomic types, these types present operations which, when
//! used correctly, synchronize updates between threads. This type is a form of
//! interior mutability, like `Cell<T>`, `RefCell<T>`, or `Mutex<T>`.
//!
//! In static position, this type is often used for lazy global initialization.
//!
//! `AtomicRef` may only contain `Sized` types, as unsized types have wide
//! pointers which cannot be atomically written to or read from.
//!
//! # Ordering
//!
//! It is unsound to load or store an atomic reference with the `Relaxed`
//! memory ordering, as that ordering provides no synchronization for writes to
//! the data behind the reference. To avoid this issue, loads and stores
//! requested with `Relaxed` ordering are actually performed with `Acquire`,
//! `Release`, or `AcqRel` ordering, as appropriate.
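//!
//! For example (a minimal sketch of the rule above), a load requested with
//! `Relaxed` ordering is accepted and is carried out as an `Acquire` load:
//!
//! ```
//! use atomic_ref::AtomicRef;
//! use std::sync::atomic::Ordering;
//!
//! static VALUE: i32 = 10;
//!
//! let r = AtomicRef::new(Some(&VALUE));
//! // The `Relaxed` ordering is silently strengthened to `Acquire`.
//! assert_eq!(r.load(Ordering::Relaxed), Some(&10));
//! ```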
//!
//! # Examples
//!
//! Static logger state
//!
//! ```
//! use atomic_ref::AtomicRef;
//! use std::sync::atomic::Ordering;
//! use std::io::{stdout, Write};
//!
//! // Define the idea of a logger
//! trait Logger {
//!     fn log(&self, msg: &str) {}
//! }
//! struct LoggerInfo {
//!     logger: &'static (dyn Logger + Sync)
//! }
//!
//! // The methods for working with our currently defined static logger
//! static LOGGER: AtomicRef<LoggerInfo> = AtomicRef::new(None);
//! fn log(msg: &str) -> bool {
//!     if let Some(info) = LOGGER.load(Ordering::SeqCst) {
//!         info.logger.log(msg);
//!         true
//!     } else {
//!         false
//!     }
//! }
//! fn set_logger(logger: Option<&'static LoggerInfo>) {
//!     LOGGER.store(logger, Ordering::SeqCst);
//! }
//!
//! // Defining the standard out example logger
//! struct StdoutLogger;
//! impl Logger for StdoutLogger {
//!     fn log(&self, msg: &str) {
//!         let _ = stdout().write(msg.as_bytes());
//!     }
//! }
//! static STDOUT_LOGGER: LoggerInfo = LoggerInfo { logger: &StdoutLogger };
//!
//! fn main() {
//!     let res = log("This will fail");
//!     assert!(!res);
//!     set_logger(Some(&STDOUT_LOGGER));
//!     let res = log("This will succeed");
//!     assert!(res);
//! }
//! ```
#![no_std]

use core::default::Default;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

/// A mutable `Option<&'a T>` type which can be safely shared between threads.
#[repr(C)]
pub struct AtomicRef<'a, T: 'a> {
    data: AtomicPtr<T>,
    // Make `AtomicRef` invariant over `'a` and `T`
    _marker: PhantomData<Invariant<'a, T>>,
}

// Work-around for the construction of `PhantomData<&mut _>` requiring
// `#![feature(const_fn)]`
struct Invariant<'a, T: 'a>(&'a mut &'a T);

/// An internal helper function for converting `Option<&'a T>` values to
/// `*mut T` for storing in the `AtomicPtr`.
#[inline(always)]
const fn from_opt<'a, T>(p: Option<&'a T>) -> *mut T {
    match p {
        Some(p) => p as *const T as *mut T,
        None => ptr::null_mut(),
    }
}

/// An internal helper function for converting `*mut T` values stored in the
/// `AtomicPtr` back into `Option<&'a T>` values.
///
/// Safety: `p` must be null or have been derived from an `&'a T`.
#[inline(always)]
unsafe fn to_opt<'a, T>(p: *mut T) -> Option<&'a T> {
    p.as_ref()
}

// As noted in #5, the use of `Relaxed` ordering with `atomic_ref` is unsound,
// as `Relaxed` ordering performs no synchronization on the data behind the
// reference. The following functions restrict the orderings to safe ones by
// requiring at least `Acquire` ordering for load operations, `Release`
// ordering for store operations, and `AcqRel` ordering for read-modify-write
// operations.

/// Restrict memory ordering for atomic load operations.
#[inline]
fn enforce_load_ordering(order: Ordering) -> Ordering {
    match order {
        Ordering::Relaxed | Ordering::Acquire => Ordering::Acquire,
        Ordering::SeqCst => Ordering::SeqCst,
        Ordering::Release => panic!("there is no such thing as a release load"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire/release load"),
        _ => panic!("unsupported memory ordering: {:?}", order),
    }
}

/// Restrict memory ordering for atomic store operations.
#[inline]
fn enforce_store_ordering(order: Ordering) -> Ordering {
    match order {
        Ordering::Relaxed | Ordering::Release => Ordering::Release,
        Ordering::SeqCst => Ordering::SeqCst,
        Ordering::Acquire => panic!("there is no such thing as an acquire store"),
        Ordering::AcqRel => panic!("there is no such thing as an acquire/release store"),
        _ => panic!("unsupported memory ordering: {:?}", order),
    }
}

/// Restrict memory ordering for atomic RMW operations.
#[inline]
fn enforce_swap_ordering(order: Ordering) -> Ordering {
    match order {
        Ordering::Relaxed | Ordering::Acquire | Ordering::Release | Ordering::AcqRel => {
            Ordering::AcqRel
        }
        Ordering::SeqCst => Ordering::SeqCst,
        _ => panic!("unsupported memory ordering: {:?}", order),
    }
}

impl<'a, T> AtomicRef<'a, T> {
    /// Creates a new `AtomicRef`.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    /// let atomic_ref = AtomicRef::new(Some(&VALUE));
    /// ```
    pub const fn new(p: Option<&'a T>) -> AtomicRef<'a, T> {
        AtomicRef {
            data: AtomicPtr::new(from_opt(p)),
            _marker: PhantomData,
        }
    }

    /// Returns a mutable reference to the underlying `Option<&'a T>`.
    ///
    /// This is safe because the mutable reference guarantees that no other
    /// threads are concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// let value: i32 = 10;
    /// let value2: i32 = 20;
    ///
    /// let mut some_ref = AtomicRef::new(Some(&value));
    /// assert_eq!(*some_ref.get_mut(), Some(&value));
    /// *some_ref.get_mut() = Some(&value2);
    /// assert_eq!(some_ref.load(Ordering::SeqCst), Some(&value2));
    /// ```
    pub fn get_mut(&mut self) -> &mut Option<&'a T> {
        debug_assert_eq!(mem::size_of::<Option<&'a T>>(), mem::size_of::<*mut T>());
        unsafe { mem::transmute::<&mut *mut T, &mut Option<&'a T>>(self.data.get_mut()) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other
    /// threads are concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use atomic_ref::AtomicRef;
    ///
    /// let some_ref = AtomicRef::new(Some(&5));
    /// assert_eq!(some_ref.into_inner(), Some(&5));
    /// ```
    pub fn into_inner(self) -> Option<&'a T> {
        unsafe { to_opt(self.data.into_inner()) }
    }

    /// Loads the value stored in the `AtomicRef`.
    ///
    /// `load` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// Calls with ordering weaker than `Acquire` will be performed with
    /// `Acquire` ordering.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Release` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    ///
    /// let some_ref = AtomicRef::new(Some(&VALUE));
    /// assert_eq!(some_ref.load(Ordering::Acquire), Some(&10));
    /// ```
    pub fn load(&self, order: Ordering) -> Option<&'a T> {
        unsafe { to_opt(self.data.load(enforce_load_ordering(order))) }
    }

    /// Stores a value into the `AtomicRef`.
    ///
    /// `store` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// Calls with ordering weaker than `Release` will be performed with
    /// `Release` ordering.
    ///
    /// # Panics
    ///
    /// Panics if `order` is `Acquire` or `AcqRel`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    ///
    /// let some_ptr = AtomicRef::new(None);
    /// some_ptr.store(Some(&VALUE), Ordering::SeqCst);
    /// ```
    pub fn store(&self, ptr: Option<&'a T>, order: Ordering) {
        self.data
            .store(from_opt(ptr), enforce_store_ordering(order))
    }

    /// Stores a value into the `AtomicRef`, returning the old value.
    ///
    /// `swap` takes an `Ordering` argument which describes the memory ordering
    /// of this operation.
    ///
    /// Calls with ordering weaker than `AcqRel` will be performed with `AcqRel`
    /// ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    /// static OTHER_VALUE: i32 = 20;
    ///
    /// let some_ptr = AtomicRef::new(Some(&VALUE));
    /// let value = some_ptr.swap(Some(&OTHER_VALUE), Ordering::SeqCst);
    /// ```
    pub fn swap(&self, p: Option<&'a T>, order: Ordering) -> Option<&'a T> {
        unsafe { to_opt(self.data.swap(from_opt(p), enforce_swap_ordering(order))) }
    }

    /// Stores a value into the `AtomicRef` if the current value is the "same" as
    /// the `current` value.
    ///
    /// The return value is always the previous value. If it is the "same" as
    /// `current`, then the value was updated.
    ///
    /// This method considers two `Option<&'a T>`s to be the "same" if they are
    /// both `Some` and have the same pointer value, or if they are both `None`.
    /// This method does not use `Eq` or `PartialEq` for comparison.
    ///
    /// `compare_and_swap` also takes an `Ordering` argument which describes the
    /// memory ordering of this operation.
    ///
    /// Calls with ordering weaker than `AcqRel` will be performed with `AcqRel`
    /// ordering.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    /// static OTHER_VALUE: i32 = 20;
    ///
    /// let some_ptr = AtomicRef::new(Some(&VALUE));
    /// let value = some_ptr.compare_and_swap(Some(&OTHER_VALUE), None, Ordering::SeqCst);
    /// ```
    #[allow(deprecated)]
    pub fn compare_and_swap(
        &self,
        current: Option<&'a T>,
        new: Option<&'a T>,
        order: Ordering,
    ) -> Option<&'a T> {
        unsafe {
            to_opt(self.data.compare_and_swap(
                from_opt(current),
                from_opt(new),
                enforce_swap_ordering(order),
            ))
        }
    }

    /// Stores a value into the `AtomicRef` if the current value is the "same" as
    /// the `current` value.
    ///
    /// The return value is a result indicating whether the new value was
    /// written, and containing the previous value. On success this value is
    /// guaranteed to be the "same" as `current`.
    ///
    /// This method considers two `Option<&'a T>`s to be the "same" if they are
    /// both `Some` and have the same pointer value, or if they are both `None`.
    /// This method does not use `Eq` or `PartialEq` for comparison.
    ///
    /// `compare_exchange` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if
    /// the operation succeeds while the second describes the required ordering
    /// when the operation fails. The failure ordering can't be `Release` or
    /// `AcqRel` and must be equivalent or weaker than the success ordering.
    ///
    /// Calls with a success ordering weaker than `AcqRel` will be performed
    /// with `AcqRel`, and calls with a failure ordering weaker than `Acquire`
    /// will be performed with `Acquire`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    /// static OTHER_VALUE: i32 = 20;
    ///
    /// let some_ptr = AtomicRef::new(Some(&VALUE));
    /// let value = some_ptr.compare_exchange(Some(&OTHER_VALUE), None,
    ///                                       Ordering::SeqCst, Ordering::Acquire);
    /// ```
    pub fn compare_exchange(
        &self,
        current: Option<&'a T>,
        new: Option<&'a T>,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Option<&'a T>, Option<&'a T>> {
        unsafe {
            match self.data.compare_exchange(
                from_opt(current),
                from_opt(new),
                enforce_swap_ordering(success),
                enforce_load_ordering(failure),
            ) {
                Ok(p) => Ok(to_opt(p)),
                Err(p) => Err(to_opt(p)),
            }
        }
    }

    /// Stores a value into the `AtomicRef` if the current value is the "same" as
    /// the `current` value.
    ///
    /// Unlike `compare_exchange`, this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two `Ordering` arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering if the operation
    /// succeeds while the second describes the required ordering when the operation fails. The
    /// failure ordering can't be `Release` or `AcqRel` and must be equivalent or weaker than the
    /// success ordering.
    ///
    /// Calls with a success ordering weaker than `AcqRel` will be performed
    /// with `AcqRel`, and calls with a failure ordering weaker than `Acquire`
    /// will be performed with `Acquire`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::Ordering;
    /// use atomic_ref::AtomicRef;
    ///
    /// static VALUE: i32 = 10;
    /// static OTHER_VALUE: i32 = 20;
    ///
    /// let some_ptr = AtomicRef::new(Some(&VALUE));
    ///
    /// let mut old = some_ptr.load(Ordering::Acquire);
    /// loop {
    ///     match some_ptr.compare_exchange_weak(old, Some(&OTHER_VALUE),
    ///                                          Ordering::SeqCst, Ordering::Acquire) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    pub fn compare_exchange_weak(
        &self,
        current: Option<&'a T>,
        new: Option<&'a T>,
        success: Ordering,
        failure: Ordering,
    ) -> Result<Option<&'a T>, Option<&'a T>> {
        unsafe {
            match self.data.compare_exchange_weak(
                from_opt(current),
                from_opt(new),
                enforce_swap_ordering(success),
                enforce_load_ordering(failure),
            ) {
                Ok(p) => Ok(to_opt(p)),
                Err(p) => Err(to_opt(p)),
            }
        }
    }
}

impl<'a, T: fmt::Debug> fmt::Debug for AtomicRef<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::SeqCst), f)
    }
}

impl<'a, T> Default for AtomicRef<'a, T> {
    fn default() -> AtomicRef<'a, T> {
        AtomicRef::new(None)
    }
}

impl<'a, T> From<Option<&'a T>> for AtomicRef<'a, T> {
    fn from(other: Option<&'a T>) -> AtomicRef<'a, T> {
        AtomicRef::new(other)
    }
}

#[cfg(test)]
mod tests {
    use super::AtomicRef;
    use core::sync::atomic::Ordering;

    static FOO: AtomicRef<i32> = AtomicRef::new(None);

    static A: i32 = 10;

    #[test]
    fn it_works() {
        assert!(FOO.load(Ordering::SeqCst) == None);
        FOO.store(Some(&A), Ordering::SeqCst);
        assert!(FOO.load(Ordering::SeqCst) == Some(&A));
        assert!(FOO.load(Ordering::SeqCst).unwrap() as *const _ == &A as *const _);
    }
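
    // An additional illustrative test: a sketch, using only the public API
    // above, of how `swap` and `compare_exchange` report the previous value.
    // Comparison is by pointer identity, as documented on `compare_exchange`.
    // The `B` static is introduced here purely as a test fixture.
    static B: i32 = 20;

    #[test]
    fn swap_and_compare_exchange() {
        // Use a local `AtomicRef` so this test does not race with `it_works`
        // over the shared `FOO` static.
        let r: AtomicRef<i32> = AtomicRef::new(Some(&A));

        // `swap` stores the new reference and returns the previous one.
        assert_eq!(r.swap(Some(&B), Ordering::SeqCst), Some(&A));

        // `compare_exchange` fails when `current` does not match the stored
        // reference, returning the actual previous value...
        assert_eq!(
            r.compare_exchange(Some(&A), None, Ordering::SeqCst, Ordering::Acquire),
            Err(Some(&B))
        );
        // ...and succeeds when `current` is the stored reference, writing `new`.
        assert_eq!(
            r.compare_exchange(Some(&B), None, Ordering::SeqCst, Ordering::Acquire),
            Ok(Some(&B))
        );
        assert_eq!(r.load(Ordering::SeqCst), None);

        // `Relaxed` loads and stores are accepted; they are strengthened to
        // `Acquire`/`Release` internally, as documented in the crate docs.
        r.store(Some(&A), Ordering::Relaxed);
        assert_eq!(r.load(Ordering::Relaxed), Some(&A));
    }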
}