atomicbox_nostd/atomic_box.rs

use alloc::boxed::Box;
use core::fmt::{self, Debug, Formatter};
use core::marker::PhantomData;
use core::mem::forget;
use core::ptr::{self, null_mut};
use core::sync::atomic::{AtomicPtr, Ordering};

/// A type that holds a single `Box<T>` value and can be safely shared between
/// threads.
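///
/// # Examples
///
/// Swapping a value in from another thread:
///
///     use std::sync::Arc;
///     use std::sync::atomic::Ordering;
///     use std::thread;
///     use atomicbox::AtomicBox;
///
///     let shared = Arc::new(AtomicBox::new(Box::new(0_u32)));
///     let shared2 = Arc::clone(&shared);
///     let handle = thread::spawn(move || {
///         // Swap a new value in; the previous value comes back out.
///         *shared2.swap(Box::new(1), Ordering::AcqRel)
///     });
///     assert_eq!(handle.join().unwrap(), 0);
///     assert_eq!(*shared.swap(Box::new(2), Ordering::AcqRel), 1);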
pub struct AtomicBox<T> {
    ptr: AtomicPtr<T>,

    /// Marker to suppress the automatic `Send` and `Sync` impls (an
    /// `AtomicPtr<T>` alone is always both); they are reinstated below with
    /// explicit bounds.
    no_send_sync: PhantomData<*mut T>,
}

impl<T> AtomicBox<T> {
    /// Creates a new `AtomicBox` with the given value.
    ///
    /// # Examples
    ///
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atomic_box = AtomicBox::new(Box::new(0));
    ///
    pub fn new(value: Box<T>) -> AtomicBox<T> {
        let abox = AtomicBox {
            ptr: AtomicPtr::new(null_mut()),
            no_send_sync: PhantomData,
        };
        abox.ptr.store(Box::into_raw(value), Ordering::Release);
        abox
    }

    /// Atomically set this `AtomicBox` to `other` and return the previous value.
    ///
    /// This does not allocate or free memory, and it neither clones nor drops
    /// any values.  `other` is moved into `self`; the value previously in
    /// `self` is returned.
    ///
    /// `order` must be either `Ordering::AcqRel` or `Ordering::SeqCst`,
    /// as other orderings would not be safe if `T` contains any data.
    ///
    /// # Panics
    ///
    /// Panics if `order` is not one of the two allowed values.
    ///
    /// # Examples
    ///
    ///     use std::sync::atomic::Ordering;
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atom = AtomicBox::new(Box::new("one"));
    ///     let prev_value = atom.swap(Box::new("two"), Ordering::AcqRel);
    ///     assert_eq!(*prev_value, "one");
    ///
    pub fn swap(&self, other: Box<T>, order: Ordering) -> Box<T> {
        let mut result = other;
        self.swap_mut(&mut result, order);
        result
    }

    /// Atomically swaps the contents of this `AtomicBox` with the contents of `other`.
    ///
    /// This does not allocate or free memory, and it neither clones nor drops
    /// any values. The pointers in `*other` and `self` are simply exchanged.
    ///
    /// `order` must be either `Ordering::AcqRel` or `Ordering::SeqCst`,
    /// as other orderings would not be safe if `T` contains any data.
    ///
    /// # Panics
    ///
    /// Panics if `order` is not one of the two allowed values.
    ///
    /// # Examples
    ///
    ///     use std::sync::atomic::Ordering;
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atom = AtomicBox::new(Box::new("one"));
    ///     let mut boxed = Box::new("two");
    ///     atom.swap_mut(&mut boxed, Ordering::AcqRel);
    ///     assert_eq!(*boxed, "one");
    ///
    pub fn swap_mut(&self, other: &mut Box<T>, order: Ordering) {
        match order {
            Ordering::AcqRel | Ordering::SeqCst => {}
            _ => panic!("invalid ordering for atomic swap"),
        }

        // Read the box out of `*other` without dropping it, exchange raw
        // pointers atomically, then write the old box back into `*other`.
        let other_ptr = Box::into_raw(unsafe { ptr::read(other) });
        let ptr = self.ptr.swap(other_ptr, order);
        unsafe {
            ptr::write(other, Box::from_raw(ptr));
        }
    }

    /// Consume this `AtomicBox`, returning the last box value it contained.
    ///
    /// # Examples
    ///
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atom = AtomicBox::new(Box::new("hello"));
    ///     assert_eq!(atom.into_inner(), Box::new("hello"));
    ///
    pub fn into_inner(self) -> Box<T> {
        let last_ptr = self.ptr.load(Ordering::Acquire);
        forget(self);
        unsafe { Box::from_raw(last_ptr) }
    }

    /// Returns a mutable reference to the contained value.
    ///
    /// This is safe because it borrows the `AtomicBox` mutably, which ensures
    /// that no other threads can concurrently access either the atomic pointer field
    /// or the boxed data it points to.
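    ///
    /// # Examples
    ///
    ///     use atomicbox::AtomicBox;
    ///
    ///     let mut atom = AtomicBox::new(Box::new(5));
    ///     *atom.get_mut() += 1;
    ///     assert_eq!(*atom.get_mut(), 6);
    ///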
    pub fn get_mut(&mut self) -> &mut T {
        // Relaxed suffices here because this thread must already have
        // rendezvoused with any other thread that's been modifying shared
        // data, and executed an Acquire barrier, in order for the caller to
        // have a `mut` reference.  Symmetrically, no barrier is needed when
        // the reference expires, because this thread must rendezvous with
        // other threads, and execute a Release barrier, before this AtomicBox
        // becomes shared again.
        let ptr = self.ptr.load(Ordering::Relaxed);
        unsafe { &mut *ptr }
    }
}

// Sharing an `&AtomicBox<T>` lets other threads move `Box<T>` values in and
// out via `swap`, transferring ownership of `T` across threads without ever
// exposing a shared `&T`. Both `Send` and `Sync` therefore require `T: Send`.
unsafe impl<T: Send> Send for AtomicBox<T> {}
unsafe impl<T: Send> Sync for AtomicBox<T> {}

impl<T> Drop for AtomicBox<T> {
    /// Dropping an `AtomicBox<T>` drops the final `Box<T>` value stored in it.
    fn drop(&mut self) {
        let ptr = self.ptr.load(Ordering::Acquire);
        unsafe {
            // Reconstitute the box so the contained value is dropped.
            drop(Box::from_raw(ptr));
        }
    }
}

impl<T> Default for AtomicBox<T>
where
    Box<T>: Default,
{
    /// The default `AtomicBox<T>` value boxes the default `T` value.
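    ///
    /// # Examples
    ///
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atom: AtomicBox<u64> = Default::default();
    ///     assert_eq!(atom.into_inner(), Box::new(0));
    ///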
    fn default() -> AtomicBox<T> {
        AtomicBox::new(Default::default())
    }
}

impl<T> Debug for AtomicBox<T> {
    /// The `{:?}` format of an `AtomicBox<T>` looks like `"AtomicBox(0x12341234)"`.
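    ///
    /// # Examples
    ///
    ///     use atomicbox::AtomicBox;
    ///
    ///     let atom = AtomicBox::new(Box::new(7));
    ///     println!("{:?}", atom); // prints something like `AtomicBox(0x55d1e9d4ba10)`
    ///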
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        let p = self.ptr.load(Ordering::Relaxed);
        f.write_str("AtomicBox(")?;
        fmt::Pointer::fmt(&p, f)?;
        f.write_str(")")?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use core::sync::atomic::Ordering;
    use std::sync::{Arc, Barrier};
    use std::thread::spawn;

    #[test]
    fn atomic_box_swap_works() {
        let b = AtomicBox::new(Box::new("hello world"));
        let bis = Box::new("bis");
        assert_eq!(b.swap(bis, Ordering::AcqRel), Box::new("hello world"));
        assert_eq!(b.swap(Box::new(""), Ordering::AcqRel), Box::new("bis"));
    }

    #[test]
    fn atomic_box_swap_mut_works() {
        let b = AtomicBox::new(Box::new("hello world"));
        let mut bis = Box::new("bis");
        b.swap_mut(&mut bis, Ordering::AcqRel);
        assert_eq!(bis, Box::new("hello world"));
        b.swap_mut(&mut bis, Ordering::AcqRel);
        assert_eq!(bis, Box::new("bis"));
    }

    #[test]
    fn atomic_box_pointer_identity() {
        let box1 = Box::new(1);
        let p1 = format!("{:p}", box1);
        let atom = AtomicBox::new(box1);

        let box2 = Box::new(2);
        let p2 = format!("{:p}", box2);
        assert!(p2 != p1);

        let box3 = atom.swap(box2, Ordering::AcqRel); // box1 out, box2 in
        let p3 = format!("{:p}", box3);
        assert_eq!(p3, p1); // box3 is box1

        let box4 = atom.swap(Box::new(5), Ordering::AcqRel); // box2 out, throwaway value in
        let p4 = format!("{:p}", box4);
        assert_eq!(p4, p2); // box4 is box2
    }

    #[test]
    fn atomic_box_drops() {
        use std::sync::atomic::{AtomicUsize, Ordering};
        use std::sync::Arc;

        struct K(Arc<AtomicUsize>, usize);

        impl Drop for K {
            fn drop(&mut self) {
                self.0.fetch_add(self.1, Ordering::Relaxed);
            }
        }

        let n = Arc::new(AtomicUsize::new(0));
        {
            let ab = AtomicBox::new(Box::new(K(n.clone(), 5)));
            assert_eq!(n.load(Ordering::Relaxed), 0);
            let first = ab.swap(Box::new(K(n.clone(), 13)), Ordering::AcqRel);
            assert_eq!(n.load(Ordering::Relaxed), 0);
            drop(first);
            assert_eq!(n.load(Ordering::Relaxed), 5);
        }
        assert_eq!(n.load(Ordering::Relaxed), 5 + 13);
    }

    #[test]
    fn atomic_threads() {
        const NTHREADS: usize = 9;

        let gate = Arc::new(Barrier::new(NTHREADS));
        let abox: Arc<AtomicBox<Vec<u8>>> = Arc::new(Default::default());
        let handles: Vec<_> = (0..NTHREADS as u8)
            .map(|t| {
                let my_gate = gate.clone();
                let my_box = abox.clone();
                spawn(move || {
                    my_gate.wait();
                    let mut my_vec = Box::new(vec![]);
                    for _ in 0..100 {
                        my_vec = my_box.swap(my_vec, Ordering::AcqRel);
                        my_vec.push(t);
                    }
                    my_vec
                })
            })
            .collect();

        let mut counts = [0usize; NTHREADS];
        for h in handles {
            for val in *h.join().unwrap() {
                counts[val as usize] += 1;
            }
        }

        // Don't forget the data still in `abox`!
        // There are NTHREADS+1 vectors in all.
        for val in *abox.swap(Box::new(vec![]), Ordering::AcqRel) {
            counts[val as usize] += 1;
        }

        println!("{:?}", counts);
        for t in 0..NTHREADS {
            assert_eq!(counts[t], 100);
        }
    }

    #[test]
    #[should_panic(expected = "invalid ordering for atomic swap")]
    fn cant_use_foolish_swap_ordering_type() {
        let atom = AtomicBox::new(Box::new(0));
        atom.swap(Box::new(44), Ordering::Release); // nope
    }

    #[test]
    fn debug_fmt() {
        let my_box = Box::new(32);
        let expected = format!("AtomicBox({:p})", my_box);
        assert_eq!(format!("{:?}", AtomicBox::new(my_box)), expected);
    }
}