// atomicbox_nostd/atomic_box.rs

use alloc::boxed::Box;
2use core::fmt::{self, Debug, Formatter};
3use core::marker::PhantomData;
4use core::mem::forget;
5use core::ptr::{self, null_mut};
6use core::sync::atomic::{AtomicPtr, Ordering};
7
/// A heap-allocated value that can be atomically swapped between threads.
pub struct AtomicBox<T> {
    // Invariant: always a valid, non-null pointer obtained from
    // `Box::into_raw`, so it can be reconstituted as a `Box<T>` at any time.
    ptr: AtomicPtr<T>,

    // `*mut T` is neither `Send` nor `Sync`, so this zero-sized marker
    // suppresses both auto traits; they are deliberately re-enabled by the
    // explicit `unsafe impl`s further down the file.
    no_send_sync: PhantomData<*mut T>,
}
16
17impl<T> AtomicBox<T> {
18 pub fn new(value: Box<T>) -> AtomicBox<T> {
27 let abox = AtomicBox {
28 ptr: AtomicPtr::new(null_mut()),
29 no_send_sync: PhantomData,
30 };
31 abox.ptr.store(Box::into_raw(value), Ordering::Release);
32 abox
33 }
34
35 pub fn swap(&self, other: Box<T>, order: Ordering) -> Box<T> {
58 let mut result = other;
59 self.swap_mut(&mut result, order);
60 result
61 }
62
63 pub fn swap_mut(&self, other: &mut Box<T>, order: Ordering) {
86 match order {
87 Ordering::AcqRel | Ordering::SeqCst => {}
88 _ => panic!("invalid ordering for atomic swap"),
89 }
90
91 let other_ptr = Box::into_raw(unsafe { ptr::read(other) });
92 let ptr = self.ptr.swap(other_ptr, order);
93 unsafe {
94 ptr::write(other, Box::from_raw(ptr));
95 }
96 }
97
98 pub fn into_inner(self) -> Box<T> {
108 let last_ptr = self.ptr.load(Ordering::Acquire);
109 forget(self);
110 unsafe { Box::from_raw(last_ptr) }
111 }
112
113 pub fn get_mut(&mut self) -> &mut T {
119 let ptr = self.ptr.load(Ordering::Relaxed);
127 unsafe { &mut *ptr }
128 }
129}
130
131unsafe impl<T: Send> Send for AtomicBox<T> {}
132unsafe impl<T: Sync> Sync for AtomicBox<T> {}
133
134impl<T> Drop for AtomicBox<T> {
135 fn drop(&mut self) {
137 let ptr = self.ptr.load(Ordering::Acquire);
138 unsafe {
139 Box::from_raw(ptr);
140 }
141 }
142}
143
144impl<T> Default for AtomicBox<T>
145where
146 Box<T>: Default,
147{
148 fn default() -> AtomicBox<T> {
150 AtomicBox::new(Default::default())
151 }
152}
153
154impl<T> Debug for AtomicBox<T> {
155 fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
157 let p = self.ptr.load(Ordering::Relaxed);
158 f.write_str("AtomicBox(")?;
159 fmt::Pointer::fmt(&p, f)?;
160 f.write_str(")")?;
161 Ok(())
162 }
163}
164
#[cfg(test)]
mod tests {
    use super::*;
    use core::sync::atomic::Ordering;
    use std::sync::{Arc, Barrier};
    use std::thread::spawn;

    // `swap` hands back whatever value was stored before.
    #[test]
    fn atomic_box_swap_works() {
        let atom = AtomicBox::new(Box::new("hello world"));
        let replacement = Box::new("bis");
        assert_eq!(atom.swap(replacement, Ordering::AcqRel), Box::new("hello world"));
        assert_eq!(atom.swap(Box::new(""), Ordering::AcqRel), Box::new("bis"));
    }

    // `swap_mut` performs the same exchange through a `&mut Box<T>`.
    #[test]
    fn atomic_box_swap_mut_works() {
        let atom = AtomicBox::new(Box::new("hello world"));
        let mut held = Box::new("bis");
        atom.swap_mut(&mut held, Ordering::AcqRel);
        assert_eq!(held, Box::new("hello world"));
        atom.swap_mut(&mut held, Ordering::AcqRel);
        assert_eq!(held, Box::new("bis"));
    }

    // Swapping returns the exact allocation that went in, not a copy.
    #[test]
    fn atomic_box_pointer_identity() {
        let first = Box::new(1);
        let first_addr = format!("{:p}", first);
        let atom = AtomicBox::new(first);

        let second = Box::new(2);
        let second_addr = format!("{:p}", second);
        assert!(second_addr != first_addr);

        let got_first = atom.swap(second, Ordering::AcqRel);
        assert_eq!(format!("{:p}", got_first), first_addr);

        let got_second = atom.swap(Box::new(5), Ordering::AcqRel);
        assert_eq!(format!("{:p}", got_second), second_addr);
    }

    // Values are dropped exactly once: swapped-out boxes when the caller
    // drops them, the final contents when the AtomicBox itself is dropped.
    #[test]
    fn atomic_box_drops() {
        use std::sync::atomic::{AtomicUsize, Ordering};
        use std::sync::Arc;

        // Adds its weight to the shared counter on drop.
        struct K(Arc<AtomicUsize>, usize);

        impl Drop for K {
            fn drop(&mut self) {
                self.0.fetch_add(self.1, Ordering::Relaxed);
            }
        }

        let dropped = Arc::new(AtomicUsize::new(0));
        {
            let ab = AtomicBox::new(Box::new(K(dropped.clone(), 5)));
            assert_eq!(dropped.load(Ordering::Relaxed), 0);
            let first = ab.swap(Box::new(K(dropped.clone(), 13)), Ordering::AcqRel);
            assert_eq!(dropped.load(Ordering::Relaxed), 0);
            drop(first);
            assert_eq!(dropped.load(Ordering::Relaxed), 5);
        }
        assert_eq!(dropped.load(Ordering::Relaxed), 5 + 13);
    }

    // Many threads repeatedly trade vectors through one shared box; every
    // push must survive somewhere — nothing lost, nothing duplicated.
    #[test]
    fn atomic_threads() {
        const NTHREADS: usize = 9;

        let gate = Arc::new(Barrier::new(NTHREADS));
        let abox: Arc<AtomicBox<Vec<u8>>> = Arc::new(Default::default());
        let handles: Vec<_> = (0..NTHREADS as u8)
            .map(|id| {
                let my_gate = gate.clone();
                let my_box = abox.clone();
                spawn(move || {
                    my_gate.wait();
                    let mut local = Box::new(vec![]);
                    for _ in 0..100 {
                        local = my_box.swap(local, Ordering::AcqRel);
                        local.push(id);
                    }
                    local
                })
            })
            .collect();

        let mut counts = [0usize; NTHREADS];
        for handle in handles {
            for writer in *handle.join().unwrap() {
                counts[writer as usize] += 1;
            }
        }

        // Whatever the last swap left in the shared box also counts.
        for writer in *abox.swap(Box::new(vec![]), Ordering::AcqRel) {
            counts[writer as usize] += 1;
        }

        println!("{:?}", counts);
        for id in 0..NTHREADS {
            assert_eq!(counts[id], 100);
        }
    }

    // Load-only / store-only orderings are rejected at runtime.
    #[test]
    #[should_panic(expected = "invalid ordering for atomic swap")]
    fn cant_use_foolish_swap_ordering_type() {
        let atom = AtomicBox::new(Box::new(0));
        atom.swap(Box::new(44), Ordering::Release);
    }

    // Debug output shows the address of the current contents.
    #[test]
    fn debug_fmt() {
        let my_box = Box::new(32);
        let expected = format!("AtomicBox({:p})", my_box);
        assert_eq!(format!("{:?}", AtomicBox::new(my_box)), expected);
    }
}