crossync/atomic/cell.rs

use crate::sync::{RawMutex, WatchGuardMut, WatchGuardRef};
use crossbeam_utils::CachePadded;
use std::cell::UnsafeCell;
use std::mem::{self, MaybeUninit};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::atomic::{self, AtomicUsize, Ordering};

/// Internal representation of the atomic cell.
/// Provides interior mutability through a lock that supports both shared
/// and exclusive access; reference counting allows cloning the `AtomicCell`.
struct Inner<T> {
    /// Value stored in the cell, possibly uninitialized.
    val: UnsafeCell<MaybeUninit<T>>,
    /// Lock protecting concurrent access (shared or exclusive).
    state: RawMutex,
    /// Reference count for clone/drop semantics.
    ref_count: CachePadded<AtomicUsize>,
}

impl<T> Inner<T> {
    /// Creates a new `Inner` cell containing `val`.
    fn new(val: T) -> Self {
        Self {
            val: UnsafeCell::new(MaybeUninit::new(val)),
            state: RawMutex::new(),
            ref_count: CachePadded::new(AtomicUsize::new(1)),
        }
    }
}

// SAFETY: clones hand out references to `T` across threads, and the last
// clone may drop `T` on a different thread than the one that created it,
// so both impls require `T: Send + Sync`.
unsafe impl<T: Send + Sync> Send for AtomicCell<T> {}
unsafe impl<T: Send + Sync> Sync for AtomicCell<T> {}

// Safe for unwind scenarios: every access re-acquires the lock.
impl<T> UnwindSafe for AtomicCell<T> {}
impl<T> RefUnwindSafe for AtomicCell<T> {}

/// Thread-safe, clonable cell. All clones share one heap-allocated value,
/// and every access goes through the internal lock.
#[repr(transparent)]
pub struct AtomicCell<T> {
    ptr: *const Inner<T>,
}

impl<T> AtomicCell<T> {
    /// Creates a new atomic cell containing `val`.
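    ///
    /// A minimal usage sketch (this doc-test assumes the type is re-exported
    /// at `crossync::atomic::AtomicCell`):
    ///
    /// ```
    /// use crossync::atomic::AtomicCell;
    ///
    /// let cell = AtomicCell::new(7);
    /// assert_eq!(*cell.get(), 7);
    /// ```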
    pub fn new(val: T) -> Self {
        let inner = Box::new(Inner::new(val));
        let ptr = Box::into_raw(inner);
        Self { ptr }
    }

    /// Returns a reference to the inner struct.
    ///
    /// SAFETY: `self.ptr` came from `Box::into_raw` and stays valid until
    /// the last clone is dropped.
    #[inline(always)]
    fn inner(&self) -> &Inner<T> {
        unsafe { &*self.ptr }
    }

    /// Acquires the lock in shared mode and returns a read guard.
    pub fn get(&self) -> WatchGuardRef<'_, T> {
        let lock = &self.inner().state;
        lock.lock_shared();
        // SAFETY: the shared lock is held and the value is initialized.
        let val = unsafe { (&*self.inner().val.get()).assume_init_ref() };
        WatchGuardRef::new(val, lock)
    }

    /// Executes a closure while holding a shared guard.
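    ///
    /// For example (a sketch; same path assumption as in [`AtomicCell::new`]):
    ///
    /// ```
    /// use crossync::atomic::AtomicCell;
    ///
    /// let cell = AtomicCell::new(String::from("hi"));
    /// let len = cell.with(|s| s.len());
    /// assert_eq!(len, 2);
    /// ```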
    pub fn with<R>(&self, f: impl FnOnce(&T) -> R) -> R {
        let guard = self.get();
        f(&guard)
    }

    /// Acquires the lock in exclusive mode and returns a write guard.
    pub fn get_mut(&self) -> WatchGuardMut<'_, T> {
        let lock = &self.inner().state;
        lock.lock_exclusive();
        // SAFETY: the exclusive lock is held and the value is initialized.
        let val = unsafe { (&mut *self.inner().val.get()).assume_init_mut() };
        WatchGuardMut::new(val, lock)
    }

    /// Executes a closure while holding an exclusive guard.
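    ///
    /// A short sketch of in-place mutation (same path assumption as above):
    ///
    /// ```
    /// use crossync::atomic::AtomicCell;
    ///
    /// let cell = AtomicCell::new(1);
    /// cell.with_mut(|v| *v += 1);
    /// assert_eq!(*cell.get(), 2);
    /// ```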
    pub fn with_mut<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        let mut guard = self.get_mut();
        f(&mut guard)
    }

    /// Returns a raw pointer to the inner value.
    ///
    /// No lock is taken here, so the pointer must not be dereferenced while
    /// another thread may hold a guard.
    #[inline]
    pub fn as_ptr(&self) -> *mut T {
        self.inner().val.get().cast::<T>()
    }

    /// Consumes the `AtomicCell` and returns the inner value.
    ///
    /// # Panics
    /// Panics if there are other clones of this `AtomicCell` still alive.
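    ///
    /// For instance (a sketch; same path assumption as above):
    ///
    /// ```
    /// use crossync::atomic::AtomicCell;
    ///
    /// let cell = AtomicCell::new(vec![1, 2, 3]);
    /// assert_eq!(cell.into_inner(), vec![1, 2, 3]);
    /// ```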
    pub fn into_inner(self) -> T {
        // Check that we are the sole owner.
        let ref_count = self.inner().ref_count.load(Ordering::Acquire);
        assert_eq!(
            ref_count, 1,
            "cannot call into_inner with multiple references"
        );

        // Get the raw pointer and prevent AtomicCell::drop from running.
        let ptr = self.ptr as *mut Inner<T>;
        mem::forget(self);

        // Take ownership of the boxed inner.
        let boxed = unsafe { Box::from_raw(ptr) };

        // Read the value out. MaybeUninit does not drop its contents,
        // so when `boxed` is dropped, only the Inner struct is deallocated.
        let value = unsafe { boxed.val.get().read().assume_init() };

        // `boxed` drops here, deallocating the Inner memory.
        value
    }

    /// Applies `f` to the value while holding the exclusive lock.
    pub fn update<F>(&self, f: F)
    where
        F: FnOnce(&mut T),
    {
        let mut guard = self.get_mut();
        f(&mut *guard);
    }

    /// Swaps in `val`, returning the old value.
    pub fn swap(&self, val: T) -> T {
        let mut guard = self.get_mut();
        mem::replace(&mut *guard, val)
    }

    /// Stores a new value, dropping the old one.
    pub fn store(&self, val: T) {
        let mut guard = self.get_mut();
        *guard = val;
    }
}

impl<T: Copy + Eq> AtomicCell<T> {
    /// Compare-and-swap: if the stored value equals `current`, writes `new`
    /// and returns `Ok(old)`. Otherwise returns `Err` carrying the write
    /// guard, so the caller can inspect or modify the value while still
    /// holding the lock.
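    ///
    /// For example (a sketch; same path assumption as above):
    ///
    /// ```
    /// use crossync::atomic::AtomicCell;
    ///
    /// let cell = AtomicCell::new(1);
    /// assert_eq!(cell.compare_exchange(1, 2).ok(), Some(1));
    /// assert!(cell.compare_exchange(1, 3).is_err());
    /// assert_eq!(*cell.get(), 2);
    /// ```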
    pub fn compare_exchange(&self, current: T, new: T) -> Result<T, WatchGuardMut<'_, T>> {
        let mut guard = self.get_mut();
        if *guard == current {
            let old = mem::replace(&mut *guard, new);
            Ok(old)
        } else {
            Err(guard)
        }
    }
}

impl<T> Clone for AtomicCell<T> {
    fn clone(&self) -> Self {
        // Relaxed suffices: the new clone is handed to another thread only
        // through some other synchronization (same reasoning as `Arc::clone`).
        self.inner().ref_count.fetch_add(1, Ordering::Relaxed);
        Self { ptr: self.ptr }
    }
}

impl<T> Drop for AtomicCell<T> {
    fn drop(&mut self) {
        let inner = unsafe { &*self.ptr };
        if inner.ref_count.fetch_sub(1, Ordering::Release) == 1 {
            // Synchronize with all earlier Release decrements before
            // destroying the value.
            atomic::fence(Ordering::Acquire);

            // SAFETY: we hold the last reference, so no other thread can
            // touch the value or the allocation anymore.
            unsafe {
                // First, drop the contained value (MaybeUninit will not).
                let val_ptr = (*self.ptr).val.get();
                std::ptr::drop_in_place((*val_ptr).assume_init_mut());

                // Then deallocate the Inner struct.
                drop(Box::from_raw(self.ptr as *mut Inner<T>));
            }
        }
    }
}
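
// A hypothetical smoke test of the clone/drop and swap semantics above;
// this `mod tests` is an illustrative addition, not part of the original
// module.
#[cfg(test)]
mod tests {
    use super::AtomicCell;
    use std::thread;

    #[test]
    fn clones_share_the_same_value() {
        let cell = AtomicCell::new(0u64);
        let clone = cell.clone();

        // A writer on another thread mutates the shared value.
        let handle = thread::spawn(move || {
            clone.update(|v| *v += 1);
        });
        handle.join().unwrap();

        assert_eq!(*cell.get(), 1);
        assert_eq!(cell.swap(5), 1);
        // `clone` was dropped when its thread finished, so `cell` is the
        // sole owner here and `into_inner` must not panic.
        assert_eq!(cell.into_inner(), 5);
    }
}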