// flexrc/algorithm/hybrid.rs

use core::cell::Cell;
use core::marker::PhantomData;
use core::sync::atomic;
#[cfg(feature = "track_threads")]
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::{AtomicU32, Ordering};

use static_assertions::{assert_eq_align, assert_eq_size, assert_impl_all, assert_not_impl_any};

use crate::algorithm::abort;
#[cfg(feature = "track_threads")]
use crate::algorithm::hybrid_threads::THREAD_ID;
use crate::{Algorithm, FlexRc, FlexRcInner};

#[cfg(not(feature = "track_threads"))]
assert_eq_size!(HybridMeta<LocalMode>, u64);
#[cfg(not(feature = "track_threads"))]
assert_eq_size!(HybridMeta<SharedMode>, u64);

assert_eq_size!(HybridMeta<LocalMode>, HybridMeta<SharedMode>);
assert_eq_align!(HybridMeta<LocalMode>, HybridMeta<SharedMode>);
assert_eq_size!(LocalInner<usize>, SharedInner<usize>);
assert_eq_align!(LocalInner<usize>, SharedInner<usize>);
assert_eq_size!(LocalHybridRc<usize>, SharedHybridRc<usize>);
assert_eq_align!(LocalHybridRc<usize>, SharedHybridRc<usize>);

assert_impl_all!(SharedHybridRc<usize>: Send, Sync);
assert_not_impl_any!(LocalHybridRc<usize>: Send, Sync);

#[cfg(feature = "track_threads")]
const THREAD_ID_LOCKED: usize = (usize::MAX >> 1) + 1;
#[cfg(feature = "track_threads")]
const THREAD_ID_UNLOCKED: usize = usize::MAX >> 1;

// The entire counter is usable for the local count
const MAX_LOCAL_COUNT: u32 = u32::MAX;
// Reserve the top bit as the "local present" flag and the second-highest bit for overflow detection
const MAX_SHARED_COUNT: u32 = u32::MAX >> 2;
// The top bit of the shared counter signifies whether a local reference is present
const LOCAL_PRESENT: u32 = (u32::MAX >> 1) + 1;
// All bits set except the top bit
const CLEAR_LOCAL: u32 = u32::MAX >> 1;
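
// Illustrative sketch: the bit-layout relationships promised by the comments above, spelled
// out as a test. The module and test names are arbitrary; only items defined in this file
// are assumed.
#[cfg(test)]
mod shared_counter_layout_tests {
    use super::*;

    #[test]
    fn shared_counter_bit_layout() {
        // LOCAL_PRESENT is exactly the top bit of the 32-bit shared counter
        assert_eq!(LOCAL_PRESENT, 1u32 << 31);
        // CLEAR_LOCAL masks off that top bit and nothing else
        assert_eq!(CLEAR_LOCAL, !LOCAL_PRESENT);
        // The usable shared count never reaches either of the two reserved top bits
        assert_eq!(MAX_SHARED_COUNT & LOCAL_PRESENT, 0);
        assert!(MAX_SHARED_COUNT < CLEAR_LOCAL);
    }
}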

pub struct LocalMode;
pub struct SharedMode;

#[repr(C)]
pub struct HybridMeta<MODE> {
    #[cfg(feature = "track_threads")]
    thread_id: AtomicUsize,
    local_count: Cell<u32>,
    shared_count: AtomicU32,
    phantom: PhantomData<MODE>,
}

pub type LocalHybridRc<T> = FlexRc<HybridMeta<LocalMode>, HybridMeta<SharedMode>, T>;

type LocalInner<T> = FlexRcInner<HybridMeta<LocalMode>, HybridMeta<SharedMode>, T>;
type SharedInner<T> = FlexRcInner<HybridMeta<SharedMode>, HybridMeta<LocalMode>, T>;

impl Algorithm<HybridMeta<LocalMode>, HybridMeta<SharedMode>> for HybridMeta<LocalMode> {
    #[inline]
    fn create() -> Self {
        Self {
            #[cfg(feature = "track_threads")]
            thread_id: AtomicUsize::new(THREAD_ID.with(|t| t.0)),
            local_count: Cell::new(1),
            shared_count: AtomicU32::new(LOCAL_PRESENT),
            phantom: PhantomData,
        }
    }

    #[inline]
    fn is_unique(&self) -> bool {
        // If the shared counter equals LOCAL_PRESENT, only the high bit is set and the shared count == 0
        // Long discussion on why this ordering is required: https://github.com/servo/servo/issues/21186
        self.local_count.get() == 1 && self.shared_count.load(Ordering::Acquire) == LOCAL_PRESENT
    }

    #[inline(always)]
    fn clone(&self) {
        let old = self.local_count.get();

        // TODO: This check adds 15-16% clone overhead - truly needed?
        if old == MAX_LOCAL_COUNT {
            abort()
        }
        self.local_count.set(old + 1);
    }

    #[inline(always)]
    fn drop(&self) -> bool {
        self.local_count.set(self.local_count.get() - 1);

        if self.local_count.get() == 0 {
            // FIXME: Verify correct Ordering
            let old = self.shared_count.fetch_and(CLEAR_LOCAL, Ordering::Release);

            // If the value is just `LOCAL_PRESENT` that means only the top bit was set and the
            // shared counter was zero
            old == LOCAL_PRESENT
        } else {
            false
        }
    }

    #[inline]
    fn try_into_other<T: ?Sized>(
        &self,
        inner: *mut LocalInner<T>,
    ) -> Result<*mut SharedInner<T>, *mut LocalInner<T>> {
        // This is always allowed

        // Safety: These are literally the same type - we invented the `SharedMode` and `LocalMode` tags
        // to FORCE distinct types where there otherwise wouldn't be any, so this cast is safe
        let inner = inner as *mut SharedInner<T>;

        // Since we are a) creating a new instance, not reusing the old one, and b) using a
        // different ref count field, we now need to force a clone
        // SAFETY: See above
        unsafe {
            (*inner).metadata.clone();
        }
        Ok(inner)
    }

    #[inline]
    fn try_to_other<T: ?Sized>(
        &self,
        inner: *mut LocalInner<T>,
    ) -> Result<*mut SharedInner<T>, *mut LocalInner<T>> {
        // Since we can always keep the original, both are the same
        self.try_into_other(inner)
    }
}
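
// Illustrative sketch: drive the local-mode counters directly through the `Algorithm` trait,
// bypassing `FlexRc` itself. The module and test names are arbitrary; only the impl above is
// assumed.
#[cfg(test)]
mod local_mode_tests {
    use super::*;

    #[test]
    fn local_clone_and_drop_track_uniqueness() {
        // create(): local_count = 1, shared_count = LOCAL_PRESENT
        let meta: HybridMeta<LocalMode> = Algorithm::create();
        assert!(Algorithm::is_unique(&meta));

        // clone(): local_count 1 -> 2, so the value is no longer unique
        Algorithm::clone(&meta);
        assert!(!Algorithm::is_unique(&meta));

        // First drop(): local_count 2 -> 1, not the last reference
        assert!(!Algorithm::drop(&meta));
        // Second drop(): local_count 1 -> 0 with a shared count of 0, so this was the last one
        assert!(Algorithm::drop(&meta));
    }
}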

pub type SharedHybridRc<T> = FlexRc<HybridMeta<SharedMode>, HybridMeta<LocalMode>, T>;

// SAFETY: We ensure what we are holding is Send + Sync and we have been careful to uphold the
// invariants that make these marker impls safe
unsafe impl<T: Send + Sync> Send for SharedHybridRc<T> {}
unsafe impl<T: Send + Sync> Sync for SharedHybridRc<T> {}

impl Algorithm<HybridMeta<SharedMode>, HybridMeta<LocalMode>> for HybridMeta<SharedMode> {
    #[inline]
    fn create() -> Self {
        Self {
            #[cfg(feature = "track_threads")]
            // No thread ID set yet
            thread_id: AtomicUsize::new(0),
            local_count: Cell::new(0),
            shared_count: AtomicU32::new(1),
            phantom: PhantomData,
        }
    }

    #[inline]
    fn is_unique(&self) -> bool {
        // If set to 1, there are no local mode references left and this is the last shared reference
        // Long discussion on why this ordering is required: https://github.com/servo/servo/issues/21186
        self.shared_count.load(Ordering::Acquire) == 1
    }

    #[inline(always)]
    fn clone(&self) {
        let old = self.shared_count.fetch_add(1, Ordering::Relaxed);

        // Mask off LOCAL_PRESENT so only the shared count portion is range checked (otherwise
        // any clone made while a local reference exists would falsely trip the overflow guard)
        if (old & CLEAR_LOCAL) > MAX_SHARED_COUNT {
            abort()
        }
    }

    #[inline(always)]
    fn drop(&self) -> bool {
        // If the value was 1 previously, that means LOCAL_PRESENT wasn't set, which means this
        // is the last remaining reference
        if self.shared_count.fetch_sub(1, Ordering::Release) == 1 {
            atomic::fence(Ordering::Acquire);
            true
        } else {
            false
        }
    }

    #[cfg(feature = "track_threads")]
    #[inline]
    fn try_into_other<T: ?Sized>(
        &self,
        inner: *mut SharedInner<T>,
    ) -> Result<*mut LocalInner<T>, *mut SharedInner<T>> {
        let thread_id = THREAD_ID.with(|thread_id| thread_id.0);

        // Spinlock to ensure only one thread can access this at a time
        let old_thread_id = loop {
            // FIXME: Verify correct Ordering
            let old_thread_id = self.thread_id.fetch_or(THREAD_ID_LOCKED, Ordering::Acquire);

            // If we obtained the lock, the old value will have the lock bit unset
            if old_thread_id < THREAD_ID_LOCKED {
                break old_thread_id;
            }
            std::hint::spin_loop();
        };

        // Try to make this thread the local one by setting the LOCAL_PRESENT bit.
        // FIXME: Verify correct Ordering
        let old_shared_count = self.shared_count.fetch_or(LOCAL_PRESENT, Ordering::Acquire);

        // If we are the local thread OR there is no local thread
        if thread_id == old_thread_id || old_shared_count < LOCAL_PRESENT {
            // FIXME: Verify correct Ordering
            // Store our thread ID, which also releases the spinlock
            self.thread_id.store(thread_id, Ordering::Release);

            // Safety: These are literally the same type - we invented the `SharedMode` and `LocalMode` tags
            // to FORCE distinct types where there otherwise wouldn't be any, so this cast is safe
            let inner = inner as *mut LocalInner<T>;

            // Since we are a) creating a new instance, not reusing the old one, and b) using a
            // different ref count field, we now need to force a clone
            // SAFETY: See above
            unsafe {
                (*inner).metadata.clone();
            }
            Ok(inner)
        } else {
            // FIXME: Verify correct Ordering
            // Release the spinlock and return an error
            self.thread_id
                .fetch_and(THREAD_ID_UNLOCKED, Ordering::Release);
            Err(inner)
        }
    }

    #[cfg(not(feature = "track_threads"))]
    #[inline]
    fn try_into_other<T: ?Sized>(
        &self,
        inner: *mut SharedInner<T>,
    ) -> Result<*mut LocalInner<T>, *mut SharedInner<T>> {
        // Try to make this thread the local one by setting the LOCAL_PRESENT bit. If the old value
        // is less than LOCAL_PRESENT we know the bit wasn't previously set (NOTE: without tracking
        // and comparing a thread ID field this can only succeed once; successive invocations will
        // fail, even when called from the proper thread)
        // FIXME: Verify correct Ordering
        if self.shared_count.fetch_or(LOCAL_PRESENT, Ordering::Acquire) < LOCAL_PRESENT {
            // Safety: These are literally the same type - we invented the `SharedMode` and `LocalMode` tags
            // to FORCE distinct types where there otherwise wouldn't be any, so this cast is safe
            let inner = inner as *mut LocalInner<T>;

            // Since we are a) creating a new instance, not reusing the old one, and b) using a
            // different ref count field, we now need to force a clone
            // SAFETY: See above
            unsafe {
                (*inner).metadata.clone();
            }
            Ok(inner)
        } else {
            Err(inner)
        }
    }

    #[inline]
    fn try_to_other<T: ?Sized>(
        &self,
        inner: *mut SharedInner<T>,
    ) -> Result<*mut LocalInner<T>, *mut SharedInner<T>> {
        // Since we can always keep the original, both are the same
        self.try_into_other(inner)
    }
}
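
// Illustrative sketch: the local-to-shared handoff at the counter level. The reference cast
// mirrors the pointer casts in `try_into_other` and leans on the same `repr(C)` / equal size
// and alignment guarantees asserted at the top of this file. Module and test names are
// arbitrary.
#[cfg(test)]
mod hybrid_handoff_tests {
    use super::*;

    #[test]
    fn local_drop_hands_off_to_shared() {
        // Start local: local_count = 1, shared_count = LOCAL_PRESENT
        let local: HybridMeta<LocalMode> = Algorithm::create();

        // View the same metadata in shared mode, the way `try_into_other` views the inner
        // SAFETY: Both types are `repr(C)` with identical fields (see the assertions above)
        let shared: &HybridMeta<SharedMode> =
            unsafe { &*(&local as *const HybridMeta<LocalMode> as *const HybridMeta<SharedMode>) };

        // Hand out a shared reference: shared_count = LOCAL_PRESENT | 1
        Algorithm::clone(shared);

        // Dropping the last local clears LOCAL_PRESENT, but a shared reference remains
        assert!(!Algorithm::drop(&local));
        assert!(Algorithm::is_unique(shared));

        // Dropping the last shared reference reports that the value can be freed
        assert!(Algorithm::drop(shared));
    }
}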