// flexrc/algorithm/regular.rs

use core::cell::Cell;
use core::sync::atomic;
use core::sync::atomic::{AtomicUsize, Ordering};

use static_assertions::{assert_eq_align, assert_eq_size, assert_impl_all, assert_not_impl_any};

use crate::algorithm::abort;
use crate::{Algorithm, FlexRc, FlexRcInner};

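// Compile-time checks backing the pointer casts in `try_into_other` below:
// the two metadata types (and therefore the inner and handle types) must
// agree in both size and alignment.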
assert_eq_size!(LocalMeta, SharedMeta);
assert_eq_align!(LocalMeta, SharedMeta);
assert_eq_size!(LocalInner<usize>, SharedInner<usize>);
assert_eq_align!(LocalInner<usize>, SharedInner<usize>);
assert_eq_size!(LocalRc<usize>, SharedRc<usize>);
assert_eq_align!(LocalRc<usize>, SharedRc<usize>);

assert_impl_all!(SharedRc<usize>: Send, Sync);
assert_not_impl_any!(LocalRc<usize>: Send, Sync);

const MAX_LOCAL_COUNT: usize = usize::MAX;
// Leave headroom below `usize::MAX`: concurrent increments may race past the
// limit before the `abort` in `clone` fires.
const MAX_SHARED_COUNT: usize = usize::MAX >> 1;

#[repr(C)]
pub struct LocalMeta {
    count: Cell<usize>,
}

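/// A single-threaded reference-counted pointer: the count is a plain
/// `Cell<usize>`, so clones and drops are non-atomic (analogous to `std::rc::Rc`).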
pub type LocalRc<T> = FlexRc<LocalMeta, SharedMeta, T>;

type LocalInner<T> = FlexRcInner<LocalMeta, SharedMeta, T>;
type SharedInner<T> = FlexRcInner<SharedMeta, LocalMeta, T>;

impl Algorithm<LocalMeta, SharedMeta> for LocalMeta {
    #[inline]
    fn create() -> Self {
        Self {
            count: Cell::new(1),
        }
    }

    #[inline]
    fn is_unique(&self) -> bool {
        self.count.get() == 1
    }

    #[inline(always)]
    fn clone(&self) {
        let old = self.count.get();

        // TODO: This check adds 15-16% clone overhead - truly needed?
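        // (Without it, leaking clones, e.g. via `mem::forget`, could wrap the
        // count to zero and free the value while references still exist.)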
        if old == MAX_LOCAL_COUNT {
            abort()
        }

        self.count.set(old + 1);
    }

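    // Returns `true` iff this was the last reference, i.e. the caller must
    // free the allocation.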
    #[inline(always)]
    fn drop(&self) -> bool {
        let count = self.count.get() - 1;
        self.count.set(count);
        count == 0
    }

    #[inline]
    fn try_into_other<T: ?Sized>(
        &self,
        inner: *mut LocalInner<T>,
    ) -> Result<*mut SharedInner<T>, *mut LocalInner<T>> {
        if self.is_unique() {
            // Safety:
            // a) both types are the same struct and identical other than usage of different META types
            // b) type is `repr(C)` so we know the layout
            // c) although not required, we ensure same alignment
            // d) we validate at compile time that `LocalMeta` and `SharedMeta` are the same size
            // e) Cell<usize> and AtomicUsize are same size and layout
            // f) only the two pre-defined metadata pairs are allowed
            Ok(inner as *mut SharedInner<T>)
        } else {
            Err(inner)
        }
    }

    #[inline]
    fn try_to_other<T: ?Sized>(
        &self,
        inner: *mut LocalInner<T>,
    ) -> Result<*mut SharedInner<T>, *mut LocalInner<T>> {
        // Never safe: both a non-atomic (local) and an atomic (shared) handle
        // would then update the same count concurrently.
        Err(inner)
    }
}

#[repr(C)]
pub struct SharedMeta {
    count: AtomicUsize,
}

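/// A thread-safe reference-counted pointer: the count is an `AtomicUsize`,
/// so clones and drops may race across threads (analogous to `std::sync::Arc`).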
pub type SharedRc<T> = FlexRc<SharedMeta, LocalMeta, T>;

// SAFETY: the reference count is atomic, and we require `T: Send + Sync`
// because any clone may hand out `&T` on one thread while another thread
// drops the final reference.
unsafe impl<T: Send + Sync> Send for SharedRc<T> {}
unsafe impl<T: Send + Sync> Sync for SharedRc<T> {}

impl Algorithm<SharedMeta, LocalMeta> for SharedMeta {
    #[inline]
    fn create() -> Self {
        Self {
            count: AtomicUsize::new(1),
        }
    }

    #[inline]
    fn is_unique(&self) -> bool {
        // Long discussion on why this ordering is required: https://github.com/servo/servo/issues/21186
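        // In short: `Acquire` synchronizes with the `Release` decrements in
        // `drop`, so another thread's last use of the value happens-before we
        // report uniqueness.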
        self.count.load(Ordering::Acquire) == 1
    }

    #[inline(always)]
    fn clone(&self) {
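        // Relaxed suffices here: the caller already holds a reference, which
        // keeps the value alive while the count is bumped.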
        let old = self.count.fetch_add(1, Ordering::Relaxed);

        if old > MAX_SHARED_COUNT {
            abort()
        }
    }

    #[inline(always)]
    fn drop(&self) -> bool {
        if self.count.fetch_sub(1, Ordering::Release) == 1 {
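            // This `Acquire` fence pairs with the `Release` decrements above,
            // ensuring every other thread's accesses to the data
            // happen-before we destroy it.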
            atomic::fence(Ordering::Acquire);
            true
        } else {
            false
        }
    }

    #[inline]
    fn try_into_other<T: ?Sized>(
        &self,
        inner: *mut SharedInner<T>,
    ) -> Result<*mut LocalInner<T>, *mut SharedInner<T>> {
        if self.is_unique() {
            // Safety:
            // a) both types are the same struct and identical other than usage of different META types
            // b) type is `repr(C)` so we know the layout
            // c) although not required, we ensure same alignment
            // d) we validate at compile time that `LocalMeta` and `SharedMeta` are the same size
            // e) Cell<usize> and AtomicUsize are same size and layout
            // f) only the two pre-defined metadata pairs are allowed
            Ok(inner as *mut LocalInner<T>)
        } else {
            Err(inner)
        }
    }

    #[inline]
    fn try_to_other<T: ?Sized>(
        &self,
        inner: *mut SharedInner<T>,
    ) -> Result<*mut LocalInner<T>, *mut SharedInner<T>> {
        // Never safe: both an atomic (shared) and a non-atomic (local) handle
        // would then update the same count concurrently.
        Err(inner)
    }
}
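
// A minimal sketch (not part of the original file) exercising the counting
// protocol on the metadata types directly, using only the `Algorithm` trait
// items defined above; calls are fully qualified to avoid any ambiguity with
// `Clone::clone`.
#[cfg(test)]
mod tests {
    use super::*;

    type L = LocalMeta;
    type S = SharedMeta;

    #[test]
    fn local_count_protocol() {
        let meta = <L as Algorithm<L, S>>::create();
        assert!(meta.is_unique());

        // A second reference: no longer unique.
        <L as Algorithm<L, S>>::clone(&meta);
        assert!(!meta.is_unique());

        // The first drop returns to a count of one; the second reaches zero.
        assert!(!<L as Algorithm<L, S>>::drop(&meta));
        assert!(<L as Algorithm<L, S>>::drop(&meta));
    }

    #[test]
    fn shared_count_protocol() {
        let meta = <S as Algorithm<S, L>>::create();
        <S as Algorithm<S, L>>::clone(&meta);
        assert!(!meta.is_unique());

        assert!(!<S as Algorithm<S, L>>::drop(&meta));
        assert!(meta.is_unique());
        assert!(<S as Algorithm<S, L>>::drop(&meta));
    }
}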