urcu/rcu/reference.rs

use std::marker::PhantomData;
use std::ops::Deref;
use std::ptr::NonNull;

use crate::rcu::callback::{RcuCallFn, RcuDeferFn};
use crate::rcu::context::{RcuContext, RcuDeferContext, RcuReadContext};
use crate::rcu::flavor::RcuFlavor;
use crate::utility::*;

/// This trait defines an RCU reference that can be owned after an RCU grace period.
///
/// #### Safety
///
/// * The underlying reference must be cleaned up upon dropping (see below).
/// * There may be immutable borrows of the underlying reference.
/// * There must not be any mutable borrows of the underlying reference.
///
/// #### Dropping
///
/// An [`RcuRef`] should always clean up when [`Drop::drop`] is executed by taking
/// ownership of the underlying value and dropping it.
///
/// * We cannot call [`RcuContext::rcu_synchronize`] since we can't be sure whether
///   an RCU read lock is currently held[^mborrow].
///
/// Because an [`RcuRef`] can be sent to any thread, we cannot guarantee that the
/// thread executing [`Drop::drop`] is properly registered.
///
/// * We cannot call [`RcuDeferContext::rcu_defer`] since we can't enforce that the
///   thread is registered with the RCU defer mechanism[^mborrow].
/// * We cannot call [`RcuReadContext::rcu_call`] since we can't enforce that the
///   thread is registered with the RCU read mechanism[^cborrow].
///
/// The only way to keep the safety guarantees of this crate is to use the custom
/// cleanup thread through [`RcuRef::safe_cleanup`]. It is similar to the built-in
/// [`RcuReadContext::rcu_call`], except it doesn't expect the calling thread to be
/// registered with RCU in any way.
///
/// The downside is that it is most likely worse than [`RcuReadContext::rcu_call`] in
/// every way. If this becomes a performance problem, the owner of an [`RcuRef`] can
/// always use [`RcuRef::defer_cleanup`] or [`RcuRef::call_cleanup`] before
/// [`Drop::drop`] is called.
///
/// [^mborrow]: Unless your [`RcuRef`] has a mutable borrow of an [`RcuContext`].
/// [^cborrow]: Unless your [`RcuRef`] has an immutable borrow of an [`RcuContext`].
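///
/// #### Example
///
/// A minimal sketch of the cleanup strategies described above; the `context` and
/// `*_ref` bindings are hypothetical placeholders, and only the [`RcuRef`] methods
/// shown are defined by this trait.
///
/// ```ignore
/// // Blocking: wait for the grace period, then own the value.
/// let value = some_ref.take_ownership(&mut context);
///
/// // Alternatives for threads registered with the defer/read mechanisms.
/// other_ref.defer_cleanup(&mut context);
/// another_ref.call_cleanup(&context);
///
/// // Fallback: dropping an implementor such as [`RcuRefBox`] routes the cleanup
/// // through [`RcuRef::safe_cleanup`], which works from any thread.
/// drop(boxed_ref);
/// ```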
#[must_use]
pub unsafe trait RcuRef<F> {
    /// The output type after taking ownership.
    type Output;

    /// Take ownership of the reference without waiting for the grace period.
    ///
    /// #### Safety
    ///
    /// You must wait for the RCU grace period to end before taking ownership.
    unsafe fn take_ownership_unchecked(self) -> Self::Output;

    /// Take ownership of the reference, blocking until the grace period ends.
    fn take_ownership<C>(self, context: &mut C) -> Self::Output
    where
        Self: Sized,
        C: RcuContext<Flavor = F>,
    {
        context.rcu_synchronize();

        // SAFETY: The RCU grace period has ended.
        unsafe { self.take_ownership_unchecked() }
    }

    /// Configure a cleanup callback to be called after the grace period.
    ///
    /// #### Note
    ///
    /// The function might internally call [`RcuContext::rcu_synchronize`] and block.
    ///
    /// The callback is guaranteed to be executed on the current thread.
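    ///
    /// #### Example
    ///
    /// A minimal sketch; `some_ref` and `context` are hypothetical bindings for an
    /// [`RcuRef`] and a context implementing [`RcuDeferContext`].
    ///
    /// ```ignore
    /// // The cleanup runs on this thread once the grace period has ended.
    /// some_ref.defer_cleanup(&mut context);
    /// ```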
    fn defer_cleanup<C>(self, context: &mut C)
    where
        Self: Sized,
        C: RcuDeferContext<Flavor = F>,
    {
        context.rcu_defer(RcuDeferFn::<_, F>::new(move || {
            // SAFETY: The caller already executed an RCU synchronization.
            unsafe {
                self.take_ownership_unchecked();
            }
        }))
    }

    /// Configure a cleanup callback to be called after the grace period.
    ///
    /// #### Note
    ///
    /// The function will internally call [`RcuReadContext::rcu_read_lock`].
    ///
    /// The reference must implement [`Send`] since the cleanup will be executed on a helper thread.
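    ///
    /// #### Example
    ///
    /// A minimal sketch; `some_ref` and `context` are hypothetical bindings for an
    /// [`RcuRef`] that is [`Send`] and a context implementing [`RcuReadContext`].
    ///
    /// ```ignore
    /// // The cleanup is queued with `rcu_call` and executed on a helper thread
    /// // after the grace period.
    /// some_ref.call_cleanup(&context);
    /// ```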
    fn call_cleanup<C>(self, context: &C)
    where
        Self: Sized + Send + 'static,
        C: RcuReadContext<Flavor = F> + 'static,
    {
        context.rcu_call(RcuCallFn::new(move || {
            // SAFETY: The caller already executed an RCU synchronization.
            unsafe {
                self.take_ownership_unchecked();
            }
        }));
    }

    /// Configure a cleanup callback to be called after the grace period.
    ///
    /// #### Note
    ///
    /// The cleanup is executed on the custom cleanup thread, which performs the RCU
    /// synchronization itself, so the calling thread does not need to be registered
    /// with RCU in any way.
    fn safe_cleanup(self)
    where
        Self: Sized + Send + 'static,
        F: RcuFlavor,
    {
        F::rcu_cleanup(Box::new(move |context| {
            context.rcu_synchronize();

            // SAFETY: An RCU synchronization barrier was called.
            unsafe {
                self.take_ownership_unchecked();
            }
        }));
    }
}

/// #### Safety
///
/// It is the responsibility of the underlying type to be safe.
unsafe impl<T, F> RcuRef<F> for Option<T>
where
    T: RcuRef<F>,
{
    type Output = Option<T::Output>;

    unsafe fn take_ownership_unchecked(self) -> Self::Output {
        self.map(|r| r.take_ownership_unchecked())
    }
}

/// #### Safety
///
/// It is the responsibility of the underlying type to be safe.
unsafe impl<T, F> RcuRef<F> for Vec<T>
where
    T: RcuRef<F>,
{
    type Output = Vec<T::Output>;

    unsafe fn take_ownership_unchecked(self) -> Self::Output {
        self.into_iter()
            .map(|r| r.take_ownership_unchecked())
            .collect()
    }
}

macro_rules! impl_rcu_ref_for_tuple {
    ($($x:literal),*) => {
        paste::paste!{
            /// #### Safety
            ///
            /// It is the responsibility of the underlying types to be safe.
            unsafe impl<$([<T $x>]),*, F> RcuRef<F> for ($([<T $x>]),*)
            where
                $([<T $x>]: RcuRef<F>),*,
            {
                type Output = ($([<T $x>]::Output),*,);

                unsafe fn take_ownership_unchecked(self) -> Self::Output {
                    (
                        $(self.$x.take_ownership_unchecked()),*,
                    )
                }
            }
        }
    };
}

impl_rcu_ref_for_tuple!(0, 1);
impl_rcu_ref_for_tuple!(0, 1, 2);
impl_rcu_ref_for_tuple!(0, 1, 2, 3);
impl_rcu_ref_for_tuple!(0, 1, 2, 3, 4);
impl_rcu_ref_for_tuple!(0, 1, 2, 3, 4, 5);
impl_rcu_ref_for_tuple!(0, 1, 2, 3, 4, 5, 6);

/// An owned RCU reference to an element removed from a container.
pub struct BoxRefOwned<T>(Box<T>);

impl<T> Deref for BoxRefOwned<T>
where
    T: Deref,
{
    type Target = T::Target;

    fn deref(&self) -> &Self::Target {
        self.0.deref().deref()
    }
}

/// #### Safety
///
/// It is safe to send to another thread if the underlying `T` is `Send`.
unsafe impl<T: Send> Send for BoxRefOwned<T> {}

/// #### Safety
///
/// It is safe to have references from multiple threads if the underlying `T` is `Sync`.
unsafe impl<T: Sync> Sync for BoxRefOwned<T> {}

/// Defines an RCU reference to an element removed from a container.
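///
/// #### Example
///
/// A minimal sketch; the container, its `pop_back` method, and the bindings are
/// hypothetical placeholders, since `RcuRefBox` values are only created inside
/// this crate.
///
/// ```ignore
/// // Removing a node yields an `RcuRefBox`; readers may still access the node.
/// let removed = list.pop_back();
///
/// // The removed element can still be read through `Deref`.
/// assert_eq!(*removed, expected);
///
/// // Dropping the reference hands it to the cleanup thread, which waits for the
/// // grace period before freeing the node (see the `Drop` implementation below).
/// drop(removed);
/// ```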
pub struct RcuRefBox<T, F>
where
    T: Send + 'static,
    F: RcuFlavor + 'static,
{
    ptr: *mut T,
    _unsend: PhantomUnsend<(T, F)>,
    _unsync: PhantomUnsync<(T, F)>,
}

impl<T, F> RcuRefBox<T, F>
where
    T: Send,
    F: RcuFlavor,
{
    pub(crate) fn new(ptr: NonNull<T>) -> Self {
        Self {
            ptr: ptr.as_ptr(),
            _unsend: PhantomData,
            _unsync: PhantomData,
        }
    }
}

/// #### Safety
///
/// * The underlying reference is cleaned up upon dropping.
/// * There may be immutable borrows of the underlying reference.
/// * There cannot be mutable borrows of the underlying reference.
unsafe impl<T, F> RcuRef<F> for RcuRefBox<T, F>
where
    T: Send,
    F: RcuFlavor,
{
    type Output = BoxRefOwned<T>;

    unsafe fn take_ownership_unchecked(mut self) -> Self::Output {
        // SAFETY: There are no readers after the RCU grace period.
        let output = BoxRefOwned(Box::from_raw(self.ptr));

        // Null out the pointer so that dropping `self` does not schedule a cleanup.
        self.ptr = std::ptr::null_mut();

        output
    }
}

/// #### Safety
///
/// An RCU reference can be sent to another thread if `T` implements [`Send`].
unsafe impl<T, F> Send for RcuRefBox<T, F>
where
    T: Send,
    F: RcuFlavor,
{
}

impl<T, F> Drop for RcuRefBox<T, F>
where
    T: Send + 'static,
    F: RcuFlavor + 'static,
{
    fn drop(&mut self) {
        if let Some(ptr) = NonNull::new(self.ptr) {
            Self::new(ptr).safe_cleanup();
        }
    }
}

impl<T, F> Deref for RcuRefBox<T, F>
where
    T: Send + Deref,
    F: RcuFlavor,
{
    type Target = T::Target;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The pointer is only null when dropping.
        unsafe { self.ptr.as_ref_unchecked().deref() }
    }
}

mod asserts {
    use super::*;

    use static_assertions::{assert_impl_all, assert_not_impl_all};

    use crate::rcu::default::RcuDefaultFlavor;
    use crate::utility::asserts::*;

    mod rcu_ref {
        use super::*;

        // T: Send + !Sync
        assert_impl_all!(RcuRefBox<SendButNotSync, RcuDefaultFlavor>: Send);
        assert_not_impl_all!(RcuRefBox<SendButNotSync, RcuDefaultFlavor>: Sync);

        // T: Send + Sync
        assert_impl_all!(RcuRefBox<SendAndSync, RcuDefaultFlavor>: Send);
        assert_not_impl_all!(RcuRefBox<SendAndSync, RcuDefaultFlavor>: Sync);
    }

    mod rcu_ref_owned {
        use super::*;

        // T: !Send + !Sync
        assert_not_impl_all!(BoxRefOwned<NotSendNotSync>: Send);
        assert_not_impl_all!(BoxRefOwned<NotSendNotSync>: Sync);

        // T: Send + !Sync
        assert_impl_all!(BoxRefOwned<SendButNotSync>: Send);
        assert_not_impl_all!(BoxRefOwned<SendButNotSync>: Sync);

        // T: !Send + Sync
        assert_not_impl_all!(BoxRefOwned<NotSendButSync>: Send);
        assert_impl_all!(BoxRefOwned<NotSendButSync>: Sync);

        // T: Send + Sync
        assert_impl_all!(BoxRefOwned<SendAndSync>: Send);
        assert_impl_all!(BoxRefOwned<SendAndSync>: Sync);
    }
}