// concurrent_slotmap/epoch.rs

// This module is heavily inspired by crossbeam-epoch v0.9, licensed under either of
// * Apache License, Version 2.0 (LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0)
// * MIT license (LICENSE-MIT or http://opensource.org/licenses/MIT)
// at your option.
//
// The differences include:
// * Being stripped down to only the global and local epochs and the guard.
// * Allowing retrieving the epoch a guard is pinned in.
// * The list of locals is not lock-free (as there's no useful work for threads to do concurrently
//   anyway) but instead protected by a mutex.
// * Implementing `Clone` for `Guard` to allow use inside `Cow`.
// * Other small miscellaneous changes.
13
use crate::{Backoff, CacheAligned};
use alloc::{borrow::Cow, boxed::Box};
use core::{
    cell::Cell,
    fmt,
    marker::PhantomData,
    ptr::NonNull,
    sync::atomic::{
        self, AtomicBool, AtomicU32, AtomicUsize,
        Ordering::{Acquire, Relaxed, Release, SeqCst},
    },
};
26
/// The bit of `Local::epoch` which signifies that the participant is pinned.
const PINNED_BIT: u32 = 1 << 0;

/// The number of pinnings after which a participant tries to advance the global epoch.
// Under Miri, execution is orders of magnitude slower, so use a much smaller interval to still
// exercise epoch advancement in short test runs.
#[cfg(not(miri))]
pub(crate) const PINNINGS_BETWEEN_ADVANCE: usize = 128;
#[cfg(miri)]
pub(crate) const PINNINGS_BETWEEN_ADVANCE: usize = 4;
35
/// A handle to a global epoch.
///
/// Handles are reference-counted: cloning increments `Global::handle_count` and dropping the
/// last handle deallocates the `Global`.
pub struct GlobalHandle {
    // Valid for as long as at least one handle exists; see the `Clone` and `Drop` impls.
    ptr: NonNull<Global>,
}
40
// SAFETY: `Global` is `Send + Sync` and its lifetime is enforced with reference counting.
unsafe impl Send for GlobalHandle {}

// SAFETY: `Global` is `Send + Sync` and its lifetime is enforced with reference counting.
unsafe impl Sync for GlobalHandle {}
46
47impl Default for GlobalHandle {
48    /// Creates a new global epoch.
49    fn default() -> Self {
50        Self::new()
51    }
52}
53
impl GlobalHandle {
    /// Creates a new global epoch.
    #[must_use]
    pub fn new() -> Self {
        Global::register()
    }

    /// Registers a new local epoch in the global list of participants.
    #[inline]
    #[must_use]
    pub fn register_local(&self) -> UniqueLocalHandle {
        Local::register(self)
    }

    /// Returns the current value of the global epoch counter.
    //
    // NOTE(review): the load is `Relaxed`; callers that need to synchronize with epoch
    // advancement appear to rely on the fences issued while pinning (see `LocalHandle::pin`).
    #[inline]
    pub(crate) fn epoch(&self) -> u32 {
        self.global().epoch.load(Relaxed)
    }

    #[inline]
    fn global(&self) -> &Global {
        // SAFETY: The constructor of `GlobalHandle` must ensure that the pointer stays valid for
        // the lifetime of the handle.
        unsafe { self.ptr.as_ref() }
    }
}
80
impl Clone for GlobalHandle {
    /// Creates a new handle to the same global epoch.
    #[inline]
    fn clone(&self) -> Self {
        // Guard against reference-count overflow (mirroring the overflow guard `Arc::clone`
        // uses): if an absurd number of handles was leaked, abort rather than risk the count
        // wrapping and causing a use-after-free.
        #[allow(clippy::cast_sign_loss)]
        if self.global().handle_count.fetch_add(1, Relaxed) > isize::MAX as usize {
            abort();
        }

        // SAFETY: We incremented the `handle_count` above, such that the handle's drop
        // implementation cannot drop the `Global` while another handle still exists.
        unsafe { GlobalHandle { ptr: self.ptr } }
    }
}
95
96impl fmt::Debug for GlobalHandle {
97    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
98        f.debug_struct("GlobalHandle").finish_non_exhaustive()
99    }
100}
101
102impl PartialEq for GlobalHandle {
103    /// Returns `true` if both handles refer to the same global epoch.
104    #[inline]
105    fn eq(&self, other: &Self) -> bool {
106        self.ptr == other.ptr
107    }
108}
109
110impl Eq for GlobalHandle {}
111
impl Drop for GlobalHandle {
    /// Drops the handle.
    ///
    /// If there are no other handles to this global epoch, the global epoch will be dropped.
    #[inline]
    fn drop(&mut self) {
        // The `Release` ordering pairs with the `Acquire` load in `Global::unregister`, so that
        // all accesses made through other handles happen-before the deallocation.
        if self.global().handle_count.fetch_sub(1, Release) == 1 {
            // SAFETY: The handle count has gone to zero, which means that no other threads can
            // register a new handle. `Global::unregister` ensures that the drop is synchronized
            // with the above decrement, such that no access to the `Global` can be ordered after
            // the drop.
            unsafe { Global::unregister(self.ptr) };
        }
    }
}
127
/// A [`LocalHandle`] that can be safely sent to other threads.
///
/// This is enforced through ownership over the local epoch and by using borrowed guards.
pub struct UniqueLocalHandle {
    // Invariant: this is the only `LocalHandle` referencing its `Local` (see the `Send` impl).
    inner: LocalHandle,
}
134
135impl UniqueLocalHandle {
136    /// This function behaves the same as [`LocalHandle::pin`], except that it returns a guard
137    /// whose lifetime is bound to this handle.
138    #[inline]
139    #[must_use]
140    pub fn pin(&self) -> Guard<'_> {
141        self.inner.pin()
142    }
143
144    /// Returns a handle to the global epoch.
145    #[inline]
146    #[must_use]
147    pub fn global(&self) -> &GlobalHandle {
148        self.inner.global()
149    }
150
151    /// Returns the inner handle, consuming the unique wrapper.
152    ///
153    /// This allows you to make clones of the handle and get access to `'static` guards, however
154    /// you lose the ability to send the handle to another thread in turn.
155    #[inline]
156    #[must_use]
157    pub fn into_inner(self) -> LocalHandle {
158        self.inner
159    }
160}
161
// SAFETY: The constructor of `UniqueLocalHandle` must ensure that the handle is unique, which
// means that no other `LocalHandle`s referencing the same `Local` can exist. This, together with
// the fact that we don't allow any access to the `Local` other than borrowed, ensures that the
// `Local` cannot be accessed from more than one thread at a time.
unsafe impl Send for UniqueLocalHandle {}
167
168impl fmt::Debug for UniqueLocalHandle {
169    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
170        f.debug_struct("UniqueLocalHandle").finish_non_exhaustive()
171    }
172}
173
/// A handle to a local epoch.
pub struct LocalHandle {
    // Valid for as long as at least one handle or guard for this participant exists; see
    // `Local::handle_count` and `Local::guard_count`.
    ptr: NonNull<Local>,
}
178
impl LocalHandle {
    /// Pins the local epoch, such that no accesses done while the returned `Guard` exists can
    /// cross an epoch boundary. It is important to pin the local epoch before doing any kind of
    /// access, such that no accesses can bleed into the previous epoch. Similarly, the pin must
    /// persist for as long as any accesses from the pinned epoch can persist.
    //
    // The `unwrap` below can't actually happen in any reasonable program.
    #[allow(clippy::missing_panics_doc)]
    #[inline]
    #[must_use]
    pub fn pin(&self) -> Guard<'static> {
        let local = self.local();
        let global = local.global();

        let guard_count = local.guard_count.get();
        local.guard_count.set(guard_count.checked_add(1).unwrap());

        // Only the first guard actually pins; nested guards just bump the count.
        if guard_count == 0 {
            let global_epoch = global.epoch.load(Relaxed);
            let new_epoch = global_epoch | PINNED_BIT;
            local.epoch.store(new_epoch, Relaxed);

            // This fence acts two-fold:
            // * It synchronizes with the `Release` store of the global epoch in
            //   `Global::try_advance`, which in turn ensures that the accesses all other
            //   participants did in a previous epoch are visible to us going forward when crossing
            //   an epoch boundary.
            // * It ensures that no accesses we do going forward can be ordered before this
            //   point, therefore "bleeding" into the previous epoch, when crossing an epoch
            //   boundary.
            atomic::fence(SeqCst);

            // `pin_count` is a wrapping heuristic counter used only to rate-limit advancement
            // attempts below.
            local.pin_count.set(local.pin_count.get().wrapping_add(1));

            if local.pin_count.get() % PINNINGS_BETWEEN_ADVANCE == 0 {
                global.try_advance();
            }
        }

        // SAFETY:
        // * We incremented the `guard_count` above, such that the guard's drop implementation
        //   cannot unpin the participant while another guard still exists.
        // * We made sure to pin the participant if it wasn't already and made sure that accesses
        //   from this point on can't leak into the previous epoch.
        unsafe { Guard::new(self.ptr) }
    }

    /// Returns a handle to the global epoch.
    #[inline]
    #[must_use]
    pub fn global(&self) -> &GlobalHandle {
        &self.local().global
    }

    #[inline]
    fn local(&self) -> &Local {
        // SAFETY: The constructor of `LocalHandle` must ensure that the pointer stays valid for the
        // lifetime of the handle.
        unsafe { self.ptr.as_ref() }
    }
}
240
241impl Clone for LocalHandle {
242    /// Creates a new handle to the same local epoch.
243    #[inline]
244    fn clone(&self) -> Self {
245        let local = self.local();
246
247        local.handle_count.set(local.handle_count.get() + 1);
248
249        // SAFETY: We incremented the `handle_count` above, such that the handle's drop
250        // implementation cannot drop the `Local` while another handle still exists.
251        unsafe { LocalHandle { ptr: self.ptr } }
252    }
253}
254
255impl fmt::Debug for LocalHandle {
256    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
257        f.debug_struct("LocalHandle").finish_non_exhaustive()
258    }
259}
260
impl Drop for LocalHandle {
    /// Drops the handle.
    ///
    /// If there are no other handles or guards that refer to this local epoch, it will be
    /// unregistered from the global list of participants.
    #[inline]
    fn drop(&mut self) {
        let local = self.local();

        // SAFETY: The constructor of `LocalHandle` must ensure that `handle_count` has been
        // incremented before construction of the handle.
        unsafe { local.handle_count.set(local.handle_count.get() - 1) };

        // A live guard keeps the `Local` registered; in that case `Guard::drop` performs the
        // unregistration once the last guard goes away.
        if local.handle_count.get() == 0 && local.guard_count.get() == 0 {
            // SAFETY: We checked that both the handle count and guard count went to zero, which
            // means that no other references to the `Local` can exist after this point.
            unsafe { Local::unregister(self.ptr) };
        }
    }
}
281
/// A guard that keeps the local epoch pinned.
pub struct Guard<'a> {
    local: NonNull<Local>,
    // Ties the guard's lifetime to the handle it was created from (`'static` from
    // `LocalHandle::pin`, borrowed from `UniqueLocalHandle::pin`) without owning any data.
    marker: PhantomData<&'a ()>,
}
287
impl Guard<'_> {
    /// Creates a guard for the given participant.
    ///
    /// # Safety
    ///
    /// The caller must have incremented `local.guard_count` beforehand, must have pinned the
    /// participant, and must ensure `local` stays valid for the guard's lifetime.
    unsafe fn new(local: NonNull<Local>) -> Self {
        Guard {
            local,
            marker: PhantomData,
        }
    }

    /// Returns a handle to the global epoch.
    #[inline]
    #[must_use]
    pub fn global(&self) -> &GlobalHandle {
        &self.local().global
    }

    /// Tries to advance the global epoch. Returns `true` if the epoch was successfully advanced.
    #[allow(clippy::must_use_candidate)]
    #[inline]
    pub fn try_advance_global(&self) -> bool {
        let local = self.local();
        // This prevents us from trying to advance the global epoch in `LocalHandle::pin`.
        // (Resetting the counter means `pin_count % PINNINGS_BETWEEN_ADVANCE` won't hit zero
        // again for a while.)
        local.pin_count.set(0);

        local.global().try_advance()
    }

    #[inline]
    fn local(&self) -> &Local {
        // SAFETY: The constructor of `Guard` must ensure that the pointer stays valid for the
        // lifetime of the guard.
        unsafe { self.local.as_ref() }
    }
}
321
impl Clone for Guard<'_> {
    /// Creates a new guard for the same local epoch.
    //
    // The `unwrap` below can't actually happen in any reasonable program; a checked increment is
    // used so the count can never silently wrap.
    #[inline]
    fn clone(&self) -> Self {
        let local = self.local();

        let guard_count = local.guard_count.get();
        local.guard_count.set(guard_count.checked_add(1).unwrap());

        // SAFETY:
        // * We incremented the `guard_count` above, such that the guard's drop implementation
        //   cannot unpin the participant while another guard still exists.
        // * The participant is already pinned, as this guard's existence is a proof of that.
        unsafe { Guard::new(self.local) }
    }
}
338
339impl fmt::Debug for Guard<'_> {
340    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
341        f.debug_struct("Guard").finish_non_exhaustive()
342    }
343}
344
impl Drop for Guard<'_> {
    /// Drops the guard.
    ///
    /// If there are no other guards keeping the local epoch pinned, it will be unpinned. If there
    /// are also no handles to the local epoch, it will be unregistered from the global list of
    /// participants.
    #[inline]
    fn drop(&mut self) {
        let local = self.local();

        // SAFETY: The constructor of `Guard` must ensure that the `guard_count` has been
        // incremented before construction of the guard.
        unsafe { local.guard_count.set(local.guard_count.get() - 1) };

        if local.guard_count.get() == 0 {
            // Unpin by clearing the local epoch (including `PINNED_BIT`).
            //
            // SAFETY:
            // * The `Release` ordering synchronizes with the `Acquire` fence in
            //   `Global::try_advance`, which in turn ensures that all accesses done by us until
            //   this point are visible to all other participants when crossing an epoch boundary.
            // * We checked that the guard count went to zero, which means that no other guards can
            //   exist and it is safe to unpin the participant.
            unsafe { local.epoch.store(0, Release) };

            if local.handle_count.get() == 0 {
                // SAFETY: We checked that both the handle count and guard count went to zero, which
                // means that no other references to the `Local` can exist after this point.
                unsafe { Local::unregister(self.local) };
            }
        }
    }
}
376
377impl<'a> From<&'a Guard<'a>> for Cow<'a, Guard<'a>> {
378    #[inline]
379    fn from(guard: &'a Guard<'a>) -> Self {
380        Cow::Borrowed(guard)
381    }
382}
383
384impl<'a> From<Guard<'a>> for Cow<'_, Guard<'a>> {
385    #[inline]
386    fn from(guard: Guard<'a>) -> Self {
387        Cow::Owned(guard)
388    }
389}
390
#[repr(C)]
struct Global {
    /// The head of the global list of `Local`s participating in the memory reclamation.
    local_list_head: Cell<Option<NonNull<Local>>>,

    /// The lock that protects the local list. A simple spinlock flag; see `lock_local_list` /
    /// `try_lock_local_list` / `unlock_local_list`.
    local_list_lock: AtomicBool,

    /// The number of `GlobalHandle`s to this `Global` that exist.
    handle_count: AtomicUsize,

    // NOTE(review): with `#[repr(C)]` this presumably pushes `epoch` onto its own cache line to
    // avoid false sharing with the fields above — depends on `CacheAligned`'s definition.
    _alignment: CacheAligned,

    /// The global epoch counter. This can only be advanced if all pinned local epochs are pinned
    /// in the current global epoch.
    epoch: AtomicU32,
}
408
// SAFETY: Access to the linked list of locals is synchronized with a lock (`local_list_lock`);
// all other shared fields are atomics.
unsafe impl Sync for Global {}
411
impl Global {
    /// Allocates a new `Global` and returns the first handle to it.
    fn register() -> GlobalHandle {
        let global = Box::new(Global {
            local_list_head: Cell::new(None),
            local_list_lock: AtomicBool::new(false),
            handle_count: AtomicUsize::new(1),
            _alignment: CacheAligned,
            epoch: AtomicU32::new(0),
        });

        // SAFETY: `Box` is guaranteed to be non-null.
        let ptr = unsafe { NonNull::new_unchecked(Box::into_raw(global)) };

        // SAFETY: We initialized the `handle_count` to 1, such that the handle's drop
        // implementation cannot drop the `Global` while another handle still exists.
        unsafe { GlobalHandle { ptr } }
    }

    /// Deallocates the `Global`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `global` is valid and that the handle count has gone to zero,
    /// such that the `Global` can't be accessed after this point.
    #[inline(never)]
    unsafe fn unregister(global: NonNull<Global>) {
        // `Acquire` synchronizes with the `Release` ordering in `GlobalHandle::drop`.
        //
        // SAFETY: The caller must ensure that `global` is a valid pointer to a `Global`.
        unsafe { global.as_ref() }.handle_count.load(Acquire);

        // SAFETY: The caller must ensure that the `Global` can't be accessed after this point.
        let _ = unsafe { Box::from_raw(global.as_ptr()) };
    }

    /// Acquires the spinlock protecting the local list, backing off until it succeeds.
    fn lock_local_list(&self) {
        let mut backoff = Backoff::new();

        loop {
            match self
                .local_list_lock
                .compare_exchange_weak(false, true, Acquire, Relaxed)
            {
                Ok(_) => break,
                Err(_) => backoff.spin(),
            }
        }
    }

    /// Tries to acquire the spinlock protecting the local list without spinning. Returns `true`
    /// if the lock was acquired.
    fn try_lock_local_list(&self) -> bool {
        self.local_list_lock
            .compare_exchange(false, true, Acquire, Relaxed)
            .is_ok()
    }

    /// Releases the spinlock protecting the local list.
    ///
    /// # Safety
    ///
    /// The caller must have acquired the lock via `lock_local_list` or `try_lock_local_list` and
    /// must not access the local list after this point.
    unsafe fn unlock_local_list(&self) {
        self.local_list_lock.store(false, Release);
    }

    /// Tries to advance the global epoch. Fails if another thread holds the local-list lock or if
    /// any participant is still pinned in a different (i.e. the previous) epoch. Returns `true`
    /// if the epoch was advanced.
    #[inline(never)]
    fn try_advance(&self) -> bool {
        let global_epoch = self.epoch.load(Relaxed);

        // Ensure that none of the loads of the local epochs can be ordered before the load of the
        // global epoch.
        atomic::fence(SeqCst);

        if !self.try_lock_local_list() {
            // Another thread beat us to it.
            return false;
        }

        let mut head = self.local_list_head.get();

        while let Some(local) = head {
            // SAFETY: The list of locals always contains valid pointers.
            let local = unsafe { local.as_ref() };
            let local_epoch = local.epoch.load(Relaxed);

            // A participant pinned in an older epoch blocks advancement (the global epoch may
            // never get more than one step ahead of a pinned participant).
            if local_epoch & PINNED_BIT != 0 && local_epoch & !PINNED_BIT != global_epoch {
                // SAFETY: We locked the local list above.
                unsafe { self.unlock_local_list() };

                return false;
            }

            head = local.next.get();
        }

        // SAFETY: We locked the local list above.
        unsafe { self.unlock_local_list() };

        // Advance in steps of 2: bit 0 of a local epoch is `PINNED_BIT`, so keeping the global
        // counter even means `epoch | PINNED_BIT` never collides with another epoch value.
        let new_epoch = global_epoch.wrapping_add(2);

        // This essentially acts as a global `AcqRel` barrier. Only after ensuring that all
        // participants are pinned in the current global epoch do we synchronize with them using
        // the `Acquire` fence, which synchronizes with every participant's `Release` store of its
        // local epoch in `Guard::drop`, ensuring that all accesses done by every participant until
        // they were unpinned are visible here. The `Release` ordering on the global epoch then
        // ensures that all participants subsequently pinned in the new epoch also see all accesses
        // of all other participants from the previous epoch.
        atomic::fence(Acquire);
        self.epoch.store(new_epoch, Release);

        true
    }
}
513
#[repr(C)]
struct Local {
    /// The next `Local` in the global list of participants.
    ///
    /// This `Cell` (like `prev`) may be accessed by other threads, but only while holding
    /// `Global::local_list_lock`.
    next: Cell<Option<NonNull<Self>>>,

    /// The previous `Local` in the global list of participants.
    prev: Cell<Option<NonNull<Self>>>,

    /// The local epoch counter. When this epoch is pinned, it ensures that the global epoch cannot
    /// be advanced more than one step until it is unpinned.
    epoch: AtomicU32,

    // NOTE(review): with `#[repr(C)]` this presumably separates the fields above (shared with
    // other threads) from the thread-private fields below — depends on `CacheAligned`'s
    // definition.
    _alignment: CacheAligned,

    /// A handle to the `Global` which this `Local` is participating in.
    global: GlobalHandle,

    /// The number of `LocalHandle`s to this participant that exist. Only accessed through the
    /// (non-`Send`) handles/guards, i.e. from one thread at a time.
    handle_count: Cell<usize>,

    /// The number of `Guard`s of this participant that exist. Only accessed through the
    /// (non-`Send`) handles/guards, i.e. from one thread at a time.
    guard_count: Cell<usize>,

    /// The number of pinnings this participant has gone through in total. Wraps on overflow; used
    /// only as a heuristic in `LocalHandle::pin`.
    pin_count: Cell<usize>,
}
540
impl Local {
    /// Allocates a new `Local`, links it into the global list of participants and returns the
    /// first (unique) handle to it.
    #[inline(never)]
    fn register(global: &GlobalHandle) -> UniqueLocalHandle {
        let mut local = Box::new(Local {
            next: Cell::new(None),
            prev: Cell::new(None),
            epoch: AtomicU32::new(0),
            _alignment: CacheAligned,
            global: global.clone(),
            handle_count: Cell::new(1),
            guard_count: Cell::new(0),
            pin_count: Cell::new(0),
        });

        let global = global.global();

        global.lock_local_list();

        // Push the new `Local` at the front of the doubly-linked list.
        let head = global.local_list_head.get();
        local.next = Cell::new(head);

        // SAFETY: `Box` is guaranteed to be non-null.
        let ptr = unsafe { NonNull::new_unchecked(Box::into_raw(local)) };

        global.local_list_head.set(Some(ptr));

        if let Some(head) = head {
            // SAFETY: The list of locals always contains valid pointers.
            unsafe { head.as_ref() }.prev.set(Some(ptr));
        }

        // SAFETY: We locked the local list above.
        unsafe { global.unlock_local_list() };

        // SAFETY: We initialized the `handle_count` to 1, such that the handle's drop
        // implementation cannot drop the `Local` while another handle still exists.
        let handle = unsafe { LocalHandle { ptr } };

        // SAFETY: We just allocated this `Local`, and this is the only existing handle to it.
        unsafe { UniqueLocalHandle { inner: handle } }
    }

    /// Unlinks the `Local` from the global list of participants and deallocates it.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` is in the list of locals and that no other references to
    /// the `Local` exist (both its handle count and guard count are zero), such that it can't be
    /// accessed after this point.
    #[inline(never)]
    unsafe fn unregister(ptr: NonNull<Self>) {
        // SAFETY: The caller must ensure that `local` is in the list of locals, which means it must
        // be valid.
        let local = unsafe { ptr.as_ref() };
        let global = local.global.global();

        global.lock_local_list();

        if let Some(prev) = local.prev.get() {
            // SAFETY: The list of locals always contains valid pointers.
            unsafe { prev.as_ref() }.next.set(local.next.get());
        } else {
            // No predecessor: this `Local` was the list head.
            global.local_list_head.set(local.next.get());
        }

        if let Some(next) = local.next.get() {
            // SAFETY: The list of locals always contains valid pointers.
            unsafe { next.as_ref() }.prev.set(local.prev.get());
        }

        // SAFETY: We locked the local list above.
        unsafe { global.unlock_local_list() };

        // SAFETY: The caller must ensure that `local` can't be accessed after this point.
        let _ = unsafe { Box::from_raw(ptr.as_ptr()) };
    }

    #[inline]
    fn global(&self) -> &Global {
        self.global.global()
    }
}
616
/// Polyfill for `core::intrinsics::abort`.
///
/// Panicking while another panic is already unwinding (triggered here by the panicking `Drop`
/// below) is a double panic, which brings the process down without relying on `std`.
#[cold]
fn abort() -> ! {
    struct DoublePanic;

    impl Drop for DoublePanic {
        fn drop(&mut self) {
            panic!();
        }
    }

    let _guard = DoublePanic;
    panic!();
}