hazptr/hazard/list.rs

//! Concurrent linked list implementation for globally storing all allocated
//! hazard pointers.
//!
//! A thread requesting a hazard pointer first traverses this list and searches
//! for an already allocated one that is not currently in use.
//! If there is none, the list allocates a new one, appends it to the end of the
//! list and returns a reference (`&'static Hazard`) to it.
//! Once allocated, hazard pointers are never de-allocated again during the
//! lifetime of the program (i.e. they have `'static` lifetime).
//! When a thread no longer needs an acquired hazard pointer, it marks it as
//! no longer in use, which allows other threads to acquire it during list
//! traversal instead of having to allocate a new one.
//! Additionally, each thread maintains a small cache of previously acquired
//! hazard pointers, which are specifically reserved for use by that thread.
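//!
//! The following sketch is purely illustrative (`HAZARD_LIST` is a
//! hypothetical global instance and `record` a `NonNull<()>` pointing at some
//! record, neither of which is defined in this module) and shows the two ways
//! in which a hazard pointer is requested from the list:
//!
//! ```ignore
//! // reserve a hazard pointer for the calling thread without protecting
//! // anything yet (stores the `THREAD_RESERVED` sentinel)
//! let reserved: &Hazard = HAZARD_LIST.get_hazard(None);
//!
//! // acquire a hazard pointer that immediately protects `record` from
//! // reclamation (this store requires sequential consistency, see below)
//! let protecting: &Hazard = HAZARD_LIST.get_hazard(Some(record));
//! ```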
//!
//! # Synchronization
//!
//! ```ignore
//! struct Node {
//!     protected: #[repr(align(64))] AtomicPtr<()>,
//!     next:      #[repr(align(64))] AtomicPtr<HazardNode>,
//! }
//! ```
//!
//! Above is an approximate and simplified description of a node in the global
//! linked list of hazard pointers.
//! Both fields of this struct are aligned to the size of a cache-line in order
//! to prevent false sharing.
//! This is desirable, since the `next` field is effectively constant once a
//! node is inserted and is no longer at the tail, while the `protected` field
//! can be frequently written to.
//! All atomic operations on the `next` field can be synchronized using
//! acquire-release semantics, since all threads are required to synchronize
//! through the **same** variable (i.e. the current tail of the list).
//! All stores to the `protected` field that mark a specific pointer as
//! protected from reclamation, however, **must** establish a total order and
//! thus require sequential consistency (HAZ:2 and LIS:3P).
//! Similarly, the loads on that field made during a scan of all active hazard
//! pointers must also be sequentially consistent (GLO:1).
//! Otherwise, a thread scanning the global list of hazard pointers might not
//! see a consistent view of all protected pointers, since stores to the various
//! `protected` fields are all independent writes.
//! Consequently, a thread might go ahead and deallocate a retired record for
//! which a hazard pointer has previously been successfully acquired but the
//! corresponding store has not yet become visible to the reclaiming thread,
//! potentially leading to a critical **use after free** error.
//! All stores that write a sentinel value (e.g. `0x0` for `FREE` and `0x1` for
//! `THREAD_RESERVED`) to a `protected` field, on the other hand, do not require
//! such strict ordering constraints.
//! If such a store is delayed and not yet visible during a thread's scan prior
//! to reclamation, the worst-case outcome is that a record which is actually a
//! valid candidate for reclamation is not reclaimed in that scan.
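//!
//! The sketch below is likewise only illustrative (it uses plain `AtomicPtr`
//! operations and hypothetical names such as `hazard`, `record_ptr` and
//! `global_list`, not the actual items defined in this module) and contrasts
//! the two sides that rely on the total order described above:
//!
//! ```ignore
//! // protecting thread: the store marking `record_ptr` as protected must
//! // participate in the total order (HAZ:2, LIS:3P)
//! hazard.protected.store(record_ptr, Ordering::SeqCst);
//! // ...the thread must then re-check that `record_ptr` is still reachable
//! // before dereferencing it
//!
//! // reclaiming thread: the scan over all hazard pointers must use the same
//! // total order (GLO:1), otherwise a just-protected record could be missed
//! let is_protected = global_list
//!     .iter()
//!     .any(|hazard| hazard.protected.load(Ordering::SeqCst) == record_ptr);
//! ```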

#[cfg(not(any(test, feature = "std")))]
use alloc::boxed::Box;

use core::iter::FusedIterator;
use core::mem;
use core::ptr::NonNull;
use core::sync::atomic::{
    self,
    Ordering::{self, Acquire, Relaxed, Release, SeqCst},
};

use reclaim::align::CacheAligned;
use reclaim::leak::Owned;

use crate::hazard::{Hazard, FREE, THREAD_RESERVED};
use crate::sanitize::{RELEASE_FAIL, RELEASE_SUCCESS};

type Atomic<T> = reclaim::leak::Atomic<T, reclaim::typenum::U0>;
type Shared<'g, T> = reclaim::leak::Shared<'g, T, reclaim::typenum::U0>;

////////////////////////////////////////////////////////////////////////////////////////////////////
// HazardList
////////////////////////////////////////////////////////////////////////////////////////////////////

/// Linked list for storing hazard pointers
#[derive(Debug, Default)]
pub(crate) struct HazardList {
    head: Atomic<HazardNode>,
}

impl HazardList {
    /// Creates a new empty list.
    #[inline]
    pub const fn new() -> Self {
        Self { head: Atomic::null() }
    }

    /// Creates a (fused) iterator for the list.
    #[inline]
    pub fn iter(&self) -> Iter {
        Iter {
            // (LIS:1) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5)
            current: self.head.load_shared(Acquire),
        }
    }

    /// Acquires an already inserted and inactive hazard pointer or allocates a
    /// new one at the tail and returns a reference to it.
    #[inline]
    pub fn get_hazard(&self, protect: Option<NonNull<()>>) -> &Hazard {
        // this should be evaluated at compile-time
        let (ptr, order) = match protect {
            Some(protect) => (protect.as_ptr(), SeqCst),
            None => (THREAD_RESERVED, Release),
        };

        self.get_hazard_for(ptr, order)
    }

    #[inline]
    fn get_hazard_for(&self, ptr: *mut (), order: Ordering) -> &Hazard {
        let mut prev = &self.head;
        // (LIS:2) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5)
        let mut curr = prev.load_shared(Acquire);

        while let Some(node) = curr.map(Shared::into_ref) {
            if node.hazard().protected.load(Relaxed) == FREE {
                // (LIS:3P) this `SeqCst`/`Release` CAS synchronizes-with the `SeqCst` fence (LOC:2)
                // and enforces a total order in case BOTH are `SeqCst`
                if node.hazard().protected.compare_and_swap(FREE, ptr, order) == FREE {
                    return node.hazard();
                }
            }

            prev = node.next();
            // (LIS:4) this `Acquire` load synchronizes-with the `Release` CAS (LIS:5)
            curr = node.next().load_shared(Acquire);
        }

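        // no free hazard pointer was found during the traversal, so a new node
        // is allocated and appended at the current tail (`prev`)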
        self.insert_back(prev, ptr)
    }

    /// Allocates and inserts a new node (hazard pointer) at the tail of the list.
    #[inline]
    fn insert_back(&self, mut tail: &Atomic<HazardNode>, ptr: *mut ()) -> &Hazard {
        let node = unsafe {
            Owned::leak_shared(Owned::new(HazardNode {
                hazard: CacheAligned(Hazard::new(ptr)),
                next: CacheAligned(Atomic::null()),
            }))
        };

        loop {
            // (LIS:5) this `Release` CAS synchronizes-with the `Acquire` loads (LIS:1), (LIS:2),
            // (LIS:4) and the `Acquire` fence (LIS:6)
            match tail.compare_exchange_weak(Shared::none(), node, RELEASE_SUCCESS, RELEASE_FAIL) {
                Ok(_) => return &*Shared::into_ref(node).hazard,
                Err(fail) => {
                    // (LIS:6) this `Acquire` fence synchronizes-with the `Release` CAS (LIS:5)
                    atomic::fence(Acquire);

                    // this is safe because nodes are never retired or reclaimed
                    if let Some(node) = fail.loaded {
                        tail = unsafe { &node.deref_unprotected().next };
                    }
                }
            }
        }
    }
}

impl Drop for HazardList {
    #[inline]
    fn drop(&mut self) {
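        // iteratively take ownership of each node and drop it, freeing the
        // entire list when the `HazardList` itself is dropped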
        let mut curr = self.head.take();
        while let Some(mut owned) = curr {
            curr = owned.next.take();
            mem::drop(owned);
        }
    }
}

////////////////////////////////////////////////////////////////////////////////////////////////////
// Iter
////////////////////////////////////////////////////////////////////////////////////////////////////

/// Iterator for a `HazardList`
#[derive(Debug)]
pub(crate) struct Iter<'a> {
    current: Option<Shared<'a, HazardNode>>,
}

impl<'a> Iterator for Iter<'a> {
    type Item = &'a Hazard;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        self.current.take().map(|node| {
            let node = Shared::into_ref(node);
            self.current = node.next.load_shared(Acquire);
            &*node.hazard
        })
    }
}

impl<'a> FusedIterator for Iter<'a> {}

////////////////////////////////////////////////////////////////////////////////////////////////////
// HazardNode
////////////////////////////////////////////////////////////////////////////////////////////////////

#[derive(Debug)]
struct HazardNode {
    hazard: CacheAligned<Hazard>,
    next: CacheAligned<Atomic<HazardNode>>,
}

impl HazardNode {
    #[inline]
    fn hazard(&self) -> &Hazard {
        &*self.hazard
    }

    #[inline]
    fn next(&self) -> &Atomic<HazardNode> {
        &*self.next
    }
}

#[cfg(test)]
mod tests {
    use std::ptr::NonNull;
    use std::sync::atomic::Ordering;

    use super::HazardList;

    #[test]
    fn insert_one() {
        let ptr = NonNull::new(0xDEAD_BEEF as *mut ()).unwrap();

        let list = HazardList::new();
        let hazard = list.get_hazard(Some(ptr));
        assert_eq!(hazard.protected.load(Ordering::Relaxed), 0xDEAD_BEEF as *mut ());
    }

    #[test]
    fn iter() {
        let ptr = NonNull::new(0xDEAD_BEEF as *mut ()).unwrap();

        let list = HazardList::new();
        let _ = list.get_hazard(Some(ptr));
        let _ = list.get_hazard(Some(ptr));
        let _ = list.get_hazard(Some(ptr));

        assert!(list
            .iter()
            .fuse()
            .all(|hazard| hazard.protected.load(Ordering::Relaxed) == ptr.as_ptr()));
255    }
256}