// scc/linked_list.rs

1use super::ebr::{AtomicShared, Guard, Ptr, Shared, Tag};
2use std::fmt::{self, Debug, Display};
3use std::ops::{Deref, DerefMut};
4use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed};
5
6/// [`LinkedList`] is a type trait implementing a lock-free singly linked list.
/// [`LinkedList`] is a type trait implementing a lock-free singly linked list.
///
/// The state of an entry is encoded in the [`Tag`] of its forward link: [`Tag::None`] means the
/// entry is clear, [`Tag::First`] means it is marked, and [`Tag::Second`] means it has been
/// deleted.
pub trait LinkedList: Sized {
    /// Returns a reference to the forward link.
    ///
    /// The pointer value may be tagged if [`Self::mark`] or [`Self::delete_self`] has been
    /// invoked. The [`AtomicShared`] must only be updated through [`LinkedList`] in order to keep
    /// the linked list consistent.
    fn link_ref(&self) -> &AtomicShared<Self>;

    /// Returns `true` if `self` is reachable and not marked.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::{AtomicShared, Tag};
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let head: L = L::default();
    /// assert!(head.is_clear(Relaxed));
    /// assert!(head.mark(Relaxed));
    /// assert!(!head.is_clear(Relaxed));
    /// assert!(head.delete_self(Relaxed));
    /// assert!(!head.is_clear(Relaxed));
    /// ```
    #[inline]
    fn is_clear(&self, order: Ordering) -> bool {
        // Clear means neither marked (`Tag::First`) nor deleted (`Tag::Second`/`Tag::Both`).
        self.link_ref().tag(order) == Tag::None
    }

    /// Marks `self` with an internal flag to denote that `self` is in a special state.
    ///
    /// Returns `false` if a flag has already been marked on `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::AtomicShared;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let head: L = L::default();
    /// assert!(head.mark(Relaxed));
    /// ```
    #[inline]
    fn mark(&self, order: Ordering) -> bool {
        // Only the `None -> First` transition is allowed: a deleted entry cannot be marked.
        self.link_ref()
            .update_tag_if(Tag::First, |ptr| ptr.tag() == Tag::None, order, Relaxed)
    }

    /// Removes any mark from `self`.
    ///
    /// Returns `false` if no flag has been marked on `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::AtomicShared;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let head: L = L::default();
    /// assert!(!head.unmark(Relaxed));
    /// assert!(head.mark(Relaxed));
    /// assert!(head.unmark(Relaxed));
    /// assert!(!head.is_marked(Relaxed));
    /// ```
    #[inline]
    fn unmark(&self, order: Ordering) -> bool {
        // Only the `First -> None` transition is allowed: deletion (`Second`) is never reverted.
        self.link_ref()
            .update_tag_if(Tag::None, |ptr| ptr.tag() == Tag::First, order, Relaxed)
    }

    /// Returns `true` if `self` has a mark on it.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::AtomicShared;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let head: L = L::default();
    /// assert!(!head.is_marked(Relaxed));
    /// assert!(head.mark(Relaxed));
    /// assert!(head.is_marked(Relaxed));
    /// ```
    #[inline]
    fn is_marked(&self, order: Ordering) -> bool {
        self.link_ref().tag(order) == Tag::First
    }

    /// Deletes `self`.
    ///
    /// Returns `false` if `self` already has `deleted` marked on it.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::{AtomicShared, Guard, Shared};
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let guard = Guard::new();
    ///
    /// let head: L = L::default();
    /// let tail: Shared<L> = Shared::new(L::default());
    /// assert!(head.push_back(tail.clone(), false, Relaxed, &guard).is_ok());
    ///
    /// tail.delete_self(Relaxed);
    /// assert!(head.next_ptr(Relaxed, &guard).as_ref().is_none());
    /// ```
    #[inline]
    fn delete_self(&self, order: Ordering) -> bool {
        // `None`/`First` -> `Second`: deletion replaces any mark and is irreversible
        // through this trait.
        self.link_ref().update_tag_if(
            Tag::Second,
            |ptr| {
                let tag = ptr.tag();
                tag == Tag::None || tag == Tag::First
            },
            order,
            Relaxed,
        )
    }

    /// Returns `true` if `self` has been deleted.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::AtomicShared;
    /// use std::sync::atomic::Ordering::Relaxed;
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let entry: L = L::default();
    /// assert!(!entry.is_deleted(Relaxed));
    /// entry.delete_self(Relaxed);
    /// assert!(entry.is_deleted(Relaxed));
    /// ```
    #[inline]
    fn is_deleted(&self, order: Ordering) -> bool {
        let tag = self.link_ref().tag(order);
        // `Tag::Both` carries the `Second` bit as well, so it also denotes deletion.
        tag == Tag::Second || tag == Tag::Both
    }

    /// Appends the given entry to `self` and returns a pointer to the entry.
    ///
    /// If `mark` is given `true`, it atomically marks an internal flag on `self` when updating
    /// the linked list, otherwise it removes marks.
    ///
    /// # Errors
    ///
    /// Returns the supplied [`Shared`] when it finds `self` deleted.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::{AtomicShared, Guard, Shared};
    /// use std::sync::atomic::Ordering::{Relaxed, Release};
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let guard = Guard::new();
    ///
    /// let head: L = L::default();
    /// assert!(head.push_back(Shared::new(L::default()), true, Release, &guard).is_ok());
    /// assert!(head.is_marked(Relaxed));
    /// assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_ok());
    /// assert!(!head.is_marked(Relaxed));
    ///
    /// head.delete_self(Relaxed);
    /// assert!(!head.is_marked(Relaxed));
    /// assert!(head.push_back(Shared::new(L::default()), false, Release, &guard).is_err());
    /// ```
    #[inline]
    fn push_back<'g>(
        &self,
        mut entry: Shared<Self>,
        mark: bool,
        order: Ordering,
        guard: &'g Guard,
    ) -> Result<Ptr<'g, Self>, Shared<Self>> {
        // The new tag installed on `self`'s link alongside the pointer update.
        let new_tag = if mark { Tag::First } else { Tag::None };
        let mut next_ptr = self.link_ref().load(Relaxed, guard);

        loop {
            let tag = next_ptr.tag();
            // A `Second` (or `Both`) tag means `self` has been deleted: stop retrying.
            if tag == Tag::Second || tag == Tag::Both {
                break;
            }

            // Point the new entry at `self`'s current successor before publishing it.
            entry
                .link_ref()
                .swap((next_ptr.get_shared(), Tag::None), Relaxed);
            match self.link_ref().compare_exchange_weak(
                next_ptr,
                (Some(entry), new_tag),
                order,
                Relaxed,
                guard,
            ) {
                Ok((_, updated)) => {
                    return Ok(updated);
                }
                Err((passed, actual)) => {
                    // The CAS failed, so ownership of `entry` is handed back in `passed`;
                    // SAFETY-adjacent: the passed-in value is always returned on failure,
                    // hence `unwrap_unchecked` cannot observe `None`.
                    entry = unsafe { passed.unwrap_unchecked() };
                    next_ptr = actual;
                }
            }
        }

        // `self` has been deleted: hand the entry back to the caller.
        Err(entry)
    }

    /// Returns the closest next valid entry.
    ///
    /// It unlinks deleted entries until it reaches a valid one.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::{AtomicShared, Guard, Shared};
    /// use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let guard = Guard::new();
    ///
    /// let head: L = L::default();
    /// assert!(
    ///     head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok());
    /// head.mark(Relaxed);
    ///
    /// let next_ptr = head.next_ptr(Acquire, &guard);
    /// assert_eq!(next_ptr.as_ref().unwrap().1, 1);
    /// assert!(head.is_marked(Relaxed));
    /// ```
    #[inline]
    fn next_ptr<'g>(&self, order: Ordering, guard: &'g Guard) -> Ptr<'g, Self> {
        // `64` bounds the depth of nested `next_ptr_recursive` calls.
        next_ptr_recursive(self, order, 64, guard)
    }

    /// Returns a [`Shared`] handle to the closest next valid entry.
    ///
    /// It unlinks deleted entries until it reaches a valid one.
    ///
    /// # Examples
    ///
    /// ```
    /// use scc::LinkedList;
    /// use scc::ebr::{AtomicShared, Guard, Shared};
    /// use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};
    ///
    /// #[derive(Default)]
    /// struct L(AtomicShared<L>, usize);
    /// impl LinkedList for L {
    ///     fn link_ref(&self) -> &AtomicShared<L> {
    ///         &self.0
    ///     }
    /// }
    ///
    /// let guard = Guard::new();
    ///
    /// let head: L = L::default();
    /// assert!(
    ///     head.push_back(Shared::new(L(AtomicShared::null(), 1)), false, Release, &guard).is_ok());
    /// head.mark(Relaxed);
    ///
    /// let next_shared = head.next_shared(Acquire, &guard);
    /// assert_eq!(next_shared.unwrap().1, 1);
    /// assert!(head.is_marked(Relaxed));
    /// ```
    #[inline]
    fn next_shared(&self, order: Ordering, guard: &Guard) -> Option<Shared<Self>> {
        let mut next_ptr = self.next_ptr(order, guard);
        let mut next_entry = next_ptr.get_shared();
        while !next_ptr.is_null() && next_entry.is_none() {
            // The entry was released in the mean time: advance to its successor
            // and retry acquiring a strong reference.
            next_ptr = next_ptr
                .as_ref()
                .map_or_else(Ptr::null, |n| n.next_ptr(Acquire, guard));
            next_entry = next_ptr.get_shared();
        }
        next_entry
    }
}
355
356/// [`Entry`] stores an instance of `T` and a link to the next entry.
/// [`Entry`] stores an instance of `T` and a link to the next entry.
pub struct Entry<T> {
    /// `instance` is always `Some` unless [`Self::take_inner`] is called.
    instance: Option<T>,

    /// `next` points to the next entry in a linked list; its tag encodes the
    /// mark/delete state (see [`LinkedList`]).
    next: AtomicShared<Self>,
}
364
365impl<T> Entry<T> {
366    /// Extracts the inner instance of `T`.
367    ///
368    /// # Safety
369    ///
370    /// This method has to be called at most once per [`Entry`], and the caller needs to make sure
371    /// that the [`Entry`] is not accessed via [`LinkedList`] methods.
372    ///
373    /// # Examples
374    ///
375    /// ```
376    /// use scc::Stack;
377    ///
378    /// let stack: Stack<usize> = Stack::default();
379    ///
380    /// stack.push(37);
381    ///
382    /// let mut entry = stack.pop().unwrap();
383    /// let pushed = unsafe { entry.get_mut().unwrap().take_inner() };
384    /// assert_eq!(pushed, 37);
385    /// ```
386    #[inline]
387    pub unsafe fn take_inner(&mut self) -> T {
388        self.instance.take().unwrap_unchecked()
389    }
390
391    #[inline]
392    pub(super) fn new(val: T) -> Self {
393        Self {
394            instance: Some(val),
395            next: AtomicShared::default(),
396        }
397    }
398
399    /// Returns a reference to `next`.
400    #[inline]
401    pub(super) fn next(&self) -> &AtomicShared<Self> {
402        &self.next
403    }
404}
405
impl<T> AsRef<T> for Entry<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        // SAFETY: `instance` is always `Some` unless `take_inner` has been called,
        // and `take_inner`'s contract forbids any further access to the entry.
        unsafe { self.instance.as_ref().unwrap_unchecked() }
    }
}
412
impl<T> AsMut<T> for Entry<T> {
    #[inline]
    fn as_mut(&mut self) -> &mut T {
        // SAFETY: `instance` is always `Some` unless `take_inner` has been called,
        // and `take_inner`'s contract forbids any further access to the entry.
        unsafe { self.instance.as_mut().unwrap_unchecked() }
    }
}
419
420impl<T: Clone> Clone for Entry<T> {
421    #[inline]
422    fn clone(&self) -> Self {
423        Self {
424            instance: self.instance.clone(),
425            next: AtomicShared::default(),
426        }
427    }
428}
429
430impl<T: Debug> Debug for Entry<T> {
431    #[inline]
432    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
433        f.debug_struct("Entry")
434            .field("instance", &self.instance)
435            .field("next", &self.next)
436            .field("removed", &self.is_deleted(Relaxed))
437            .finish()
438    }
439}
440
impl<T> Deref for Entry<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `instance` is always `Some` unless `take_inner` has been called,
        // and `take_inner`'s contract forbids any further access to the entry.
        unsafe { self.instance.as_ref().unwrap_unchecked() }
    }
}
449
impl<T> DerefMut for Entry<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: `instance` is always `Some` unless `take_inner` has been called,
        // and `take_inner`'s contract forbids any further access to the entry.
        unsafe { self.instance.as_mut().unwrap_unchecked() }
    }
}
456
impl<T> Drop for Entry<T> {
    #[inline]
    fn drop(&mut self) {
        // If this entry still links to a successor, run the unlink scan over the
        // remainder of the chain so that deleted successors get their `Shared`
        // handles released (presumably to prevent long chains of dead entries from
        // being reclaimed one epoch at a time — TODO confirm against
        // `next_ptr_recursive`).
        if !self.next.is_null(Relaxed) {
            let guard = Guard::new();
            if let Some(next_entry) = self.next.load(Relaxed, &guard).as_ref() {
                next_ptr_recursive(next_entry, Relaxed, 64, &guard);
            }
        }
    }
}
468
469impl<T: Display> Display for Entry<T> {
470    #[inline]
471    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
472        if let Some(instance) = self.instance.as_ref() {
473            write!(f, "Some({instance})")
474        } else {
475            write!(f, "None")
476        }
477    }
478}
479
480impl<T: Eq> Eq for Entry<T> {}
481
impl<T> LinkedList for Entry<T> {
    /// The `next` field doubles as the [`LinkedList`] forward link.
    #[inline]
    fn link_ref(&self) -> &AtomicShared<Self> {
        &self.next
    }
}
488
impl<T: PartialEq> PartialEq for Entry<T> {
    /// Compares only the stored instances; the `next` link is ignored.
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.instance == other.instance
    }
}
495
/// Recursively cleans up the linked list starting from the supplied head.
///
/// Scans forward from `head`, skipping entries whose own link is tagged as deleted, and
/// compare-exchanges `head`'s link to point directly at the first valid entry found,
/// releasing the unlinked [`Shared`] reference on success. `head`'s own tag is preserved
/// across the update. `depth` bounds how many nested recursive calls may be made.
fn next_ptr_recursive<'g, T: LinkedList>(
    head: &T,
    order: Ordering,
    mut depth: usize,
    guard: &'g Guard,
) -> Ptr<'g, T> {
    let mut head_next_ptr = head.link_ref().load(order, guard);
    let mut head_tag = head_next_ptr.tag();
    loop {
        // Inner loop: find the closest entry after `head` that is not deleted.
        let mut next_ptr = head_next_ptr;
        let next_valid_ptr = loop {
            if let Some(next_ref) = next_ptr.as_ref() {
                let next_next_ptr = next_ref.link_ref().load(order, guard);
                // NOTE(review): only `Tag::Second` is treated as "deleted" here, whereas
                // `is_deleted` also accepts `Tag::Both` — confirm `Both` cannot appear on
                // a link reachable from a list head.
                if next_next_ptr.tag() != Tag::Second {
                    // `next_ref` is valid: stop the scan here.
                    break next_ptr;
                }
                if depth != 0 {
                    // Recurse so the deleted entry's own link is cleaned up too; the
                    // nested call returns the next valid entry past it.
                    break next_ptr_recursive(next_ref, order, depth - 1, guard);
                }
                // Recursion budget exhausted: follow the link without cleaning up.
                next_ptr = next_next_ptr;
            } else {
                // Reached the end of the list.
                break Ptr::null();
            }
        };

        // Do not allow a recursive call more than once.
        depth = 0;

        // Updates its link if there is an invalid entry between itself and the next valid one.
        // `with_tag(head_tag)` keeps `head`'s own mark/delete state intact in the comparison.
        if next_valid_ptr.with_tag(head_tag) != head_next_ptr {
            let next_valid_entry = next_valid_ptr.get_shared();
            if !next_valid_ptr.is_null() && next_valid_entry.is_none() {
                // The entry was unlinked in the meantime: reload and rescan.
                head_next_ptr = head.link_ref().load(order, guard);
                head_tag = head_next_ptr.tag();
                continue;
            }
            match head.link_ref().compare_exchange(
                head_next_ptr,
                (next_valid_entry, head_tag),
                AcqRel,
                Acquire,
                guard,
            ) {
                Ok((prev, _)) => {
                    // Drop the strong reference to the chain of entries just unlinked.
                    if let Some(removed) = prev {
                        let _: bool = removed.release();
                    }
                }
                Err((_, actual)) => {
                    // Lost the race: retry with the freshly observed link value.
                    head_next_ptr = actual;
                    head_tag = actual.tag();
                    continue;
                }
            }
        }

        return next_valid_ptr;
    }
}