ccl_crossbeam_epoch/
atomic.rs

use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};

use crossbeam_utils::atomic::AtomicConsume;
use guard::Guard;

/// Given an ordering for the success case of a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
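/// For example, `AcqRel` maps to `Acquire`, `Release` maps to `Relaxed`, and `SeqCst` stays
/// `SeqCst`.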
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    use self::Ordering::*;
    match ord {
        Relaxed | Release => Relaxed,
        Acquire | AcqRel => Acquire,
        _ => SeqCst,
    }
}

/// The error returned on a failed compare-and-set operation.
pub struct CompareAndSetError<'g, T: 'g, P: Pointer<T>> {
    /// The value in the atomic pointer at the time of the failed operation.
    pub current: Shared<'g, T>,

    /// The new value, which the operation failed to store.
    pub new: P,
}

impl<'g, T: 'g, P: Pointer<T> + fmt::Debug> fmt::Debug for CompareAndSetError<'g, T, P> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("CompareAndSetError")
            .field("current", &self.current)
            .field("new", &self.new)
            .finish()
    }
}

/// Memory orderings for compare-and-set operations.
///
/// A compare-and-set operation can have different memory orderings depending on whether it
/// succeeds or fails. This trait generalizes different ways of specifying memory orderings.
///
/// The two ways of specifying orderings for compare-and-set are:
///
/// 1. Just one `Ordering` for the success case. In case of failure, the strongest appropriate
///    ordering is chosen.
/// 2. A pair of `Ordering`s. The first one is for the success case, while the second one is
///    for the failure case.
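///
/// # Examples
///
/// A minimal sketch of both forms, as accepted by `compare_and_set`:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Shared};
/// use std::sync::atomic::Ordering::{AcqRel, Acquire, SeqCst};
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
///
/// // 1. A single ordering for the success case:
/// let curr = a.load(SeqCst, guard);
/// let _ = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
///
/// // 2. A pair of orderings: `(success, failure)`:
/// let curr = a.load(SeqCst, guard);
/// let _ = a.compare_and_set(curr, Shared::null(), (AcqRel, Acquire), guard);
/// ```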
pub trait CompareAndSetOrdering {
    /// The ordering of the operation when it succeeds.
    fn success(&self) -> Ordering;

    /// The ordering of the operation when it fails.
    ///
    /// The failure ordering can't be `Release` or `AcqRel`, and it must be equivalent to or
    /// weaker than the success ordering.
    fn failure(&self) -> Ordering;
}

impl CompareAndSetOrdering for Ordering {
    #[inline]
    fn success(&self) -> Ordering {
        *self
    }

    #[inline]
    fn failure(&self) -> Ordering {
        strongest_failure_ordering(*self)
    }
}

impl CompareAndSetOrdering for (Ordering, Ordering) {
    #[inline]
    fn success(&self) -> Ordering {
        self.0
    }

    #[inline]
    fn failure(&self) -> Ordering {
        self.1
    }
}

/// Panics if the pointer is not properly aligned.
#[inline]
fn ensure_aligned<T>(raw: *const T) {
    assert_eq!(raw as usize & low_bits::<T>(), 0, "unaligned pointer");
}

/// Returns a bitmask containing the unused least significant bits of an aligned pointer to `T`.
#[inline]
fn low_bits<T>() -> usize {
    (1 << mem::align_of::<T>().trailing_zeros()) - 1
}

/// Given a tagged pointer `data`, returns the same pointer, but tagged with `tag`.
///
/// `tag` is truncated to fit into the unused bits of the pointer to `T`.
#[inline]
fn data_with_tag<T>(data: usize, tag: usize) -> usize {
    (data & !low_bits::<T>()) | (tag & low_bits::<T>())
}

/// Decomposes a tagged pointer `data` into the pointer and the tag.
#[inline]
fn decompose_data<T>(data: usize) -> (*mut T, usize) {
    let raw = (data & !low_bits::<T>()) as *mut T;
    let tag = data & low_bits::<T>();
    (raw, tag)
}
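
// For example, if `T` has alignment 8, `low_bits::<T>()` is `0b111`: tagging with
// `data_with_tag::<T>(data, 5)` sets the low bits to `0b101`, and
// `decompose_data::<T>(data)` recovers the untagged pointer together with the tag.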

/// An atomic pointer that can be safely shared between threads.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address. More precisely, a tag should be less than `(1 <<
/// mem::align_of::<T>().trailing_zeros())`.
///
/// Any method that loads the pointer must be passed a reference to a [`Guard`].
///
/// [`Guard`]: struct.Guard.html
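///
/// # Examples
///
/// A minimal sketch of storing and loading a tagged pointer:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::from(Owned::new(0u64).with_tag(1));
/// let guard = &epoch::pin();
/// assert_eq!(a.load(SeqCst, guard).tag(), 1);
/// ```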
pub struct Atomic<T> {
    data: AtomicUsize,
    _marker: PhantomData<*mut T>,
}

unsafe impl<T: Send + Sync> Send for Atomic<T> {}
unsafe impl<T: Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
    #[inline]
    fn from_usize(data: usize) -> Self {
        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(not(feature = "nightly"))]
    #[inline]
    pub fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Returns a new null atomic pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::null();
    /// ```
    #[cfg(feature = "nightly")]
    #[inline]
    pub const fn null() -> Atomic<T> {
        Self {
            data: AtomicUsize::new(0),
            _marker: PhantomData,
        }
    }

    /// Allocates `value` on the heap and returns a new atomic pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::new(1234);
    /// ```
    #[inline]
    pub fn new(value: T) -> Atomic<T> {
        Self::from(Owned::new(value))
    }

    /// Loads a `Shared` from the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// ```
    #[inline]
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load(ord)) }
    }

    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
    ///
    /// This is similar to the "acquire" ordering, except that ordering is guaranteed only with
    /// operations that "depend on" the result of the load. However, consume loads are usually
    /// much faster than acquire loads on architectures with a weak memory model, since they
    /// don't require memory fence instructions.
    ///
    /// The exact definition of "depend on" is a bit vague, but it works as you would expect in
    /// practice, since a lot of software, especially the Linux kernel, relies on this behavior.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load_consume(guard);
    /// ```
    #[inline]
    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.load_consume()) }
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// a.store(Shared::null(), SeqCst);
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    #[inline]
    pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
    /// `Shared`.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    #[inline]
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// The return value is a result indicating whether the new pointer was written. On success the
    /// pointer that was written is returned. On failure the actual current value and `new` are
    /// returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    ///
    /// let guard = &epoch::pin();
    /// let mut curr = a.load(SeqCst, guard);
    /// let res1 = a.compare_and_set(curr, Shared::null(), SeqCst, guard);
    /// let res2 = a.compare_and_set(curr, Owned::new(5678), SeqCst, guard);
    /// ```
    #[inline]
    pub fn compare_and_set<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
    /// value is the same as `current`. The tag is also taken into account, so two pointers to the
    /// same object, but with different tags, will not be considered equal.
    ///
    /// Unlike [`compare_and_set`], this method is allowed to spuriously fail even when comparison
    /// succeeds, which can result in more efficient code on some platforms. The return value is a
    /// result indicating whether the new pointer was written. On success the pointer that was
    /// written is returned. On failure the actual current value and `new` are returned.
    ///
    /// This method takes a [`CompareAndSetOrdering`] argument which describes the memory
    /// ordering of this operation.
    ///
    /// [`compare_and_set`]: struct.Atomic.html#method.compare_and_set
    /// [`CompareAndSetOrdering`]: trait.CompareAndSetOrdering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    ///
    /// let mut new = Owned::new(5678);
    /// let mut ptr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(ptr, new, SeqCst, guard) {
    ///         Ok(p) => {
    ///             ptr = p;
    ///             break;
    ///         }
    ///         Err(err) => {
    ///             ptr = err.current;
    ///             new = err.new;
    ///         }
    ///     }
    /// }
    ///
    /// let mut curr = a.load(SeqCst, guard);
    /// loop {
    ///     match a.compare_and_set_weak(curr, Shared::null(), SeqCst, guard) {
    ///         Ok(_) => break,
    ///         Err(err) => curr = err.current,
    ///     }
    /// }
    /// ```
    #[inline]
    pub fn compare_and_set_weak<'g, O, P>(
        &self,
        current: Shared<T>,
        new: P,
        ord: O,
        _: &'g Guard,
    ) -> Result<Shared<'g, T>, CompareAndSetError<'g, T, P>>
    where
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
        let new = new.into_usize();
        self.data
            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
                    current: Shared::from_usize(current),
                    new: P::from_usize(new),
                }
            })
    }

    /// Bitwise "and" with the current tag.
    ///
    /// Performs a bitwise "and" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(3));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_and(2, SeqCst, guard).tag(), 3);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    #[inline]
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
    ///
    /// Performs a bitwise "or" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_or(2, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    #[inline]
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
    ///
    /// Performs a bitwise "xor" operation on the current tag and the argument `val`, and sets the
    /// new tag to the result. Returns the previous pointer.
    ///
    /// This method takes an [`Ordering`] argument which describes the memory ordering of this
    /// operation.
    ///
    /// [`Ordering`]: https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Shared};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<i32>::from(Shared::null().with_tag(1));
    /// let guard = &epoch::pin();
    /// assert_eq!(a.fetch_xor(3, SeqCst, guard).tag(), 1);
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    #[inline]
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }

    /// Takes ownership of the pointee.
    ///
    /// This consumes the atomic and converts it into [`Owned`]. Because [`Atomic`] doesn't have a
    /// destructor and doesn't drop the pointee, while [`Owned`] does, this is suitable for use in
    /// destructors of data structures.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```rust
    /// # use std::mem;
    /// # use crossbeam_epoch::Atomic;
    /// struct DataStructure {
    ///     ptr: Atomic<usize>,
    /// }
    ///
    /// impl Drop for DataStructure {
    ///     fn drop(&mut self) {
    ///         // By now the DataStructure lives only in our thread and we are sure we don't hold
    ///         // any `Shared` or `&` references to it ourselves.
    ///         unsafe {
    ///             drop(mem::replace(&mut self.ptr, Atomic::null()).into_owned());
    ///         }
    ///     }
    /// }
    /// ```
    #[inline]
    pub unsafe fn into_owned(self) -> Owned<T> {
        Owned::from_usize(self.data.into_inner())
    }
}

impl<T> fmt::Debug for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, tag) = decompose_data::<T>(data);

        f.debug_struct("Atomic")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T> fmt::Pointer for Atomic<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let data = self.data.load(Ordering::SeqCst);
        let (raw, _) = decompose_data::<T>(data);
        fmt::Pointer::fmt(&raw, f)
    }
}

impl<T> Clone for Atomic<T> {
    /// Returns a copy of the atomic value.
    ///
    /// Note that a `Relaxed` load is used here. If you need synchronization, combine this with
    /// other atomics or fences.
    #[inline]
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
        Atomic::from_usize(data)
    }
}

impl<T> Default for Atomic<T> {
    #[inline]
    fn default() -> Self {
        Atomic::null()
    }
}

impl<T> From<Owned<T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `owned`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Owned};
    ///
    /// let a = Atomic::<i32>::from(Owned::new(1234));
    /// ```
    #[inline]
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
        Self::from_usize(data)
    }
}

impl<T> From<Box<T>> for Atomic<T> {
    #[inline]
    fn from(b: Box<T>) -> Self {
        Self::from(Owned::from(b))
    }
}

impl<T> From<T> for Atomic<T> {
    #[inline]
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `ptr`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{Atomic, Shared};
    ///
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    #[inline]
    fn from(ptr: Shared<'g, T>) -> Self {
        Self::from_usize(ptr.data)
    }
}

impl<T> From<*const T> for Atomic<T> {
    /// Returns a new atomic pointer pointing to `raw`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::ptr;
    /// use crossbeam_epoch::Atomic;
    ///
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    #[inline]
    fn from(raw: *const T) -> Self {
        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T> {
    /// Returns the machine representation of the pointer.
    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
    unsafe fn from_usize(data: usize) -> Self;
}
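
// Both `Owned` and `Shared` implement `Pointer`, which is how methods like
// `Atomic::store` and `Atomic::compare_and_set` accept either kind of pointer.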

/// An owned heap-allocated object.
///
/// This type is very similar to `Box<T>`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
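///
/// # Examples
///
/// A minimal sketch: an `Owned` dereferences to its pointee, much like a `Box`:
///
/// ```
/// use crossbeam_epoch::Owned;
///
/// let o = Owned::new(vec![1, 2, 3]);
/// assert_eq!(o.len(), 3);
/// ```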
pub struct Owned<T> {
    data: usize,
    _marker: PhantomData<Box<T>>,
}

impl<T> Pointer<T> for Owned<T> {
    #[inline]
    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
    }

    /// Returns a new pointer pointing to the tagged pointer `data`.
    ///
    /// # Panics
    ///
    /// Panics in debug mode if the data is zero.
    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data,
            _marker: PhantomData,
        }
    }
}

impl<T> Owned<T> {
    /// Allocates `value` on the heap and returns a new owned pointer pointing to it.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(1234);
    /// ```
    #[inline]
    pub fn new(value: T) -> Owned<T> {
        Self::from(Box::new(value))
    }

    /// Returns a new owned pointer pointing to `raw`.
    ///
    /// This function is unsafe because improper use may lead to memory problems. Argument `raw`
    /// must be a valid pointer. Also, a double-free may occur if the function is called twice on
    /// the same raw pointer.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    #[inline]
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        ensure_aligned(raw);
        Self::from_usize(raw as usize)
    }

    /// Converts the owned pointer into a [`Shared`].
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let guard = &epoch::pin();
    /// let p = o.into_shared(guard);
    /// ```
    ///
    /// [`Shared`]: struct.Shared.html
    #[inline]
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Converts the owned pointer into a `Box`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Owned};
    ///
    /// let o = Owned::new(1234);
    /// let b: Box<i32> = o.into_box();
    /// assert_eq!(*b, 1234);
    /// ```
    #[inline]
    pub fn into_box(self) -> Box<T> {
        let (raw, _) = decompose_data::<T>(self.data);
        mem::forget(self);
        unsafe { Box::from_raw(raw) }
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// assert_eq!(Owned::new(1234).tag(), 0);
    /// ```
    #[inline]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
    /// let o = o.with_tag(2);
    /// assert_eq!(o.tag(), 2);
    /// ```
    #[inline]
    pub fn with_tag(self, tag: usize) -> Owned<T> {
        let data = self.into_usize();
        unsafe { Self::from_usize(data_with_tag::<T>(data, tag)) }
    }
}

impl<T> fmt::Debug for Owned<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (raw, tag) = decompose_data::<T>(self.data);

        f.debug_struct("Owned")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<T: Clone> Clone for Owned<T> {
    #[inline]
    fn clone(&self) -> Self {
        Owned::new((**self).clone()).with_tag(self.tag())
    }
}

impl<T> Deref for Owned<T> {
    type Target = T;

    #[inline]
    fn deref(&self) -> &T {
        let (raw, _) = decompose_data::<T>(self.data);
        unsafe { &*raw }
    }
}

impl<T> DerefMut for Owned<T> {
    #[inline]
    fn deref_mut(&mut self) -> &mut T {
        let (raw, _) = decompose_data::<T>(self.data);
        unsafe { &mut *raw }
    }
}

impl<T> From<T> for Owned<T> {
    #[inline]
    fn from(t: T) -> Self {
        Owned::new(t)
    }
}

impl<T> From<Box<T>> for Owned<T> {
    /// Returns a new owned pointer pointing to `b`.
    ///
    /// # Panics
    ///
    /// Panics if the pointer (the `Box`) is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Owned;
    ///
    /// let o = unsafe { Owned::from_raw(Box::into_raw(Box::new(1234))) };
    /// ```
    #[inline]
    fn from(b: Box<T>) -> Self {
        unsafe { Self::from_raw(Box::into_raw(b)) }
    }
}

impl<T> Borrow<T> for Owned<T> {
    #[inline]
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T> BorrowMut<T> for Owned<T> {
    #[inline]
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}

impl<T> AsRef<T> for Owned<T> {
    #[inline]
    fn as_ref(&self) -> &T {
        &**self
    }
}

impl<T> AsMut<T> for Owned<T> {
    #[inline]
    fn as_mut(&mut self) -> &mut T {
        &mut **self
    }
}

/// A pointer to an object protected by the epoch GC.
///
/// The pointer is valid for use only during the lifetime `'g`.
///
/// The pointer must be properly aligned. Since it is aligned, a tag can be stored into the unused
/// least significant bits of the address.
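///
/// # Examples
///
/// A minimal sketch: `Shared` is `Copy`, so it can be duplicated freely within `'g`:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(1234);
/// let guard = &epoch::pin();
/// let p = a.load(SeqCst, guard);
/// let q = p; // copies the pointer; `p` remains usable
/// assert_eq!(p, q);
/// ```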
pub struct Shared<'g, T: 'g> {
    data: usize,
    _marker: PhantomData<(&'g (), *const T)>,
}

impl<'g, T> Clone for Shared<'g, T> {
    #[inline]
    fn clone(&self) -> Self {
        Shared {
            data: self.data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Copy for Shared<'g, T> {}

impl<'g, T> Pointer<T> for Shared<'g, T> {
    #[inline]
    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data,
            _marker: PhantomData,
        }
    }
}

impl<'g, T> Shared<'g, T> {
    /// Returns a new null pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = Shared::<i32>::null();
    /// assert!(p.is_null());
    /// ```
    #[inline]
    pub fn null() -> Shared<'g, T> {
        Shared {
            data: 0,
            _marker: PhantomData,
        }
    }

    /// Returns `true` if the pointer is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::null();
    /// let guard = &epoch::pin();
    /// assert!(a.load(SeqCst, guard).is_null());
    /// a.store(Owned::new(1234), SeqCst);
    /// assert!(!a.load(SeqCst, guard).is_null());
    /// ```
    #[inline]
    pub fn is_null(&self) -> bool {
        self.as_raw().is_null()
    }

    /// Converts the pointer to a raw pointer (without the tag).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let o = Owned::new(1234);
    /// let raw = &*o as *const _;
    /// let a = Atomic::from(o);
    ///
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.as_raw(), raw);
    /// ```
    #[inline]
    pub fn as_raw(&self) -> *const T {
        let (raw, _) = decompose_data::<T>(self.data);
        raw
    }

    /// Dereferences the pointer.
    ///
    /// Returns a reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &1234);
    /// }
    /// ```
    #[inline]
    pub unsafe fn deref(&self) -> &'g T {
        &*self.as_raw()
    }

    /// Dereferences the pointer.
    ///
    /// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
    ///
    /// # Safety
    ///
    /// * There is no guarantee that there are no other threads attempting to read from or
    ///   write to the object at the same time.
    ///
    ///   The caller must ensure that there are no concurrent accesses to the object itself.
    ///
    /// * Other than the above, all safety concerns of `deref()` apply here.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(vec![1, 2, 3, 4]);
    /// let guard = &epoch::pin();
    ///
    /// let mut p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert!(!p.is_null());
    ///     let b = p.deref_mut();
    ///     assert_eq!(b, &vec![1, 2, 3, 4]);
    ///     b.push(5);
    ///     assert_eq!(b, &vec![1, 2, 3, 4, 5]);
    /// }
    ///
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
    /// }
    /// ```
    #[inline]
    pub unsafe fn deref_mut(&mut self) -> &'g mut T {
        &mut *(self.as_raw() as *mut T)
    }

    /// Converts the pointer to a reference.
    ///
    /// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.
    ///
    /// # Safety
    ///
    /// Dereferencing a pointer is unsafe because it could be pointing to invalid memory.
    ///
    /// Another concern is the possibility of data races due to lack of proper synchronization.
    /// For example, consider the following scenario:
    ///
    /// 1. A thread creates a new object: `a.store(Owned::new(10), Relaxed)`
    /// 2. Another thread reads it: `*a.load(Relaxed, guard).as_ref().unwrap()`
    ///
    /// The problem is that relaxed orderings don't synchronize initialization of the object with
    /// the read from the second thread. This is a data race. A possible solution would be to use
    /// `Release` and `Acquire` orderings.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// unsafe {
    ///     assert_eq!(p.as_ref(), Some(&1234));
    /// }
    /// ```
    #[inline]
    pub unsafe fn as_ref(&self) -> Option<&'g T> {
        self.as_raw().as_ref()
    }

    /// Takes ownership of the pointee.
    ///
    /// # Panics
    ///
    /// Panics if this pointer is null, but only in debug mode.
    ///
    /// # Safety
    ///
    /// This method may be called only if the pointer is valid and nobody else is holding a
    /// reference to the same object.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(1234);
    /// unsafe {
    ///     let guard = &epoch::unprotected();
    ///     let p = a.load(SeqCst, guard);
    ///     drop(p.into_owned());
    /// }
    /// ```
    #[inline]
    pub unsafe fn into_owned(self) -> Owned<T> {
        debug_assert!(
            self.as_raw() != ptr::null(),
            "converting a null `Shared` into `Owned`"
        );
        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
    /// assert_eq!(p.tag(), 2);
    /// ```
    #[inline]
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
        tag
    }

    /// Returns the same pointer, but tagged with `tag`. `tag` is truncated to fit into the
    /// unused bits of the pointer to `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    #[inline]
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
        unsafe { Self::from_usize(data_with_tag::<T>(self.data, tag)) }
    }
}

impl<'g, T> From<*const T> for Shared<'g, T> {
    /// Returns a new pointer pointing to `raw`.
    ///
    /// # Panics
    ///
    /// Panics if `raw` is not properly aligned.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::Shared;
    ///
    /// let p = unsafe { Shared::from(Box::into_raw(Box::new(1234)) as *const _) };
    /// assert!(!p.is_null());
    /// ```
    #[inline]
    fn from(raw: *const T) -> Self {
        ensure_aligned(raw);
        unsafe { Self::from_usize(raw as usize) }
    }
}

impl<'g, T> PartialEq<Shared<'g, T>> for Shared<'g, T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.data == other.data
    }
}

impl<'g, T> Eq for Shared<'g, T> {}

impl<'g, T> PartialOrd<Shared<'g, T>> for Shared<'g, T> {
    #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        self.data.partial_cmp(&other.data)
    }
}

impl<'g, T> Ord for Shared<'g, T> {
    #[inline]
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        self.data.cmp(&other.data)
    }
}

impl<'g, T> fmt::Debug for Shared<'g, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let (raw, tag) = decompose_data::<T>(self.data);

        f.debug_struct("Shared")
            .field("raw", &raw)
            .field("tag", &tag)
            .finish()
    }
}

impl<'g, T> fmt::Pointer for Shared<'g, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Pointer::fmt(&self.as_raw(), f)
    }
}

impl<'g, T> Default for Shared<'g, T> {
    #[inline]
    fn default() -> Self {
        Shared::null()
    }
}

#[cfg(test)]
mod tests {
    use super::Shared;

    #[test]
    fn valid_tag_i8() {
        Shared::<i8>::null().with_tag(0);
    }

    #[test]
    fn valid_tag_i64() {
        Shared::<i64>::null().with_tag(7);
    }
}