// intid_allocator/unique/atomic.rs

use crate::IdExhaustedError;
#[allow(unused_imports)] // used by docs
use crate::UniqueIdAllocator;
use core::marker::PhantomData;
use core::sync::atomic::Ordering;
use intid::{uint, IntegerIdCounter};

/// Allocates unique integer ids across multiple threads.
///
/// This is an [`UniqueIdAllocator`] that uses atomic instructions,
/// and so is safe to share across threads.
#[derive(Debug)]
pub struct UniqueIdAllocatorAtomic<T: IntegerIdCounter> {
    // This could be improved by adding a T: bytemuck::NoUninit bound to IntegerIdCounter
    // It would allow us to avoid potentially costly conversions T <-> T::Int
    // and avoid the need for a separate with_start_const function
    //
    // The downside is it would add bytemuck as a required dependency,
    // and require more work in the intid-derive (would we derive nouninit or would bytemuck?)
    // As another alternative, we could switch to crossbeam-utils
    //
    // Raw integer value of the next id to hand out; stored as T::Int
    // (not T) so it can live in an `atomic::Atomic`.
    next_id: atomic::Atomic<T::Int>,
    // Zero-sized marker tying this allocator to the id type `T`,
    // since only `T::Int` is stored directly.
    marker: PhantomData<T>,
}
24impl<T: IntegerIdCounter> Default for UniqueIdAllocatorAtomic<T> {
25    fn default() -> Self {
26        Self::new()
27    }
28}
29impl<T: IntegerIdCounter> UniqueIdAllocatorAtomic<T> {
30    /// Create a new allocator,
31    /// using [`T::START`] as the first id (usually zero).
32    ///
33    /// [`T::START`]: IntegerIdCounter::START
34    #[inline]
35    pub const fn new() -> Self {
36        UniqueIdAllocatorAtomic {
37            next_id: atomic::Atomic::new(T::START_INT),
38            marker: PhantomData,
39        }
40    }
41
42    /// Create a new allocator,
43    /// using the specified value as the first id.
44    ///
45    /// Use [`Self::with_start_const`] if you need a constant function.
46    #[inline]
47    pub fn with_start(start: T) -> Self {
48        UniqueIdAllocatorAtomic {
49            next_id: atomic::Atomic::new(start.to_int()),
50            marker: PhantomData,
51        }
52    }
53
54    /// Create a new allocator,
55    /// using the specified value as the first id.
56    ///
57    /// In order to be usable from a `const` function,
58    /// this requires that `T` implement the [`bytemuck::NoUninit`] trait
59    /// and have the same size and representation as [`T::Int`](intid::IntegerId::Int).
60    /// If that does not happen, this method will fail to compile with a const panic.
61    ///
62    /// ## Safety
63    /// This function cannot cause undefined behavior.
64    #[track_caller]
65    pub const fn with_start_const(start: T) -> Self
66    where
67        T: bytemuck::NoUninit,
68    {
69        let start = bytemuck::must_cast::<T, T::Int>(start);
70        UniqueIdAllocatorAtomic {
71            next_id: atomic::Atomic::new(start),
72            marker: PhantomData,
73        }
74    }
75
76    /// Estimate the maximum currently used id,
77    /// or `None` if no ids have been allocated yet.
78    ///
79    /// Unlike [`UniqueIdAllocator::max_used_id`]
80    /// this is only an approximation.
81    /// This is because other threads may be concurrently allocating a new id,
82    /// and the load uses a [relaxed](core::sync::atomic::Ordering) ordering.
83    /// In the current implementation, this should always be an under-estimate,
84    /// since the counter only goes upwards.
85    /// However, this should not be relied upon.
86    #[inline]
87    pub fn approx_max_used_id(&self) -> Option<T> {
88        IntegerIdCounter::checked_sub(
89            T::from_int_checked(self.next_id.load(Ordering::Relaxed))?,
90            uint::one(),
91        )
92    }
93
94    /// Attempt to allocate a new id,
95    /// returning an error if exhausted.
96    #[inline]
97    pub fn try_alloc(&self) -> Result<T, IdExhaustedError<T>> {
98        // Effectively this is "fused" because T: IntegerIdCounter => T: IntegerIdContiguous,
99        // so once addition overflows all future calls will error
100        self.next_id
101            .fetch_update(Ordering::AcqRel, Ordering::Relaxed, |x| {
102                uint::checked_add(x, uint::one())
103            })
104            .ok()
105            .and_then(T::from_int_checked)
106            .ok_or_else(IdExhaustedError::new)
107    }
108
109    /// Attempt to allocate a new id,
110    /// panicking if exhausted.
111    #[inline]
112    #[must_use]
113    pub fn alloc(&self) -> T {
114        match self.try_alloc() {
115            Ok(x) => x,
116            Err(e) => e.panic(),
117        }
118    }
119}