// intid_allocator/unique/atomic.rs

1use crate::IdExhaustedError;
2#[allow(unused_imports)] // used by docs
3use crate::{IntegerId, UniqueIdAllocator};
4use core::marker::PhantomData;
5use core::sync::atomic::Ordering;
6use intid::{uint, IntegerIdCounter};
7
/// Allocates unique integer ids atomically,
/// in a way safe to use from multiple threads.
///
/// # Thread Safety
/// This type makes the following guarantees about allocations:
/// - Absent a call to [`Self::reset`], each call to [`Self::alloc`] returns a new value.
///   Consequently, if only a single allocator is used, all the ids will be unique.
/// - All available ids will be used before an [`IdExhaustedError`] is returned.
///   Equivalently, allocation will never skip over ids (although it may appear to from the perspective of a single thread).
/// - Once an [`IdExhaustedError`] is returned, all future allocations will fail unless [`Self::reset`] is called.
///   This is similar to the guarantees of [`Iterator::fuse`].
///
/// This type only makes guarantees about atomicity, not about synchronization with other operations.
/// In other words, without a [`core::sync::atomic::fence`],
/// there are no guarantees about the relative-ordering between this counter and other memory locations.
/// It is not meant to be used as a synchronization primitive; It is only meant to allocate unique ids.
///
/// An incorrect implementation of [`IntegerId`] or [`IntegerIdCounter`] can break some or all of these guarantees,
/// but will not be able to trigger undefined behavior.
#[derive(Debug)]
pub struct UniqueIdAllocatorAtomic<T: IntegerIdCounter> {
    // This could be improved by adding a T: bytemuck::NoUninit bound to IntegerIdCounter
    // It would allow us to avoid potentially costly conversions T <-> T::Int
    // and avoid the need for a separate with_start_const function
    //
    // The downside is it would add bytemuck as a required dependency,
    // and require more work in the intid-derive (would we derive nouninit or would bytemuck?)
    // As another alternative, we could switch to crossbeam-utils
    //
    // The next id to hand out, stored in the raw integer domain (`T::Int`)
    // rather than as `T` itself, so it can live inside `atomic::Atomic`.
    next_id: atomic::Atomic<T::Int>,
    // Ties the generic parameter `T` to the type without storing a `T`.
    marker: PhantomData<T>,
}
39impl<T: IntegerIdCounter> Default for UniqueIdAllocatorAtomic<T> {
40    fn default() -> Self {
41        Self::new()
42    }
43}
impl<T: IntegerIdCounter> UniqueIdAllocatorAtomic<T> {
    /// Create a new allocator,
    /// using [`T::START`] as the first id (usually zero).
    ///
    /// [`T::START`]: IntegerIdCounter::START
    #[inline]
    pub const fn new() -> Self {
        UniqueIdAllocatorAtomic {
            // Uses the associated integer constant directly, so no
            // `T -> T::Int` conversion call is needed in this `const fn`.
            next_id: atomic::Atomic::new(T::START_INT),
            marker: PhantomData,
        }
    }

    /// Create a new allocator,
    /// using the specified value as the first id.
    ///
    /// Use [`Self::with_start_const`] if you need a constant function.
    #[inline]
    pub fn with_start(start: T) -> Self {
        UniqueIdAllocatorAtomic {
            next_id: atomic::Atomic::new(start.to_int()),
            marker: PhantomData,
        }
    }

    /// Create a new allocator,
    /// using the specified value as the first id.
    ///
    /// In order to be usable from a `const` function,
    /// this requires that `T` implement the [`bytemuck::NoUninit`] trait
    /// and have the same size and representation as [`T::Int`](intid::IntegerId::Int).
    /// If that does not happen, this method will fail to compile with a const panic.
    ///
    /// ## Safety
    /// This function cannot cause undefined behavior.
    #[track_caller]
    pub const fn with_start_const(start: T) -> Self
    where
        T: bytemuck::NoUninit,
    {
        // `must_cast` rejects mismatched size/representation at compile time
        // (const panic), so this can never produce an invalid `T::Int`.
        let start = bytemuck::must_cast::<T, T::Int>(start);
        UniqueIdAllocatorAtomic {
            next_id: atomic::Atomic::new(start),
            marker: PhantomData,
        }
    }

    /// Estimate the maximum currently used id,
    /// or `None` if no ids have been allocated yet.
    ///
    /// Unlike [`UniqueIdAllocator::max_used_id`], this is only an approximation.
    /// This is because other threads may be concurrently allocating a new id.
    //
    // NOTE(review): after exhaustion, `next_id` may hold an integer with no
    // corresponding `T` value; `from_int_checked` then fails and this returns
    // `None` even though ids were allocated — confirm that is intended.
    #[inline]
    pub fn approx_max_used_id(&self) -> Option<T> {
        // `next_id` is the *next* id to hand out, so the max used id is one
        // less. Relaxed ordering suffices: this is a racy snapshot by design.
        IntegerIdCounter::checked_sub(
            T::from_int_checked(self.next_id.load(Ordering::Relaxed))?,
            uint::one(),
        )
    }

    /// Attempt to allocate a new id, returning an error if exhausted.
    ///
    /// This operation is guaranteed to be atomic,
    /// and will never reuse ids unless [`Self::reset`] is called.
    /// However, it should not be used as a tool for synchronization.
    /// See type-level docs for more details.
    ///
    /// # Errors
    /// Once the number of allocated ids exceeds the range of the underlying
    /// [`IntegerIdCounter`], then this function will return an error.
    /// This function will never skip over valid ids,
    /// so the error can only occur if all ids have been used.
    #[inline]
    pub fn try_alloc(&self) -> Result<T, IdExhaustedError<T>> {
        // Effectively this is "fused" because T: IntegerIdCounter => T: IntegerIdContiguous,
        // so once addition overflows all future calls will error
        //
        // See the comment in the Self::reset call for a way to potentially eliminate the CAS loop.
        //
        // Safe to use relaxed ordering because we only guarantee atomicity, not synchronization
        self.next_id
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |x| {
                // `None` makes the CAS loop bail out, leaving `next_id` unchanged.
                uint::checked_add(x, uint::one())
            })
            // On success `fetch_update` yields the *previous* value:
            // that previous value is the id being allocated here.
            .ok()
            .and_then(T::from_int_checked)
            .ok_or_else(IdExhaustedError::new)
    }

    /// Attempt to allocate a new id, panicking if exhausted.
    ///
    /// This operation is guaranteed to be atomic,
    /// and will never reuse ids unless [`Self::reset`] is called.
    /// However, it should not be used as a tool for synchronization.
    /// See type-level docs for more details.
    ///
    /// # Panics
    /// Panics if ids are exhausted, when [`Self::try_alloc`] would have returned an error.
    #[inline]
    #[must_use]
    pub fn alloc(&self) -> T {
        match self.try_alloc() {
            Ok(x) => x,
            // Delegate the panic (message + #[track_caller] location) to the error type.
            Err(e) => e.panic(),
        }
    }

    /// Reset the allocator to a pristine state,
    /// beginning allocations all over again.
    ///
    /// This is equivalent to running `*allocator = UniqueIdAllocatorAtomic::new()`,
    /// but is done atomically and does not require a `&mut Self` reference.
    ///
    /// This may cause unexpected behavior if ids are expected to be monotonically increasing,
    /// or if the new ids conflict with ones still in use.
    /// To avoid this, keep the id allocator private.
    ///
    /// There is no counterpart [`UniqueIdAllocator::set_next_id`],
    /// because the ability to force the counter to jump forwards
    /// could prevent future optimizations.
    #[inline]
    pub fn reset(&self) {
        /*
         * I said this might prevent future optimizations.
         * What I am referring to is the potential to convert the CAS loop
         * into a fetch_add similar to how Arc::clone does.
         * Based on the assumption there are fewer than isize::MAX threads,
         * Arc::clone only has to worry about overflow if the counter exceeds that value.
         *
         * This seems like a micro-optimization but it could become important at some point.
         */
        self.next_id.store(T::START.to_int(), Ordering::Relaxed);
    }
}