// pid_allocator/allocator.rs

use core::ops::Deref;

use alloc::sync::Arc;
use spin::mutex::SpinMutex;

/// A thread-safe PID allocator that can allocate and recycle PIDs efficiently.
/// It encapsulates its state in an `Arc<SpinMutex<...>>`, so PIDs can be
/// allocated and recycled from multiple threads without data races or
/// consistency issues.
///
/// The `ORDER` generic parameter determines the capacity of the allocator,
/// with the total number of allocatable PIDs being `ORDER * usize::BITS`. This
/// makes the allocator flexible for various use cases, whether you need a few
/// dozen PIDs or thousands.
///
/// # Examples
///
/// Creating a new PID allocator:
///
/// ```
/// use pid_allocator::PidAllocator;
///
/// let allocator = PidAllocator::<8>::new(); // Manages up to 8 * usize::BITS PIDs.
///
/// // Allocating a PID:
/// if let Some(pid) = allocator.allocate() {
///     println!("Successfully allocated PID: {}", *pid);
///     // When `pid` goes out of scope, it is automatically recycled.
/// } else {
///     println!("Failed to allocate a PID. The allocator might be full.");
/// }
///
/// // Checking whether a PID is allocated (using the `contains` method):
/// let pid = allocator.allocate().expect("Failed to allocate PID");
/// assert!(allocator.contains(*pid), "The PID should be marked as allocated.");
///
/// // Recycling a PID by simply dropping it:
/// {
///     let pid = allocator.allocate().expect("Failed to allocate PID");
///     // The PID is in use within this scope.
/// }
/// // The PID was automatically recycled when `pid` went out of scope.
/// ```
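///
/// Sharing the allocator across threads (a sketch; it assumes `std` is
/// available for `std::thread`, while the allocator itself only requires
/// `alloc`):
///
/// ```
/// use std::sync::Arc;
/// use pid_allocator::PidAllocator;
///
/// let allocator = Arc::new(PidAllocator::<8>::new());
/// let handles: Vec<_> = (0..4)
///     .map(|_| {
///         let allocator = Arc::clone(&allocator);
///         std::thread::spawn(move || {
///             let pid = allocator.allocate().expect("allocator should not be full");
///             *pid // the PID is recycled when `pid` is dropped at the end of this closure
///         })
///     })
///     .collect();
/// for handle in handles {
///     handle.join().unwrap();
/// }
/// ```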
///
/// # Note
///
/// This allocator is designed for environments where PID recycling is frequent
/// and thread safety is a concern. Its performance characteristics are optimized
/// for scenarios where both allocation and deallocation (recycling) of PIDs are
/// common operations.
#[derive(Debug, Default)]
pub struct PidAllocator<const ORDER: usize> {
    inner: Arc<SpinMutex<PidAllocatorInner<ORDER>>>,
}

/// The internal state of the PID allocator, containing the layers of available PIDs.
#[derive(Debug)]
pub(crate) struct PidAllocatorInner<const ORDER: usize> {
    /// Summary bitmap: bit `i` is set when `bottom_layers[i]` is completely full.
    top_layer: usize,
    /// One bit per PID: bit `j` of `bottom_layers[i]` tracks PID `i * usize::BITS + j`.
    bottom_layers: [usize; ORDER],
}

impl<const ORDER: usize> PidAllocator<ORDER> {
    /// Creates a new instance of the PID allocator.
    ///
    /// This constructor initializes the PID allocator, setting up its internal
    /// structure to keep track of allocated and available PIDs. The allocator
    /// supports up to `ORDER * usize::BITS` unique PIDs, where `ORDER` is a
    /// compile-time constant defining the number of layers in the allocator.
    ///
    /// # Examples
    ///
    /// ```
    /// use pid_allocator::PidAllocator;
    ///
    /// let allocator = PidAllocator::<8>::new();
    /// ```
    ///
    /// This creates a PID allocator with 8 layers, capable of managing
    /// a total of `8 * usize::BITS` PIDs, which depends on the architecture
    /// (e.g., 256 PIDs for a 32-bit architecture or 512 PIDs for a 64-bit architecture).
    pub fn new() -> Self {
        Self {
            inner: Arc::new(SpinMutex::new(PidAllocatorInner::new())),
        }
    }

    /// Attempts to allocate a new PID. Returns `Some(Pid)` on success, or `None` if all PIDs are currently allocated.
    /// The allocated PID is wrapped in a `Pid` handle, which automatically recycles the PID when dropped.
    ///
    /// This method locks the internal state, searches for a free PID, marks it as used,
    /// and returns a `Pid` instance representing the allocated PID. If no PIDs are available,
    /// it returns `None`.
    ///
    /// # Returns
    ///
    /// * `Some(Pid<ORDER>)` containing the allocated PID if allocation is successful.
    /// * `None` if all PIDs are already allocated.
    ///
    /// # Examples
    ///
    /// Successful allocation:
    ///
    /// ```
    /// use pid_allocator::PidAllocator;
    ///
    /// let allocator = PidAllocator::<8>::new();
    /// if let Some(pid) = allocator.allocate() {
    ///     println!("Allocated PID: {}", *pid);
    /// }
    /// ```
    ///
    /// Handling failure to allocate a PID:
    ///
    /// ```
    /// use pid_allocator::PidAllocator;
    ///
    /// let allocator = PidAllocator::<8>::new();
    /// let mut pids = Vec::new();
    /// while let Some(pid) = allocator.allocate() {
    ///     pids.push(pid);
    /// }
    /// // At this point, no more PIDs can be allocated.
    /// assert!(allocator.allocate().is_none());
    /// ```
    ///
    /// Here, PIDs are allocated until none remain, at which point `allocate()` returns `None`.
    pub fn allocate(&self) -> Option<Pid<ORDER>> {
        let mut inner = self.inner.lock();
        inner.allocate().map(|number| Pid {
            number,
            allocator: self.inner.clone(),
        })
    }

    /// Checks whether a given PID is currently allocated.
    ///
    /// # Parameters
    ///
    /// * `number`: The PID number to check for allocation.
    ///
    /// # Returns
    ///
    /// * `true` if the PID is currently allocated.
    /// * `false` otherwise.
    ///
    /// # Example
    ///
    /// ```
    /// use pid_allocator::PidAllocator;
    ///
    /// let allocator = PidAllocator::<32>::new();
    /// let pid = allocator.allocate().expect("Failed to allocate PID");
    ///
    /// assert!(allocator.contains(*pid), "The PID should be marked as allocated.");
    /// ```
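    ///
    /// Dropping the `Pid` handle recycles its number; afterwards `contains`
    /// reports the PID as free again (a small round trip built from the
    /// drop-based recycling described above):
    ///
    /// ```
    /// use pid_allocator::PidAllocator;
    ///
    /// let allocator = PidAllocator::<32>::new();
    /// let number = {
    ///     let pid = allocator.allocate().expect("Failed to allocate PID");
    ///     *pid
    /// }; // `pid` is dropped here, recycling the PID
    /// assert!(!allocator.contains(number));
    /// ```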
    ///
    /// # Note
    ///
    /// This method performs a read-only query on the allocator's state and is thread-safe,
    /// thanks to the internal `Arc<SpinMutex<...>>`. However, in concurrent environments the
    /// result may be stale by the time the caller acts on it: another thread can allocate or
    /// recycle PIDs immediately after this method returns.
    pub fn contains(&self, number: usize) -> bool {
        self.inner.lock().contains(number)
    }
}

impl<const ORDER: usize> PidAllocatorInner<ORDER> {
    pub(crate) fn new() -> Self {
        Self {
            top_layer: 0,
            bottom_layers: [0; ORDER],
        }
    }

    /// Allocates a PID from the internal state. This method should only be called with exclusive access to the state.
    pub(crate) fn allocate(&mut self) -> Option<usize> {
        for (index, &layer) in self.bottom_layers.iter().enumerate() {
            if layer != usize::MAX {
                // The lowest clear bit is the smallest free PID in this layer.
                let free_bit = (!layer).trailing_zeros() as usize;
                self.bottom_layers[index] |= 1 << free_bit;
                // Record newly full layers in the top-layer summary bitmap.
                if self.bottom_layers[index] == usize::MAX {
                    self.top_layer |= 1 << index;
                }
                return Some(index * (usize::BITS as usize) + free_bit);
            }
        }
        None
    }

    /// Recycles the given PID, making it available for allocation again.
    pub(crate) fn recycle(&mut self, number: usize) {
        const BITS_PER_LAYER_SHIFT: usize = usize::BITS.trailing_zeros() as usize;

        let layer_index = number >> BITS_PER_LAYER_SHIFT;
        let bit_index = number & ((1 << BITS_PER_LAYER_SHIFT) - 1);

        // Clear the PID's bit and mark its layer as no longer full.
        self.bottom_layers[layer_index] &= !(1 << bit_index);
        self.top_layer &= !(1 << layer_index);
    }

    /// Checks whether a given PID is currently allocated.
    pub(crate) fn contains(&self, number: usize) -> bool {
        const BITS_PER_LAYER_SHIFT: usize = usize::BITS.trailing_zeros() as usize;
        let layer_index = number >> BITS_PER_LAYER_SHIFT;
        let bit_index = number & ((1 << BITS_PER_LAYER_SHIFT) - 1);

        // Out-of-range PIDs are never allocated.
        if layer_index < self.bottom_layers.len() {
            (self.bottom_layers[layer_index] & (1 << bit_index)) != 0
        } else {
            false
        }
    }
}

impl<const ORDER: usize> Default for PidAllocatorInner<ORDER> {
    fn default() -> Self {
        Self::new()
    }
}

/// A handle to an allocated PID. When dropped, the PID is automatically recycled back into the allocator.
#[derive(Debug)]
pub struct Pid<const ORDER: usize> {
    number: usize,
    /// Keeps the shared allocator state alive so the PID can be recycled on drop.
    allocator: Arc<SpinMutex<PidAllocatorInner<ORDER>>>,
}

impl<const ORDER: usize> Deref for Pid<ORDER> {
    type Target = usize;

    fn deref(&self) -> &Self::Target {
        &self.number
    }
}

impl<const ORDER: usize> Drop for Pid<ORDER> {
    fn drop(&mut self) {
        // Return the PID to the allocator when the handle goes away.
        self.allocator.lock().recycle(self.number);
    }
}
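
// A minimal sanity-test sketch exercising allocate, contains, and drop-based
// recycling through the public API. This module is an illustrative addition:
// it assumes `extern crate alloc` at the crate root (already implied by the
// `alloc::sync::Arc` import above) and a `std`-linked test harness, e.g. via
// `#![cfg_attr(not(test), no_std)]`.
#[cfg(test)]
mod tests {
    use alloc::vec::Vec;

    use super::PidAllocator;

    #[test]
    fn allocate_until_full_then_recycle() {
        let allocator = PidAllocator::<1>::new();

        // With `ORDER = 1` the capacity is exactly `usize::BITS` PIDs.
        let pids: Vec<_> = (0..usize::BITS as usize)
            .map(|_| allocator.allocate().expect("allocator should not be full yet"))
            .collect();
        assert!(allocator.allocate().is_none());

        // Dropping the handles recycles every PID...
        drop(pids);
        assert!(!allocator.contains(0));

        // ...and the smallest free PID is handed out again.
        let pid = allocator.allocate().expect("recycled PIDs should be reusable");
        assert_eq!(*pid, 0);
    }
}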