ordered_pool_allocator/
lib.rs

use core::slice;
use std::{
    cmp::Ordering,
    marker::PhantomData,
    mem::{self, MaybeUninit},
    ops::{Index, IndexMut},
    ptr::{self, NonNull},
};

/// An allocator for `T` objects whose block indices are stored as `N`-byte little-endian integers.
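///
/// # Examples
///
/// A minimal usage sketch (this doc example assumes the crate is built as
/// `ordered_pool_allocator`):
///
/// ```
/// use ordered_pool_allocator::OrderedPoolAllocator;
///
/// // Back the pool with a plain byte buffer; block indices take one byte each here.
/// let mut buf = [0u8; 128];
/// let mut pool = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);
///
/// unsafe {
///     // Safety: the block is initialized below, before the allocator is dropped.
///     let ptr = pool.allocate().expect("pool has free capacity");
///     ptr.as_ptr().write(42);
///
///     assert_eq!(1, pool.len());
///     assert_eq!(42, pool[0].assume_init());
/// }
/// ```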
pub struct OrderedPoolAllocator<'buf, T, const N: usize = { mem::size_of::<usize>() }> {
    physical_blocks_ptr: *mut MaybeUninit<T>,
    physical_block_indices_ptr: *mut [u8; N],
    virtual_block_indices_ptr: *mut [u8; N],
    blocks_len: usize,
    deallocated_blocks_len: usize,
    blocks_capacity: usize,
    _marker: PhantomData<&'buf MaybeUninit<T>>,
}

impl<'buf, T, const N: usize> OrderedPoolAllocator<'buf, T, N> {
    /// Creates an allocator that carves its blocks and index tables out of `buf`.
    ///
    /// The capacity is the largest block count that both fits in `buf` and is addressable
    /// with `N`-byte indices. Note that `buf` is assumed to be suitably aligned for `T`.
    pub fn new_in(buf: &'buf mut [u8]) -> Self {
        // Largest block index representable in `N` bytes, computed without overflowing
        // when `N >= mem::size_of::<usize>()`.
        let max_addressable_blocks = if N >= mem::size_of::<usize>() {
            usize::MAX
        } else {
            (1usize << (8 * N)) - 1
        };

        // Each block needs storage for the value itself plus one entry in each of the
        // two index tables.
        let blocks_capacity = max_addressable_blocks
            .min(buf.len() / (mem::size_of::<MaybeUninit<T>>() + 2 * mem::size_of::<[u8; N]>()));

        unsafe {
            Self {
                physical_blocks_ptr: buf.as_mut_ptr().cast(),
                physical_block_indices_ptr: buf
                    .as_mut_ptr()
                    .add(blocks_capacity * mem::size_of::<MaybeUninit<T>>())
                    .cast(),
                virtual_block_indices_ptr: buf
                    .as_mut_ptr()
                    .add(
                        blocks_capacity
                            * (mem::size_of::<MaybeUninit<T>>() + mem::size_of::<[u8; N]>()),
                    )
                    .cast(),
                blocks_len: 0,
                deallocated_blocks_len: 0,
                blocks_capacity,
                _marker: PhantomData,
            }
        }
    }

    /// Allocates a block and returns a pointer to the block.
    ///
    /// This method will always prioritize reallocating an existing deallocated block over allocating a new block.
    ///
    /// # Safety
    ///
    /// Behavior is undefined if the returned pointer points to an uninitialized instance of `T` when the allocator is
    /// dropped.
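    ///
    /// # Examples
    ///
    /// A minimal sketch of block reuse (doc example assumes this crate is named
    /// `ordered_pool_allocator`):
    ///
    /// ```
    /// use ordered_pool_allocator::OrderedPoolAllocator;
    ///
    /// let mut buf = [0u8; 64];
    /// let mut pool = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);
    ///
    /// unsafe {
    ///     let first = pool.allocate().unwrap();
    ///     first.as_ptr().write(1);
    ///
    ///     pool.deallocate(first);
    ///
    ///     // The freed block is handed out again before any untouched block.
    ///     let reused = pool.allocate().unwrap();
    ///     assert_eq!(first, reused);
    ///     reused.as_ptr().write(2);
    /// }
    /// ```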
    pub unsafe fn allocate(&mut self) -> Result<NonNull<T>, AllocError> {
        let physical_block_index = match self.deallocated_blocks_len {
            0 if self.is_full() => return Err(AllocError),
            0 => self.blocks_len,
            _ => {
                // Pop the most recently freed physical block off the free stack kept at
                // the end of the physical index table.
                let last_deallocated_physical_block_index = Self::bytes_to_usize(unsafe {
                    self.physical_block_indices_ptr
                        .add(self.blocks_capacity - self.deallocated_blocks_len)
                });

                self.deallocated_blocks_len -= 1;
                last_deallocated_physical_block_index
            }
        };

        let virtual_block_index = self.blocks_len;
        self.blocks_len += 1;

        unsafe {
            // Record the mapping in both directions: virtual -> physical and
            // physical -> virtual.
            self.physical_block_indices_ptr
                .add(virtual_block_index)
                .write(Self::usize_to_bytes(physical_block_index));

            self.virtual_block_indices_ptr
                .add(physical_block_index)
                .write(Self::usize_to_bytes(virtual_block_index));

            Ok(NonNull::new_unchecked(
                self.physical_blocks_ptr.add(physical_block_index).cast(),
            ))
        }
    }

    /// Deallocates the block at the specified pointer.
    ///
    /// Side effect: the virtual block index of the deallocated block is swapped with that of the last allocated block.
    ///
    /// # Safety
    ///
    /// Behavior is undefined if any of the following conditions are violated:
    ///
    /// * `ptr` must point to an instance of `T` allocated by this allocator.
    /// * `ptr` must not point to an instance of `T` that has already been dropped or deallocated by this allocator.
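    ///
    /// # Examples
    ///
    /// A sketch of the index-swapping behavior (doc example assumes this crate is named
    /// `ordered_pool_allocator`):
    ///
    /// ```
    /// use core::ptr::NonNull;
    /// use ordered_pool_allocator::OrderedPoolAllocator;
    ///
    /// let mut buf = [0u8; 64];
    /// let mut pool = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);
    ///
    /// unsafe {
    ///     for value in [10, 20, 30] {
    ///         pool.allocate().unwrap().as_ptr().write(value);
    ///     }
    ///
    ///     // Free the first block; the last allocated block takes over its virtual slot.
    ///     let ptr = NonNull::new_unchecked(pool[0].as_mut_ptr());
    ///     pool.deallocate(ptr);
    ///
    ///     assert_eq!(2, pool.len());
    ///     assert_eq!(30, pool[0].assume_init());
    ///     assert_eq!(20, pool[1].assume_init());
    /// }
    /// ```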
    pub unsafe fn deallocate(&mut self, ptr: NonNull<T>) {
        if self.is_empty() {
            return;
        }

        let physical_block_index =
            ptr.as_ptr().offset_from(self.physical_blocks_ptr.cast()) as usize;

        let virtual_block_index =
            Self::bytes_to_usize(self.virtual_block_indices_ptr.add(physical_block_index));

        let last_allocated_virtual_block_index = self.blocks_len - 1;

        unsafe {
            let last_allocated_physical_block_index = Self::bytes_to_usize(
                self.physical_block_indices_ptr
                    .add(last_allocated_virtual_block_index),
            );

            // Move the last allocated block into the vacated virtual slot and update the
            // reverse (physical -> virtual) mapping to match.
            ptr::swap(
                self.physical_block_indices_ptr.add(virtual_block_index),
                self.physical_block_indices_ptr
                    .add(last_allocated_virtual_block_index),
            );

            self.virtual_block_indices_ptr
                .add(last_allocated_physical_block_index)
                .write(Self::usize_to_bytes(virtual_block_index));
        }

        // Push the freed physical block onto the free stack at the end of the physical
        // index table.
        let deallocated_virtual_block_index =
            self.blocks_capacity - self.deallocated_blocks_len - 1;

        unsafe {
            self.physical_block_indices_ptr
                .add(deallocated_virtual_block_index)
                .write(Self::usize_to_bytes(physical_block_index));
        }

        self.blocks_len -= 1;
        self.deallocated_blocks_len += 1;

        ptr.as_ptr().drop_in_place();
    }

    pub const fn len(&self) -> usize {
        self.blocks_len
    }

    pub const fn capacity(&self) -> usize {
        self.blocks_capacity
    }

    pub const fn is_empty(&self) -> bool {
        self.blocks_len == 0
    }

    pub const fn is_full(&self) -> bool {
        self.blocks_len == self.blocks_capacity
    }

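    /// Sorts the allocated blocks by the given comparator, updating the virtual block
    /// indices so that indices `0..self.len()` visit the blocks in sorted order.
    ///
    /// Only the index tables are permuted; the blocks themselves are not moved.
    ///
    /// A short sketch (doc example assumes this crate is named `ordered_pool_allocator`):
    ///
    /// ```
    /// use ordered_pool_allocator::OrderedPoolAllocator;
    ///
    /// let mut buf = [0u8; 64];
    /// let mut pool = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);
    ///
    /// unsafe {
    ///     for value in [2, 3, 1] {
    ///         pool.allocate().unwrap().as_ptr().write(value);
    ///     }
    ///
    ///     pool.sort_unstable_by(|a, b| a.assume_init_ref().cmp(b.assume_init_ref()));
    ///
    ///     for (i, value) in [1, 2, 3].into_iter().enumerate() {
    ///         assert_eq!(value, pool[i].assume_init());
    ///     }
    /// }
    /// ```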
    pub fn sort_unstable_by<F>(&mut self, mut compare: F)
    where
        F: FnMut(&MaybeUninit<T>, &MaybeUninit<T>) -> Ordering,
    {
        let blocks_len = self.blocks_len;
        let physical_blocks_ptr = self.physical_blocks_ptr;

        unsafe {
            slice::from_raw_parts_mut(self.physical_block_indices_ptr, self.blocks_capacity)
                [..blocks_len]
                .sort_unstable_by(|a, b| {
                    compare(
                        &*physical_blocks_ptr.add(Self::bytes_to_usize(a.as_ptr().cast())),
                        &*physical_blocks_ptr.add(Self::bytes_to_usize(b.as_ptr().cast())),
                    )
                });

            // The sort permuted the virtual -> physical table; rebuild the reverse
            // (physical -> virtual) table to match.
            for virtual_block_index in 0..blocks_len {
                let physical_block_index =
                    Self::bytes_to_usize(self.physical_block_indices_ptr.add(virtual_block_index));

                self.virtual_block_indices_ptr
                    .add(physical_block_index)
                    .write(Self::usize_to_bytes(virtual_block_index))
            }
        }
    }

    /// Decodes an `N`-byte little-endian block index into a `usize`.
    fn bytes_to_usize(block_index: *const [u8; N]) -> usize {
        if N == mem::size_of::<usize>() {
            unsafe { return usize::from_le_bytes(*block_index.cast()) }
        }

        let mut buf = [0u8; mem::size_of::<usize>()];

        unsafe {
            ptr::copy_nonoverlapping(
                block_index.cast(),
                buf.as_mut_ptr(),
                N.min(mem::size_of::<usize>()),
            )
        }

        usize::from_le_bytes(buf)
    }

    /// Encodes a block index as `N` little-endian bytes, truncating any high bytes that
    /// do not fit.
    fn usize_to_bytes(block_index: usize) -> [u8; N] {
        let bytes = block_index.to_le_bytes();
        let mut buf = [0u8; N];

        unsafe {
            ptr::copy_nonoverlapping(
                bytes.as_ptr(),
                buf.as_mut_ptr(),
                N.min(mem::size_of::<usize>()),
            )
        }

        buf
    }
}

/// Indexes blocks by their virtual block index; the returned block may be uninitialized.
impl<T, const N: usize> Index<usize> for OrderedPoolAllocator<'_, T, N> {
    type Output = MaybeUninit<T>;

    fn index(&self, index: usize) -> &Self::Output {
        unsafe {
            let physical_block_index =
                Self::bytes_to_usize(self.physical_block_indices_ptr.add(index));

            &*self.physical_blocks_ptr.add(physical_block_index)
        }
    }
}

impl<T, const N: usize> IndexMut<usize> for OrderedPoolAllocator<'_, T, N> {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        unsafe {
            let physical_block_index =
                Self::bytes_to_usize(self.physical_block_indices_ptr.add(index));

            &mut *self.physical_blocks_ptr.add(physical_block_index)
        }
    }
}

impl<T, const N: usize> Drop for OrderedPoolAllocator<'_, T, N> {
    fn drop(&mut self) {
        // Drop every block that is still allocated, in virtual order.
        for i in 0..self.blocks_len {
            unsafe {
                let physical_block_index =
                    Self::bytes_to_usize(self.physical_block_indices_ptr.add(i));

                self.physical_blocks_ptr
                    .add(physical_block_index)
                    .cast::<T>()
                    .drop_in_place()
            }
        }
    }
}

/// The error returned by [`OrderedPoolAllocator::allocate`] when the pool is full.
#[derive(Debug)]
pub struct AllocError;

#[cfg(test)]
mod tests {
    use rand::{rngs::ThreadRng, Rng};

    use super::*;

    /// A value type whose `Drop` impl zeroes its contents, so tests can observe drops.
    #[repr(transparent)]
    struct SecureU32(u32);

    impl Drop for SecureU32 {
        fn drop(&mut self) {
            self.0 = 0;
        }
    }

    #[test]
    fn allocate_and_deallocate() {
        unsafe {
            let mut buf = [0u8; 128];
            let mut sut = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);

            assert_eq!(0, sut.len());
            assert_eq!(21, sut.capacity());
            assert!(sut.is_empty());
            assert!(!sut.is_full());

            for (i, value) in [2, 3, 1].into_iter().enumerate() {
                sut.allocate().unwrap_unchecked().as_ptr().write(value);
                assert_eq!(value, sut[i].assume_init())
            }

            assert_eq!(3, sut.len());
            assert!(!sut.is_empty());

            let ptr = NonNull::new_unchecked(sut[0].as_mut_ptr());
            sut.deallocate(ptr);
            assert_eq!(2, sut.len());

            for (i, value) in [1, 3].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }

            sut.allocate().unwrap_unchecked().as_ptr().write(2);
            assert_eq!(3, sut.len());

            for (i, value) in [1, 3, 2].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }
        }
    }

    #[test]
    fn allocate_and_deallocate_at_scale() {
        unsafe fn allocate(sut: &mut OrderedPoolAllocator<SecureU32, 1>, value: u32) {
            let ptr = sut.allocate().unwrap_unchecked();
            assert_eq!(0, ptr.as_ref().0);
            ptr.as_ptr().write(SecureU32(value))
        }

        unsafe fn deallocate(sut: &mut OrderedPoolAllocator<SecureU32, 1>, rng: &mut ThreadRng) {
            let i = rng.random_range(0..sut.len());
            let ptr = NonNull::new_unchecked(sut[i].as_mut_ptr());
            sut.deallocate(ptr);
            assert_eq!(0, ptr.as_ref().0)
        }

        unsafe {
            let mut buf = [0u8; 128];
            let mut sut = OrderedPoolAllocator::<SecureU32, 1>::new_in(&mut buf);
            let mut rng = rand::rng();

            for _ in 0..20_000_000 {
                if sut.is_empty() {
                    allocate(&mut sut, u32::MAX)
                } else if sut.is_full() {
                    deallocate(&mut sut, &mut rng)
                } else if rng.random_bool(0.5) {
                    allocate(&mut sut, u32::MAX)
                } else {
                    deallocate(&mut sut, &mut rng)
                }
            }
        }
    }

    #[test]
    fn sort_unstable_by() {
        unsafe {
            let mut buf = [0u8; 128];
            let mut sut = OrderedPoolAllocator::<u32, 1>::new_in(&mut buf);

            for value in [2, 3, 1] {
                sut.allocate().unwrap_unchecked().as_ptr().write(value)
            }

            sut.sort_unstable_by(|a, b| a.assume_init_ref().cmp(b.assume_init_ref()));

            for (i, value) in [1, 2, 3].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }

            let ptr = NonNull::new_unchecked(sut[0].as_mut_ptr());
            sut.deallocate(ptr);

            for (i, value) in [3, 2].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }

            sut.allocate().unwrap_unchecked().as_ptr().write(4);

            for (i, value) in [3, 2, 4].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }

            sut.sort_unstable_by(|a, b| a.assume_init_ref().cmp(b.assume_init_ref()));

            for (i, value) in [2, 3, 4].into_iter().enumerate() {
                assert_eq!(value, sut[i].assume_init())
            }
        }
    }

    #[test]
    fn drop() {
        unsafe {
            let mut buf = [0u8; 128];
            let mut sut = OrderedPoolAllocator::<SecureU32, 1>::new_in(&mut buf);

            let ptr = sut.allocate().unwrap_unchecked();
            ptr.as_ptr().write(SecureU32(u32::MAX));

            mem::drop(sut);

            assert_eq!(0, ptr.as_ref().0)
        }
    }
}